query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: sequence (length 19 to 20)
metadata: dict
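Each row below pairs a natural-language query with one positive code document, a list of hard-negative documents, and a metadata dict describing the training objective. As a rough sketch only (the on-disk layout is not shown here, so the JSON Lines format and field access below are assumptions), rows with this schema could be iterated like so:

import json
from typing import Any, Dict, Iterator

def iter_rows(path: str) -> Iterator[Dict[str, Any]]:
    # Yield one record per JSON line; each record carries the four fields
    # described above: query, document, negatives, metadata.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            # 'negatives' holds roughly 19-20 hard-negative documents per row
            assert isinstance(row["negatives"], list)
            yield row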
Click the ``Confirm`` button and cancel the dialog.
def cancel(self):
    with self.handle_alert(confirm=False):
        self.q(css='button#confirm').first.click()
[ "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def confirm_cancel(self, button):\n if not self.confirm_window_open:\n raise Exception('confirmation window is not open')\n self.confirm_buttons.find_buttons()\n self.confirm_buttons.click(button)\n sleep(1)\n self.confirm_window_open = False", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def confirm_dialog_box():\n alert = world.browser.switch_to.alert\n alert.accept()", "def choose_cancel_on_next_confirmation(self):\n self._selenium.choose_cancel_on_next_confirmation()", "def cancel(self):\n self._ensure(running = True)\n self._ensure_button('Cancel')\n self._ensure_button_enabled('Cancel')\n self.buttons.click('cancel')\n sleep(1)\n self.confirm_window_open = True\n self.confirm_cancel('yes')\n self.page = self.cancelled_page\n self.installing = False\n self.buttons_valid = False", "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def runAskYesNoCancelDialog(self,c,title,\n message=None,yesMessage=\"Yes\",noMessage=\"No\",defaultButton=\"Yes\"):\n d = leoSwingDialog.swingAskYesNoCancel(\n c,title,message,yesMessage,noMessage,defaultButton)\n return d.run(modal=True)", "def click_cancel(self):\n self.click_element(self.cancel_button_selector)", "def on_cancel_button_click(self, obj):\n\t\t\n\t\tself.exitw_show()", "def exitConfirm():\n\n confirm = showDialogBox('Exit the game now?', 'question', 'yesno', 'no')\n if confirm == 'yes':\n raise SystemExit", "def Confirm(self):\r\n \r\n global references\r\n self.from_ed = self.ed_result.get(\"1.0\",'end-1c')\r\n references.append(self.from_ed)\r\n self.confirm_b.configure(state = 'disabled')\r\n self.discard_b.configure(state = 'disabled')\r\n self.finalresult.configure(state = 'normal')\r\n self.finalresult.delete('1.0', END)\r\n \r\n self.final()", "def __onConfirmNo(self):\n self.__confDlg.reject()", "def __window_confirm(self, text):\n return True", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n return self.confirmation_dialog.copy_confirmed", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def buttonBox_accepted(self):\n # just close the dialog\n self.ok = True\n self.close()", "def runAskOkCancelStringDialog(self,c,title,message):\n d = 
leoSwingDialog.swingAskOkCancelString(c,title,message)\n return d.run(modal=True)", "def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count the number of div.test elements.
def num_divs(self): return len(self.q(css='div.test').results)
[ "def count_tests(self):\n return sum(1 for test in test_list_repr(self.context[0].testlist) if test.get('test_type') == 'test')", "def test_number_of_testcase_elements(self):\n testcases = self.root.findall('testcase')\n self.assertEqual(len(testcases), 4)", "def test_count(self):\n count = 0\n for network in self.config['network']:\n count += len(network['test'])\n return count", "def getItemCount(self):\r\n if self._testSetDocument:\r\n hierarchies = self._testSetDocument.findall('//hierarchy')\r\n testCases = self._testSetDocument.findall('//testcase')\r\n \r\n return len(hierarchies) + len(testCases)\r\n else:\r\n return 0", "def test_count(self):\n return len(self.tests) + sum(suite.test_count for suite in self.suites)", "def num_tests(self):\n return self.test_results.num_tests", "def test_element_count(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.element_count(2,\"F\"),6)", "def b_count_test(self):\n \t \n\tsel = self.selenium\n test = \"Test B - Count Articles, Titles, Headings, Etc.\"\n print test\n \n headers = sel.get_css_count(\"css=\" + CSS[1])\n images = sel.get_css_count(\"css=\" + CSS[2])\n authors = sel.get_css_count(\"css=\" + CSS[3])\n\tdots = sel.get_css_count(\"css=\" + CSS[7]) + sel.get_css_count(\"css=\" + CSS[6])\t\n \n if ((images < 8) or (dots < 8) or (authors < 8) or (headers < 8)):\n print \"Missing articles!\"\n L.log(BROWSERS[x], test, \"FAIL, MISSING CONTENT\", \"Images: \" + str(images) + \" Dots: \" + str(dots) + \" Authors: \" + str(authors) + \" Headers: \" + str(headers)) \n \n\telse:\n\t L.log(BROWSERS[x], test, \"PASS, OK\", \"None\")\n\t \n\t######################################################################## ", "def countTestCases(self):\n None", "def numberTests(self):\n for i, test in enumerate(self._tests):\n test.number = i + 1\n test.info.cs_test_num = test.number", "def num_tests(self):\n return len(self.test_results)", "def assertCountSeleniumElements(self, selector, count, root_element=None):\n from selenium.webdriver.common.by import By\n\n root_element = root_element or self.selenium\n self.assertEqual(\n len(root_element.find_elements(By.CSS_SELECTOR, selector)), count\n )", "def test_count(self):\n self._test_count_func(count)", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def test_number_of_products():\r\n soup = get_test_data()\r\n\r\n results = BeautifulSoup.find(soup, \"div\", class_=\"sorthitscontainer\").text.strip()\r\n # extract numbers from string\r\n i = 0\r\n count = \"\"\r\n while results[i] != \" \":\r\n count += results[i]\r\n i += 1\r\n # convert to integer\r\n number_of_products = int(count)\r\n\r\n assert number_of_products == 199", "def test_count_elements(self):\n from pykml.util import count_elements\n\n test_datafile = path.join(\n path.dirname(__file__),\n 'testfiles',\n 'google_kml_developers_guide/complete_tour_example.kml'\n )\n with open(test_datafile) as f:\n doc = parse(f, schema=Schema('kml22gx.xsd'))\n summary = count_elements(doc)\n\n self.assertTrue('http://www.opengis.net/kml/2.2' in summary)\n self.assertEqual(4,\n summary['http://www.opengis.net/kml/2.2']['Placemark']\n )\n self.assertTrue('http://www.google.com/kml/ext/2.2' in summary)\n self.assertEqual(5,\n summary['http://www.google.com/kml/ext/2.2']['FlyTo']\n )\n self.assertEqual(2,\n summary['http://www.google.com/kml/ext/2.2']['Wait']\n )", "def elements_count(self, xpath):\r\n count = 0\r\n elements_number = 1\r\n while True:\r\n try:\r\n self.driver.find_element_by_xpath(xpath % 
elements_number)\r\n count += 1\r\n elements_number += 1\r\n except NoSuchElementException:\r\n break\r\n return count", "def testArticleCount(self):\n\n self.articleCount(17)", "def test_component_count(self, parser_instance):\n # grep -c '<component ' test_simple_odes.cellml\n assert len(parser_instance.components) == 21" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of text for each div.test element.
def div_text_list(self): return self.q(css='div.test').text
[ "def div_html_list(self):\n return self.q(css='div.test').html", "def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()", "def list():\n tests = getTestList()\n print \"Available tests:\"\n for test in tests:\n print test", "def div_value_list(self):\n return self.q(css='div.test').attrs('value')", "def list_tests(cls):\n print(_(\"List of available tests...:\\n\"))\n print(\"{:<50}{}\\n\".format(_(\"[Test Name]\"),\n _(\"[Description]\")))\n testdict = {name: clss.__doc__ for name, clss in cls.get_tests()}\n for test in sorted(testdict):\n if testdict[test] is None:\n raise Exception(\n _(\"No test description provided\"\n \" as doc string for the test: %s\") % test)\n else:\n test_description = testdict[test].split(\".\")[0]\n print(\"{test:<50}{desc}\\r\".format(\n test=test, desc=test_description))\n print(\"\\n\")", "def get_text(self) -> List[str]:\n return self.__texts", "def return_textview_elements(self):\n return self.driver.find_elements_by_class_name('android.widget.TextView')", "def get_texts(self) -> List[str]:\n return self.texts", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def list_tests(self, selected_suite_name=None):\r\n test_list = self.get_tests_for_suite(selected_suite_name)\r\n for test_method_name in (\r\n self.get_test_method_name(test)\r\n for test in test_list\r\n ):\r\n print(test_method_name)\r\n\r\n return test_list", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def get_text_data_child_list(self):\n return self.media_list + self.attribute_list", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' 
+ test_file[:-3])\t\t\t\n\treturn ret", "def _generateDisplayedText(self, obj, **args ):\n result = self._generateSubstring(obj, **args)\n if result:\n return result\n\n displayedText = self._script.utilities.displayedText(obj)\n if not displayedText:\n return []\n\n return [displayedText]", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def list_feature_tests(self):\n\t\treturn self.test_names", "def get_all_text(self) :\n return Patent.get_tree_text(self._root)", "def get_div_paragraphs(text_divs, namespace=NAMESPACE):\n div_pars = []\n for div in text_divs:\n div_pars.extend(div.findall('tei:p', namespace))\n return div_pars", "def tests_list(search_text) -> list:\n tests = []\n find_and_import_modules([])\n for _class in unittest.TestCase.__subclasses__():\n if _class.__module__.startswith('test.'):\n for test_method in unittest.defaultTestLoader.loadTestsFromTestCase(_class):\n test_method = test_method.__str__()[:test_method.__str__().find('(')]\n test_method = f\"{_class.__module__}.{_class.__name__}.{test_method.strip()}\"\n if '*' == search_text or search_text.lower() in test_method.lower():\n print(test_method)\n tests.append(test_method)\n return tests" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of values for each div.test element.
def div_value_list(self): return self.q(css='div.test').attrs('value')
[ "def div_text_list(self):\n return self.q(css='div.test').text", "def div_html_list(self):\n return self.q(css='div.test').html", "def get_elements(self):\n\t\treturn self._testing_cache", "def list():\n tests = getTestList()\n print \"Available tests:\"\n for test in tests:\n print test", "def get_test(self):\n return self.id_test, self.x_test", "def _build_tests_list_helper(self, suite):\n tests = list(iterate_tests(suite))\n return tests", "def get_test_values(*args: Variable) -> Union[Any, List[Any]]:\n\n if config.compute_test_value == \"off\":\n return []\n\n rval = []\n\n for i, arg in enumerate(args):\n try:\n rval.append(get_test_value(arg))\n except TestValueError:\n if hasattr(arg, \"name\") and arg.name is not None:\n missing_test_message(f\"Argument {i} ('{arg.name}') has no test value\")\n else:\n missing_test_message(f\"Argument {i} has no test value\")\n return []\n\n if len(rval) == 1:\n return rval\n\n return [tuple(rval)]", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt", "def _test_to_list(self, test_name):\n parts = test_name.split(\".\")\n result = []\n for part in parts:\n result += _parse_parametrized(part)\n return result", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def get_test_cases(self):\n\n return self._test_cases", "def getTestSet(self):\r\n return self.fTestData", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def Get_Test_Containers():\n\tlis = []\n\t\n\tlis.append(Container(0, 0.01, 0.01, 0.0025, 100, 293, 0))#Normal\n\tlis.append(Container(1, 0.01, 0.02, 0.0025, 75, 293*1.25, 0))#Nearly full and quite hot\n\tlis.append(Container(2, 0.03, 0.01, 0.0025, 10, 293, 0))#Nearly empty\n\tlis.append(Container(3, 0.02, 0.02, 0.0025, 1000, 293, 0))#Overfull\n\tlis.append(Container(0, 0.5*(2**0.5), 1, 0.0025, 10, 293, 3*(10**-9)))#Huge container with pump\n\t\n\treturn lis", "def selectTestCasesByResult(self, result):\r\n testCaseNodes = []\r\n\r\n # define search string\r\n if result == 'Tested':\r\n searchString = '//testresult[@result!=\"%s\"]' % NO_RUN\r\n elif result == 'Not passed':\r\n searchString = '//testresult[@result!=\"%s\" and @result!=\"%s\"]' % (NO_RUN,PASSED)\r\n else:\r\n searchString = '//testresult[@result = \"%s\"]' % result\r\n\r\n for testResultNode in self._testSetDocument.findall(searchString):\r\n testCaseNodes.append(TestCaseNode(testResultNode.parent(), self))\r\n\r\n return testCaseNodes", "def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n 
category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults", "def list_feature_tests(self):\n\t\treturn self.test_names", "def test_data(self):\n return self._test_data.series" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of html for each div.test element.
def div_html_list(self): return self.q(css='div.test').html
[ "def div_text_list(self):\n return self.q(css='div.test').text", "def div_value_list(self):\n return self.q(css='div.test').attrs('value')", "def list():\n tests = getTestList()\n print \"Available tests:\"\n for test in tests:\n print test", "def _build_tests_list_helper(self, suite):\n tests = list(iterate_tests(suite))\n return tests", "def test_gettesttools_html(self):\n pass", "def test_html(self):\n tags = (\n ('<form', 1),\n # CSRF, Name, Start_date, End_date and Location\n ('<input', 5),\n ('type=\"text\"', 2),\n ('type=\"date\"', 2),\n ('type=\"submit\"', 1),\n )\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.response, text, count)", "def list_tests(cls):\n print(_(\"List of available tests...:\\n\"))\n print(\"{:<50}{}\\n\".format(_(\"[Test Name]\"),\n _(\"[Description]\")))\n testdict = {name: clss.__doc__ for name, clss in cls.get_tests()}\n for test in sorted(testdict):\n if testdict[test] is None:\n raise Exception(\n _(\"No test description provided\"\n \" as doc string for the test: %s\") % test)\n else:\n test_description = testdict[test].split(\".\")[0]\n print(\"{test:<50}{desc}\\r\".format(\n test=test, desc=test_description))\n print(\"\\n\")", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def html(self):\n failure = \"\"\n skipped = \"\"\n stdout = tag.text(self.stdout)\n stderr = tag.text(self.stderr)\n\n# if self.skipped:\n# skipped = \"\"\"\n# <hr size=\"1\"/>\n# <div class=\"skipped\"><b>Skipped: {msg}</b><br/>\n# <pre>{skip}</pre>\n# </div>\n# \"\"\".format(msg=tag.text(self.skipped_msg),\n# skip=tag.text(self.skipped))\n\n if self.failed():\n failure = \"\"\"\n <hr size=\"1\"/>\n <div class=\"failure\"><b>Failed: {msg}</b><br/>\n <pre>{fail}</pre>\n </div>\n \"\"\".format(msg=tag.text(self.failure_msg),\n fail=tag.text(self.failure))\n\n properties = [x.html() for x in self.properties]\n\n def render_stdoe():\n if self.stderr or self.stdout:\n return \"\"\"\n <div class=\"stdout\"><i>Stdout</i><br/>\n <pre>{stdout}</pre></div>\n <hr size=\"1\"/>\n <div class=\"stderr\"><i>Stderr</i><br/>\n <pre>{stderr}</pre></div>\n \"\"\".format(stderr=stderr, stdout=stdout)\n return \"\"\n\n return \"\"\"\n <a name=\"{anchor}\">\n <div class=\"testcase\">\n <div class=\"details\">\n <span class=\"testname\"><b>{testname}</b></span><br/>\n <span class=\"testclassname\">{testclassname}</span><br/>\n <span class=\"duration\">Time Taken: {duration}s</span>\n </div>\n {failure}\n <hr size=\"1\"/>\n {properties}\n {stdoe}\n </div>\n </a>\n \"\"\".format(anchor=self.anchor(),\n testname=self.name,\n testclassname=self.testclass.name,\n duration=self.duration,\n failure=failure,\n properties=\"\".join(properties),\n stdoe=render_stdoe())", "def test_html(self):\n test_case = ('<form', 1), ('<input', 6), ('type=\"text\"', 3), ('type=\"email\"', 1), ('type=\"submit\"', 1)\n for content, occurrences in test_case:\n self.assertContains(self.response, content, occurrences)", "def test_html(self):\n tags = (\n ('<form', 1),\n # Csrf, first_name, last_name, email, superuser, username and password\n ('<input', 7),\n ('type=\"text\"', 3),\n ('type=\"password\"', 1),\n 
('type=\"checkbox\"', 1),\n ('type=\"email\"', 1),\n ('type=\"submit\"', 1),\n )\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.response, text, count)", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt", "def test_listfield(self):\n self.assertEqual(self.scraped.urls, ['http://google.com', 'http://apple.com'])\n self.assertEqual(self.scraped.in_divs, ['Nested'])", "def parse(response):\n\n soup = bs.BeautifulSoup(response.text, 'lxml')\n title = str(soup.title.string)\n\n if title.find('Problem') == -1:\n raise Exception('Problem could not be found')\n\n test_cases_html = soup.find('div', class_='sample-test')\n\n # stores all input test cases.\n input_cases = get_sample_cases(test_cases_html, 'input')\n\n # stores all corresponding output test cases.\n output_cases = get_sample_cases(test_cases_html, 'output')\n\n # for a json with input and output test cases.\n test_cases = {}\n test_cases['input'] = input_cases\n test_cases['output'] = output_cases\n\n return test_cases", "def num_divs(self):\n return len(self.q(css='div.test').results)", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret", "def generate_tests(cls):\n cases_pat = join(dirname(__file__), cls.cases_dir, \"*.html\")\n for html_path in glob(cases_pat):\n # Load an options (`*.opts` file, if any).\n # It must be a Python dictionary. It will be passed as\n # kwargs to the markdown function.\n opts = {}\n opts_path = splitext(html_path)[0] + \".opts\"\n if exists(opts_path):\n try:\n opts = eval(open(opts_path, 'r').read())\n except Exception:\n _, ex, _ = sys.exc_info()\n print(\"WARNING: couldn't load `%s' opts file: %s\" \\\n % (opts_path, ex))\n\n test_func = lambda self, t=html_path, o=opts: \\\n self._assertSimpleHtmlPath(t, opts=o)\n\n tags_path = splitext(html_path)[0] + \".tags\"\n if exists(tags_path):\n tags = []\n for line in open(tags_path):\n if '#' in line: # allow comments in .tags files\n line = line[:line.index('#')]\n tags += line.split()\n test_func.tags = tags\n\n name = splitext(basename(html_path))[0]\n name = name.replace(' - ', '_')\n name = name.replace(' ', '_')\n name = re.sub(\"[(),]\", \"\", name)\n test_name = \"test_%s\" % name\n setattr(cls, test_name, test_func)", "def list_tests(self, selected_suite_name=None):\r\n test_list = self.get_tests_for_suite(selected_suite_name)\r\n for test_method_name in (\r\n self.get_test_method_name(test)\r\n for test in test_list\r\n ):\r\n print(test_method_name)\r\n\r\n return test_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of the ids of outer divs with the specified text in a child element.
def ids_of_outer_divs_with_inner_text(self, child_text): return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id')
[ "def find_elements_by_text(self, text):\n # XPATH have to be string in Python 2 and unicode in Python 3.\n text = force_text(text)\n if not six.PY3:\n text = text.encode('utf8')\n elms = self.find_elements_by_xpath(\n './/*/text()[contains(., \"{}\") and not(ancestor-or-self::*[@data-selenium-not-search])]/..'.format(text)\n )\n return elms", "def findChildren(widget=None, name=\"\", text=\"\"):\n\t\t# TODO: figure out why the native QWidget.findChildren method\n\t\t# does not seem to work from PythonQt\n\t\tif not widget:\n\t\t\twidget = mainWindow()\n\t\tchildren = []\n\t\tparents = [widget]\n\t\twhile parents != []:\n\t\t\tp = parents.pop()\n\t\t\tparents += p.children()\n\t\t\tif name and p.name.find(name) >= 0:\n\t\t\t\tchildren.append(p)\n\t\t\telif text:\n\t\t\t\ttry:\n\t\t\t\t\tp.text\n\t\t\t\t\tif p.text.find(text) >= 0:\n\t\t\t\t\t\tchildren.append(p)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\treturn children", "def __get_ids(self, course_page, search_str):\n res_ids = []\n\n # determine course content start and end positions\n content_start_at = course_page.find(\"<div id=\\\"content\\\"\") # current version of moodle\n if content_start_at == -1:\n content_start_at = course_page.find(\"<div id=\\\"page-content\\\"\") # moodle archives use this id!\n if content_start_at == -1:\n return [] # return empty list, no content was found\n\n next_div_end = course_page.find(\"</div\", content_start_at)\n inner_div_start = course_page.find(\"<div\", content_start_at + 1, next_div_end)\n while inner_div_start != -1:\n next_div_end = course_page.find(\"</div\", next_div_end + 1)\n inner_div_start = course_page.find(\"<div\", inner_div_start + 1, next_div_end)\n content_end_at = next_div_end\n\n found_res_at = course_page.find(search_str)\n while found_res_at != -1:\n found_res_end_at = course_page.find('\"', found_res_at + len(search_str))\n\n id = course_page[found_res_at + len(search_str):found_res_end_at]\n\n #check if id position within course content\n if found_res_at > content_start_at and found_res_at < content_end_at:\n res_ids.append(int(id))\n elif found_res_at > content_end_at:\n break\n\n found_res_at = course_page.find(search_str, found_res_at + 1)\n\n return res_ids", "def findThreadIds(s):", "def _find_ids(args_dict):\n ids = []\n try:\n ids.append(args_dict['id'])\n except KeyError:\n pass\n\n for key, value in args_dict.iteritems():\n if key == 'elements':\n for element in value:\n ids += _find_ids(element)\n return ids", "def _find_with_text(self, selector, text):\n stripped = text.strip()\n elements = self.selenium.find_elements_by_css_selector(selector)\n return [e for e in elements if e.text.strip() == stripped]", "def get_ontology_ids(text):\n\tpattern = r\"[A-Z]+:[0-9]{7}\"\n\tresults = re.findall(pattern, text)\n\treturn(results)", "def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item", "def _ascendants(self, _id):\n x = _id\n result = []\n while x is not None:\n result.append(x)\n x = self._parent[x]\n return result", "def find_by_text(soup, text, tag):\n elements = soup.find_all(tag)\n matches = []\n for element in elements:\n for txt in text:\n if element.find(text=like(txt)):\n matches.append(element)\n break\n return matches", "def activeChildWellIds(self):\n lst=[]\n if self.isReplicateGroup():\n for tc in self.activeChildWells():\n lst.extend(tc.activeChildWellIds())\n else:\n if self.wellids is not None and self.wellids[0] is not None:\n wellid=self.wellids[0]\n else:\n 
wellid=str(self.childWellIndices()[0])\n lst.append(wellid)\n return lst", "def div_text_list(self):\n return self.q(css='div.test').text", "def find_indexes(self, word: str, text: str):\n temp = re.match(r\"\\[([0-9\\-]{0,}):([0-9\\-]{0,})\\]\", word)\n if temp:\n start = int(temp.group(1)) if temp.group(1) != \"\" else 0\n end = int(temp.group(2)) if temp.group(2) != \"\" else len(text)\n start = len(text) + start if start < 0 else start\n end = len(text) + end if end < 0 else end\n return [(start, end)]\n indexes = []\n index = text.find(word)\n while index != -1:\n indexes.append((index, index + len(word)))\n index = text.find(word, index + len(word))\n return indexes", "def grab_next_unit_ids(text) -> list:\n unit_regex = data_groups['unit_id_group']\n\n return fetch_group_and_splitlines(unit_regex, text)", "def _get_contained_text_tags(xmlschema, namespaces, elem, text_tags):\n text_list = []\n for child in elem:\n child_type = _remove_xsd_namespace(child.tag, namespaces)\n\n if child_type == 'element':\n if child.attrib['name'] in text_tags:\n text_list.append(child.attrib['name'])\n elif child_type in ['sequence', 'all', 'choice']:\n new_tags = _get_contained_text_tags(xmlschema, namespaces, child, text_tags)\n for tag in new_tags:\n text_list.append(new_tags.original_case[tag])\n\n text_set = CaseInsensitiveFrozenSet(text_list)\n assert len(set(text_list)) == len(text_set), f'Lost Information: {text_list}'\n\n return text_set", "def map_text_to_id(self, text: str) -> List[int]:\n return self.map_token_to_id(self.map_text_to_token(text))", "def extract_data_listing(html):\n id_finder = re.compile(r'PR[\\d]+~')\n return html.find_all('div', id=id_finder)", "def get_coverages_ids(eoobj):\n def _get_children_ids(eoobj):\n \"\"\" recursive dataset series lookup \"\"\"\n qset = (\n eoobj.cast().eo_objects\n .filter(real_content_type=eoobj.real_content_type)\n )\n id_list = [eoobj.id]\n for child_eoobj in qset:\n id_list.extend(_get_children_ids(child_eoobj))\n return id_list\n\n return list(\n Coverage.objects\n .filter(collections__id__in=_get_children_ids(eoobj))\n .values_list('identifier', flat=True)\n )", "def get_phrase_group_id(word):\n pg_ids = []\n n = 0\n for phraseGroups in data: \n for phrase in phraseGroups:\n if find_pgs(word, phrase):\n pg_ids.append(n)\n n = n + 1\n return pg_ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay.
def trigger_output(self):
    EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill()
    self.q(css='div#fixture button').first.click()
    EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
[ "def background_and_wait(self):\n return self.wait_for_click()", "def wait_until_insurance_displayed(self):", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def click_submit_button_with_wait(self):\n self.switch_to_detail_frame()\n self.set_existing_handles()\n self.click_element(self.submit_button_locator, True)\n self.switch_to_default_content()\n timeout = 300\n try:\n end_time = time.time() + timeout\n while time.time() < end_time:\n self.wait(5)\n except:\n raise\n self.switch_to_window()", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def click_button(self):\n self.widgets.get('button').click()", "def click_on_demand_extract_button(self):\n self.switch_to_detail_frame()\n self.set_existing_handles()\n self.click_element(self.on_demand_extract_button_locator, True)\n self.switch_to_default_content()\n self.switch_to_window()", "def poll(self):\n\tself.met = self.button.poll()", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def wait_until_lemonade_insurance_page_displayed(self):", "def takeControl(self):\n mainloop()", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def click(self):\r\n pass", "def wait_step(self):\n pass", "def WaitForTest(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('waitForTest', payload=payload, response_object=None)", "def click_button_ok(self):\n # AutoGen method\n self.click_element(self.BUTTON_OK)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception.
def make_broken_promise(self): return EmptyPromise( self.q(css='div#not_present').is_present, "Invalid div appeared", try_limit=3, try_interval=0.01 ).fulfill()
[ "def rejected(reason):\n p = Promise()\n p._state = \"rejected\"\n p.reason = reason\n return p", "def test_willNotAllowNonDeferredOrCoroutine(self):\n with self.assertRaises(ValueError):\n defer.ensureDeferred(\"something\")", "def reject(self, reason):\n if self._state != \"pending\":\n raise RuntimeError(\"Promise is no longer pending.\")\n\n self.reason = reason\n self._state = \"rejected\"\n errbacks = self._errbacks\n self._errbacks = None\n for errback in errbacks:\n errback(reason)", "def never() -> AsyncObservable:\n\n return Never()", "def _failure_task(self) -> asyncio.Future:\n if self._maybe_failure_task is None:\n self._maybe_failure_task = asyncio.Future()\n return self._maybe_failure_task", "async def rejected(error: Exception) -> Any:\n raise error", "def reject_waiting_call(self) -> None:", "def maybe_future(result, *, on_error=None, log=LOG, loop=None):\n if result is None:\n return None\n try:\n future = asyncio.ensure_future(result, loop=loop)\n except TypeError:\n if on_error:\n on_error(result)\n else:\n log.warning('maybe_future() ignoring non-awaitable result %r', result)\n return None\n return future", "def maybe_future(x):\r\n if is_future(x):\r\n return x\r\n else:\r\n fut = Future()\r\n fut.set_result(x)\r\n return fut", "def maybe_future(x):\n if is_future(x):\n return x\n else:\n fut = Future()\n fut.set_result(x)\n return fut", "def promise(self):\n return Promise(self)", "def is_promise_type(self):\n raise exceptions.NotImplementedError()", "def test_deferred_failure_result(self):\n passthrough = self.make_wrapped_function()\n result = passthrough(fail(ZeroDivisionError()))\n self.assertIsInstance(result, EventualResult)\n self.assertRaises(ZeroDivisionError, result.wait, 0.1)", "def never() -> ObservableBase:\n from ..operators.observable.never import never\n return never()", "def as_deferred(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n d = Deferred()\n try:\n d.resolve(func(*args, **kwargs))\n except Exception as e:\n d.reject(exception=e)\n return d.promise()\n return wrapper", "def makeDeferred(value):\n return SyncDeferred(value)", "def _on_future_cancelled(self, promise):\n promise.setCanceled()", "def test_dies_if_no_job(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n work = Async(\"dir\", kwargs={'something': None})\n work._options.pop('job')\n assert 'job' not in work._options\n\n with _ExecutionContext(work):\n self.assertRaises(Exception, run_job)", "def test_maybeDeferredAsyncError(self):\n d = defer.Deferred()\n d2 = defer.maybeDeferred(lambda: d)\n d.errback(failure.Failure(RuntimeError()))\n self.assertImmediateFailure(d2, RuntimeError)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the page named `page_name` after waiting for `delay_sec`.
def load_next(self, page, delay_sec):
    time.sleep(delay_sec)
    page.visit()
[ "def load_page(self, url: str):\n self.__driver.get(url)\n sleep(SLEEP_LONG_TIME)", "def wait(self, url):\r\n domain = urlsplit(url).netloc\r\n last_accessed = self.domains.get(domain)\r\n if self.delay > 0 and last_accessed is not None:\r\n sleep_secs = self.delay - (datetime.now() - last_accessed).seconds\r\n if sleep_secs > 0:\r\n time.sleep(sleep_secs)\r\n self.domains[domain] = datetime.now()", "def wait(self, url):\n domain = urlparse.urlsplit(url).netloc\n last_accessed = self.domains.get(domain)\n if self.delay > 0 and last_accessed is not None:\n sleep_secs = self.delay - (datetime.now() - last_accessed).seconds\n if sleep_secs > 0:\n time.sleep(sleep_secs)\n self.domains[domain] = datetime.now()", "def wait(self, url):\n domain = urllib.parse.urlsplit(url).netloc\n last_accessed = self.domains.get(domain)\n if self.delay > 0 and last_accessed is not None:\n sleep_secs = self.delay - (datetime.now() - last_accessed).seconds\n if sleep_secs > 0:\n time.sleep(sleep_secs)\n self.domains[domain] = datetime.now()", "def scroll_and_sleep(self):\n time.sleep(self.load_time + (random.randint(10, 100) / 1500))\n self.driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)\n return", "def check_page_loaded(driver, delay, elem_type, elem_name):\n\n try:\n if elem_type == \"CLASS_NAME\":\n elem = WebDriverWait(driver, delay) \\\n .until(EC.presence_of_element_located((By.CLASS_NAME, elem_name)))\n elif elem_type == \"ID\":\n elem = WebDriverWait(driver, delay) \\\n .until(EC.presence_of_element_located((By.ID, elem_name)))\n elif elem_type == \"NAME\":\n elem = WebDriverWait(driver, delay) \\\n .until(EC.presence_of_element_located((By.NAME, elem_name)))\n\n print(\"The page element <\" , elem_name, \"> is ready!\")\n\n except TimeoutException:\n print(\"Loading page took to long. Please try again.\")", "def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)", "def verify_element_has_loaded(self, element_name, delay=3):\n try:\n test_element = WebDriverWait(self.driver, delay).until(EC.presence_of_element_located((By.NAME, element_name)))\n print(\"Page is ready!\")\n except TimeoutException:\n print(\"Timeout has occurred.\")", "def fetch(url,delay=(1,3)):\n time.sleep(random.randint(delay[0],delay[1])) # wait random seconds\n try:\n response = requests.get(url)\n except ValueError as e:\n print(str(e))\n return '', BeautifulSoup('', \"html.parser\")\n html = response.text\n soup = BeautifulSoup(html, \"html.parser\")\n return (html,soup)", "def delay():\r\n time.sleep(2)", "def waiting_page_load(self, locator, timeout=None, attempts=None):\n timeout = settings.DOWNLOAD_TIMEOUT if not timeout else timeout\n attempts = settings.DOWNLOAD_ATTEMPTS if not attempts else attempts\n i = 0\n logging.debug(f\"Ждем {timeout}сек. 
для загрузки контента \"\n f\"(всего попыток {attempts})\")\n with self.driver_lock:\n self.driver.switch_to.window(self.tab_name)\n for i in range(1, attempts + 1):\n try:\n logging.debug(f\"Попытка {i}\")\n WebDriverWait(self.driver, timeout).until(\n EC.visibility_of_element_located(locator))\n logging.debug(\"данные успешно загруженны\")\n break\n except TimeoutException:\n logging.warning(f\"Время ожидания загрузки истекло.\",\n exc_info=True)\n self.driver.switch_to.window(self.tab_name)\n\n if i == attempts:\n logging.critical(f\"Использован лимит попыток на загрузку контента\")\n self.stop()", "def wait_for_page_to_load(self):\n self.wait.until(lambda s: self.is_page_loaded())\n return self", "def wait_for(old_html, timeout=60):\n\tstart_time = time.time() \n\twhile time.time() < start_time + timeout: \n\t\tif check_new_page_loaded(old_html): \n\t\t\treturn time.time() - start_time \n\t\telse: \n\t\t\ttime.sleep(0.1) \n\traise Exception('WebPage Load Timeout')", "def delay_response(delay):\n delay = min(float(delay), 10)\n\n time.sleep(delay)\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\")\n )", "def wait(delaySec, host='default'):\n global lastCallSec\n delaySec = float(delaySec)\n nowSec = time.time()\n sinceLastCallSec = nowSec - lastCallSec.get(host, nowSec)\n if sinceLastCallSec > 0.1 and sinceLastCallSec < delaySec:\n waitSec = max(0.0, delaySec - sinceLastCallSec)\n logging.info('Waiting for %f seconds before downloading from host %s' % (waitSec, host))\n time.sleep(waitSec)\n lastCallSec[host] = time.time()", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def slow_page(request):\n\tprint(\"starting long wait\")\n\ttime.sleep(5)\n\tprint(\"ended long wait\")\n\treturn HttpResponse('Finished waiting for 5 seconds')", "def set_page_load_timeout(self, seconds):\n self.driver.set_page_load_timeout(seconds)", "def wait_for_page_load(self):\n old_page = self.browser.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.browser, self.wait).until(\n staleness_of(old_page)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Give focus to the element with the ``maincontent`` ID.
def focus_on_main_content(self): self.browser.execute_script("$('#main-content').focus()")
[ "def onMnemoToMain(self):\n self.second_main_text.SetFocus()", "def _focus(self, element):\n actions = ActionChains(self.selenium.driver)\n actions.move_to_element(element).click().perform()\n self.selenium.set_focus_to_element(element)", "def giveFocus(self):\r\n self.focus = True", "def focus_to_doc(self):\n self.frame.getContainerWindow().setFocus()", "def focus(self, locator):\n \n self.element = self._element_finder(locator)\n self._current_browser().execute_script(\"arguments[0].focus();\", self.element)\n log.mjLog.LogReporter(\"WebUIOperation\",\"debug\",\"focus operation successful: %s\" %(locator))", "def focus_primary(self):\n\n if len(self.windows):\n\n if not self.windows[0].focus():\n\n del self.windows[0]\n self.tile_windows()", "def setFocus(self):\n self._urlEdit.setFocus()", "def onMainToMnemo(self):\n self.second_mnemo_text.SetFocus()", "def SetFocus(self):\n super(CommandBarBase, self).SetFocus()\n if self.MainControl:\n self.MainControl.SetFocus()", "def setFocusId(*args):", "def focus():\n\tpass", "def set_focus(self):\n self.focus_target().setFocus(Qt.OtherFocusReason)", "def setFocus(self):\n\t\t## Up until r6816, the following was wrapped in a callAfter(), which made for\n\t\t## lousy performance, especially on Windows.\n\t\tself.SetFocus()", "def focus_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_down()", "def OnFocus(self, evt):\n self.ActivateParent()\n self.GetParent().SetFocus()\n evt.Skip()", "def focus(self):\n self.image_window.focus_set()", "def XPSetKeyboardFocus(inWidget):\n pass", "def focus_body(self):\n if self._w.original_widget.focus_part != 'body':\n self._w.original_widget.set_focus('body')\n self.parent.input.statusbox.update_help_text(' '.join([\n self.default_help, self.page_help]).strip())", "def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reload the page, wait for JS, then trigger the output.
def reload_and_trigger_output(self):
    self.browser.refresh()
    self.wait_for_js()  # pylint: disable=no-member
    self.q(css='div#fixture button').first.click()
[ "def reload_webpage(browser):\n browser.execute_script(\"location.reload()\")\n update_activity()\n sleep(2)\n\n return True", "def refresh_page(self):\n self.m_driver.refresh()\n time.sleep(30)", "def reload_content(self):\n pass", "def reloadContent(self):\n\n self._scriptbuffer = open(self.__filename, 'r')\n PythonScript.reloadContent(self)\n self._scriptbuffer.close()", "def Reload(self):\n self._inspector_backend.Navigate(self.url, None, 10)", "def refreshPage(self):\n try:\n self.objDriver.refresh()\n except Exception as e:\n raise Exception(\"Error: while refreshing page: {} Error : {}\".format(e))", "def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)", "def trigger_reloading(self) -> None:\n self.trigger_signal(\"reloading\")", "def execute_js(self, script):\n self.driver.execute_script(script)", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def wait_until_javascript_is_complete(self):\n for each in range(1, 20):\n jquery_started = self.execute_javascript(\"return jQuery.active==1\")\n if jquery_started:\n break\n for each in range(1, 50):\n jquery_completed = self.execute_javascript(\"return window.jQuery!=undefined && jQuery.active==0\")\n if jquery_completed:\n break\n zoomba.sleep(\"0.5s\")", "def reload_sites(self):\n self.get_event_bus().publish(AppActionEvent(self, action=ACTION_RELOAD))", "def onReload(self, event):\n\n\t\tself.wv.Reload()", "def pyscript_run(\n self,\n snippet,\n *,\n extra_head=\"\",\n wait_for_pyscript=True,\n timeout=None,\n check_js_errors=True,\n ):\n doc = self._pyscript_format(\n snippet, execution_thread=self.execution_thread, extra_head=extra_head\n )\n if not wait_for_pyscript and timeout is not None:\n raise ValueError(\"Cannot set a timeout if wait_for_pyscript=False\")\n filename = f\"{self.testname}.html\"\n self.writefile(filename, doc)\n self.goto(filename)\n if wait_for_pyscript:\n self.wait_for_pyscript(timeout=timeout, check_js_errors=check_js_errors)", "async def async_trigger_reloading(self) -> None:\n await self.async_trigger_signal(\"reloading\")", "def refresh_page(self, check=True):\n url = self.app.page_base.url\n self.app.page_base.refresh()\n\n if check:\n assert_that(self.app.page_base.url, equal_to(url))", "def refresh(self, id):\n exports.execute_export.delay(id)\n return render({\"id\": id})", "def js_run(self):\n\n js = js_bridge()\n js.args = copy.deepcopy(self.js_args)\n js.run()", "def modifyUI(self):\r\n url = self.urlInput.text()\r\n selectors = self.selectorInput.text()\r\n self.web.load(QUrl(url))\r\n print(\"Webpage Loaded \\n\")\r\n\r\n self.script = ScriptGenerator(url,selectors).generate()\r\n\r\n self.scraper_ = scraper.Scraper(str(url),str(selectors))\r\n self.scraper_.threadChange.connect(self.addScriptAndData)\r\n self.scraper_.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click the button and wait until the playing class has disappeared from the DOM
def is_class_absent(self):
    self.q(css='#spinner').first.click()
    self.wait_for_element_absence('.playing', 'Animation Stopped')
[ "def poll(self):\n\tself.met = self.button.poll()", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def _check_play_button(self, mouse_pos):\n button_clicked = self.single_player.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self.settings.initialize_dynamic_settings()\n self.stats.reset_stats()\n self.stats.game_active = True\n self.score_board.prep_score()\n self.score_board.prep_level()\n self.score_board.prep_ships()\n\n self.aliens.empty()\n self.bullets.empty()\n\n self._create_fleet()\n self.ship.center_ship()\n\n # hide mouse cursor\n pygame.mouse.set_visible(False)", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def play_video(self):\n\n self.wait.until(self.visible((By.ID, \"video-title\")))\n self.driver.find_element_by_xpath(\"//button[@class='ytp-large-play-button ytp-button']\").click()", "def wait_to_play(self):\n\n\t\tself.player_model.current_player = self.player_model.rival_player\n\t\tself.player_frame.prepare_to_wait_turn(self.player_model.rival_player.name, self.player_model.available_cells)", "def wait_for_video_class(self):\r\n self.wait_for_ajax()\r\n\r\n video_selector = '{0}'.format(CSS_CLASS_NAMES['video_container'])\r\n self._wait_for_element(video_selector, 'Video is initialized')", "def click_music(self, button):\n if cf.music_on is True:\n cf.music_on = False\n elif cf.music_on is False:\n cf.music_on = True\n # Remove old button.\n self.remove_button()\n # Re-add the button.\n self.add_button()", "def test_presence_button(self):\n\t\tmy_button = self.open_page()[1]\n\t\tassert my_button is not None", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def wait_until_insurance_displayed(self):", "def on_play_pause(self, button: Gtk.Button) -> None:\n del button # Unused.\n if self.play_pause_stack.get_visible_child_name() == 'play':\n self._player.play()\n else:\n self._player.pause()", "def loop_button_pressed(self) -> None:\n \n current_song = self.songbox.get(tk.ACTIVE)\n pygame.mixer.music.queue(current_song)", "def click_using_class(self, text, search_text, delay=3, is_button=False):", "def _playAgain(self):\n self._click()\n if self._last is None and self._touch is not None:\n self._state = STATE_RESET", "def background_and_wait(self):\n return self.wait_for_click()", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def wait_for_start_flash(self) -> None:\n counter = 0\n led_state = False\n while not self.start_button.is_pressed:\n if counter % 6 == 0:\n led_state = not led_state\n self._run_led.state = led_state\n sleep(0.05)\n counter += 1", "def wait_until_played():\n\n sounddevice.wait()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click the button and wait until the spinner has disappeared.
def is_spinner_invisible(self):
    self.q(css='#spinner').first.click()
    self.wait_for_element_invisibility('#anim', 'Button Output is Visible')
[ "def wait_until_insurance_displayed(self):", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def poll(self):\n\tself.met = self.button.poll()", "def click_submit_button_with_wait(self):\n self.switch_to_detail_frame()\n self.set_existing_handles()\n self.click_element(self.submit_button_locator, True)\n self.switch_to_default_content()\n timeout = 300\n try:\n end_time = time.time() + timeout\n while time.time() < end_time:\n self.wait(5)\n except:\n raise\n self.switch_to_window()", "def wait_until_lemonade_insurance_page_displayed(self):", "def _click_button(self, button_name):\n self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()\n self.wait_for_ajax()", "def showSpinner():\n\tglobal stopEvent\n\tglobal thread\n\n\tstopEvent = threading.Event()\n\tthread = threading.Thread(target=spinnerThread, args=(1, stopEvent))\n\tthread.start()", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def click_button(self):\n self.widgets.get('button').click()", "def click_received_charges_cancel_changes_button(self):\n self.click_element(self.received_charges_cancel_changes_button_locator)\n self.wait_for_ajax_spinner_load()", "def is_class_absent(self):\n self.q(css='#spinner').first.click()\n self.wait_for_element_absence('.playing', 'Animation Stopped')", "def hideSpinner():\n\tglobal stopEvent\n\tglobal thread\n\n\tstopEvent.set()\n\tthread.join()", "def wait_for_load(browser):\n loader = browser.find_element_by_class_name('ui-loader')\n while loader.is_displayed():\n time.sleep(0.1)", "def hide(self) -> None:\n self.spinner.stop()\n self.hidden = True", "def hide_spinner(self):\n\n\t\tGObject.idle_add(self.buttons_area.set_sensitive, True)\n\t\tGObject.idle_add(self.header_eventbox.show)\n\t\tGObject.idle_add(self.pages.show)\n\t\tGObject.idle_add(self.pages_loading.hide)", "def click(self, buttonName):\n super(LicenseActivation, self).click(buttonName)\n if buttonName == 'Next':\n timeout = 60\n self.parent.LoadingPopup.update()\n while self.parent.LoadingPopup.isLoaded():\n print \"Debug: Activating license...\"\n time.sleep(15)\n timeout = timeout - 15\n if timeout <= 0:\n raise Exception(\"License Manager: Activating licenses timeout...\")\n self.parent.VmCallHome.update()", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def background_and_wait(self):\n return self.wait_for_click()", "def wait_for_button(self, block=True):\n # Loop until all buttons are released (if currently pressed)\n while self.enter.value() == 0 or self.page.value() == 0:\n pass\n\n # Wait for first button press\n checks = 0\n while self.enter.value() == 1 and self.page.value() == 1:\n checks += 1\n if not block and checks > NONBLOCKING_CHECKS:\n break\n\n if self.enter.value() == 0:\n # Wait for release\n while self.enter.value() == 0:\n pass\n return BUTTON_ENTER\n\n if self.page.value() == 0:\n # Wait for release\n while self.page.value() == 0:\n pass\n return BUTTON_PAGE\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if value is prime
def is_prime(value: int) -> bool:
    if value == 1:
        return False
    if value <= 0:
        raise ValueError("Value must be greater than zero")
    for i in range(2, int(value**(1/2)) + 1):
        if value % i == 0:
            return False
    return True
[ "def is_prime(num: int) -> bool:\n pass", "def is_prime(self):\n\n vroot = int(self.value ** 0.5) + 1\n for i in range(3, vroot, 2):\n if self.value % i == 0:\n return False\n return True", "def is_prime(val):\n if val == 2:\n return True\n if val % 2 == 0:\n return False # Checking this makes this much more efficient\n for i in xrange(2, int(math.sqrt(val)) + 1):\n if val % i == 0:\n return False\n return True", "def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False", "def is_prime(a):\n return all(a % i for i in xrange(2, a))", "def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True", "def isprime(n):\r\n\treturn is_prime(n)", "def is_prime(x):\n return int(gmpy2.is_prime(x))", "def isprime(n):\n\treturn is_prime(n)", "def isitprime(p):\r\n for i in range(2,int(p**0.5)+1):\r\n if p % i == 0:\r\n return False\r\n return True", "def isprime(n):\n if prime(n):\n return True\n else:\n return False", "def isPrime(p):\n\n if p != int(p):\n return False\n\n return pari(\"isprime(\" + str(int(p)) + \")\")", "def good_prime(p):\n return p % 4 == 3 and probablyPrime(p, accuracy=100)", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def prime_check(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))", "def prime_with(x, s):\n for i in s:\n if x % i == 0:\n return False\n return True", "def test_is_prime():\r\n\r\n assert is_prime(2) == True\r\n assert is_prime(3) == True\r\n assert is_prime(9) == False\r\n assert is_prime(50) == False\r\n assert is_prime(53) == True", "def test_11(self):\n\t\tself.assertFalse(is_prime(11))", "def is_prime(digit):\n if digit < 2:\n return False\n i = 2\n while i <= digit / 2:\n if digit % i == 0:\n return False\n else:\n i += 1\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all prime factors of the given value
def factors(value: int) -> list:
    prime_factors: list = []
    for i in range(2, value + 1):
        if i > 2 and i % 2 == 0 or not is_prime(i):
            continue
        while value % i == 0:
            value = int(value / i)
            prime_factors.append(i)
        if value == 1:
            break
    return prime_factors
[ "def prime_factors(value):\n\n checkutils.check_instance(int, value, 'positive ')\n checkutils.check_nonnegative(value)\n\n if value < 4:\n return [value]\n\n ret = list()\n for prime in primes:\n if value == 1:\n assert len(ret) > 0\n return ret\n while value % prime == 0:\n ret.append(prime)\n value //= prime", "def factors(value):\n\n # get the prime factors, also does argument checking\n primes = prime_factors(value)\n checkutils.check_positive(value)\n\n # make a histogram of the prime counts\n prime_counts = defaultdict(int)\n for prime in primes:\n prime_counts[prime] += 1\n\n # make a discrete space of the factors that each prime can contribute\n factor_sets = list()\n for prime, count in prime_counts.iteritems():\n factors = [1]\n for _ in xrange(count):\n factors.append(factors[-1] * prime)\n factor_sets.append(tuple(factors))\n \n # do the combinatorics on the factors\n multiplyem = partial(reduce, mul)\n factors = set(multiplyem(factorpoint) for factorpoint in discrete.iterspace(factor_sets))\n\n ret = tuple(sorted(factors))\n assert ret[0] == 1 and ret[-1] == value\n return ret", "def prime_factors_of(value):\n\n # Okay, so we need to \"solve\" two problems here:\n # is a given number a factor of `value`?\n # and\n # is a given number PRIME?\n\n # I think the simplest non-stupid approach is to generate all \n # FACTORS OF VALUE, and then check to see which are prime!\n # actually, a cute approach would be to start from the top down\n # and just return the first one. we'll see if i need that optimization.\n # (don't optimize prematurely!)\n\n # WELP. I tried to generate all primes up to value//2! what a mistake.\n # or was it? maybe it was just a bad implementation of prime-finding?\n\n factors = []\n\n for i in range(2, value//2):\n if value % i == 0:\n factors.append(i)\n\n prime_factors = []\n \n for i in factors:\n if is_prime(i):\n prime_factors.append(i)\n\n return prime_factors", "def factor_primes(x, iter):\n factors = []\n for factor in prime:\n while x % factor == 0:\n x = x / factor\n factors.append(factor)\n if x == 1:\n break\n return factors", "def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors", "def find_prime_factors(n):\n factors = []\n current_number = n\n current_divisor = 2\n while current_number > 1:\n while current_number % current_divisor == 0:\n current_number /= current_divisor\n factors += [current_divisor]\n current_divisor += 1\n return factors", "def get_prime_factors(self, number):\n for prime in self.get_primes():\n while number % prime == 0:\n yield prime\n number /= prime\n \n if number == 1:\n break", "def getallprimefactors(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n print(n)\n n /= d\n d += 1\n return factors", "def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)", "def get_prime_factors(n):\n factors = []\n i = 2\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors", "def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in 
prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors", "def _get_all_factors(self):\n factors = []\n end = (self.total_cores // 2) + 1 # factor can't ever be more than half the number\n for i in range(1, end):\n if self.total_cores % i == 0:\n factors.append(self.total_cores // i)\n\n factors.append(1)\n return factors", "def gen_factors(value: int) -> iter:\n for n in range(2, value + 1):\n if value == 1:\n break\n while value % n == 0:\n yield n\n value //= n", "def get_prime_factors(number):\n if number == 1:\n return []\n\n # We have to begin with 2 instead of 1 or 0\n # to avoid the calls infinite or the division by 0\n for i in range(2, number):\n # Get remainder and quotient\n rd, qt = divmod(number, i)\n if not qt: # if equal to zero\n return [i] + get_prime_factors(rd)\n\n return [number]", "def prime_factors(num):\n factors = defaultdict(int)\n for i in possible_divisors(num):\n while not num % i:\n num /= i\n factors[i] += 1\n if num > 1:\n factors[num] += 1\n return factors", "def prime_factors(num, start=2):\n candidates = xrange(start, int(sqrt(num)) + 1)\n factor = next((x for x in candidates if (num % x == 0)), None)\n return ([factor] + prime_factors(num / factor, factor) if factor else [num])", "def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1", "def primefactors(num):\n\n factors = [num]\n while not isprime(factors[-1]):\n for y in range(2, int(factors[-1] ** 0.5) + 1):\n if num % y == 0 and isprime(y):\n del factors[-1]\n factors.append(y)\n leftover = num // y\n factors.append(leftover)\n num = leftover\n break\n return set(factors)", "def get_factors(num):\n factors = []\n\n # Extend range by 1 to include num\n for i in range(1, num+1):\n if num % i == 0:\n factors.append(i)\n return factors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets up the connection. Will optionally accept a size or else will use a chunked TransferEncoding.
def setup(self, size=None):
    if size:
        self.size = size
    if not self.size:
        self.size = UNKNOWN_LENGTH
    self.body.length = self.size
    req = self.conn.make_request('PUT', self.url, headers=self.headers, data=self.body)
    self.req = req
    print "ChunkedTwistedConnection: STARTED REQUEST"
[ "def enable_chunked_encoding(self, chunk_size: Optional[int] = ...) -> None:\n ...", "def setup_connection(self):\n if self.conn:\n self.conn.close()\n self.buffer = ''\n self.conn = pycurl.Curl()\n self.conn.setopt(pycurl.URL, API_ENDPOINT_URL)\n self.conn.setopt(pycurl.USERAGENT, USER_AGENT)\n self.conn.setopt(pycurl.ENCODING, 'deflate, gzip')\n self.conn.setopt(pycurl.POST, 1)\n self.conn.setopt(pycurl.POSTFIELDS, urllib.urlencode(POST_PARAMS))\n self.conn.setopt(pycurl.HTTPHEADER, ['Host: stream.twitter.com',\n 'Authorization: %s' % self.get_oauth_header()])\n # self.handle_tweet is the method that are called when new tweets arrive\n self.conn.setopt(pycurl.WRITEFUNCTION, self.handle_tweet)", "def setup_connection(self):\n\t\tif self.conn:\n\t\t\tself.conn.close()\n\t\t\tself.buffer = ''\n\t\t\n\t\tself.conn = pycurl.Curl()\n\t\t\n\t\tif isinstance(self.timeout, int):\t# Restart connection if less than 1 byte/s is received during \"timeout\" seconds\n\t\t\tself.conn.setopt(pycurl.LOW_SPEED_LIMIT, 1)\n\t\t\tself.conn.setopt(pycurl.LOW_SPEED_TIME, self.timeout)\n\t\tself.conn.setopt(pycurl.URL, API_ENDPOINT_URL)\n\t\tself.conn.setopt(pycurl.USERAGENT, USER_AGENT)\n\t\t\n\t\tself.conn.setopt(pycurl.ENCODING, 'deflate, gzip')\t# Using gzip is optional but saves us bandwidth.\n\t\tself.conn.setopt(pycurl.POST, 1)\n\t\tself.conn.setopt(pycurl.POSTFIELDS, urllib.urlencode(POST_PARAMS))\n\t\t\n\t\t#self.conn.setopt(pycurl.USERPWD, \"%s:%s\" % (username, password))\n\t\t\n\t\t#self.conn.setopt(pycurl.CAINFO, SSL_CERTIFICATE)\t# SSL_CERTIFCATE should point to the .crt file location!\n\t\tself.conn.setopt(pycurl.SSL_VERIFYPEER, 0)\t# bypasses SSL certification file, \n\t\tself.conn.setopt(pycurl.SSL_VERIFYHOST, 0)\t# vulernable to \"man-in-the-middle\" attacks!\n\n\t\tself.conn.setopt(pycurl.HTTPHEADER, ['Host: stream.twitter.com', 'Authorization: %s' % self.auth_header])\n\t\t\n\t\tself.conn.setopt(pycurl.WRITEFUNCTION, self.callback)", "def initiate_chunked_upload(self):\n raise NotImplementedError", "def __init__(self, size, connection):\n pycastle_log.debug(str(self)+\" start\")\n try:\n assert isinstance(connection, CastleConnection), \"wtf\"\n self.buf = castle_shared_buffer_create(connection.conn, size)\n self.size = size\n self.connection = connection\n pycastle_log.info(\"Made buffer {0} of size {1} with connection {2}\".format(self.buf, self.size, self.connection.conn))\n except Exception, e:\n pycastle_log.error(str(self)+\" got exception {0}:{1}\".format(type(e), e))\n raise\n finally:\n pycastle_log.debug(str(self)+\" stop\")", "def initialize(self):\n self.connection = HTTPConnection(self.host, self.port)\n self.fetch_description()", "def setup(chunk_size=0, try_times=0):\n\tglobal _chunk_size, _try_times\n\n\tif chunk_size == 0:\n\t\tchunk_size = 1 << 18\n\n\tif try_times == 0:\n\t\ttry_times = 3\n\n\t_chunk_size, _try_times = chunk_size, try_times", "def __init__(self, data_size):\n try:\n self.data_size = int(data_size)\n except ValueError as exc:\n raise ValueError(\"Exepected arg 'size' to be int: \" + str(exc))\n self.packet = bytearray()\n self.in_data = False\n self.header_pos = 0\n self.transport = None", "def __init__(self, payload_size, encoded_size):\n assert payload_size <= encoded_size\n self.payload_size = payload_size\n self.encoded_size = encoded_size", "def _init_connection(self, settings):\n raise NotImplementedError()", "def __init__(self, stream, progress_callback, progress_chunk_size):\n self._stream = stream\n self._progress_callback = progress_callback\n 
self._progress_chunk_size = progress_chunk_size\n self._bytes_transferred = 0\n self._progress_chunk = 0", "def connection_made(self, transport):\n self._transport = transport\n self._when_connected = datetime.datetime.now()\n self._last_received = datetime.datetime.now()\n\n reader_factory = self._reader_factory\n writer_factory = self._writer_factory\n reader_kwds = {}\n writer_kwds = {}\n\n if self.default_encoding:\n reader_kwds[\"fn_encoding\"] = self.encoding\n writer_kwds[\"fn_encoding\"] = self.encoding\n reader_kwds[\"encoding_errors\"] = self._encoding_errors\n writer_kwds[\"encoding_errors\"] = self._encoding_errors\n reader_factory = self._reader_factory_encoding\n writer_factory = self._writer_factory_encoding\n\n if self._limit:\n reader_kwds[\"limit\"] = self._limit\n\n self.reader = reader_factory(**reader_kwds)\n\n self.writer = writer_factory(\n transport=transport,\n protocol=self,\n reader=self.reader,\n server=True,\n **writer_kwds\n )\n\n logger.info(\"Connection from %s\", self)\n\n self._waiter_connected.add_done_callback(self.begin_shell)\n asyncio.get_event_loop().call_soon(self.begin_negotiation)", "def init_connection(self):\n self.log('Initializing connection to %s' % (self.bosh_service.netloc))\n self.connection = httplib.HTTPConnection(self.bosh_service.netloc)\n self.log('Connection initialized')\n # TODO add exceptions handler there (URL not found etc)", "def set_chunk_size(self, n):\n self.chunk_size = n", "def _init_stream(**kwargs):\n pass", "def __init__(self):\n super(MITM, self).__init__()\n self.buffer_size = 10485760 #10MB\n self.host = '0.0.0.0' # all ip\n self.client_port = 80\n self.server_port = 80\n self.parse_function = deault_parse_function\n self.network_type = network_type.TCP", "def __init__(self, host, port, use_socket=None, server=False, handler=False, bufsize=\"auto\", compress=False, compress_level=6):\n super(BinarySocket, self).__init__(host, port, server=server, use_socket=use_socket, use_pickle=False, bufsize=bufsize, handler=handler)\n self.__header_buffer = \"\"\n self.__binary_buffer = \"\"\n self.__meta_buffer = \"\"\n self.__header_length = 2 * 4 + 1 # 2 Unsigned Ints, 1 Bool\n self.__binary_length = None\n self.__binary_compressed = False\n self.__meta_length = None\n self.__buffer_lock = threading.Lock()\n\n self.set_compression(compress, compress_level)", "def connection_made(self, transport):\n #self._transport = transport\n\n self._server_ip, self._server_port = (\n transport.get_extra_info('peername')[:2])\n\n self.stream = self._stream_factory(\n transport=transport, client=True, log=self.log)\n\n# self.reader = self._factory_reader()\n# self.reader.set_transport(transport)\n self.shell = self._shell_factory(client=self, log=self.log)\n\n self.init_environment_values()\n self.set_stream_callbacks()\n self._last_received = datetime.datetime.now()\n self._connected = datetime.datetime.now()\n\n # begin connect-time negotiation\n self._loop.call_soon(self.begin_negotiation)\n\n # resolve server fqdn (and later, reverse-dns)\n self._server_host = self._loop.run_in_executor(\n None, socket.gethostbyaddr, self._server_ip)\n self._server_host.add_done_callback(self.after_server_lookup)\n\n self.log.info(self)", "async def read_chunk(self, size: int = ...) -> bytes:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends a chunk of data.
def send_chunk(self, chunk):
    print "ChunkedTwistedConnection: send chunk"
    return self.body.send(chunk)
[ "def send_chunk(chunk, send_socket):\n length = len(chunk)\n data = str(length).zfill(MAX_CHUNK_SIZE).encode() + chunk\n send_socket.send(data)", "def send_byte_data(self, data):\n data_len = len(data)\n print(\"Data is of length \",)\n n_blocks = int(data_len//1024 if data_len//1024 == data_len/1024 else data_len//1024 + 1) # get rid of 1024, magic number\n print(\"Data is \",n_blocks,\" block in length\")\n try:\n self.socket.send(str(n_blocks).encode('ascii'))\n for i in range(n_blocks):\n self.socket.send(data[i*1024:(i+1)*1024])\n return True\n except Exception as e:\n print(e)\n return False", "def send_data(self, data):\n for byte in data:\n self.send_byte(byte)", "def recv_chunk(self, data):", "def sendData(self, data, sync = False, chopsize = None):\r\n if chopsize and chopsize > 0:\r\n i = 0\r\n n = len(data)\r\n done = False\r\n while not done:\r\n j = i + chopsize\r\n if j >= n:\r\n done = True\r\n j = n\r\n self.send_queue.append((data[i:j], True))\r\n i += chopsize\r\n self._trigger()\r\n else:\r\n if sync or len(self.send_queue) > 0:\r\n self.send_queue.append((data, sync))\r\n self._trigger()\r\n else:\r\n self.transport.write(data)\r\n if self.logOctets:\r\n self.logTxOctets(data, False)", "def send(self, data, is_data=True, chunk_size=4096):\n # Set DC low for command, high for data.\n GPIO.output(self._dc, is_data)\n # Convert scalar argument to list so either can be passed as parameter.\n if isinstance(data, numbers.Number):\n data = [data & 0xFF]\n # Write data a chunk at a time.\n for start in range(0, len(data), chunk_size):\n end = min(start + chunk_size, len(data))\n self._spi.xfer(data[start:end])", "def send_data(self, data):\n self._transport.write(data)", "async def send(self):\n for chunk in self.chunks():\n await self.channel.send(chunk)\n log_response('\\n'.join(self.lines()))", "def send_chunked(self, chunks, payload, trailers):\r\n\r\n chunk_list = chunks.split(',')\r\n pointer = 0\r\n for cwidth in chunk_list:\r\n cwidth = int(cwidth)\r\n # send chunk length indicator\r\n self.wfile.write(format(cwidth, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:pointer + cwidth] + \"\\r\\n\")\r\n pointer += cwidth\r\n\r\n # is there another chunk that has not been configured? Send it anyway for the sake of completeness..\r\n if len(payload) > pointer:\r\n # send chunk length indicator\r\n self.wfile.write(format(len(payload) - pointer, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:] + \"\\r\\n\")\r\n\r\n # we're done with the payload. 
Send a zero chunk as EOF indicator\r\n self.wfile.write('0'+\"\\r\\n\")\r\n\r\n # if there are trailing headers :-) we send them now..\r\n for trailer in trailers:\r\n self.wfile.write(\"%s: %s\\r\\n\" % (trailer[0], trailer[1]))\r\n\r\n # and finally, the closing ceremony...\r\n self.wfile.write(\"\\r\\n\")", "def send(self, data):", "def _send_from_buffer(cls, buf, stream):\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n stream.write( buf[chunk_start:chunk_stop] )\n remaining_bytes -= next_chunk_bytes", "def send_data(self, data: bytes):\n self.out_socket.sendto(self.pack_data(data), self.server_in_address)", "def _sendFile(self, c):\n req = self.BUFFER[0]\n chunks = self._chunker(req, self.ChunkSize)\n n = len(chunks)\n self.logger.info(f'File is chunked into {n}')\n for i in tqdm(range(n), desc=\"sending file\"):\n c.send(chunks[i])\n c.send(bytes(self.ChunkSize))", "def send_to_data_channel(self, sock, data):\n resp = sock.send(data)\n print_debug(resp)\n self.logger.log(\"Sent: %s\" % data)\n return resp", "def _send(self, data):\n return 0", "async def send_raw(self, data: bytes) -> None:\n await self.socket.sendall(data)", "def sendChunk():\n if objectCount == 0:\n return\n logger.debug(\n 'Sending huge inv message with %i objects to just this'\n ' one peer', objectCount)\n self.append_write_buf(protocol.CreatePacket(\n 'inv', addresses.encodeVarint(objectCount) + payload))", "def senddata(self, data, **kwargs):\n raise NotImplementedError", "def _send(self, data):\n address = self.best_path()\n log.debug('Sending %s to %s' % (len(data), address))\n self.transmit(data, address)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the distance and rotation to the edge of the desk
def getDistanceAndRotationToEdge(l, f, r):

    if DEBUG:
        print "lfr:", l,",",f,",",r

    # Maths help from: http://xaktly.com/MathNonRightTrig.html
    # - Specfically the law of cosines, but at least one of their
    #   examples is wrong, but methods are correct... sigh.
    #
    # For triangle with forward length, shortest of
    # left and right length, and desk edge as sides...
    #
    # f = forward distance length
    # l = left distance length
    # r = right distance length
    # e = length of desk edge between left and right views
    # s = shortest of left and right distance length
    # v = "view" angle of how much robot looks left or right
    # g = angle between f and e
    # d = distance between robot and edge of desk
    # a = angle between the way the robot is facing and edge of desk
    #     (i.e. if the robot is facing directly towards edge it's 0)
    #     (in radians or degrees?..)
    #
    # e² = f² + s² - 2 * f * s * cos(v)
    # g = sin⁻¹ * (s * sin(v) / e)
    # d = f * sin(g)
    # a = 180 - 90 - g (minus or positive depending on if s is left or right)

    # Figure out if the edge of the desk is more to the right or left
    # s = min(l, r) <-- Used to use this, but need additional things.
    # r | l | s
    # x | x | ?
    # 1 | 1 | ?    Logic table for _r_ight, _l_eft, and output
    # 0 | 0 | ?    _s_hortest distances from robot to desk edge
    # x | 0 | l
    # 1 | x | r    x = None
    # 0 | 1 | r    1 = arbitrary high-ish value
    # x | 1 | l    0 = arbitrary low-ish value
    # 1 | 0 | l
    # 0 | x | r

    # Distance to right and left are missing?
    if r is None and l is None:
        if DEBUG:
            print "INFO: Skipping edge calcs because of missing distances."
        return int(round(f)), 0

    # Distance to right and left identical?
    elif r == l:
        if DEBUG:
            print "INFO: Skipping edge calcs because of identical distances."
        # This is unlikely-ish because l, f, r are floats...
        #
        #  r < f        r > f
        #    ◆     |  or   ◼
        #  ____➘|      __🠛__
        #
        return int(round(min(r, f))), 0

    # Figure out if _l_eft or _r_ight is the shorter distance
    else:
        if r is None:
            s = l
            direction = -1
        elif l is None:
            s = r
            direction = 1
        elif l < r:
            s = l
            direction = -1
        elif r < l :
            s = r
            direction = 1

        cosV = math.cos(math.radians(45))
        sinV = math.sin(math.radians(45))

        e = f**2 + s**2 - 2 * f * s * cosV
        e = math.sqrt(e)
        g = math.degrees(math.asin(s * sinV / e))
        d = f * math.sin(math.radians(g))  # Switching degrees/radians f'debugging
        a = (90 - g) * direction

        '''
        # Debug stuff
        print "f =", f
        print "l =", l
        print "r =", r
        print "e =", e
        print "s =", s
        print "v =", 45
        print "g =", g
        print "d =", d
        print "a =", a
        '''

        distance = int(round(d))
        rotation = int(round(a))

        if DEBUG:
            print "Distance to edge:", str(distance) + "cm"
            print "Rotation to edge:", str(rotation) + "°"

        return distance, rotation
[ "def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu", "def getEdgeDistance():\n '''\n a\n ◿\n b c\n\n hypotenuse\n ◿ adjacent\n opposite\n\n tan(a) = opposite/adjacent\n adjacent * tan(a) = opposite\n '''\n\n # An estimated multiplier to take into account the larger infrared dot\n # observed when further away from as surface - think torch beam onto a\n # wall getting larger as it gets further away, but only the radius\n # (center downwards) being relevant.\n # TODO: Maybe move into infrared sensor code?\n MULTI = 1.2\n\n edgeDistance = BOT_HEIGHT * math.tan(math.radians(getEdgeAngle()))\n edgeDistance *= MULTI\n\n if DEBUG:\n print \"Distance to edge: \", int(round(edgeDistance))\n\n return edgeDistance", "def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge", "def getDistances():\n\n # If there's a wall in the way then there's no edge that way (probably)\n\n wallL, edgeL = getDistance(-45) # Left\n wallF, edgeF = getDistance( 0) # Forward\n wallR, edgeR = getDistance( 45) # Right\n\n panTilt.pan() # Recenter\n\n return wallL, edgeL, wallF, edgeF, wallR, edgeR", "def calculate_clockwise_angle_and_distance(self, center_node, spoke_node): # pylint: disable=R0201\n if not spoke_node['id'] in center_node['relations']:\n raise Exception('spoke_node_id must be related to center node')\n\n refvec = [0, 1]\n point = spoke_node['coords']\n origin = center_node['coords']\n\n # Vector between point and the origin: v = p - o\n vector = [point[0] - origin[0], point[1] - origin[1]]\n # Length of vector: ||v||\n lenvector = math.hypot(vector[0], vector[1])\n # If length is zero there is no angle\n if lenvector == 0:\n return -math.pi, 0\n\n # Normalize vector: v/||v||\n normalized = [vector[0]/lenvector, vector[1]/lenvector]\n dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2\n diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2\n angle = math.atan2(diffprod, dotprod)\n\n # Negative angles represent counter-clockwise angles so we need to subtract them\n # from 2*pi (360 degrees)\n if angle < 0:\n return 2 * math.pi + angle, lenvector\n\n # I return first the angle because that's the primary sorting criterium\n # but if two vectors have the same angle then the shorter distance should come first.\n # (lenvector should never really be needed, however, since that would mean edges overlap)\n return angle, lenvector", "def distance_between_wheels():", "def _geodesic_distance(mesh, face1, face2, edge):\n edge_center = (mesh.vertices[edge[0]].co + mesh.vertices[edge[1]].co)/2\n return (edge_center - _face_center(mesh, face1)).length + \\\n (edge_center - _face_center(mesh, face2)).length", "def faceDiagonal(self):\n faceDiagonal = (2**(1/2)) * self.sideLength\n return faceDiagonal", "def edge_dxy(self):\r\n loc = self.loc\r\n rect = loc.coord\r\n p1 = rect[0]\r\n p2 = rect[1]\r\n edx = p2[0] - p1[0] # Find edge direction\r\n edy 
= p2[1] - p1[1]\r\n return edx, edy", "def edge_length(self):\n return 2 * self.cradius * sin(pi/self.edges)", "def calc_edge2ref_dist(self):\r\n if self.ref_line and self.ref_line is not NotImplemented:\r\n\r\n # Get the relative position of the first point on the reference edge line. This will be used as origin\r\n # for the projected edge points on the reference line.\r\n origin = np.dot(self.ref_line.parallel, self.edge_points[0].coords)\r\n\r\n position = []\r\n distance = []\r\n\r\n for edge_point in self.edge_points:\r\n # Find the distances from the the real edge and the ref line points to the (0, 0). Based on which one is\r\n # further away from the origin, the sign of the distance is assigned\r\n edge = np.linalg.norm(np.r_[0, 0] - edge_point.coords[:2])\r\n refp = np.linalg.norm(np.r_[0, 0] - self.ref_line.xy_for_z(edge_point.coords[2])[:2])\r\n s = np.sign(refp - edge)\r\n\r\n # calculate the distance of the edge point to the ref line and give this distance the sign calculated.\r\n distance.append(s * edge_point.distance_to_line(self.ref_line))\r\n\r\n # calculate the position of the projected real edge point on the reference line, using as origin the\r\n # projection of the first point (see above).\r\n position.append(abs(origin - np.dot(self.ref_line.parallel, edge_point.coords)))\r\n\r\n # assign positions and distances on the parent object\r\n self.edge2ref_dist = [position, distance]\r\n\r\n\r\n else:\r\n print('No reference line. First, add a reference line to the object. Check if the fitting process on the '\r\n 'edge points converged. Edge ignored.')\r\n return NotImplemented", "def __calcEdgeDistance(self,edgeP1,edgeP2,P): \n x = P.X \n y = P.Y\n \n # distance calculation by Hesse normal form\n \n if (not edgeP2.X == edgeP1.X):\n \n \n yDiff = (edgeP2.Y-edgeP1.Y)\n xDiff = (edgeP2.X-edgeP1.X) \n a = yDiff/xDiff\n b = 1.0\n\n c = - (edgeP1.Y - a*edgeP1.X)\n\n # calculate a straight line defining the normal vector\n normal = -a*x + b*y + c\n \n offset = math.fabs(normal / math.sqrt(a*a + b*b))\n \n elif (not edgeP2.Y == edgeP1.Y):\n # if y coordinate is the same, the points form a vertical line\n offset = math.fabs(x - edgeP1.X)\n \n else: # => edgeP1 == edgeP2\n raise NotEnoughTracePointsException(1)\n \n return offset", "def distancia_da_origem(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance(self):\n self.data.df['edge_distance'] = edge_distance.distance(\n self.data.df[['x', 'y']].values, self.data.metadata['boundary'])", "def get_sight_vector(self):\r\n\t\tx, y = self.rotation\r\n\t\t# y ranges from -90 to 90, or -pi/2 to pi/2, so m ranges from 0 to 1 and\r\n\t\t# is 1 when looking ahead parallel to the ground and 0 when looking\r\n\t\t# straight up or down.\r\n\t\tm = math.cos(math.radians(y))\r\n\t\t# dy ranges from -1 to 1 and is -1 when looking straight down and 1 when\r\n\t\t# looking straight up.\r\n\t\tdy = math.sin(math.radians(y))\r\n\t\tdx = math.cos(math.radians(x - 90)) * m\r\n\t\tdz = math.sin(math.radians(x - 90)) * m\r\n\t\treturn (dx, dy, dz)", "def chord_distances(self):\n r = self.pi_radii()\n da = np.abs(self.deflection_angles())\n return 2 * r * np.sin(da/2)", "def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance", "def edge_direction(self, edge):\n vector = 
self.edge_vector(edge)\n vector.unitize()\n return vector", "def get_direction(self, ele1, ele2, dist_thresh=0):\n first_pt = (ele1['object']['centroid']['x'],\n ele1['object']['centroid']['y'])\n last_pt = (ele2['object']['centroid']['x'],\n ele2['object']['centroid']['y'])\n\n # Compute direction only if dist beween two points is more than x\n dist_in_m = spatial.get_euc_dist(first_pt, last_pt)\n orientation = None\n if dist_in_m > dist_thresh:\n orientation_rad = spatial.get_radangle_flat_earth(\n first_pt, last_pt)\n orientation = math.degrees(orientation_rad)\n else:\n orientation = ele1['object']['direction']\n return orientation" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a value break it down by the ilk of node (usb or pci), the vendor, and the device or product.
def parse_value(value: str) -> Tuple[str, str, str]:
    value_pattern = r'^(usb|pci)\(([^:]{4}):([^:]{4})\)$'
    matches = re.match(value_pattern, value)
    assert matches, value
    ilk, vendor, device = matches.group(1), matches.group(2), matches.group(3)
    return ilk, vendor, device
[ "def device_get_vendor(udev_info):\n return udev_info.get(\"ID_VENDOR_FROM_DATABASE\", udev_info.get(\"ID_VENDOR\"))", "def findVendor(self):\n if self.model in ['nokia39','nokia56','chkp40','chkp20','chkp120','chkp13']:\n return 'nokia'\n elif self.model in ['asa10','asa25','asa50','asa85']:\n return 'cisco'", "def get_manufacturer_and_product(self):\n product = self.get_value('creator')\n if product:\n manufacturer = GarminDB.Device.Manufacturer.Unknown\n if product is not None:\n match = re.search('Forerunner|Fenix', product)\n if match:\n manufacturer = GarminDB.Device.Manufacturer.Garmin\n match = re.search('Microsoft', product)\n if match:\n manufacturer = GarminDB.Device.Manufacturer.Microsoft\n return (manufacturer, product)\n return (None, None)", "def get_vendor(self, v: Optional[str]) -> Optional[\"Vendor\"]:\n if v is None or v.startswith(\"OEM\") or v == \"None\":\n v = \"NONAME\"\n v = v.upper()\n if v in self.vendors:\n return self.vendors[v]\n # Temporary fix\n if v == \"D-LINK\":\n v = \"DLINK\"\n if v == \"48 47 20 47 45 4e 55 49 4e 45 00 00 00 00 00 00\":\n v = \"HUAWEI\"\n if \"INTEL\" in v:\n v = \"INTEL\"\n if \"FINISAR\" in v:\n v = \"FINISAR\"\n o = Vendor.objects.filter(code=v).first()\n if o:\n self.vendors[v] = o\n return o\n else:\n self.vendors[v] = None\n return None", "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def get_vendor(self):\n func = self.pqos.lib.pqos_get_vendor\n func.restype = ctypes.c_int\n\n vendor = func(self.p_cpu)\n\n if vendor == CPqosCpuInfo.PQOS_VENDOR_INTEL:\n return \"INTEL\"\n if vendor == CPqosCpuInfo.PQOS_VENDOR_AMD:\n return \"AMD\"\n\n return \"UNKNOWN\"", "def get_vendor(mac):\r\n return p.get_manuf(mac) or 'None'", "def search_vendors(value: str) -> dict:\n original_value = value\n app.logger.info('Searching for specific vendors {}'.format(original_value))\n vendors_data = get_vendors()\n\n value = '/vendors/{}'.format(value).rstrip('/')\n part_names = ['vendor', 'platform', 'software-version', 'software-flavor']\n parts = {}\n for part_name in part_names[::-1]:\n value, _, parts[part_name] = value.partition('/{}s/{}/'.format(part_name, part_name))\n if not parts['vendor']:\n return vendors_data\n vendors_data = {'vendors': vendors_data}\n previous_part_name = ''\n for part_name in part_names:\n if not (part := parts[part_name]):\n break\n previous_part_name = part_name\n for chunk in vendors_data['{}s'.format(part_name)][part_name]:\n if chunk['name'] == part:\n vendors_data = chunk\n break\n else:\n abort(404, description='No {}s found on path {}'.format(part_name, original_value))\n return {'yang-catalog:{}'.format(previous_part_name): [vendors_data]}", "def manufacturer(self, value: str | None):\n\n if value is not None:\n attest(\n is_string(value),\n f'\"manufacturer\" property: \"{value}\" type is not \"str\"!',\n )\n\n self._manufacturer = value", "def device_catalog_path_value_converter(value):\n paths = []\n for path in value:\n pt = tuple(path.split(\"/\"))\n if pt and pt[-2]==\"devices\":\n pt = pt[:-2] + pt[-1:]\n paths.append(pt)\n return paths", "def vendor(self):\n if self.dutinformation:\n return self.dutinformation.vendor\n return None", "def chipset_device(self, type):\n\t\tif (type is 'id') or (type is 'value'):\n\t\t\treturn self.__info_dict['info']['chipset_device'][type]\n\t\telse:\n#\t\t\tprint \"DEBUG: The variable '%s' was not defined, it should be 'id' or 'value'.!!!\"%type\n\t\t\tpass", "def get_vendor(self, result, host, mac):\n if \"vendor\" in result['scan'][host] and mac in 
result['scan'][host]['vendor']:\n return result['scan'][host]['vendor'][mac]\n else:\n return \"\"", "def getDevice(self) :\n buses = usb.busses()\n for bus in buses :\n\t print \"bus length = %d\" % len(bus.devices)\n\t print \"Devices = \",\n print bus.devices\n for device in bus.devices :\n print \"Device VID = %0X\" % device.idVendor\n print \"Device PID = %0X\" % device.idProduct\n if device.idVendor == self.vendor_id :\n if device.idProduct == self.product_id :\n\t\t\treturn device\n return None", "def device_vendor(self):\n return self._device_vendor", "def vendor(self) -> str:\n return self.properties[DBUS_ATTR_VENDOR]", "def get_vendor(disk):\n\n if DISKINFO[\"/dev/\"+disk][\"Type\"] == \"Partition\":\n #We need to use the info from the host disk, which will be whatever came before.\n return DISKINFO[DISKINFO[\"/dev/\"+disk][\"HostDevice\"]][\"Vendor\"]\n\n else:\n try:\n vendor = PLIST[\"MediaName\"].split()[0]\n\n except KeyError:\n vendor = \"Unknown\"\n\n return vendor", "def _vendor_out(self, value, index, buf=None):\n result = self._connection.controlTransfer(\n self.PROLIFIC_VENDOR_OUT_REQTYPE,\n self.PROLIFIC_VENDOR_WRITE_REQUEST,\n value,\n index,\n buf,\n (0 if buf is None else len(buf)),\n self.USB_WRITE_TIMEOUT_MILLIS,\n )\n\n return result", "def set_value_to_device(self, dev_name, value):\n dev = self.devices[dev_name]\n # If it is an analog channel\n if 'model' in dev.properties:\n if dev.properties['model'] == 'ni':\n daq = self.devices[dev.properties['connection']['device']]\n conditions = {\n 'dev': dev,\n 'value': value\n }\n daq.driver.analog_output_dc(conditions)\n else:\n dev.apply_values(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update_cmdb_item('sys', 'name', u'ATM 容灾', 'tkt78f0ad3d7dcf', 'ci549abda9', 'ci467b8875', cfg='TimeZone=Asia/Shanghai')
def update_cmdb_item(cfg_type, pk_name, pk_value, ticket_id, applicant, approve, **item):
    cfg_model = get_cfg_model_by_type(cfg_type)
    if cfg_model:
        kwargs = {pk_name: pk_value}
        instance = cfg_model.objects.get(**kwargs)
        if instance:
            ins, err = cfg_model.objects.update(instance.id, ticket_id, applicant, approve, **item)
            return err is None
    return False
[ "def bala(update, context):\n update.message.reply_text(\"\"\"BALAKUMAR-191MC110\n MOBILE-8903220635\"\"\")", "def catalog_update_request(table_name: str, stac_item_key: str):\n\n get_client(\"dynamodb\").put_item(\n TableName=table_name,\n Item={\n \"stacitem\": {\"S\": stac_item_key},\n \"datetime\": {\"S\": str(datetime.datetime.now())},\n },\n )", "def update_command():\n AppbookstoredbBACKEND.update_data(selected_tuple[0],title_text.get(),author_text.get(),year_text.get(),isbn_text.get())", "def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))", "def setName(kb,name):\n\tassert type(name) is str\n\tassertKeyBinder(kb)\n\tkb[\"name\"] = name\n\treturn 0", "def update_cm_contract_for_masking(**kwargs):\n query = str()\n bind = tuple()\n db = kwargs.get('db')\n try:\n query = \"\"\"\n UPDATE\n CM_CONTRACT\n SET\n TEL_NO = %s,\n UPDATED_DTM = NOW()\n WHERE\n CONTRACT_NO = %s\n \"\"\"\n bind = (\n kwargs.get('cust_tel_no'),\n kwargs.get('contract_no'),\n )\n db.check_alive()\n db.cursor.execute(query, bind)\n except Exception:\n print(query)\n print(bind)\n exc_info = traceback.format_exc()\n db.conn.rollback()\n raise Exception(exc_info)", "def update_anime_info(name, new):\n db = opendb()\n anime = tinydb.Query()\n info = db.get(anime.name == name)\n try:\n print('\\nUpdating {}:'.format(name))\n except UnicodeEncodeError:\n print('\\nUpdating {}:'.format(name.encode('gbk', 'ignore')))\n print('Unicode Encode Error raised')\n for key in new:\n if key.startswith('new_'):\n new_key = key[4:]\n info[new_key] = new[key]\n print('{} is replaced with {}'.format(new_key, new[key]))\n db.update(info, anime.name == name)\n db.close()", "def update_biblio_item(id, PUT_data, conn, c):\n try:\n c.execute(\n \"\"\"UPDATE {0} \n SET abbreviation = ?, \n description = ?,\n added_by = ?,\n project_type = ?\n WHERE id = ?;\"\"\".format(TABLE_NAME), (PUT_data['abbreviation'],\n PUT_data['bibtex_json'],\n PUT_data['added_by'],\n PUT_data['project_type'],\n id))\n conn.commit()\n except sqlite3.Error as e:\n conn.rollback()\n raise e", "def updatecdb(self,info):\n\t\tself.lCoolDB.clear()\n\t\tfor cinfo in info:\n\t\t\tself.lCoolDB.addItem(cinfo)", "def update_cust_info_for_masking(**kwargs):\n query = str()\n bind = tuple()\n db = kwargs.get('db')\n try:\n query = \"\"\"\n UPDATE\n CUST_INFO\n SET\n CUST_TEL_NO = %s,\n CUST_NM = %s\n WHERE\n CUST_ID = %s\n \"\"\"\n bind = (\n kwargs.get('tel_no'),\n kwargs.get('cust_nm'),\n kwargs.get('cust_id'),\n )\n db.check_alive()\n db.cursor.execute(query, bind)\n except Exception:\n print(query)\n print(bind)\n exc_info = traceback.format_exc()\n db.conn.rollback()\n raise Exception(exc_info)", "def updateOEMInfo(samdb, rootdn):\n res = samdb.search(expression=\"(objectClass=*)\", base=rootdn,\n scope=SCOPE_BASE, attrs=[\"dn\", \"oEMInformation\"])\n if len(res) > 0:\n if res[0].get(\"oEMInformation\"):\n info = str(res[0][\"oEMInformation\"])\n else:\n info = \"\"\n info = \"%s, upgrade to %s\" % (info, version)\n delta = ldb.Message()\n delta.dn = ldb.Dn(samdb, str(res[0][\"dn\"]))\n delta[\"oEMInformation\"] = ldb.MessageElement(info, ldb.FLAG_MOD_REPLACE,\n \"oEMInformation\" )\n samdb.modify(delta)", "def pytest_collection_modifyitems(config,items):\n for item in items:\n item.name = item.name.encode(\"utf-8\").decode(\"unicode_escape\")\n item._nodeid = item.nodeid.encode(\"utf-8\").decode(\"unicode_escape\")+\" : \"+eval(config.getoption(\"--cmdopt\"))[\"deviceName\"]", "def commandModifyVm(self, name):\n\t\t# 
memory and private nic settings\n\t\ttemplate = \"%s modifyvm %s\" % (self.cmd, name) \\\n\t\t\t\t+ \" --memory %s --vram %s\" % (self.VM[\"mem_base\"], self.VM[\"mem_vram\"]) \\\n\t\t\t\t+ \" --nic1 %s --nictype1 %s --intnet1 %s\" % (self.VM[\"private_nic\"], self.VM[\"private_nictype\"], self.VM[\"vm_network\"])\n\t\t# add public nic for frontend\n\t\tif self.VM.has_key(\"public_nic\"):\n\t\t\ttemplate += \" --nic2 %s --nictype2 %s\" % (self.VM[\"public_nic\"], self.VM[\"public_nictype\"])\n\n\t\t# add audio\n\t\tif self.VM.has_key(\"audio\"):\n\t\t\ttemplate += \" --audio %s\" % self.VM[\"audio\"]\n\n\t\t# add mouse\n\t\tif self.VM.has_key(\"mouse\"):\n\t\t\ttemplate += \" --mouse %s\" % self.VM[\"mouse\"]\n\n\t\t# add boot order\n\t\ttemplate += \" %s\" % self.VM[\"boot_order\"] \n\n\t\tself.logger.info(template)\n\t\tos.system(template)\n\n\t\tself.commandModifyHotPlug(name)\n\t\tself.commandModifyCpu(name)", "def set_name(net_id, name):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET name='\"+name+\"' WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()", "def update_instrument(row):\n import demat_config\n import demat_swift_mt598\n\n isin, amount, mminstype, wtaxflag, soracc, issbpid, shortcode = row[1], row[2], row[3], row[4], row[5], row[6], row[7]\n wtaxflag = (wtaxflag== 'Yes') and 'Yes' or 'No'\n ins = acm.FInstrument.Select01(\"isin = %s\" % (isin),\"\")\n sc = acm.FStateChart[MMSS_ISIN_REQUEST_STATE_CHART_NAME]\n\n if not ins is None:\n print 'Instrument:', ins.Name()\n bp = acm.FBusinessProcess.Select01(\"subject_seqnbr = %d \" % (ins.Oid()) , \"\")\n if bp == None:\n bp = acm.BusinessProcess.InitializeProcess(ins, sc)\n bp.ForceToState('Active', str(amount))\n try:\n bp.Commit()\n #print 'Instrument [%s] inserted into Business process ' % (ins.Name())\n print 'Commit 1 done'\n except Exception, e:\n 'Commit 1 failed', e\n create_add_info(ins, 'Demat_Instrument', 'Yes')\n \"\"\"\n Get the pre settle trade\n \"\"\"\n ps_trade = get_first_trade(ins)\n if not ps_trade is None:\n create_add_info(ps_trade, 'MM_DEMAT_PRE_SETT', 'Yes')\n\n create_add_info(ins, 'MM_MMInstype',mminstype)\n create_add_info(ins, 'Demat_MinTrdDeno',0.01)\n create_add_info(ins, 'Demat_WthhldTax',wtaxflag)\n create_add_info(ins, 'Demat_Ins_SOR_Acc',soracc)\n create_add_info(ins, 'Demat_Issuer_BPID',issbpid)\n\n try:\n ins.Commit()\n #print 'update Instrument: Instrument [%s] updated ' % (ins.Name())\n print 'Commit 2 done'\n except Exception, e:\n #print 'update Instrument: Could not coomit instrument [%s]' % (ins.Name()), e\n 'Commit 2 failed', e\n\n print 'Demat_Issuer_BPID', ins.AdditionalInfo().Demat_Issuer_BPID()\n \"\"\"\n helper = demat_swift_mt598.Demat_MT598_Helper(ins)\n short_code = helper.IdentificationSecurities()\n \"\"\"\n create_add_info(ins, 'Demat_IsinShortDesc',shortcode)\n try:\n ins.Commit()\n #print 'update Instrument: Instrument [%s] updated ' % (ins.Name())\n print 'Commit 3 done'\n except Exception, e:\n #print 'update Instrument: Could not commit instrument [%s]' % (ins.Name()), e\n print 'Commit 3 failed', e", "def update_by_name(name):\n pass", "def bucketlist_item_update():\n pass", "def test013_Update_account(self):\n self.log.info(\" Update account [C1] name, should succeed.\")\n data,response= self.api.cloudbroker.account.update(self.response.json())\n self.assertEqual(response.status_code, 200) \n\n self.log.info(\"Check that account name updated, should succeed.\")\n response = 
self.api.cloudapi.accounts.get(self.response.json())\n self.assertEqual(data[\"name\"], response.json()[\"name\"])", "def changeSysdateCommand(self, sysdate:datetime.date):\n commandLine = \"date -s %s\" % sysdate.strftime(\"%Y-%m-%d\")\n return self.submitCommand(commandLine)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
I have some docs
def docs():
[ "def show_doc(self):\n pass", "def docs():\n return render_template(\"docs.html\")", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def documento():\r\n\tpass", "def printdoc():", "def print_docs(self, docs: DocList) -> None:\n for doc in docs:\n print(f\"----[{doc['filename']}]----\")\n print(doc[\"content\"])", "def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")", "def documentation():\n doclist = ''\n\n doclist += f\"Json {version()}\"\n doclist += 'Help Documentation'\n doclist += '------------------'\n doclist += 'Docs will go here eventually.'\n doclist += ''\n doclist += ''\n doclist += ''\n doclist += ''\n\n docs = '\\n'.join(doclist)\n return docs", "def getDoc(self):\r\n return self.__doc__", "def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()", "def api_docs():\n return render_template('api_docs.html')", "def merge_docs(self):", "def documents(self):\r\n return doc.Documents(self)", "def build_document(self):\n pass", "def test_client_document_list(self):\n pass", "def docs():\n\n if not os.path.exists('docs'):\n os.mkdir('docs')\n\n import csv\n rows = [['Name', 'Documentation', 'Types', 'Access']]\n for magicwordKey in spellbook.words:\n magicword = spellbook.words[magicwordKey]\n access = magicword.access if magicword.access else '0'\n doc = magicword.doc.strip() if magicword.doc else 'N\\A'\n rows.append([magicword.name, doc, magicword.types, access])\n\n with open('docs/serverdocs.csv', 'w') as docsFile:\n writer = csv.writer(docsFile, delimiter=',', lineterminator='\\n')\n writer.writerows(rows)\n\n return 'Command documentation generated'", "def test_Documentation(self):\n self.assertTrue(len(models.user.__doc__) > 0)", "def docs():\n\n if not os.path.exists('docs'):\n os.mkdir('docs')\n\n import csv\n rows = [['Name', 'Documentation', 'Types', 'Access']]\n for magicwordKey in spellbook.words:\n magicword = spellbook.words[magicwordKey]\n access = magicword.access if magicword.access else '0'\n doc = magicword.doc.strip() if magicword.doc else 'N\\A'\n rows.append([magicword.name, doc, magicword.types, access])\n\n with open('docs/clientdocs.csv', 'w') as docsFile:\n writer = csv.writer(docsFile, delimiter=',', lineterminator='\\n')\n writer.writerows(rows)", "def generate_document(self) -> dict:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads the INDICES file arquivoIndices.txt.
def lerArquivoIndices(indices):
    arq = open("arquivoIndices.txt", "r")
    linha = arq.readline()
    gravaIndices(indices, linha)
    while linha:
        linha = arq.readline()
        registro = gravaDados(indices, linha)

    arq.close()
    return
[ "def gravarArquivoIndices(indices):\n arq = open(\"arquivoIndices.txt\", \"w\")\n for i in indices.indices:\n linha = i.codigo + \",\" + str(i.indice) + \",\" + str(i.excluido) + \"\\n\"\n arq.write(linha)\n arq.close()\n return", "def _load_split_indices(self):\n split_file = self.SPLITS.get(self.split)\n indices_file = self._filepath(split_file)\n\n with open(indices_file) as txt_file:\n idx_data = [int(i) for i in txt_file.readline().split()]\n\n return idx_data", "def list_indices(self):", "def read_index_mols_from_file(filename: str, indices: set) -> oechem.OEMol:\n ifs = oechem.oemolistream(filename)\n index = 0\n for mol in ifs.GetOEMols():\n if index in indices:\n yield oechem.OEMol(mol)\n index += 1", "def CreateIdxFile(listidx,path):\n f = open(path,'w')\n for i in listidx:\n f.write('%s\\n'%i)\n f.close()", "def read_index_file(read_name=False):\n lines = read_file(INDEX_PATH).split('\\n')[:-1]\n if read_name:\n lines = [x.split()[-1] for x in lines]\n return lines", "def _readFileIndex(self, run, part):\n fname = 'indexes_p%02d_r%03d.txt' % (part, run)\n if fname not in self._indexCache:\n with open(\"%s/indexes/%s\" % (self._dataDir,fname), 'r') as fin:\n index = {}\n for line in fin:\n key,val = line.split(' -> ')\n index[int(key)] = int(val)\n self._indexCache[fname] = index\n return self._indexCache[fname]", "def get_header_indices(filepath):\n\theaders = get_header_list(filepath, sort=False)\n\treturn {h: i for i, h in enumerate(headers)}", "def read_index_file(filename: str):\n arr = []\n with open(filename, \"a+\") as file_data:\n file_data.seek(0)\n for line_str in file_data:\n line = line_str.strip()\n if line == \"\":\n continue\n line_id, record_offset = line.split(\"|\")\n arr.append((line_id, record_offset))\n return arr", "def create_indices(filename, lines):\n def find(key, condition):\n \"\"\"Find and return index from str-list.\"\"\"\n for i, line in enumerate(lines):\n if condition(line, key):\n return i\n return None\n\n guard = ('INCLUDE_GUARD_KEN3_' +\n filename.replace('/', '_').replace('.', '_').upper())\n\n return (\n # doxygen comment in header. [0] to [7]\n find('/**\\n', str.__eq__),\n find(' * @file ken3/{0}\\n'.format(filename), str.__eq__),\n find(' * @brief ', str.startswith),\n find(' * @author toda\\n', str.__eq__),\n find(' * @date ', str.startswith),\n find(' * @version ', str.startswith),\n find(' * @remark ', str.startswith),\n find(' */\\n', str.__eq__),\n # first blank line. [8]\n find('\\n', str.__eq__),\n # start include guard (only in .hpp files). [9] and [10]\n find('#ifndef {0}\\n'.format(guard), str.__eq__),\n find('#define {0}\\n'.format(guard), str.__eq__),\n # include macro (optional). [11]\n find('#include ', str.startswith),\n # start and end namespace ken3. [12] and [13]\n find('namespace ken3 {\\n', str.__eq__),\n find('} // namespace ken3 {\\n', str.__eq__),\n # end include guard (only in .hpp files). 
[14]\n find('#endif // #ifndef {0}\\n'.format(guard), str.__eq__),\n )", "def indices(self):", "def get_indices(self, name):\n name = name.upper()\n indices = []\n for i, (_, _, fname) in enumerate(self._field_pos):\n if fname == name:\n indices.append(i)\n return indices", "def read_index_data(data_path):\n index_keywords = []\n with open(data_path) as data:\n for line in data:\n index_keywords.append(line.rstrip())\n return index_keywords", "def _get_band_indexes(indexes, input_file):\n if indexes:\n if isinstance(indexes, list):\n return indexes\n else:\n return [indexes]\n else:\n with rasterio.open(input_file, \"r\") as src:\n return src.indexes", "def get_robot_indices(fname):\n D = {}\n f = open(fname,'rU')\n lines = ' '\n while lines:\n lines = f.readline()\n mat = re.match(\"(.*)(group)(.*)(ns)(.*)(\\\")(.*)(\\\")\",lines)\n if mat:\n ns = mat.groups()[-2]\n mat = None\n while not mat:\n l = f.readline()\n if not l:\n rospy.logerr(\"Failed to get all indices!\")\n sys.exit(1)\n mat = re.match(\n \"(.*)(robot_index)(.*)(value)(.*)(\\\")(.*)(\\\")\", l)\n val = mat.groups()[-2]\n if val.rfind('arg') == -1:\n num = int(val)\n else:\n rospy.logwarn(\"Could not determine index for namespace = %s\"%ns)\n num = -1\n D[ns] = num\n return D", "def _load_image_set_index(self):\n with open(self.img_set) as f:\n image_index = [x.strip() for x in f.readlines()]\n\n return image_index", "def get_rep_mol_indexes():\n f = open(FILE_WITH_REP_MOL_IDXS, \"r\")\n rd = csv.reader(f)\n mols = rd.next()\n f.close()\n mol_idxs = [int(i) - 1 for i in mols]\n os.unlink(FILE_WITH_REP_MOL_IDXS)\n return mol_idxs", "def test02_readIndices1(self):\n\n icol = self.icol\n indicescol = np.argsort(icol[:]).astype('uint64')\n indicescol2 = icol.index.read_indices()\n if common.verbose:\n print(\"Original indices column:\", indicescol)\n print(\"The values from the index:\", indicescol2)\n self.assertTrue(common.allequal(indicescol, indicescol2))", "def get_index(self):\n inverted_index = {}\n\n index_file = open(self.output_index)\n index_file.readline()\n\n for line in index_file:\n line = line.split()\n inverted_index[line[0]] = sorted(map(int, (line[1:])))\n\n return inverted_index" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes objects in text format to the Indices file.
def gravarArquivoIndices(indices):
    arq = open("arquivoIndices.txt", "w")
    for i in indices.indices:
        linha = i.codigo + "," + str(i.indice) + "," + str(i.excluido) + "\n"
        arq.write(linha)
    arq.close()
    return
[ "def __bert_text_to_index(self, file_path: str):\r\n data_ids = []\r\n data_types = []\r\n label_ids = []\r\n data_masks = []\r\n with open(file_path, 'r',encoding='UTF-8') as f:\r\n line_data_ids = []\r\n line_data_types = []\r\n line_label = []\r\n line_mask = []\r\n for line in f:\r\n if line != '\\n':\r\n w, t = line.split()\r\n # bert 需要输入index和types 由于是单语句模型,所以type都为0\r\n w_index = self.w2i.get(w, self.unk_index)\r\n t_index = self.tag2index.get(t, 0)\r\n line_data_ids.append(w_index) # index\r\n line_data_types.append(0) # types\r\n line_label.append(t_index) # label index\r\n line_mask.append(0) # we don't mask\r\n else:\r\n # 处理填充开始和结尾 bert 输入语句每个开始需要填充[CLS] 结束[SEP]\r\n max_len_buff = self.max_len-2\r\n if len(line_data_ids) > max_len_buff: # 先进行截断\r\n line_data_ids = line_data_ids[:max_len_buff]\r\n line_data_types = line_data_types[:max_len_buff]\r\n line_label = line_label[:max_len_buff]\r\n line_mask = line_mask[:max_len_buff]\r\n line_data_ids = [self.cls_index] + line_data_ids + [self.sep_index]\r\n line_data_types = [0] + line_data_types + [0]\r\n line_label = [0] + line_label + [0]\r\n line_mask = [0] + line_mask + [0]\r\n\r\n # padding\r\n if len(line_data_ids) < self.max_len: # 填充到最大长度\r\n pad_num = self.max_len - len(line_data_ids)\r\n line_data_ids = [self.pad_index]*pad_num + line_data_ids\r\n line_data_types = [0] * pad_num + line_data_types\r\n line_label = [0] * pad_num + line_label\r\n line_mask = [0] * pad_num + line_mask\r\n data_ids.append(np.array(line_data_ids))\r\n data_types.append(np.array(line_data_types))\r\n label_ids.append(np.array(line_label))\r\n data_masks.append(np.array(line_mask))\r\n line_data_ids = []\r\n line_data_types = []\r\n line_label = []\r\n line_mask = []\r\n print(\"data_ids shape:\"+str(np.array(data_ids).shape))\r\n print(\"data_types shape:\"+str(np.array(data_types).shape))\r\n print(\"data_masks shape:\"+str(np.array(data_masks).shape))\r\n return [np.array(data_ids), np.array(data_types), np.array(data_masks)], np.array(label_ids)", "def lerArquivoIndices(indices):\n arq = open(\"arquivoIndices.txt\", \"r\")\n linha = arq.readline()\n gravaIndices(indices, linha)\n while linha:\n linha = arq.readline()\n registro = gravaDados(indices, linha)\n\n arq.close()\n return", "def obj_index(self) -> str:\n return str(self._data[\"index\"])", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def indexer(self):\n index_file = open(\"../index_files/index.txt\", \"w+\")\n inv_index_file = open(\"../index_files/index_inverse.txt\", \"w+\")\n index = dict()\n inv_index = dict()\n df = dict()\n norm_index = dict()\n words = set() # vocabulary\n stemmer = tr.PorterStemmer()\n for (i, d) in self.docs.items():\n index[i] = stemmer.getTextRepresentation(d.get_text()) #counter(d.get_text())\n norm_index[i] = {key: value / sum(index[i].values()) for (key, value) in index[i].items()}\n words = words.union(set(index[i].keys()))\n index_file.write(\"{'\" + str(i) + \"': \" + str(index[i]) + \"}\\n\")\n for word in index[i]:\n if word not in inv_index.keys():\n inv_index[word] = {}\n inv_index[word][i] = index[i][word]\n df[word] = 1\n else:\n inv_index[word][i] = index[i][word]\n df[word] += 1\n\n for word in df.keys():\n inv_index_file.write(\"{'\" + word + \"': \" + str(inv_index[word]) + \"}\\n\")\n inv_index_n = {w: {d.get_id(): norm_index[d.get_id()][w] for d in self.docs.values() if w in norm_index[d.get_id()]} for w in words}\n self.index = index\n self.inv_index 
= inv_index\n self.index_n = norm_index\n self.inv_index_n = inv_index_n\n self.df = df\n index_file.close()\n inv_index_file.close()\n print(\"L'indexation a été effectuée avec succès ! taille du corpus : {}\".format(len(self.index)))", "def read_index(self):\n temp_index_data = np.genfromtxt(\n self.file_name, skip_header=MesaProfileIndex.index_start_line - 1,\n dtype=None)\n self.model_number_string = MesaProfileIndex.index_names[0]\n self.profile_number_string = MesaProfileIndex.index_names[-1]\n if temp_index_data.ndim > 1:\n self.index_data = temp_index_data[np.argsort(temp_index_data[:, 0])]\n else:\n self.index_data = temp_index_data\n self.index_data = dict(zip(MesaProfileIndex.index_names,\n temp_index_data.T))\n self.profile_numbers = self.data(self.profile_number_string)\n self.model_numbers = self.data(self.model_number_string)", "def color_obj():\n path = \"E:/3ds_intern/meshcnn/datasets/coseg_aliens/labels_aliens/5.obj\"\n path2 = \"E:/3ds_intern/meshcnn/datasets/coseg_aliens/labels_aliens/5.eseg\"\n opt = TestOptions().parse()\n mesh = Mesh(file=path, opt=opt, hold_history=False, export_folder=\"E:/3ds_intern/meshcnn/datasets/coseg_aliens/labels_aliens\")\n seg = read_seg(path2).astype(int)\n mesh.export_segments(seg)", "def convertInvoiceToObj(index, path):\n\n with open(path, 'r') as f:\n l = list(csv.reader(f))\n myDict = {i[0]: [x for x in i[1:]] for i in zip(*l)}\n invoice = Invoice(myDict[\"user\"][index], myDict[\"toolTitle\"][index], myDict[\"hirePrice\"][index],\n myDict[\"riderPrice\"][index], myDict[\"fine\"][index])\n return invoice", "def _get_objects(self,label_fh):\n objects = []\n for line in label_fh.readlines():\n try:\n object = {}\n line = line.replace(u'\\ufeff', '')\n if line != '':\n x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n p1 = (x1, y1)\n p2 = (x2, y2)\n p3 = (x3, y3)\n p4 = (x4, y4)\n object['polygon'] = [p1,p2,p3,p4]\n objects.append(object)\n except:\n pass\n return objects", "def ReadIndex_text(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n idList = []\n v1 = array('B') # dbfile index\n v2 = array('L') # offset\n v3 = array('I') # block size\n apd1 = idList.append\n apd2 = v1.append\n apd3 = v2.append\n apd4 = v3.append\n indexFileHeaderText = []\n origdbname=\"\"\n origversion=\"\"\n origext=\"\"\n origprefix=\"\"\n try:\n\n hdl = mybase.ReadLineByBlock(indexfile)\n lines = hdl.readlines()\n while lines != None:\n for line in lines:\n if not line or line[0] == \"#\":\n continue\n strs = line.split()\n if strs[0] == \"DEF_DBNAME\":\n if len(strs)>=2:\n origdbname=strs[1]\n elif strs[0] == \"DEF_VERSION\":\n if len(strs)>=2:\n origversion=strs[1]\n elif strs[0] == \"DEF_EXTENSION\":\n if len(strs)>=2:\n origext=strs[1]\n elif strs[0] == \"DEF_PREFIX\":\n if len(strs)>=2:\n origprefix = strs[1]\n else:\n apd1(strs[0])\n apd2(int(strs[1]))\n apd3(int(strs[2]))\n apd4(int(strs[3]))\n lines = hdl.readlines()\n\n indexList.append(idList)\n indexList.append(v1)\n indexList.append(v2)\n indexList.append(v3)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n\n numRecord = len(idList)\n lastDBFileIndex = v1[numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n if isPrintWarning:\n if origversion == \"\":\n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. 
\"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0],indexfile,\n origversion, version), file=sys.stderr)\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def _format_as_index(indices):\n\n if not indices:\n return \"\"\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def format_genome():", "def OBJFormat( self ):\n keys = self.materials.keys()\n keys.sort()\n s = ''\n for key in keys:\n mat = self.materials[ key ]\n if ( mat.isAssigned ):\n s += mat.OBJFormat()\n return s", "def gen(input_filenames, c2p2c_output_filename, index_output_filename):\n c2p2c_out = open(c2p2c_output_filename, 'w')\n index_out = open(index_output_filename, 'w')\n\n for filename in input_filenames:\n f = open(filename)\n for l in f:\n l = l.decode(\"utf8\")\n tmp = l.split(\",\", 1)\n if len(tmp) == 2 :\n index, detail = tmp\n index = int(index)\n detail = json.loads(detail)\n # print index_out\n index_data = {u\"公司編號\":index}\n index_fields = [u\"公司名稱\", u\"代表人姓名\"]\n for index_field in index_fields:\n index_data[index_field] = detail.get(index_field)\n index_int_fields = [u\"資本總額(元)\", u\"實收資本額(元)\"]\n for index_int_field in index_int_fields:\n if detail.get(index_int_field):\n index_data[index_int_field] = int(detail.get(index_int_field).replace(\",\",\"\"))\n print >> index_out, json.dumps(index_data)\n # print c2p2c\n if detail.get(u\"董監事名單\") and len(detail.get(u\"董監事名單\")) > 0:\n for person_item in detail.get(u\"董監事名單\"):\n c2p2c_data = {}\n c2p2c_fields = [u'職稱', u'姓名']\n for c2p2c_field in c2p2c_fields:\n c2p2c_data[c2p2c_field] = person_item.get(c2p2c_field).strip()\n c2p2c_int_fields = [u'出資額']\n for c2p2c_int_field in c2p2c_int_fields:\n if person_item.get(c2p2c_int_field):\n c2p2c_data[c2p2c_int_field] = int(person_item.get(c2p2c_int_field).replace(\",\",\"\"))\n if person_item.get(u'所代表法人') and len(person_item.get(u'所代表法人'))>0:\n if person_item[u'所代表法人'][0] == 0: ## wrong index\n continue\n else:\n c2p2c_data[u'所代表法人'] = int(person_item[u'所代表法人'][0])\n c2p2c_data[u'被投資公司編號'] = index\n print json.dumps(c2p2c_data, ensure_ascii=False)\n print >> c2p2c_out, json.dumps(c2p2c_data)\n index_out.close()\n c2p2c_out.close()", "def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def encodeindex(self):\n if not self.format and len(self.flist) == 1 and list(self.flist[0].keys()) == ['s']:\n # Now we can and want to use the short format\n self.format = 0\n return msgpack.packb(self.index['f'][0]['s'])\n self.format = 1\n return msgpack.packb(self.index)", "def index_label_reader(index_list):\n index_dict = OrderedDict()\n for doc_id, doc_pos in index_list:\n doc_pos = doc_pos.split(\" \")\n temp_dict = []\n for i in doc_pos:\n # (global_instance_idx, token_idx, sense)\n pair = i.split(\":\")\n temp_dict.append((int(pair[0]), int(pair[1]), pair[2]))\n index_dict[doc_id] = temp_dict\n return index_dict", "def indirectobject(self, index, io):\n if self.indices != '':\n self.indices += ' '\n self.indices += '%d %d' % (index, len(self.ios))\n self.ios += io\n self.objects.append(index)", "def convertToObj(index):\n\n with open(strings.filePath_tool, 'r') as f:\n l = list(csv.reader(f))\n dict = {i[0]: [x for x in i[1:]] for i in zip(*l)}\n tool = 
Tool(dict[\"ID\"][index], dict[\"owner\"][index], dict[\"title\"][index],\n dict[\"description\"][index], dict[\"condition\"][index],\n dict[\"priceFullDay\"][index], dict[\"priceHalfDay\"][index],\n dict[\"riderCharge\"][index], dict[\"imgPath\"][index], dict[\"availability\"][index])\n return tool" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all row and column groups.
def get_regular_groups(self, grid, min=3): row_groups = self._get_row_groups(grid.grid, models.patterns.RowPattern, min) col_groups = self._get_row_groups(grid.grid.T, models.patterns.ColumnPattern, min) return row_groups + col_groups
[ "def genBlockGroups(self):\n \n try:\n \n byRow = byColumn = int(math.sqrt(self.groupSize))\n groupByRow = [] \n \n for row in range (self.rows):\n list = []\n for aux in range(self.columns/byColumn):\n list.append(range( aux*byColumn + row*self.columns, byColumn*(aux + 1) + row*self.columns))\n groupByRow.append(list) \n \n groupList = []\n \n for v_aux in range (self.rows/byRow):\n for aux in range (self.columns/byColumn):\n list = []\n for index in range(byRow): \n list.extend(groupByRow[v_aux*byRow+index][aux])\n groupList.append(list)\n return groupList\n except ValueError:\n raise ValueError(\"group size must be a perfect square\")", "def genVertGroups(self):\n list =[]\n for aux in range (self.columns):\n group_list = range ( 0 + aux, self.rows*self.columns, self.columns)\n for element in group_list:\n list.append(element)\n\n groupList =[]\n for aux in range(self.numFunctions):\n group = list[ 0 + aux*self.groupSize : self.groupSize*(aux + 1)]\n groupList.append(group)\n \n return groupList", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def _get_thrift_row_groups(\n cls,\n pf,\n filename,\n row_groups,\n ):\n\n real_row_groups = []\n for _, rg_global in row_groups:\n row_group = pf.row_groups[rg_global]\n columns = row_group.columns\n for c, col in enumerate(columns):\n if c:\n col.file_path = None\n md = col.meta_data\n md.key_value_metadata = None\n # NOTE: Fastparquet may need the null count in the\n # statistics, so we cannot just set statistics\n # to none. Set attributes separately:\n st = md.statistics\n if st:\n st.distinct_count = None\n st.max = None\n st.min = None\n st.max_value = None\n st.min_value = None\n md.encodings = None\n md.total_uncompressed_size = None\n md.encoding_stats = None\n row_group.columns = columns\n real_row_groups.append(row_group)\n return real_row_groups", "def _get_all_groups(hdf, scan_mode):\n groups = np.unique(list(hdf5_groups(hdf[scan_mode])) + [\"\"])\n return groups", "def row_group_limits():\r\n from pymatgen import Element, periodic_table\r\n \r\n # Get all available elements in periodic table.\r\n rs = [e.row for e in periodic_table.Element]\r\n gs = [e.group for e in periodic_table.Element]\r\n \r\n return (max(rs), max(gs))", "def _get_groups(X, y):\n if SK18:\n X, y = _indexable(X, y)\n return X, y, None", "def get_all_cells(self):\n\n cells = OrderedDict()\n\n if self._type == 'fill' or self._type == 'lattice':\n cells.update(self._fill.get_all_cells())\n\n return cells", "def _spanning_iterator(self):\n # TODO implement in Java and support not only Rows\n\n columns = set(str(c) for c in self.columns)\n\n def spanning_iterator(partition):\n def key_by(columns):\n for row in partition:\n k = Row(**{c: row.__getattr__(c) for c in columns})\n for c in columns:\n del row[c]\n\n yield (k, row)\n\n for g, l in groupby(key_by(columns), itemgetter(0)):\n yield g, list(_[1] for _ in l)\n\n return spanning_iterator", "def grid(cols, rows):\n for col in range(cols):\n for row in range(rows):\n yield (col, row)", "def iter_groups(self):\n\t\treturn iter(self._groups)", "def print_groups():", "def groupCells(self):\r\n for cellcol in self.cells:\r\n for c in cellcol:\r\n if (c and isinstance(c, cell.IntersectionCell) and (not c.block)):\r\n x = c.getX()\r\n y = c.getY()\r\n width = c.style.width\r\n height = c.style.height\r\n # The cell is on the left of the intersection\r\n if (self.getCell(x+1,y)) and self.getCell(x+1,y).isMiddle():\r\n 
block.IntersectionBlock(x, y-2, self.cells)\r\n #print \"inter\", x, y-2\r\n # The cell is on the top of the intersection\r\n elif (self.getCell(x,y+1)) and self.getCell(x,y+1).isMiddle():\r\n block.IntersectionBlock(x-1, y, self.cells)\r\n #print \"inter\", x-1, y\r\n # The cell is on the right of the intersection\r\n elif (self.getCell(x-1,y)) and self.getCell(x-1,y).isMiddle():\r\n block.IntersectionBlock(x-3, y-1, self.cells)\r\n #print \"inter\", x-3, y-1\r\n # The cell is on the bottom of the intersection\r\n elif (self.getCell(x,y-1)) and self.getCell(x, y-1).isMiddle():\r\n block.IntersectionBlock(x-2, y-3, self.cells)\r\n #print \"inter\", x, y-1\r", "def get_all_groups(self):\n return self.groups + ['all']", "def __iter__(self):\n for g, xs in self._groups.items():\n dtype = dt.Struct(self._item_fields)\n df = ta.Column(dtype).append(\n tuple(\n tuple(\n self._parent._data.child_at(\n self._parent._data.type().get_child_idx(f.name)\n )[x]\n for f in self._item_fields\n )\n for x in xs\n )\n )\n\n yield g, df", "def get_grid(self):\n\t\txvec = numpy.linspace(self.x_lower, self.x_upper, self.nx + 1)\n\t\tyvec = numpy.linspace(self.y_lower, self.y_upper, self.ny + 1)\n\t\t\n\t\t(xgrid, ygrid) = numpy.meshgrid(xvec, yvec)\n\t\t\n\t\treturn (xgrid, ygrid, self.data)", "def _iter_groups(self, df, y=None):\n groups = df.groupby(self.groupby).indices\n for key, sub_idx in groups.items():\n sub_df = df.iloc[sub_idx]\n if y is not None:\n # y is either a numpy array or a pd.Series so index accordingly\n sub_y = y.iloc[sub_idx] if type(y) is pd.Series else y[sub_idx]\n else:\n sub_y = None\n yield key, sub_df, sub_y", "def get_groups(self, *args):\n def getgroups(a, b=self.__num_groups - 1):\n return self.__groups[a:b + 1]\n return apply_freeform_two_args(getgroups, None, args)", "def genHorizGroups(self):\n groupList =[]\n for aux in range (self.numFunctions):\n group = range ( 0 + self.groupSize*aux, self.groupSize*(aux + 1) )\n groupList.append(group)\n return groupList" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete acl in a secret.
def delete_acls_for_secret(cls, secret, session=None): session = cls.get_session(session=session) for entity in secret.secret_acls: entity.delete(session=session)
[ "def delete_secret(self, secret_id: str, project_id: Optional[str] = None):", "def delete_acl(self, sg):\n self.security_group_driver.delete_acl(sg)", "def delete(self, secret_name):\n with self.__client as client:\n return self.__exec_cmd(client, Command.DELETE, secret_name)", "def delete_secret(secret_name):\n print('Removing existing secret {}'.format(secret_name))\n stdout, stderr, return_code = shakedown.run_dcos_command('security secrets delete {}'.format(secret_name))\n assert return_code == 0, \"Failed to remove existing secret\"", "def delete_secret(id, ctx=None):\n if ctx is None:\n ctx = context.current()\n key_manager.API().delete(ctx, id)", "def delete_barbican_secret(self, context, name):\n self._openstack.delete_barbican_secret(context=context, name=name)", "def delete_secret(self, requestor_id, secret_id):\n response = requests.delete(\n url=\"{base_url}{resource}/{secret_id}\".format(\n base_url=self.DELTA_URL,\n resource=self.RESOURCE_SECRETS,\n secret_id=secret_id),\n auth=self.signer(requestor_id))\n response.raise_for_status()", "def _DeleteAclRule(self, entry):\n \n self.cal_client.DeleteAclEntry(entry.GetEditLink().href)", "def delete_secret(self, secret):\n try:\n del self.Variable[secret] # type: ignore\n del self.Type[secret]\n del self._secrets[secret]\n except KeyError:\n pass\n else:\n self._changed = True", "def delete_secret_link(link_id):\n\n Secret_Link.objects.filter(link_id=link_id).delete()", "def delete_secret(self, account, bucket, secret_id=\"1\", **kwargs):\n resp_, body_ = self.kms_request(\n account,\n bucket,\n \"DELETE\",\n \"delete-secret\",\n params={\"secret_id\": secret_id},\n **kwargs\n )", "def DeleteAclSample():\n client = CreateClient()\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc)\n acl_entry = gdata.docs.data.AclEntry(\n scope=gdata.acl.data.AclScope(value='user@example.com', type='user'),\n role=gdata.acl.data.AclRole(value='reader'),\n )\n acl_entry = client.AddAclEntry(doc, acl_entry)\n client.DeleteAclEntry(acl_entry)", "def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)", "def delete(key, **kwargs):\n cluster_call(\n \"secret_delete\",\n key=key,\n **kwargs,\n confirm=f\"Delete secret {key}\",\n prefix=f\"Deleting secret {key}...\",\n postfix=\"deleted.\",\n )", "def test_delete_secret(self):\n pass", "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def test_delete_acl(self, env):\n # Create ACL Expressions\n self.suite_logger.debug(\"Create ACL Expressions\")\n expressions = [(1, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01'),\n (2, 'SrcMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:02:02:02')]\n env.switch[1].ui.create_acl(expressions=expressions)\n # Verify ACL Expression\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n # Verify first expression has been added\n expr_1 = {\"expressionId\": expressions[0][0],\n \"field\": expressions[0][1],\n \"mask\": expressions[0][2],\n \"data\": expressions[0][3]\n }\n assert expr_1 in expressions_table, \\\n \"Expression {0} was not added\".format(expressions[0])\n # Verify second expression has been added\n expr_2 = {\"expressionId\": expressions[1][0],\n \"field\": expressions[1][1],\n \"mask\": expressions[1][2],\n \"data\": expressions[1][3]\n }\n assert expr_2 in expressions_table,\\\n \"Expression {0} was not added\".format(expressions[1])\n # Delete Expression\n self.suite_logger.debug(\"Delete ACL Expression\")\n 
env.switch[1].ui.delete_acl(expression_ids=[(2, 'SrcMac'), ])\n # Verify Expression has been deleted\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n assert expr_2 not in expressions_table, \\\n \"Expression {0} was not deleted\".format(expressions[1])\n\n # Create ACL Actions\n self.suite_logger.debug(\"Create ACL Actions\")\n actions = [(1, 'Drop', ''),\n (2, 'Count', '')]\n env.switch[1].ui.create_acl(actions=actions)\n # Verify ACL Action\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n # Verify first action has been added\n act_1 = {\"actionId\": actions[0][0],\n \"action\": actions[0][1],\n \"param\": actions[0][2]\n }\n assert act_1 in actions_table, \"Action {0} was not added\".format(actions[0])\n # Verify second action has been added\n act_2 = {\"actionId\": actions[1][0],\n \"action\": actions[1][1],\n \"param\": actions[1][2]\n }\n assert act_2 in actions_table, \"Action {0} was not added\".format(actions[1])\n # Delete Action\n self.suite_logger.debug(\"Delete ACL Action\")\n env.switch[1].ui.delete_acl(action_ids=[(2, 'Count'), ])\n # Verify Action has been deleted\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n assert act_2 not in actions_table, \"Action {0} was not deleted\".format(actions[1])\n\n # Create ACL Rule\n self.suite_logger.debug(\"Create ACL Rule\")\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), ]\n env.switch[1].ui.create_acl(ports=[1, ], rules=rules)\n # Verify ACL Rule has been added\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n rule = {\"ruleId\": rules[0][0],\n \"expressionId\": rules[0][1],\n \"actionId\": rules[0][2],\n \"stage\": rules[0][3],\n \"enabled\": rules[0][4],\n \"priority\": rules[0][5]\n }\n assert rule in rules_table, \"Rule {0} was not added\".format(rules[0])\n # Delete Rule\n self.suite_logger.debug(\"Delete ACL Rule\")\n env.switch[1].ui.delete_acl(ports=[1, ], rule_ids=[1, ])\n # Verify Rule has been deleted\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n assert rule not in rules_table, \"Rule {0} was not deleted\".format(rules[0])", "def delete_web_acl(WebACLId=None, ChangeToken=None):\n pass", "def delete_acl_rule(self, sgr):\n self.security_group_driver.delete_acl_rule(sgr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates a torch model so that input minibatches are parallelized across the batch dimension to utilise multiple gpus. If model parallel is set to True and execution is in test mode, then model is partitioned to perform full volume inference. This assumes the model has been created, that the optimizer has not yet been created, and the model has not been adjusted twice. This method should not be called externally. Use instead adjust_model_for_gpus or adjust_mean_teacher_model_for_gpus
def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase, model_execution_mode: ModelExecutionMode) -> DeviceAwareModule: if config.use_gpu: model = model.cuda() logging.info("Adjusting the model to use mixed precision training.") # If model parallel is set to True, then partition the network across all available gpus. if config.use_model_parallel: devices = config.get_cuda_devices() assert devices is not None # for mypy model.partition_model(devices=devices) # type: ignore else: logging.info("Making no adjustments to the model because no GPU was found.") # Update model related config attributes (After Model Parallel Activated) config.adjust_after_mixed_precision_and_parallel(model) # DataParallel enables running the model with multiple gpus by splitting samples across GPUs # If the model is used in training mode, data parallel is activated by default. # Similarly, if model parallel is not activated, data parallel is used as a backup option use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel) if config.use_gpu and use_data_parallel: logging.info("Adjusting the model to use DataParallel") # Move all layers to the default GPU before activating data parallel. # This needs to happen even though we put the model to the GPU at the beginning of the method, # but we may have spread it across multiple GPUs later. model = model.cuda() model = DataParallelModel(model, device_ids=config.get_cuda_devices()) return model
[ "def initialize_model_parallel(model_parallel_size_):\n if torch.distributed.get_rank() == 0:\n print('> initializing model parallel with size {}'.format(\n model_parallel_size_))\n # Get world size and rank. Ensure some consistencies.\n assert torch.distributed.is_initialized()\n world_size = torch.distributed.get_world_size()\n model_parallel_size = min(model_parallel_size_, world_size)\n ensure_divisibility(world_size, model_parallel_size)\n rank = torch.distributed.get_rank()\n\n # Build the data parallel groups.\n global _DATA_PARALLEL_GROUP\n assert _DATA_PARALLEL_GROUP is None, \\\n 'data parallel group is already initialized'\n for i in range(model_parallel_size):\n ranks = range(i, world_size, model_parallel_size)\n group = torch.distributed.new_group(ranks)\n if i == (rank % model_parallel_size):\n _DATA_PARALLEL_GROUP = group\n\n # Build the model parallel groups.\n global _MODEL_PARALLEL_GROUP\n assert _MODEL_PARALLEL_GROUP is None, \\\n 'model parallel group is already initialized'\n for i in range(world_size // model_parallel_size):\n ranks = range(i * model_parallel_size,\n (i + 1) * model_parallel_size)\n group = torch.distributed.new_group(ranks)\n if i == (rank // model_parallel_size):\n _MODEL_PARALLEL_GROUP = group", "def data_parallel(self, model):\r\n logger.info('<!_!> ==> Data Parallel')\r\n gpus = [int(i) for i in self.config.SYSTEM.gpus]\r\n model_parallel = torch.nn.DataParallel(model.cuda(), device_ids=gpus)\r\n return model_parallel", "def model_to_gpu(self):\n # Multi-GPU\n if torch.cuda.device_count() >= 1:\n gpu_count = torch.cuda.device_count()\n for mname in self.model:\n self.model[mname] = nn.DataParallel(self.model[mname])\n else:\n gpu_count = 0\n self.model.to(self.device) # dtype=model_type", "def init_model_parallel(self, global_rank: int, world_size: int) -> None:\n app_state = AppState()\n\n # we initialize megatron-lm model parallel and data parallel groups\n # after initializing DDP with PTL.\n if app_state.model_parallel_size is not None:\n # destroy groups in case they have already been created\n # this happens with multiple calls to trainer.test for example\n parallel_state.destroy_model_parallel()\n if torch.distributed.is_initialized():\n parallel_state.initialize_model_parallel(\n tensor_model_parallel_size=app_state.tensor_model_parallel_size,\n pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,\n virtual_pipeline_model_parallel_size=app_state.virtual_pipeline_model_parallel_size,\n pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,\n use_fp8=app_state.use_fp8,\n )\n\n # assert that fake tp and pp rank match after model parallel init\n assert app_state.tensor_model_parallel_rank == parallel_state.get_tensor_model_parallel_rank()\n assert app_state.pipeline_model_parallel_rank == parallel_state.get_pipeline_model_parallel_rank()\n\n app_state.tensor_model_parallel_group = parallel_state.get_tensor_model_parallel_group()\n app_state.data_parallel_group = parallel_state.get_data_parallel_group()\n app_state.data_parallel_rank = parallel_state.get_data_parallel_rank()\n app_state.data_parallel_size = parallel_state.get_data_parallel_world_size()\n app_state.pipeline_model_parallel_group = parallel_state.get_pipeline_model_parallel_group()\n\n # create MPI process group for UCX-based communication APIs\n if app_state.init_mpi_proc_group:\n torch.distributed.new_group(backend='mpi')", "def mount(xpu, model):\n if isinstance(model, (torch.nn.DataParallel, DataSerial)):\n # raise ValueError('Model 
is already in parallel mode.')\n model = model.module\n model = xpu.move(model)\n if xpu.devices:\n model = torch.nn.DataParallel(model, device_ids=xpu.devices,\n output_device=xpu.main_device)\n else:\n model = DataSerial(model)\n return model", "def make_parallel(model, gpu_count):\n def get_slice(data, idx, parts):\n shape = tf.shape(data)\n size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)\n stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)\n start = stride * idx\n return tf.slice(data, start, size)\n\n outputs_all = []\n for i in range(len(model.outputs)):\n outputs_all.append([])\n\n # Place a copy of the model on each GPU, each getting a slice of the batch\n for i in range(gpu_count):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i) as scope:\n\n inputs = []\n # Slice each input into a piece for processing on this GPU\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)\n inputs.append(slice_n)\n\n outputs = model(inputs)\n\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save all the outputs for merging back together later\n for l in range(len(outputs)):\n outputs_all[l].append(outputs[l])\n\n # merge outputs on CPU\n with tf.device('/cpu:0'):\n merged = []\n for outputs in outputs_all:\n merged.append(concatenate(outputs, axis=0))\n\n return Model(input=model.inputs, output=merged)", "def _upload_model(model, dev):\n if isinstance(dev, list):\n if len(dev) == 0:\n return model.cpu()\n elif len(dev) == 1:\n return model.cuda(dev[0])\n else:\n return torch.nn.DataParallel(model, device_ids=dev).cuda(dev[0])\n else:\n return model.to(dev)", "def set_model_parallel_world_size(world_size):\n raise RuntimeError('Model parallelism is managed by DeepSpeed. '\n 'Setting the model parallel size is prohibited')", "def sync_moe_model_param(model: nn.Module):\r\n if is_using_ddp():\r\n\r\n param_dict = get_moe_epsize_param_dict(model)\r\n\r\n # synchronize the parameters whose dp_group is the whole world\r\n if 1 in param_dict:\r\n src_rank = gpc.get_ranks_in_group(ParallelMode.DATA)[0]\r\n for param in param_dict[1]:\r\n dist.broadcast(param, src=src_rank, group=gpc.get_group(ParallelMode.DATA))\r\n\r\n for ep_size in param_dict:\r\n # When ep_size = world_size, communication is not needed\r\n if ep_size != 1 and ep_size != MOE_CONTEXT.world_size:\r\n src_rank = dist.get_rank(MOE_CONTEXT.parallel_info_dict[ep_size].ep_group)\r\n for param in param_dict[ep_size]:\r\n dist.broadcast(param, src=src_rank, group=param.moe_info.dp_group)", "def _distribute_model(self):\n self.feature_extractor.cuda(1)\n self.feature_adapter.cuda(1)", "def partition_data_parallel(\n graph: GraphModule,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer],\n params_buffers: Dict[str, torch.Tensor],\n named_states: Dict[str, Any],\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n mesh: DeviceMesh,\n parallel_style: DataParallelStyle,\n input_batch_dim: int,\n) -> GraphModule:\n num_params_buffers = len(params_buffers)\n flattened_states = pytree.tree_flatten(named_states)[0]\n num_states = len(flattened_states)\n\n changed = graph.graph.eliminate_dead_code()\n if changed:\n graph.recompile()\n\n # 1. First build up data parallel strategies for the whole graph\n strategy_map = build_data_parallel_strategies(\n graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim\n )\n\n # 2. 
Next we mark the data parallel strategy for each node base on\n # the parallel_style\n mark_data_parallel_shardings(\n graph,\n num_parameters=num_params_buffers,\n num_states=num_states,\n dp_strategy_map=strategy_map,\n parallel_mode=parallel_style,\n )\n\n # 3. Partition the single machine graph to the distribute graph\n partitioned_graph = partitioner(graph)\n\n # preserve node types for the expanded graph\n for node in partitioned_graph.graph.nodes:\n if node in strategy_map:\n node_strategy = strategy_map[node]\n if isinstance(node_strategy, DataParallelStrategy):\n node.meta[\"node_type\"] = node_strategy.node_type\n elif isinstance(node_strategy, TupleStrategy):\n node.meta[\"node_type\"] = NodeType.NON_TENSOR\n else:\n raise RuntimeError(f\"Unknown node strategy {node_strategy}\")\n else:\n # if the nodes are expanded nodes (collectives), we mark them\n # the same type as the input node.\n input_node = node.all_input_nodes[0]\n node.meta[\"node_type\"] = input_node.meta[\"node_type\"]\n\n # 4. Last, inplace partition the weights and optim states to\n # DTensors base on the parallel style\n accessor = NamedMemberAccessor(model)\n for param_key, param in params_buffers.items():\n placement: Placement = Replicate()\n if parallel_style == DataParallelStyle.FULLY_SHARD:\n placement = Shard(0)\n elif parallel_style != DataParallelStyle.REPLICATE:\n raise RuntimeError(f\"parallel style {parallel_style} not supported yet\")\n\n dtensor_param = distribute_tensor(param, mesh, [placement])\n # update re-parameterized module param dict and optim states dict to DTensor\n params_buffers[param_key] = dtensor_param.to_local()\n # update module parameters to DTensor\n accessor.set_tensor(param_key, dtensor_param)\n\n # update the optimizer state key and values to DTensor\n if optimizer is not None and param in optimizer.state:\n param_states = named_states[param_key]\n param_dtensor_states = {}\n for state_key, state_val in param_states.items():\n if isinstance(state_val, torch.Tensor) and state_val.ndim > 0:\n # shard/replicate non-scalar tensors, for scalar tensor, we\n # don't do anything\n dtensor_state = distribute_tensor(state_val, mesh, [placement])\n param_dtensor_states[state_key] = dtensor_state\n param_states[state_key] = dtensor_state.to_local()\n else:\n param_dtensor_states[state_key] = state_val\n\n optimizer.state.pop(param) # type: ignore[call-overload]\n optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index]\n\n return partitioned_graph", "def configure_ddp(self):\n\n if (hasattr(self.model, 'megatron_amp_o2') and self.model.megatron_amp_o2) or (\n hasattr(self.model, 'with_distributed_adam') and self.model.with_distributed_adam\n ):\n # do not use DDP if using megatron amp O2 or distributed optimizer\n self._model = _LightningModuleWrapperBase(self.model)\n else:\n app_state = AppState()\n\n if app_state.model_parallel_size is not None:\n\n logging.info(f\"Configuring DDP for model parallelism.\")\n\n # With model parallelism, multiple GPUs form a large \"logical GPU\"\n # this means that data parallel groups span multiple GPUs\n # and are non-trivial\n # TODO: for megatron-lm self.model is a list\n # Removing self.pre_configure_ddp() as DDP's 'find_unused_parameters' now defaults\n # to False in PTL 2.0 and hence pre_configure_ddp() is removed in ddp.py\n # self.pre_configure_ddp()\n # device_ids = self.determine_ddp_device_ids()\n self._model = DistributedDataParallel(\n _LightningModuleWrapperBase(self.model),\n 
process_group=parallel_state.get_data_parallel_group(),\n **self._ddp_kwargs,\n )\n\n if self.no_ddp_communication_hook:\n # When using custom gradient accumulation and allreduce, disable\n # DDP communication hook that works on the gradient bucket.\n # Instead, use the custom gradient function and communication hook,\n # which is defined in the master optimizer wrapper.\n self._model.require_backward_grad_sync = False\n self._model.register_comm_hook(None, noop_hook)\n\n else:\n super().configure_ddp()", "def test_smmodelparallel(sagemaker_session, instance_type, ecr_image, tmpdir, framework_version, test_script, num_processes):\n instance_type = \"ml.p3.16xlarge\"\n _, image_framework_version = get_framework_and_version_from_tag(ecr_image)\n image_cuda_version = get_cuda_version_from_tag(ecr_image)\n if Version(image_framework_version) < Version(\"2.3.1\") or image_cuda_version != \"cu110\":\n pytest.skip(\"Model Parallelism only supports CUDA 11, and on TensorFlow 2.3.1 or higher\")\n smmodelparallel_path = os.path.join(RESOURCE_PATH, 'smmodelparallel')\n estimator = TensorFlow(entry_point=test_script,\n role='SageMakerRole',\n instance_count=1,\n instance_type=instance_type,\n source_dir=smmodelparallel_path,\n distributions={\n \"mpi\": {\n \"enabled\": True,\n \"processes_per_host\": num_processes,\n \"custom_mpi_options\": \"-verbose --mca orte_base_help_aggregate 0\",\n }\n },\n sagemaker_session=sagemaker_session,\n image_uri=ecr_image,\n framework_version=framework_version,\n py_version='py3',\n base_job_name='smp-test1')\n estimator.fit()", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def send_model_to_device(model, num_gpu: int = 1, device: str = \"cuda\"):\n\n device = torch.device(device)\n\n if num_gpu > 1:\n model = torch.nn.DataParallel(model, device_ids=list(range(num_gpu)))\n else:\n model = model\n\n try:\n model = model.to(device=device)\n except AttributeError:\n print(\"Warning: to method not found, using default object\")\n return model, device\n\n return model, device", "def set_model_parallel_world_size(world_size):\n global _MPU_WORLD_SIZE\n _MPU_WORLD_SIZE = world_size", "def test_t5_model_parallel(self):\n with tempdir() as tmpdir:\n # test finetuning\n mf = os.path.join(tmpdir, 'model')\n valid, test = testing_utils.train_model(\n dict(\n task='integration_tests:reverse',\n model='hugging_face/t5',\n optimizer='adam',\n learningrate=3e-5,\n batchsize=1,\n num_epochs=0.1,\n short_final_eval=True,\n validation_max_exs=12,\n model_file=mf,\n t5_model_parallel=True,\n t5_model_arch='t5-small',\n )\n )", "def init_model_parallel(worker_name, rpc_backend=RpcBackend.PROCESS_GROUP):\n _init_rpc(worker_name, rpc_backend)\n from .rpc import _agent\n autograd._init(_agent.get_worker_id().id)", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as 
compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a model (with temperature scaling) according to the config given.
def create_model(self) -> None: self._model = create_model_with_temperature_scaling(self.config)
[ "def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:\n # wrap the model around a temperature scaling model if required\n model = config.create_model()\n if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:\n model = ModelWithTemperature(model, config.temperature_scaling_config)\n return model", "def new_model_from_config(cfg: omegaconf.DictConfig):\n\n dict_cfg = omegaconf.OmegaConf.to_container(cfg, resolve=True)\n args = {\n \"batch_size\": cfg.training.batch_size,\n \"learning_rate\": cfg.training.learning_rate,\n **dict_cfg[\"model\"],\n **dict_cfg[\"data\"],\n }\n constructor = model_class(cfg.model_meta.name)\n return constructor(**args)", "def _create_model(self, cfg, ckpt_file): \n\n # specify models hyperparameters - loaded from config yaml\n model_params = cfg['MODEL']\n filter_widths = model_params['filter_widths'] #[3,3,3,3,3]\n dropout = model_params['dropout'] #0.25\n channels = model_params['channels'] #1024\n causal = model_params['causal'] #False\n\n n_joints_in = cfg['IN_FORMAT']['num_joints']\n n_joints_out = cfg['OUT_FORMAT']['num_joints']\n\n # create model and load checkpoint\n model_pos = TemporalModel(n_joints_in, 2, n_joints_out, filter_widths, \n causal, dropout, channels)\n\n checkpoint = torch.load(ckpt_file, map_location=lambda storage, loc: storage)\n if 'pretrained_h36m_detectron_coco.bin' in ckpt_file:\n model_pos.load_state_dict(checkpoint['model_pos'])\n elif 'pretrained_video2bvh.pth' in ckpt_file:\n pretrained_dict = checkpoint['model_state']\n model_dict = model_pos.state_dict()\n pretrained_dict = {\n k: v for k, v in pretrained_dict.items()\n if k in model_dict\n }\n model_dict.update(pretrained_dict)\n model_pos.load_state_dict(model_dict)\n else:\n model_pos.load_state_dict(checkpoint)\n model_pos.eval() # Important for dropout!\n\n # push to gpu\n if torch.cuda.is_available():\n model_pos = model_pos.cuda()\n model_pos.eval()\n\n return model_pos", "def create(modelConfig, logLevel=logging.ERROR): ## model_factory.py ##\n logger = ModelFactory.__getLogger()\n logger.setLevel(logLevel)\n logger.debug(\"ModelFactory returning Model from dict: %s\", modelConfig)\n modelClass = None\n if modelConfig['model'] == \"HTMPrediction\":\n modelClass = HTMPredictionModel\n def __init__(self,sensorParams={},inferenceType=InferenceType.TemporalNextStep,spEnable=True,spParams={},trainSPNetOnlyIfRequested=False,tmEnable=True,tmParams={},\n clEnable=True,clParams={},anomalyParams={},minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP,network=None):\n if not inferenceType in self.__supportedInferenceKindSet:\n raise ValueError(\"{0} received incompatible inference type: {1}\".format(self.__class__, inferenceType))\n # Call super class constructor\n super(HTMPredictionModel, self).__init__(inferenceType)\n # self.__restoringFromState is set to True by our __setstate__ method and back to False at completion of our _deSerializeExtraData() method.\n self.__restoringFromState = False\n self.__restoringFromV1 = False\n # Intitialize logging\n self.__logger = initLogger(self)\n self.__logger.debug(\"Instantiating %s.\" % self.__myClassName)\n self._minLikelihoodThreshold = minLikelihoodThreshold\n self._maxPredictionsPerStep = maxPredictionsPerStep\n # set up learning parameters (note: these may be replaced via enable/disable//SP/TM//Learning methods)\n self.__spLearningEnabled = bool(spEnable)\n self.__tpLearningEnabled = bool(tmEnable)\n # Explicitly exclude the TM 
if this type of inference doesn't require it\n if not InferenceType.isTemporal(self.getInferenceType()) or self.getInferenceType() == InferenceType.NontemporalMultiStep:\n tmEnable = False\n self._netInfo = None\n self._hasSP = spEnable\n self._hasTP = tmEnable\n self._hasCL = clEnable\n self._classifierInputEncoder = None\n self._predictedFieldIdx = None\n self._predictedFieldName = None\n self._numFields = None\n # init anomaly\n # -----------------------------------------------------------------------\n if network is not None:\n self._netInfo = NetworkInfo(net=network, statsCollectors=[])\n else:\n # Create the network\n self._netInfo = self.__createHTMNetwork(sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable,clParams, anomalyParams)\n # Initialize Spatial Anomaly detection parameters\n if self.getInferenceType() == InferenceType.NontemporalAnomaly:\n self._getSPRegion().setParameter(\"anomalyMode\", True)\n # Initialize Temporal Anomaly detection parameters\n if self.getInferenceType() == InferenceType.TemporalAnomaly:\n self._getTPRegion().setParameter(\"anomalyMode\", True)\n # -----------------------------------------------------------------------\n # This flag, if present tells us not to train the SP network unless the user specifically asks for the SP inference metric\n self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested\n self.__numRunCalls = 0\n # Tracks whether finishedLearning() has been called\n self.__finishedLearning = False\n self.__logger.debug(\"Instantiated %s\" % self.__class__.__name__)\n self._input = None\n return\n elif modelConfig['model'] == \"TwoGram\":\n modelClass = TwoGramModel\n elif modelConfig['model'] == \"PreviousValue\":\n modelClass = PreviousValueModel\n else:\n raise Exception(\"ModelFactory received unsupported Model type: %s\" % modelConfig['model'])\n return modelClass(**modelConfig['modelParams'])", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def create_transformer_model(model_name: str, config: Optional[BaseModelConfig] = None) -> nn.Module:\n # Make sure model is supported\n assert is_transformer_model(model_name)\n\n # Import here because it's an optional dependency\n from transformers import AutoConfig, AutoModel\n\n if config is not None and hasattr(config, \"output_dir\"): # Load trained model from config\n kwargs = {\n \"pretrained_model_name_or_path\": get_file_path(config.output_dir, config.model_name_or_path),\n \"from_tf\": False,\n \"config\": AutoConfig.from_pretrained(get_file_path(config.output_dir, config.model_config_path)),\n }\n model = AutoModel.from_pretrained(**kwargs)\n\n else: # Load default pre-trained model\n model = AutoModel.from_pretrained(model_name)\n\n return model", "def run_model(config_file):\n config_file = os.path.join(os.getcwd(), config_file)\n result = Tethys(config_file=config_file)\n result.run_model()\n return result", "def create_model(model_name: str, config: Optional[BaseModelConfig] = None) -> nn.Module:\n if model_name == \"single_layer_classifier\":\n model = SingleLayerClassifier(config)\n elif model_name == \"multi_layer_classifier\":\n model = MultiLayerClassifier(config)\n elif model_name == \"single_layer_regressor\":\n model = SingleLayerRegressor(config)\n elif model_name == \"multi_layer_regressor\":\n 
model = MultiLayerRegressor(config)\n elif is_transformer_model(model_name):\n model = create_transformer_model(model_name, config)\n else:\n raise RuntimeError(f\"Unknown model name {model_name}.\")\n return model", "def from_model_config(cls, config: PretrainedConfig, task: str = \"default\") -> \"OnnxConfig\":\n return cls(config, task=task)", "def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n 
max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def generate_model(config_file=None, config_dict={}, initialize_site_data=None, log_level='info'):\n\n return Model(config_file, config_dict, initialize_site_data, log_level)", "def create_model(self, model_config, is_training=True):\n return model_builder.build(model_config, is_training=is_training)", "def from_config(cls, config):\n d_conf = config.model.structure.density\n velocity = quantity_linspace(config.model.structure.velocity.start,\n config.model.structure.velocity.stop,\n config.model.structure.velocity.num + 1).cgs\n\n adjusted_velocity = velocity.insert(0, 0)\n v_middle = (adjusted_velocity[1:] * 0.5 +\n adjusted_velocity[:-1] * 0.5)\n no_of_shells = len(adjusted_velocity) - 1\n time_explosion = config.supernova.time_explosion.cgs\n\n if d_conf.type == 'branch85_w7':\n density_0 = calculate_power_law_density(v_middle, d_conf.w7_v_0,\n d_conf.w7_rho_0, -7)\n time_0 = d_conf.w7_time_0\n elif d_conf.type == 'uniform':\n density_0 = (d_conf.value.to('g cm^-3') *\n np.ones(no_of_shells))\n time_0 = d_conf.get('time_0', time_explosion)\n elif d_conf.type == 'power_law':\n density_0 = calculate_power_law_density(v_middle, d_conf.v_0,\n d_conf.rho_0,\n d_conf.exponent)\n time_0 = d_conf.get('time_0', time_explosion)\n elif d_conf.type == 'exponential':\n density_0 = calculate_exponential_density(v_middle, d_conf.v_0,\n d_conf.rho_0)\n time_0 = d_conf.get('time_0', time_explosion)\n else:\n raise ValueError(\"Unrecognized density type \"\n \"'{}'\".format(d_conf.type))\n return cls(density_0, time_0)", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def prepare_ML(config): \n\n scaler = machine_learning.define_scaling(config)\n\n clf = machine_learning.define_model(config)\n\n return scaler, clf", "def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def createModel(modelParams):\n model = ModelFactory.create(modelParams)\n model.enableInference({\"predictedField\": \"c1\"})\n return model" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates the model summary, which is required for model partitioning across GPUs, and then moves the model to GPU with data parallel/model parallel by calling adjust_model_for_gpus.
def create_summary_and_adjust_model_for_gpus(self) -> None: if self._model is None: raise ValueError("Model must be created before it can be adjusted.") if self.config.is_segmentation_model: summary_for_segmentation_models(self.config, self._model) # Prepare for mixed precision training and data parallelization (no-op if already done). # This relies on the information generated in the model summary. self.adjust_model_for_gpus()
[ "def _distribute_model(self):\n self.feature_extractor.cuda(1)\n self.feature_adapter.cuda(1)", "def model_to_gpu(self):\n # Multi-GPU\n if torch.cuda.device_count() >= 1:\n gpu_count = torch.cuda.device_count()\n for mname in self.model:\n self.model[mname] = nn.DataParallel(self.model[mname])\n else:\n gpu_count = 0\n self.model.to(self.device) # dtype=model_type", "def get_model_summary(model, shape, device=None):\n if not device:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n summary(model.to(device), shape)", "def make_model_summary(self):\n logger.info(f'Generating model summary for {self.model_name}.')\n self.json_stats['model_summary'] = {\n 'model_name': self.model_name,\n 'number_of_statements': self.latest_round.get_total_statements(),\n 'stmts_type_distr': self.latest_round.get_statement_types(),\n 'agent_distr': self.latest_round.get_agent_distribution(),\n 'stmts_by_evidence': self.latest_round.get_statements_by_evidence(),\n 'sources': self.latest_round.get_sources_distribution(),\n 'assembled_beliefs': self.latest_round.get_beliefs(),\n 'all_stmts': self.latest_round.get_english_statements_by_hash()\n }", "def make_parallel(model, gpu_count):\n def get_slice(data, idx, parts):\n shape = tf.shape(data)\n size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)\n stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)\n start = stride * idx\n return tf.slice(data, start, size)\n\n outputs_all = []\n for i in range(len(model.outputs)):\n outputs_all.append([])\n\n # Place a copy of the model on each GPU, each getting a slice of the batch\n for i in range(gpu_count):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('tower_%d' % i) as scope:\n\n inputs = []\n # Slice each input into a piece for processing on this GPU\n for x in model.inputs:\n input_shape = tuple(x.get_shape().as_list())[1:]\n slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)\n inputs.append(slice_n)\n\n outputs = model(inputs)\n\n if not isinstance(outputs, list):\n outputs = [outputs]\n\n # Save all the outputs for merging back together later\n for l in range(len(outputs)):\n outputs_all[l].append(outputs[l])\n\n # merge outputs on CPU\n with tf.device('/cpu:0'):\n merged = []\n for outputs in outputs_all:\n merged.append(concatenate(outputs, axis=0))\n\n return Model(input=model.inputs, output=merged)", "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if 
config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\r\n data_dir, num_targets, dataset, validate):\r\n # summary_dir += '/train/'\r\n # with tf.Graph().as_default():\r\n # # Build model\r\n # train_features = get_features('train', FLAGS.batch_size, num_gpus, max_ngram_len=FLAGS.max_ngram_len)\r\n # valid_features = get_features('valid', FLAGS.batch_size, num_gpus, max_ngram_len=FLAGS.max_ngram_len)\r\n # model = models[model_type](hparams)\r\n # train_result, _ = model.multi_gpu(train_features, num_gpus)\r\n # # valid_result, _ = model.multi_gpu(valid_features, num_gpus)\r\n # # result = [train_result, valid_result]\r\n # # # Print stats\r\n # param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(\r\n # tf.get_default_graph(),\r\n # tfprof_options=tf.contrib.tfprof.model_analyzer.\r\n # TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\r\n # sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\r\n # writer = tf.summary.FileWriter(summary_dir)\r\n # run_experiment(load_training, summary_dir, writer, train_experiment, train_result,\r\n # max_steps, save_step)\r\n # writer.close()\r\n gpu_id = 3\r\n with tf.device('/gpu:%d' % gpu_id):\r\n with tf.Graph().as_default():\r\n features = dict()\r\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\r\n init_op = tf.group(tf.global_variables_initializer(),\r\n tf.local_variables_initializer())\r\n sess.run(init_op)\r\n model = models[model_type](hparams)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\r\n for i in range(epoch):\r\n print('--------------------epoch:{}------------------'.format(i + 1))\r\n data = next_batch(batch_size, 'train')\r\n total_correct = 0\r\n total_loss = 0\r\n count = 0\r\n for batched_data in data:\r\n X, Y, ngram_num = batched_data\r\n count += 1\r\n features['text'], features['labels'] = X, Y\r\n features['num_classes'], features['max_ngram_len'] = len(user2idx), max_len\r\n features['ngram_num'] = ngram_num\r\n out = model._single_tower(gpu_id, features)\r\n loss, correct = sess.run([out.losses, out.correct])\r\n total_loss += loss\r\n total_correct += correct\r\n print('train_loss: {}, train_acc: {}'.format(total_loss / i, total_correct / (i * batch_size)))\r\n print('-------------------valid:{}--------------------'.format(i + 1))\r\n data = next_batch(batch_size, 'valid')\r\n total_correct = 0\r\n total_loss = 0\r\n count = 0\r\n for batched_data in data:\r\n X, Y, ngram_num = batched_data\r\n count += 1\r\n features['text'], features['labels'] = X, Y\r\n features['num_classes'], features['max_ngram_len'] = len(user2idx), max_len\r\n features['ngram_num'] = ngram_num\r\n out = model._single_tower(gpu_id, features)\r\n loss, correct = sess.run(out.losses, out.correct)\r\n total_loss += loss\r\n total_correct += correct\r\n print('valid_loss: {}, valid_acc: {}'.format(total_loss / i, total_correct / (i * batch_size)))\r\n coord.join(threads)\r\n sess.close()", "def _model_to_device(self):\n if next(self.model.parameters()).is_cuda is False:\n self.model.to(self.device)", 
"def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\n data_dir, num_targets, dataset, validate, seed, shuffled, shift,\n pad, batch_size=128):\n summary_dir += '/train/'\n with tf.Graph().as_default():\n # Build model\n features = get_features('train', batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=False,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n # Print stats\n param_stats = tf.profiler.profile(\n tf.get_default_graph(),\n options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n writer = tf.summary.FileWriter(summary_dir)\n run_experiment(load_training, summary_dir, writer, train_experiment,\n model, result, max_steps, save_step)\n writer.close()", "def draw_model_stats(arch, grps, data_dir, num_iters=None):\n if not num_iters:\n num_iters = [1]\n fp = os.path.join(\n data_dir,\n \"model_stats_{}_NI_{}_G_{}.pdf\".format(\n arch,\n \"-\".join([str(ni) for ni in num_iters]),\n \"-\".join([str(g) for g in grps]),\n ),\n )\n\n print(\"Plot to file: {}\".format(fp))\n\n fig, ax = plt.subplots(figsize=(5, 4))\n\n print(\"Running on model {} ...\".format(arch))\n\n model = utils.load_model(arch, \"imagenet\", pretrained=True)\n results = {\"num_iters\": [], \"num_groups\": [], \"ratio\": []}\n\n for ni in num_iters:\n\n for G in grps:\n print(\"G = {} NI = {}\".format(G, ni))\n\n mods = {}\n\n # Collect statistics for a single model\n for name, mod in model.named_modules():\n if not isinstance(mod, nn.Conv2d):\n continue\n\n W = mod.weight\n F, C = W.shape[:2]\n\n if F % G != 0 or C % G != 0:\n continue\n\n C = W.norm(dim=(2, 3)).cpu().detach().numpy()\n gnd_in, gnd_out, cost = run_mbm(C, G, perm=\"GRPS\", num_iters=ni)\n mods[name] = (cost, C.sum(), cost / C.sum() * 100)\n\n # print('{:30s}\\t {:.2e}\\t {:.2e}\\t {:.2f}%'.format(\n # name, mods[name][0], mods[name][1], mods[name][2]))\n\n # Summarise results\n sum_cost = sum([val[0] for val in mods.values()])\n total_cost = sum([val[1] for val in mods.values()])\n\n results[\"num_iters\"].append(\"$N_S={}$\".format(ni))\n results[\"num_groups\"].append(\"$G={}$\".format(G))\n results[\"ratio\"].append(sum_cost / total_cost * 100)\n\n df = pd.DataFrame(results)\n sns.barplot(x=\"num_groups\", y=\"ratio\", hue=\"num_iters\", data=df)\n\n ax.legend()\n plt.tight_layout()\n fig.savefig(fp)\n\n df.to_csv(fp.replace(\".pdf\", \".csv\"))", "def output_models(self):\n bounds = self.manipulator.get_bounds()\n cfg_vecs = self.manipulator.get_random_vecs(self.sample_cnt, bounds)\n\n results = {}\n for model in self.models:\n results[model.metric] = model.sample_models(cfg_vecs)\n\n info = DebugInfo(tuning_run=self.driver.tuning_run,\n info=[cfg_vecs, results])\n self.driver.session.add(info)", "def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n assert isinstance(model, BaseModel)\n crop_size = config.crop_size\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size, crop_size)\n try:\n model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)\n except AttributeError as e:\n logging.warning(f\"summary_for_segmentation_models failed with exception {e}\")", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # 
-----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def model_summary(dataset, training_data, validation_data, test_data, parameters, loss='cross', save=False):\n\n # Initialize the network\n network = Network(dataset.input_dim, dataset.num_labels,\n l=parameters['l'], loss=loss)\n\n # Check initial accuracy\n init_acc_train = network.compute_accuracy(\n training_data['X'], training_data['y'])\n init_acc_val = network.compute_accuracy(\n validation_data['X'], validation_data['y'])\n init_acc_test = network.compute_accuracy(\n test_data['X'], test_data['y'])\n\n # Format string to ouput model stats\n model_parameters = 'Model parameters: \\n' + \\\n ' loss: \\t{}\\n'.format(loss) + \\\n ' lambda: \\t{}\\n'.format(parameters['l']) + \\\n ' eta: \\t{}\\n'.format(parameters['eta']) + \\\n ' n_epochs: \\t{}\\n'.format(parameters['n_epochs']) + \\\n ' n_batches: \\t{}\\n'.format(parameters['n_batches'])\n\n if parameters['decay'] != 1:\n model_parameters = model_parameters + \\\n ' decay: \\t{}\\n'.format(parameters['decay'])\n if parameters['shuffle']:\n model_parameters = model_parameters + \\\n ' shuffle: \\t{}\\n'.format(parameters['shuffle'])\n if parameters['noise'] is not None:\n model_parameters = model_parameters + \\\n ' noise: \\t{}\\n'.format(parameters['noise'])\n\n print(model_parameters)\n\n # Train network\n cost_train, cost_val, accuracy_train, accuracy_val = network.train(\n training_data, validation_data, parameters['eta'], parameters[\n 'n_epochs'], parameters['n_batches'], parameters['shuffle'],\n parameters['decay'], parameters['noise'])\n\n accuracy_test = network.compute_accuracy(test_data['X'], test_data['y'])\n cost_test = 
network.compute_cost(test_data['X'], test_data['Y'])\n\n model_performance = 'Training data:\\n' + \\\n ' accuracy (untrained): \\t{:.2f}%\\n'.format(init_acc_train) + \\\n ' accuracy (trained): \\t\\t{:.2f}%\\n'.format(accuracy_train[-1]) + \\\n ' cost (final): \\t\\t{:.2f}\\n'.format(cost_train[-1]) + \\\n 'Validation data:\\n' + \\\n ' accuracy (untrained): \\t{:.2f}%\\n'.format(init_acc_val) + \\\n ' accuracy (trained): \\t\\t{:.2f}%\\n'.format(accuracy_val[-1]) + \\\n ' cost (final): \\t\\t{:.2f}\\n'.format(cost_val[-1]) + \\\n 'Test data:\\n' + \\\n ' accuracy (untrained): \\t{:.2f}%\\n'.format(init_acc_test) + \\\n ' accuracy (trained): \\t\\t{:.2f}%\\n'.format(accuracy_test) + \\\n ' cost (final): \\t\\t{:.2f}\\n'.format(cost_test)\n\n print(model_performance)\n\n if save:\n filename = loss+\"_lambda\" + \\\n str(parameters['l'])+\"_eta\"+str(parameters['eta'])\n with open('summary_{}.txt'.format(filename), 'w') as f:\n f.write(model_parameters + model_performance)\n else:\n filename = None\n\n plot_performance(cost_train, cost_val, accuracy_train,\n accuracy_val, filename=filename)\n plot_weights(network.W, dataset.get_labels(), filename=filename)\n plt.show()", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def to_cuda(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be moved to GPU.\")\n self._model = self._model.cuda()", "def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, 
\"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )", "def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an optimizer and loads its state from a checkpoint.
def try_create_optimizer_and_load_from_checkpoint(self) -> bool:
    self.create_optimizer()
    if self.checkpoint_path:
        return self.try_load_checkpoint_for_optimizer()
    return True
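The method above belongs to a trainer class whose helpers (create_optimizer, try_load_checkpoint_for_optimizer) are defined elsewhere in that codebase and are not part of this record. Purely as a self-contained illustration of the same pattern — build the optimizer first, then overwrite its state when a checkpoint path is supplied — here is a minimal plain-PyTorch sketch; the checkpoint key and file name are assumptions for the example, not taken from the record.

from typing import Optional

import torch
from torch import nn


def create_optimizer_and_maybe_restore(model: nn.Module,
                                       checkpoint_path: Optional[str] = None) -> torch.optim.Optimizer:
    # Create the optimizer unconditionally, then restore its state if a checkpoint is given.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    if checkpoint_path:
        state = torch.load(checkpoint_path, map_location="cpu")
        # "optimizer_state_dict" is an assumed key; match it to however the checkpoint was written.
        optimizer.load_state_dict(state["optimizer_state_dict"])
    return optimizer


if __name__ == "__main__":
    opt = create_optimizer_and_maybe_restore(nn.Linear(4, 2))
    torch.save({"optimizer_state_dict": opt.state_dict()}, "opt.pt")
    restored = create_optimizer_and_maybe_restore(nn.Linear(4, 2), "opt.pt")
    print(restored.state_dict()["param_groups"][0]["lr"])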
[ "def _load_optimizer(optimizer_config):\n logger.info(\"\\t Loading optimizer ...\")\n return create_tf_optimizer(optimizer_config)", "def create_optimizer(self) -> None:\n # Make sure model is created before we create optimizer\n if self._model is None:\n raise ValueError(\"Model checkpoint must be created before optimizer checkpoint can be loaded.\")\n\n # Select optimizer type\n if self.config.optimizer_type in [OptimizerType.Adam, OptimizerType.AMSGrad]:\n self._optimizer = torch.optim.Adam(self._model.parameters(), self.config.l_rate,\n self.config.adam_betas, self.config.opt_eps, self.config.weight_decay,\n amsgrad=self.config.optimizer_type == OptimizerType.AMSGrad)\n elif self.config.optimizer_type == OptimizerType.SGD:\n self._optimizer = torch.optim.SGD(self._model.parameters(), self.config.l_rate, self.config.momentum,\n weight_decay=self.config.weight_decay)\n elif self.config.optimizer_type == OptimizerType.RMSprop:\n self._optimizer = RMSprop(self._model.parameters(), self.config.l_rate, self.config.rms_alpha, self.config.opt_eps,\n self.config.weight_decay, self.config.momentum)\n else:\n raise NotImplementedError(f\"Optimizer type {self.config.optimizer_type.value} is not implemented\")", "def opt_from_checkpoint(\n checkpoint_path: str,\n config_path: Optional[str] = None,\n extra_bindings=tuple([])\n) -> Optimizer:\n\n if config_path is None:\n config_path = \"/\".join(checkpoint_path.split(\"/\")[:-1]) + \"/config.gin\"\n\n logging.info(\"Restoring configs from: %s\", config_path)\n with gin.unlock_config():\n scope = f\"opt_from_checkpoint__{str(uuid.uuid4()).replace('-', '_')}\"\n with gin.config_scope(None):\n with gin.config_scope(scope):\n if config_path:\n with file_open(config_path, \"rb\") as f:\n content = bytes(f.read()).decode(\"utf-8\")\n\n # gin writes out multi line sometimes, undo this.\n content = content.replace(\"\\\\\\n\", \"\")\n\n def maybe_add_scope(c):\n # filter out train as this overlaps with outer_training.\n if c.startswith(\"#\"):\n return None\n if \"=\" in c:\n return scope + \"/\" + c\n return c\n\n bindings = [maybe_add_scope(c) for c in content.split(\"\\n\")]\n bindings = [b for b in bindings if b]\n bindings = bindings + [maybe_add_scope(c) for c in extra_bindings]\n\n logging.info(\"Parsing bindings\")\n for b in bindings:\n logging.info(b)\n print(b)\n gin.parse_config(bindings, skip_unknown=True)\n\n configurable = gin.query_parameter(f\"{scope}/run_train.lopt\")\n if isinstance(configurable, gin.config._UnknownConfigurableReference): # pylint: disable=protected-access\n raise ValueError(\"Gin couldn't find the learned optimizer in current\"\n \" imports. 
Did you forget to import the module?\")\n\n # with summary.summary_scope(\"opt_from_checkpoint\"):\n lopt = configurable.configurable.wrapped()\n theta = lopt.init(jax.random.PRNGKey(0))\n logging.info(f\"Restoring checkpoint {checkpoint_path}\") # pylint: disable=logging-fstring-interpolation\n ckpt = ParameterCheckpoint(theta, \"\", 0)\n ckpt = load_state(checkpoint_path, ckpt)\n opt = lopt.opt_fn(ckpt.params)\n return opt\n # wrapped = _GinScopeClass(opt, scope)\n # For now, just add the lopt to the returned class.\n # TODO(lmetz) change this api to return a more structured class?\n # wrapped.lopt = lopt\n # return wrapped # type: ignore", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load(self):\n last_checkpoint = torch.load(self.checkpoint_path)\n self.model.load_state_dict(last_checkpoint['model_state_dict'])\n self.optim.load_state_dict(last_checkpoint['optimizer_state_dict'])", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def resume(self, checkpoint):\n if checkpoint is None:\n return None\n\n if isinstance(checkpoint, str):\n print('Loading checkpoint from {}'.format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=self.device)\n\n self.model.load_state_dict(checkpoint['model'])\n\n # Load the saved params of the optimizer only if it's the same\n current_optimizer = self.hyperparameters.optimizer.use\n saved_optimizer = checkpoint['hyperparameters']['optimizer']['use']\n if current_optimizer == saved_optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n for state in self.optimizer.state.values():\n for k, val in state.items():\n if torch.is_tensor(val):\n state[k] = val.to(self.device)\n else:\n print(\"WARN: Cannot load optimizer's params because the selected one is different from the saved one. 
\"\n \"{} (selected) != {} (checkpoint)\".format(current_optimizer, saved_optimizer))\n\n if 'scheduler' in checkpoint:\n # The scheduler could not be mapped to gpu, it raises errors\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n if not self.hyperparameters.restart_best_loss:\n print('Getting last best loss from checkpoint')\n self.best_loss = checkpoint.get('best_loss', 1e10)\n\n return {'epoch': checkpoint['epoch']}", "def create_optimizer(self, context, optimizer, host):\n pass", "def _load_state_dict(optimizer, state: dict) -> None:\n if is_scheduler(optimizer):\n optimizer.load_state_dict(state[\"scheduler\"])\n optimizer.optimizer.load_state_dict(state[\"optimizer\"])\n else:\n optimizer.load_state_dict(state)", "def init_checkpoint(self, skip_optimizer=False):\n if not self.ckpt:\n kwargs = {\n \"step\": tf.Variable(1),\n \"optimizer\": self.optimizer[0],\n \"model\": self.model\n }\n\n if skip_optimizer:\n del kwargs['optimizer']\n\n self.ckpt = tf.train.Checkpoint(**kwargs)", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def initialize_from_checkpoint(state):\n\n # The checkpoint's work_dir should contain the most recently trained model.\n model_paths = glob.glob(os.path.join(FLAGS.checkpoint_dir,\n 'work_dir/model.ckpt-*.pb'))\n if len(model_paths) != 1:\n raise RuntimeError('Expected exactly one model in the checkpoint work_dir, '\n 'got [{}]'.format(', '.join(model_paths)))\n start_model_path = model_paths[0]\n\n # Copy the latest trained model into the models directory and use it on the\n # first round of selfplay.\n state.best_model_name = 'checkpoint'\n shutil.copy(start_model_path,\n os.path.join(fsdb.models_dir(), state.best_model_name + '.pb'))\n\n # Copy the training chunks.\n golden_chunks_dir = os.path.join(FLAGS.checkpoint_dir, 'golden_chunks')\n for basename in os.listdir(golden_chunks_dir):\n path = os.path.join(golden_chunks_dir, basename)\n shutil.copy(path, fsdb.golden_chunk_dir())\n\n # Copy the training files.\n work_dir = os.path.join(FLAGS.checkpoint_dir, 'work_dir')\n for basename in os.listdir(work_dir):\n path = os.path.join(work_dir, basename)\n shutil.copy(path, fsdb.working_dir())", "def testSaveAndLoad(self):\n layers = (2, 3)\n net_options = {\"layers\": layers, \"initializer\": \"zeros\"}\n num_unrolls = 2\n num_epochs = 1\n\n problem = problems.simple()\n\n # Original optimizer.\n with tf.Graph().as_default() as g1:\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options=net_options))\n minimize_ops = optimizer.meta_minimize(problem, 3)\n\n with self.test_session(graph=g1) as sess:\n sess.run(tf.global_variables_initializer())\n train(sess, minimize_ops, 1, 2)\n\n # Save optimizer.\n tmp_dir = tempfile.mkdtemp()\n save_result = optimizer.save(sess, path=tmp_dir)\n net_path = next(iter(save_result))\n\n # Retrain original optimizer.\n cost, x = train(sess, minimize_ops, num_unrolls, 
num_epochs)\n\n # Load optimizer and retrain in a new session.\n with tf.Graph().as_default() as g2:\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options=net_options,\n net_path=net_path))\n minimize_ops = optimizer.meta_minimize(problem, 3)\n\n with self.test_session(graph=g2) as sess:\n sess.run(tf.global_variables_initializer())\n cost_loaded, x_loaded = train(sess, minimize_ops, num_unrolls, num_epochs)\n\n # The last cost should be the same.\n self.assertAlmostEqual(cost, cost_loaded, places=3)\n self.assertAlmostEqual(x[0], x_loaded[0], places=3)\n\n # Cleanup.\n os.remove(net_path)\n os.rmdir(tmp_dir)", "def build_trainer(restore_state=None, train_policies=None, config=None):\n \n print(\"Using config\")\n print(config)\n cls = PPOTrainer\n trainer = cls(config=config)\n env = trainer.workers.local_worker().env\n if restore_state is not None:\n trainer.restore_from_object(restore_state)\n return trainer", "def load_from_checkpoint(checkpoint_path, model, optimizer=None, scheduler=None, verbose=True):\n if not os.path.exists(checkpoint_path):\n raise (\"File does not exist {}\".format(checkpoint_path))\n\n if torch.cuda.is_available():\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n\n check_keys = list(checkpoint.keys())\n\n model.load_state_dict(checkpoint['model'])\n\n if 'optimizer' in check_keys:\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n if 'scheduler' in check_keys:\n if scheduler is not None:\n scheduler.load_state_dict(checkpoint['scheduler'])\n\n if 'epoch' in check_keys:\n epoch = checkpoint['epoch']\n else:\n epoch = None\n\n if verbose: # optional printing\n print(f\"Loaded model from checkpoint {checkpoint_path}\")\n\n return epoch", "def _load_optimizer(self):\n # loss function\n with tf.variable_scope(\"forward\"):\n self.loss_fwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_fwd,\n self.labels, self.weights, self.vocab_size)\n\n # optimizer\n # self.optimizer_fwd = tf.train.MomentumOptimizer(self.learning_rate,\n # self.momentum)\n self.optimizer_fwd = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train_op_fwd = self.optimizer_fwd.minimize(self.loss_fwd)\n\n with tf.variable_scope(\"backward\"):\n self.loss_bwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_bwd,\n self.labels, self.weights, self.vocab_size)\n\n # optimizer\n # self.optimizer_bwd = tf.train.MomentumOptimizer(self.learning_rate,\n # self.momentum)\n self.optimizer_bwd = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train_op_bwd = self.optimizer_bwd.minimize(self.loss_bwd)", "def optimizer_factory(args, model):\n if args.probs_only:\n params = model._primitive_layer._primitive_params[\"probs\"].parameters()\n else:\n params = model.parameters()\n\n if args.optimizer == \"SGD\":\n return optim.SGD(\n params,\n lr=args.lr,\n momentum=args.momentum\n )\n elif args.optimizer == \"Adam\":\n return optim.Adam(\n params,\n lr=args.lr\n )", "def from_checkpoint(cls, checkpoint: Checkpoint) -> \"XGBoostPredictor\":\n with checkpoint.as_directory() as path:\n bst = xgboost.Booster()\n bst.load_model(os.path.join(path, MODEL_KEY))\n preprocessor_path = os.path.join(path, PREPROCESSOR_KEY)\n if os.path.exists(preprocessor_path):\n with open(preprocessor_path, \"rb\") as f:\n preprocessor = cpickle.load(f)\n else:\n preprocessor = None\n return XGBoostPredictor(model=bst, preprocessor=preprocessor)", "def _create_train_op(self):\n if 
self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a human readable summary of the present segmentation model, writes it to logging.info, and stores the ModelSummary object inside the argument `model`.
def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:
    assert isinstance(model, BaseModel)
    crop_size = config.crop_size
    if isinstance(crop_size, int):
        crop_size = (crop_size, crop_size, crop_size)
    try:
        model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)
    except AttributeError as e:
        logging.warning(f"summary_for_segmentation_models failed with exception {e}")
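generate_model_summary and ModelSummary belong to the repository this record was mined from and are not reproduced here. As an illustrative stand-in for the general idea — writing a human-readable description of a model to logging.info — the sketch below logs parameter counts for an arbitrary torch module; it is not the same summary the function above produces.

import logging

from torch import nn


def log_basic_model_summary(model: nn.Module) -> None:
    # Log a minimal, human-readable description of the module and its parameter counts.
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logging.info("Model: %s", model.__class__.__name__)
    logging.info("Parameters: %d total, %d trainable", total, trainable)
    for name, child in model.named_children():
        logging.info("  %s: %s", name, child)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log_basic_model_summary(nn.Sequential(nn.Conv3d(1, 8, 3), nn.ReLU(), nn.Conv3d(8, 2, 3)))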
[ "def model_summary(model):\n summary = model.summary()\n return summary", "def print_model_summary():\n global model\n global model_name\n if validModel():\n \"\"\"textBrowser.append(\"\\nModel summary:\\n\")\n # print model summary to textbrowser\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x))\n short_model_summary = \"\\n\".join(stringlist)\n textBrowser.append(short_model_summary)\"\"\"\n textBrowser.append(\"Model name: \" + model_name + \"\\n\")\n textBrowser.append(model_summary() + \"\\n\")\n elif not validModel():\n textBrowser.append(\"\\nNo valid model selected.\\n\")", "def print_model_summary(model, summary_path):\n with open(summary_path, 'w') as fh:\n # Pass the file handle in as a lambda function to make it callable\n model.summary(print_fn=lambda x: fh.write(x + '\\n'))\n\n with open(summary_path, 'r') as fin:\n print(fin.read(), end='')", "def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))", "def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))", "def save_summary(model, model_name, stage_no):\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x))\n short_model_summary = \"\\n\".join(stringlist)\n \n with open(eval_path+\"{}_model_summary_stage_{}.txt\".format(model_name, stage_no), \"w\") as text_file:\n print(short_model_summary, file=text_file)", "def log_model_info(model, cfg, use_train_input=True):\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(params_count(model)))\n print(\n \"Flops: {:,} G\".format(\n get_model_stats(model, cfg, \"flop\", use_train_input)\n )\n )\n print(\n \"Activations: {:,} M\".format(\n get_model_stats(model, cfg, \"activation\", use_train_input)\n )\n )\n print(\"Mem: {:,} GB\".format(gpu_mem_usage()))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")", "def get_model_summary(model, shape, device=None):\n if not device:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n summary(model.to(device), shape)", "def print_summary(self):\n self.model.summary()", "def make_model_summary(self):\n logger.info(f'Generating model summary for {self.model_name}.')\n self.json_stats['model_summary'] = {\n 'model_name': self.model_name,\n 'number_of_statements': self.latest_round.get_total_statements(),\n 'stmts_type_distr': self.latest_round.get_statement_types(),\n 'agent_distr': self.latest_round.get_agent_distribution(),\n 'stmts_by_evidence': self.latest_round.get_statements_by_evidence(),\n 'sources': self.latest_round.get_sources_distribution(),\n 'assembled_beliefs': self.latest_round.get_beliefs(),\n 'all_stmts': self.latest_round.get_english_statements_by_hash()\n }", "def print(self):\n self.model.summary()", "def model_stats(opt, epoch, model):\n log = rlog.getLogger(opt.experiment + \".model\")\n if hasattr(opt, \"log\") and opt.log.detailed:\n # log histogram also\n assert isinstance(\n model, SVIModel\n ), \"This stat only makes sense for SVI models.\"\n for mu, std in zip(model.mu(), model.std()):\n log.put(mu=mu, std=std)\n log.trace(step=epoch, **model.summarize())\n log.reset()", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)", "def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n 
print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def summary(self):\n output = self._model_json[\"output\"]\n #centers = output[\"centers\"]\n print \"Model Summary:\"\n print\n print\n print \"Cluster Sizes: \" + str(output[\"size\"])\n print\n print \"Within-Cluster MSE: \" + str(output[\"within_mse\"])\n print\n print \"Average Between-Cluster SSE: \" + str(output[\"avg_between_ss\"])\n print \"Average Overall SSE: \" + str(output[\"avg_ss\"])\n print", "def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()", "def _print_model_info(self, model_name, model):\n if self._verbose > 0:\n print(\"=\"*20, \"Model info\", \"=\"*19)\n print(\"\\t Model name: {}\".format(model_name))\n print(\"\\t Total number of model parameters: {}\".format(sum(p.numel() for p in model.parameters())))\n print(\"=\"*51)", "def do_summary():\n try:\n section_selection = st.session_state.section\n if \"--\" in section_selection: \n section_selection = section_selection.split(\"-- \")[1]\n article = wiki_page.section(section_selection)\n except:\n article = wiki_page.summary\n\n summary = \"\"\n if article.strip():\n summary = summarize_text(article, model_obj)\n\n col1, col2 = st.beta_columns(2)\n col1.subheader('Model Summary')\n col1.write(f\"\\n{summary}\")\n\n col2.subheader('Original Text')\n snippets = match_most_text(summary, article)\n if snippets:\n highlighted_article = highlight_text(snippets, article)\n for paragraph in highlighted_article.split(\"\\n\"):\n col2.write(paragraph, unsafe_allow_html=True)\n else:\n col2.write(article)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a model with temperature scaling by wrapping the result of config.create_model with ModelWithTemperature if a temperature scaling config has been provided; otherwise return the result of config.create_model.
def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:
    # wrap the model around a temperature scaling model if required
    model = config.create_model()
    if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:
        model = ModelWithTemperature(model, config.temperature_scaling_config)
    return model
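ModelWithTemperature and SequenceModelBase are classes from the source repository and are not shown in this record. To illustrate what such a wrapper typically does — divide the wrapped model's logits by a learnable temperature that is later tuned on held-out data — here is a generic, minimal sketch; the class name and interface are invented for the example and are not the repository's API.

import torch
from torch import nn


class TemperatureScaledModel(nn.Module):
    # Wraps any logits-producing model and rescales its outputs by a learnable temperature.
    def __init__(self, model: nn.Module, initial_temperature: float = 1.0) -> None:
        super().__init__()
        self.model = model
        self.temperature = nn.Parameter(torch.tensor(float(initial_temperature)))

    def forward(self, *inputs: torch.Tensor) -> torch.Tensor:
        logits = self.model(*inputs)
        return logits / self.temperature


if __name__ == "__main__":
    wrapped = TemperatureScaledModel(nn.Linear(10, 3), initial_temperature=2.0)
    print(wrapped(torch.randn(4, 10)).shape)  # torch.Size([4, 3])

Fitting the temperature itself (for example by minimising negative log-likelihood on a validation set) is a separate calibration step and is not shown here.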
[ "def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)", "def infer_temperature_model(self):\n temperature_model_parameters = tuple(\n array.temperature_model_parameters for array in self.system.arrays)\n params = _common_keys(temperature_model_parameters)\n # remove or statement in v0.9\n if {'a', 'b', 'deltaT'} <= params or (\n not params and self.system.racking_model is None\n and self.system.module_type is None):\n return self.sapm_temp\n elif {'u_c', 'u_v'} <= params:\n return self.pvsyst_temp\n elif {'u0', 'u1'} <= params:\n return self.faiman_temp\n elif {'noct_installed'} <= params:\n return self.fuentes_temp\n elif {'noct', 'module_efficiency'} <= params:\n return self.noct_sam_temp\n else:\n raise ValueError(f'could not infer temperature model from '\n f'system.temperature_model_parameters. Check '\n f'that all Arrays in system.arrays have '\n f'parameters for the same temperature model. '\n f'Common temperature model parameters: {params}.')", "def create(model: TModel) -> ModelTransformer:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.graph.model_transformer import ONNXModelTransformer\n\n return ONNXModelTransformer(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.graph.model_transformer import OVModelTransformer\n\n return OVModelTransformer(model)\n if model_backend == BackendType.TORCH:\n from nncf.torch.model_transformer import PTModelTransformer\n\n return PTModelTransformer(model)\n raise RuntimeError(\n \"Cannot create backend-specific model transformer because {} is not supported!\".format(model_backend)\n )", "def create_scaling_model(params, experiments, reflections):\n autos = [None, Auto, \"auto\", \"Auto\"]\n use_auto_model = params.model in autos\n # Determine non-auto model to use outside the loop over datasets.\n if not use_auto_model:\n model_class = None\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == params.model:\n model_class = entry_point.load()\n break\n if not model_class:\n raise ValueError(f\"Unable to create scaling model of type {params.model}\")\n\n for expt, refl in zip(experiments, reflections):\n if not expt.scaling_model or params.overwrite_existing_models:\n # need to make a new model\n if use_auto_model:\n if not expt.scan:\n model = KBScalingModel\n else: # set model as physical unless scan < 1.0 degree\n osc_range = expt.scan.get_oscillation_range()\n abs_osc_range = abs(osc_range[1] - osc_range[0])\n if abs_osc_range < 1.0:\n model = KBScalingModel\n else:\n model = PhysicalScalingModel\n else:\n model = model_class\n expt.scaling_model = model.from_data(params, expt, refl)\n else:\n # allow for updating of an existing model.\n expt.scaling_model.update(params)\n return experiments", "def calibrate_temperature(task_id, data, mnet, hnet, hhnet, device, config,\n shared, logger, writer, cal_per_model=False,\n only_correctly_classified=False,\n cal_target_entropy=-1):\n logger.info('Temperature calibration for task %d ...' % (task_id+1))\n\n # FIXME We could also follow the code from\n # https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py\n # but they don't consider BNNs. Note, there code is much more efficient\n # since they compute the logits before entering the training loop (which\n # is possible when only having one model). 
Though, in general, we have\n # multiple models.\n\n set_train_mode(True, mnet, hnet, hhnet, None)\n\n gauss_main = False\n if isinstance(mnet, GaussianBNNWrapper):\n gauss_main = True\n\n # Whether the hypernet represents an implicit distribution (i.e., it's\n # input is a random variable), or whether it has task embeddings as input.\n det_hnet = False\n if hnet is not None:\n if hnet.num_known_conds > 0:\n assert hhnet is None\n\n det_hnet = True\n # Can currently only be the case if we train a BbB setup with option\n # `mean_only` enabled.\n if not gauss_main:\n assert hasattr(config, 'mean_only') and config.mean_only\n\n # The single parameter to be tuned by this method.\n temp_param = torch.nn.Parameter(shared.softmax_temp[task_id],\n requires_grad=True)\n assert temp_param == 1.\n\n # Which temperature transfer function to use during training. Note, this\n # can ensure that temperatures don't become negative.\n # ttf = temperature transfer function\n ttf_choice = 'softplus'\n if ttf_choice == 'linear':\n ttf = lambda x : x\n #torch.nn.init.ones_(temp_param.data)\n elif ttf_choice == 'exp':\n ttf = torch.exp\n torch.nn.init.zeros_(temp_param.data)\n else:\n ttf = F.softplus\n temp_param.data = torch.log(torch.exp(torch.ones(1)) - \\\n torch.ones(1)).to(device)\n\n allowed_outputs = pmutils.out_units_of_task(config, data, task_id,\n config.num_tasks)\n\n optimizer = tutils.get_optimizer([temp_param], config.lr,\n momentum=config.momentum, weight_decay=config.weight_decay,\n use_adam=config.use_adam, adam_beta1=config.adam_beta1,\n use_rmsprop=config.use_rmsprop, use_adadelta=config.use_adadelta,\n use_adagrad=config.use_adagrad)\n\n mnet_kwargs = pmutils.mnet_kwargs(config, task_id, mnet)\n\n num_w_samples = config.train_sample_size if config.cal_sample_size == -1 \\\n else config.cal_sample_size\n\n with torch.no_grad():\n # We don't change any network parameters, so these calls produce\n # constant outputs.\n theta_current = None\n if hhnet is not None:\n theta_current = hhnet.forward(cond_id=task_id)\n theta_current = [p.detach() for p in theta_current]\n\n if gauss_main:\n assert hhnet is None\n\n if hnet is not None:\n hnet_out = hnet.forward(cond_id=task_id)\n else:\n hnet_out = None\n w_mean, w_rho = mnet.extract_mean_and_rho(weights=hnet_out)\n w_std = putils.decode_diag_gauss(w_rho,\n logvar_enc=mnet.logvar_encoding)\n\n elif det_hnet:\n w_mean = hnet.forward(cond_id=task_id)\n\n ### We first compute the logit outputs over all samples for all models,\n ### since they don't change anymore.\n # FIXME Could lead to memory issues for large datasets and might not be\n # inefficient if ``config.cal_temp_iter`` is small, since we won't\n # iterate over the whole dataset.\n inputs = data.get_train_inputs()\n targets = data.get_train_outputs()\n\n T = data.output_to_torch_tensor(targets, device, mode='train')\n # Modify 1-hot encodings according to CL scenario.\n assert T.shape[1] == data.num_classes\n # In CL1, CL2 and CL3 (with seperate heads) we do not have to modify the\n # targets.\n if config.cl_scenario == 3 and not config.split_head_cl3:\n raise NotImplementedError('Temperature calibration not ' +\n 'implemented for CL3 without split-head.')\n\n _, labels = torch.max(T, 1) # Integer labels.\n #labels = labels.detach()\n\n num_samples = inputs.shape[0]\n\n logit_outputs = torch.empty((num_w_samples, num_samples, T.shape[1])). 
\\\n to(device)\n\n for j in range(num_w_samples):\n if gauss_main: # Gaussian weight posterior.\n # In case of the local-reparam trick, we anyway have a different\n # weight per sample. So, the demand of having the same model for\n # all samples in the dataset drops.\n if config.local_reparam_trick:\n # Note, the sampling will happen inside the forward method.\n weights = None\n emean = w_mean\n erho = w_rho\n else:\n weights = putils.sample_diag_gauss(w_mean, w_std,\n is_radial=config.radial_bnn)\n emean = None\n erho = None\n\n elif det_hnet:\n weights = w_mean\n\n else:\n if hnet is not None: # Implicit hypernetwork.\n z = torch.normal(torch.zeros(1, shared.noise_dim),\n config.latent_std).to(device)\n weights = hnet.forward(uncond_input=z,\n weights=theta_current)\n else: # Main network only training.\n weights = None\n\n # I use the validation batch size on purpose, since it is usually\n # bigger and we just want to quickly compute the logits.\n curr_bs = config.val_batch_size\n n_processed = 0\n\n while n_processed < num_samples:\n if n_processed + curr_bs > num_samples:\n curr_bs = num_samples - n_processed\n n_processed += curr_bs\n\n sind = n_processed - curr_bs\n eind = n_processed\n\n ### Compute negative log-likelihood (NLL).\n X = data.input_to_torch_tensor(inputs[sind:eind, :], device,\n mode='train')\n\n if gauss_main:\n Y = mnet.forward(X, weights=None, mean_only=False,\n extracted_mean=emean, extracted_rho=erho,\n sample=weights, **mnet_kwargs)\n else:\n Y = mnet.forward(X, weights=weights, **mnet_kwargs)\n\n if allowed_outputs is not None:\n Y = Y[:, allowed_outputs]\n\n logit_outputs[j, sind:eind, :] = Y\n\n # Since we computed all training logits, we might as well compute\n # the training accuracy on the predictive distributions at temperature 1\n # (note, temperature doesn't change predicted labels).\n pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)\n assert pred_dists.ndim == 2\n _, pred_labels = torch.max(pred_dists, 1)\n train_acc = 100. * torch.sum(pred_labels == labels) / num_samples\n logger.debug('Task %d -- training accuracy: %.2f%%.' % \\\n (task_id+1, train_acc))\n\n log_pred_dists = torch.log(torch.clamp(pred_dists, min=1e-5))\n in_entropies = -torch.sum(pred_dists * log_pred_dists, dim=1)\n\n # Normalize by maximum entropy.\n max_ent = - np.log(1.0 / data.num_classes)\n in_entropies /= max_ent\n\n in_entropies_mean = in_entropies.mean()\n in_entropies_std = in_entropies.std()\n logger.debug('Task %d -- training in-dist. entropy: %f.' 
% \\\n (task_id+1, in_entropies_mean))\n\n if not hasattr(shared, 'train_in_ent_mean'):\n shared.train_in_ent_mean = []\n shared.train_in_ent_std = []\n shared.train_in_ent_mean.append( \\\n in_entropies_mean.detach().cpu().numpy())\n shared.train_in_ent_std.append(in_entropies_std.detach().cpu().numpy())\n\n if only_correctly_classified:\n num_correct = torch.sum(pred_labels == labels)\n\n logger.info('Task %d -- only using %d/%d correctly classified ' \\\n % (task_id+1, num_correct, num_samples) + \\\n 'samples for calibration.')\n\n logit_outputs = logit_outputs[:, pred_labels == labels, :]\n num_samples = num_correct\n assert logit_outputs.shape[1] == num_correct\n\n labels = labels[pred_labels == labels]\n assert labels.shape[0] == num_correct\n\n # Sanity check!\n pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)\n _, pred_labels = torch.max(pred_dists, 1)\n assert torch.sum(pred_labels == labels) == num_correct\n\n logit_outputs = logit_outputs.detach()\n\n ### Calibrate temperature.\n for i in range(config.cal_temp_iter):\n optimizer.zero_grad()\n\n batch_inds = np.random.randint(0, num_samples, config.batch_size)\n\n batch_logits = logit_outputs[:, batch_inds, :]\n batch_labels = labels[batch_inds]\n assert batch_logits.ndim == 3\n\n # Note, this first option is more numerically stable when calibrating NLL.\n if cal_per_model or num_w_samples == 1:\n loss = 0\n for j in range(num_w_samples):\n if cal_target_entropy != -1:\n batch_sm = F.softmax(batch_logits[j, :, :] / \\\n ttf(temp_param), dim=1)\n # For numerical stability.\n batch_log_sm = torch.log(torch.clamp(batch_sm, min=1e-5))\n\n # Mean entropy within the batch.\n batch_entropy = -torch.sum(batch_sm * batch_log_sm,\n dim=1).mean()\n\n loss += (batch_entropy - cal_target_entropy)**2\n else: # Compute NLL loss\n # Note, softmax will be computed inside the `cross_entropy`.\n loss += F.cross_entropy( \\\n batch_logits[j, :, :] / ttf(temp_param), batch_labels,\n reduction='mean')\n loss /= num_w_samples\n\n else:\n batch_pred_dist = F.softmax(batch_logits / ttf(temp_param),\n dim=2).mean(dim=0)\n # FIXME nll_loss expects log_softmax as input. 
To compute the\n # predictive distribution, we have to first average softmax outputs\n # before we can apply the log, which might lead to numerical\n # instabilities.\n #batch_log_pd = batch_pred_dist\n #batch_log_pd[batch_pred_dist < 1e-5] = 1e-5\n batch_log_pd = torch.clamp(batch_pred_dist, min=1e-5)\n batch_log_pd = torch.log(batch_log_pd)\n if cal_target_entropy != -1:\n # Mean entropy within the batch.\n batch_entropy = -torch.sum(batch_pred_dist * batch_log_pd,\n dim=1).mean()\n\n loss += (batch_entropy - cal_target_entropy)**2\n else: # Compute NLL loss\n loss = F.nll_loss(batch_log_pd, batch_labels, reduction='mean')\n\n loss.backward()\n if config.clip_grad_value != -1:\n torch.nn.utils.clip_grad_value_(optimizer.param_groups[0]['params'],\n config.clip_grad_value)\n elif config.clip_grad_norm != -1:\n torch.nn.utils.clip_grad_norm_(optimizer.param_groups[0]['params'],\n config.clip_grad_norm)\n optimizer.step()\n\n if ttf_choice == 'linear':\n # NOTE In this case, nothing prevents the temperature from going\n # negative (e.g., when starting with a large learning rate).\n # Therefore, we have to actively capture this case.\n temp_param.data = torch.clamp(temp_param, min=1e-5)\n\n if i % 50 == 0:\n writer.add_scalar('cal/task_%d/loss' % task_id, loss, i)\n writer.add_scalar('cal/task_%d/temp' % task_id,\n ttf(temp_param), i)\n\n final_temp = ttf(temp_param).data\n shared.softmax_temp[task_id] = final_temp.data\n\n logger.info('Calibrated softmax temperature of task %d is: %f.' % \\\n (task_id+1, final_temp))\n\n logger.info('Temperature calibration for task %d ... Done' % (task_id+1))", "def create_transformer_model(model_name: str, config: Optional[BaseModelConfig] = None) -> nn.Module:\n # Make sure model is supported\n assert is_transformer_model(model_name)\n\n # Import here because it's an optional dependency\n from transformers import AutoConfig, AutoModel\n\n if config is not None and hasattr(config, \"output_dir\"): # Load trained model from config\n kwargs = {\n \"pretrained_model_name_or_path\": get_file_path(config.output_dir, config.model_name_or_path),\n \"from_tf\": False,\n \"config\": AutoConfig.from_pretrained(get_file_path(config.output_dir, config.model_config_path)),\n }\n model = AutoModel.from_pretrained(**kwargs)\n\n else: # Load default pre-trained model\n model = AutoModel.from_pretrained(model_name)\n\n return model", "def create(modelConfig, logLevel=logging.ERROR): ## model_factory.py ##\n logger = ModelFactory.__getLogger()\n logger.setLevel(logLevel)\n logger.debug(\"ModelFactory returning Model from dict: %s\", modelConfig)\n modelClass = None\n if modelConfig['model'] == \"HTMPrediction\":\n modelClass = HTMPredictionModel\n def __init__(self,sensorParams={},inferenceType=InferenceType.TemporalNextStep,spEnable=True,spParams={},trainSPNetOnlyIfRequested=False,tmEnable=True,tmParams={},\n clEnable=True,clParams={},anomalyParams={},minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP,network=None):\n if not inferenceType in self.__supportedInferenceKindSet:\n raise ValueError(\"{0} received incompatible inference type: {1}\".format(self.__class__, inferenceType))\n # Call super class constructor\n super(HTMPredictionModel, self).__init__(inferenceType)\n # self.__restoringFromState is set to True by our __setstate__ method and back to False at completion of our _deSerializeExtraData() method.\n self.__restoringFromState = False\n self.__restoringFromV1 = False\n # Intitialize logging\n self.__logger 
= initLogger(self)\n self.__logger.debug(\"Instantiating %s.\" % self.__myClassName)\n self._minLikelihoodThreshold = minLikelihoodThreshold\n self._maxPredictionsPerStep = maxPredictionsPerStep\n # set up learning parameters (note: these may be replaced via enable/disable//SP/TM//Learning methods)\n self.__spLearningEnabled = bool(spEnable)\n self.__tpLearningEnabled = bool(tmEnable)\n # Explicitly exclude the TM if this type of inference doesn't require it\n if not InferenceType.isTemporal(self.getInferenceType()) or self.getInferenceType() == InferenceType.NontemporalMultiStep:\n tmEnable = False\n self._netInfo = None\n self._hasSP = spEnable\n self._hasTP = tmEnable\n self._hasCL = clEnable\n self._classifierInputEncoder = None\n self._predictedFieldIdx = None\n self._predictedFieldName = None\n self._numFields = None\n # init anomaly\n # -----------------------------------------------------------------------\n if network is not None:\n self._netInfo = NetworkInfo(net=network, statsCollectors=[])\n else:\n # Create the network\n self._netInfo = self.__createHTMNetwork(sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable,clParams, anomalyParams)\n # Initialize Spatial Anomaly detection parameters\n if self.getInferenceType() == InferenceType.NontemporalAnomaly:\n self._getSPRegion().setParameter(\"anomalyMode\", True)\n # Initialize Temporal Anomaly detection parameters\n if self.getInferenceType() == InferenceType.TemporalAnomaly:\n self._getTPRegion().setParameter(\"anomalyMode\", True)\n # -----------------------------------------------------------------------\n # This flag, if present tells us not to train the SP network unless the user specifically asks for the SP inference metric\n self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested\n self.__numRunCalls = 0\n # Tracks whether finishedLearning() has been called\n self.__finishedLearning = False\n self.__logger.debug(\"Instantiated %s\" % self.__class__.__name__)\n self._input = None\n return\n elif modelConfig['model'] == \"TwoGram\":\n modelClass = TwoGramModel\n elif modelConfig['model'] == \"PreviousValue\":\n modelClass = PreviousValueModel\n else:\n raise Exception(\"ModelFactory received unsupported Model type: %s\" % modelConfig['model'])\n return modelClass(**modelConfig['modelParams'])", "def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! 
Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))", "def _create_model(self, cfg, ckpt_file): \n\n # specify models hyperparameters - loaded from config yaml\n model_params = cfg['MODEL']\n filter_widths = model_params['filter_widths'] #[3,3,3,3,3]\n dropout = model_params['dropout'] #0.25\n channels = model_params['channels'] #1024\n causal = model_params['causal'] #False\n\n n_joints_in = cfg['IN_FORMAT']['num_joints']\n n_joints_out = cfg['OUT_FORMAT']['num_joints']\n\n # create model and load checkpoint\n model_pos = TemporalModel(n_joints_in, 2, n_joints_out, filter_widths, \n causal, dropout, channels)\n\n checkpoint = torch.load(ckpt_file, map_location=lambda storage, loc: storage)\n if 'pretrained_h36m_detectron_coco.bin' in ckpt_file:\n model_pos.load_state_dict(checkpoint['model_pos'])\n elif 'pretrained_video2bvh.pth' in ckpt_file:\n pretrained_dict = checkpoint['model_state']\n model_dict = model_pos.state_dict()\n pretrained_dict = {\n k: v for k, v in pretrained_dict.items()\n if k in model_dict\n }\n model_dict.update(pretrained_dict)\n model_pos.load_state_dict(model_dict)\n else:\n 
model_pos.load_state_dict(checkpoint)\n model_pos.eval() # Important for dropout!\n\n # push to gpu\n if torch.cuda.is_available():\n model_pos = model_pos.cuda()\n model_pos.eval()\n\n return model_pos", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def create_model(model_name: str, config: Optional[BaseModelConfig] = None) -> nn.Module:\n if model_name == \"single_layer_classifier\":\n model = SingleLayerClassifier(config)\n elif model_name == \"multi_layer_classifier\":\n model = MultiLayerClassifier(config)\n elif model_name == \"single_layer_regressor\":\n model = SingleLayerRegressor(config)\n elif model_name == \"multi_layer_regressor\":\n model = MultiLayerRegressor(config)\n elif is_transformer_model(model_name):\n model = create_transformer_model(model_name, config)\n else:\n raise RuntimeError(f\"Unknown model name {model_name}.\")\n return model", "def create_model(self, model_config, is_training=True):\n return model_builder.build(model_config, is_training=is_training)", "def create_model(self, mode='kronecker'):\n vision_model = self.vision_model\n if len(vision_model._output_layers) > 2: # retinanet\n vision_model_fusion_branch = vision_model.output[2]\n else: # cnn\n vision_model_fusion_branch = vision_model.output\n\n\n transformer = self.language_translation_model\n transformer_fusion_branch = transformer.output[1]\n\n if mode == 'concatenate':\n fusion_bottleneck = keras.layers.concatenate([vision_model_fusion_branch, transformer_fusion_branch], axis=1)\n elif mode == 'kronecker':\n fusion_bottleneck = Lambda(kronecker_product2D)([vision_model_fusion_branch, transformer_fusion_branch])\n\n fusion_bottleneck = Flatten()(fusion_bottleneck)\n fusion_bottleneck = self.create_fusionnet_layers(fusion_bottleneck, layers=self.fusionnet_layers)\n fusionnet_output = Dense(self.fusionnet_model_config['coordinate_number'], kernel_initializer=keras.initializers.glorot_uniform(),\n activation='sigmoid', name='fusionnet_regression')(fusion_bottleneck)\n\n inputs = [self.vision_model.input] + self.language_translation_model.input\n\n vision_outputs,language_translation_outputs,fusionnet_outputs = list(), list(), list() # [retinanet.output[0], retinanet.output[1]] + [transformer.output[0]] + [fusionnet_output]\n if any('retinanet_regression' in filt for filt in self.fusionnet_model_config['output_filter']):\n vision_outputs.append(vision_model.output[0])\n if any('retinanet_classification' in filt for filt in self.fusionnet_model_config['output_filter']):\n vision_outputs.append(vision_model.output[1])\n\n if any('transformer_classification' in filt for filt in self.fusionnet_model_config['output_filter']):\n language_translation_outputs.append(transformer.output[0])\n\n if any('fusionnet_regression' in filt for filt in self.fusionnet_model_config['output_filter']):\n fusionnet_outputs.append(fusionnet_output)\n # get the output of the model\n\n model = Model(inputs=inputs, outputs= vision_outputs + language_translation_outputs + fusionnet_outputs)\n\n self.fusionnet_model = model\n return model", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def prepare_ML(config): \n\n scaler = machine_learning.define_scaling(config)\n\n clf = machine_learning.define_model(config)\n\n return scaler, clf", "def 
set_up_and_parameterise_model_for_experiment(self):\n self.experiment_unique_steps_to_model = {}\n for op_number, op in enumerate(self.experiment.unique_steps):\n new_model = self.model.new_copy()\n new_parameter_values = self.parameter_values.copy()\n\n if op.type != \"current\":\n # Voltage or power control\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # check which kind of external circuit model we need (differential\n # or algebraic)\n if op.type == \"voltage\":\n submodel_class = pybamm.external_circuit.VoltageFunctionControl\n elif op.type == \"power\":\n submodel_class = pybamm.external_circuit.PowerFunctionControl\n\n # Build the new submodel and update the model with it\n submodel = submodel_class(new_model.param, new_model.options)\n variables = new_model.variables\n submodel.variables = submodel.get_fundamental_variables()\n variables.update(submodel.variables)\n submodel.variables.update(submodel.get_coupled_variables(variables))\n variables.update(submodel.variables)\n submodel.set_rhs(variables)\n submodel.set_algebraic(variables)\n submodel.set_initial_conditions(variables)\n new_model.rhs.update(submodel.rhs)\n new_model.algebraic.update(submodel.algebraic)\n new_model.initial_conditions.update(submodel.initial_conditions)\n\n # Set the \"current function\" to be the variable defined in the submodel\n new_parameter_values[\"Current function [A]\"] = submodel.variables[\n \"Current [A]\"\n ]\n self.update_new_model_events(new_model, op)\n # Update parameter values\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n experiment_parameter_values = self.get_experiment_parameter_values(\n op, op_number\n )\n new_parameter_values.update(\n experiment_parameter_values, check_already_exists=False\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[repr(op)] = parameterised_model\n\n # Set up rest model if experiment has start times\n if self.experiment.initial_start_time:\n new_model = self.model.new_copy()\n # Update parameter values\n new_parameter_values = self.parameter_values.copy()\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n new_parameter_values.update(\n {\"Current function [A]\": 0, \"Ambient temperature [K]\": \"[input]\"},\n check_already_exists=False,\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[\n \"Rest for padding\"\n ] = parameterised_model", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def apply_temperature(output, temperature):\n output[\"logits_prescaled\"] = output[\"logits\"]\n output[\"logits\"] = apply_temperature_on_logits(\n logits=output[\"logits_prescaled\"], temperature=temperature\n )\n return output", "def load_model_temperature(self, overwrite=False):\n if 'T' in self.data and not overwrite:\n return\n t = cloudnet.load_as_df(self.data.major_axis, self.data.minor_axis,\n variable='temperature') - 273.15\n self.data['T'] = t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load twine from a .json filename, file-like or a json string and validate twine contents.
def _load_twine(self, source=None):
    if source is None:
        # If loading an unspecified twine, return an empty one rather than raising error (like in _load_data())
        raw_twine = {}
        logger.warning("No twine source specified. Loading empty twine.")
    else:
        raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object"))

    self._validate_against_schema("twine", raw_twine)
    self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None))
    return raw_twine
[ "def loadTweets(self, fileName):\n\t\tself.rawTweets = loadJSON(fileName)", "def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))", "def load_tweets(file):\n with open(file) as f:\n data = json.load(f)\n return data", "def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data", "def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()", "def read_file(self, filename):\n with open(filename) as json_file:\n json_string = \"\"\n reading_object = False\n line_number = 0\n for line in json_file:\n line_number += 1\n if line == '{\\n':\n reading_object = True\n if reading_object:\n if line == \"},\\n\":\n json_string += \"}\"\n reading_object = False\n try:\n json_object = json.loads(json_string)\n except ValueError as e:\n print e\n print \"ERROR in line : \", line_number\n break\n self.process_tweet(json_object)\n json_string = \"\"\n else:\n json_string += line\n print \"Total number of tweets parsed: \", self.tweets\n print \"Total number of related tweets: \", self.related_tweets", "def load_jsonl(file):\n tweets = []\n with open(file, 'rb') as temp_file:\n for tweet in json_lines.reader(temp_file, broken=True):\n reduced_tweet = {\n 'created_at': tweet['created_at'],\n 'id': tweet['id_str'],\n 'username': tweet['user']['name'],\n 'user_joined': tweet['user']['created_at'][-4:],\n 'user_location': tweet['user']['location'],\n 'user_bio': tweet['user']['description'],\n 'follower_count': tweet['user']['followers_count']\n }\n if 'extended_tweet' in tweet:\n reduced_tweet['text'] = tweet['extended_tweet']['full_text']\n else:\n reduced_tweet['text'] = tweet['text']\n if tweet['place'] is not None:\n reduced_tweet['country_code'] = tweet['place']['country_code'],\n reduced_tweet['place'] = tweet['place']['full_name']\n reduced_tweet['coordinates']=tweet['place']['bounding_box']['coordinates']\n if 'retweeted_status' in tweet:\n reduced_tweet['retweeted_user'] = {\n 'user_id': tweet['retweeted_status']['user']['id_str'],\n 'username': tweet['retweeted_status']['user']['screen_name'],\n 'user_joined': tweet['retweeted_status']['user']['created_at'][-4:],\n 'user_location': tweet['retweeted_status']['user']['location'],\n 'user_bio': tweet['retweeted_status']['user']['description'],\n 'follower_count': tweet['retweeted_status']['user']['followers_count']\n }\n tweets.append(reduced_tweet)\n return (tweets)", "def load_rentals_file(filename):\n with open(filename) as file:\n try:\n data = json.load(file)\n except FileNotFoundError:\n LOGGER.error(\"Missing file %s\", filename)\n exit(1)\n return data", "def test_file(self):\n try:\n with open( settings_app.ISSN_JSON_PATH, 'r' ) as f:\n json.loads( f.read() )\n integrity_check = True\n except:\n integrity_check = False\n self.assertEqual( True, integrity_check )", "def json_is_valid(path: str) -> bool:\n\n try:\n json.load(open(path, 'r'))\n except (FileNotFoundError, json.decoder.JSONDecodeError):\n return False\n\n return True", "def safe_json_load(fpath):\n\n validate_filepath(fpath, file_extension='.json', exception_type=JSONError)\n return _read_data_file(fpath, json.load, exception_type=JSONError)", "def load_life(name):\n\tif not '.json' in name:\n\t\tname += 
'.json'\n\t\n\twith open(os.path.join(LIFE_DIR, name), 'r') as e:\n\t\treturn json.loads(''.join(e.readlines()))", "def load(self):\n filename = self._filename\n if not os.path.exists(filename):\n self.service.log.store('Cannot load %s, does not exist' % filename)\n return False\n \n # Read from file\n self.service.log.store('Loading %s' % filename)\n f = open(filename, 'r')\n raw = f.read()\n f.close()\n \n self.from_json(raw)\n return True", "def load_tweets(fname):\n tweets = []\n for line in open(fname):\n tweets.append(json.loads(line))\n return tweets", "def load_json_file(filename, force_path=None):\n if force_path:\n path = os.path.dirname(os.path.abspath(force_path))\n filename = os.path.join(path, filename)\n f = codecs.open(filename, 'r', 'utf-8')\n return json.load(f)", "def load(fp, object_hook=object_hook, **kwargs):\n return json.load(fp, object_hook=object_hook, **kwargs)", "def load_levin(self, path):\n\t\ttry:\n\t\t\tlexicon_file = open(path)\n\t\t\tself.levin_dict = json.loads(lexicon_file.read())\n\t\texcept:\n\t\t\tprint 'fail to laod levin verb classes'", "def process_jsonld_file(fname):\n with open(fname, 'r', encoding='utf-8') as fh:\n json_dict = json.load(fh)\n return process_jsonld(json_dict)", "def load_json(filename):\n with open(filename, \"r\") as f:\n my_dyct = json.load(f)\n return to_timer(my_dyct)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
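A minimal standalone sketch of the load-then-validate pattern documented in the record above, assuming a placeholder schema and the third-party jsonschema package; the function name and schema are illustrative, not part of the library shown, and only JSON strings or file-like objects are handled here:

import json

import jsonschema  # third-party: pip install jsonschema

# Placeholder standing in for the twine schema shipped with the package.
TWINE_SCHEMA = {"type": "object", "properties": {"twined_version": {"type": "string"}}}


def load_twine_like(source=None):
    """Load twine-style JSON from a JSON string or file-like object (or default to empty) and validate it."""
    if source is None:
        raw = {}  # an unspecified twine is treated as empty rather than as an error
    elif isinstance(source, str):
        raw = json.loads(source)
    else:
        raw = json.load(source)
    jsonschema.validate(instance=raw, schema=TWINE_SCHEMA)
    return raw


print(load_twine_like('{"twined_version": "0.5.0"}'))  # -> {'twined_version': '0.5.0'}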
Get the schema for the given strand.
def _get_schema(self, strand):
    if strand == "twine":
        # The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain
        # schema itself. The twine schema is distributed with this package to ensure version consistency...
        schema_path = "schema/twine_schema.json"

    elif strand in CHILDREN_STRANDS:
        # The data is a list of children. The "children" strand of the twine describes matching criteria for
        # the children, not the schema of the "children" data, which is distributed with this package to ensure
        # version consistency...
        schema_path = "schema/children_schema.json"

    elif strand in MANIFEST_STRANDS:
        # The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
        # filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
        # is distributed with this package to ensure version consistency...
        schema_path = "schema/manifest_schema.json"

    else:
        if strand not in SCHEMA_STRANDS:
            raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")

        # Get schema from twine.json file.
        schema_key = strand + "_schema"

        try:
            return getattr(self, schema_key)
        except AttributeError:
            raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")

    return jsonlib.loads(pkg_resources.resource_string("twined", schema_path))
[ "def getschema(self, indir='', schemaname=''):\n if schemaname not in self.registry:\n try:\n self.convert_schema(indir=indir, schemaname=schemaname)\n except IOError:\n pass\n outschema = self.registry.get(schemaname)\n return outschema", "def get_schema(self, schema_id):\n return self.schema_id_index.get(schema_id, None)", "def _get_schema(schema_name):\n if not schema_name:\n return\n for schema in get_active_schemata():\n if schema_name == schema.schema or schema_name == schema:\n return schema", "def _get_schema(session: ObjectExplorerSession, dbid: any, scid: any) -> Schema:\n return session.server.databases[int(dbid)].schemas[int(scid)]", "def schema(self):\n if not self._schema:\n response = self.api.make_request('GET', '%s/schema' % self.path)\n self._schema = response.data\n \n return self._schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def scheming_get_dataset_schema(dataset_type, expanded=True):\n schemas = scheming_dataset_schemas(expanded)\n if schemas:\n return schemas.get(dataset_type)", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def get_schema(struct):\n return struct._schema", "def sample_schema(self):\n if 'sample' not in self._schemas:\n logging.debug(f\"{self.id} - no schema? {self._schemas}\")\n return None\n return self._schemas['sample']", "def get_schema(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/streamingendpoints/%s/schema\" % (self.project_key, self.streaming_endpoint_name))", "def _get_schema(name):\n item = _get_notebook_item(name)\n if not item:\n item = _get_table(name)\n\n if isinstance(item, gcp.bigquery.Schema):\n return item\n if hasattr(item, 'schema') and isinstance(item.schema, gcp.bigquery._schema.Schema):\n return item.schema\n return None", "def getSchema( sourceDirectory ):\r\n if( sourceDirectory == settings.LEXISNEXIS_FILETAG ): return LexisNexisSchema()\r\n raise Exception( \"Filer for source <%s> is not registered in getSchema( source ).\" % ( sourceDirectory ) )", "def get_schema(self) -> dict:", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def get_schema(self, name):\n return Schema(self, name)", "def get_schema():\n # replace references\n from ..schemas import resolve\n # get zip resolver to access referenced assets\n from ..resolvers import ZipResolver\n\n # get a blob of a zip file including the GLTF 2.0 schema\n blob = resources.get(\n 'schema/gltf2.schema.zip', decode=False)\n # get the zip file as a dict keyed by file name\n archive = util.decompress(util.wrap_as_stream(blob), 'zip')\n # get a resolver object for accessing the schema\n resolver = ZipResolver(archive)\n # get a loaded dict from the base file\n unresolved = json.loads(util.decode_text(\n resolver.get('glTF.schema.json')))\n # resolve `$ref` references to other files in the schema\n schema = resolve(unresolved, resolver=resolver)\n\n return schema", "def _get_schema(name):\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)", "def schema(self):\n return self._schema" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
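A compact stand-in for the strand-to-schema dispatch above, using in-memory dicts instead of packaged schema files; the contents of the strand groupings below are assumptions made for illustration, not the library's actual constants:

# Assumed strand groupings for illustration only.
CHILDREN_STRANDS = {"children"}
MANIFEST_STRANDS = {"configuration_manifest", "input_manifest", "output_manifest"}
SCHEMA_STRANDS = {"configuration_values", "input_values", "output_values"}

# In-memory stand-ins for the schema files distributed with the package.
PACKAGED_SCHEMAS = {
    "twine": {"type": "object"},
    "children": {"type": "array"},
    "manifest": {"type": "object"},
}


def get_schema_like(strand, twine_data):
    """Return a schema for the strand, either a packaged default or a *_schema entry from the twine itself."""
    if strand == "twine":
        return PACKAGED_SCHEMAS["twine"]
    if strand in CHILDREN_STRANDS:
        return PACKAGED_SCHEMAS["children"]
    if strand in MANIFEST_STRANDS:
        return PACKAGED_SCHEMAS["manifest"]
    if strand not in SCHEMA_STRANDS:
        raise KeyError(f"Unknown strand {strand!r}.")
    return twine_data[strand + "_schema"]


print(get_schema_like("input_values", {"input_values_schema": {"type": "object"}}))  # -> {'type': 'object'}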
Validate that the installed version is consistent with an optional version specification in the twine file.
def _validate_twine_version(self, twine_file_twined_version):
    installed_twined_version = pkg_resources.get_distribution("twined").version
    logger.debug(
        "Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
    )
    if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
        raise exceptions.TwineVersionConflict(
            f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
        )
[ "def require_version(self, version):", "def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))", "def check_version(self, version):\n # TODO\n return False", "def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()", "def validate_version(self):\n installed = get_installed_version()\n if not versions_compatible(*self.dbt_version):\n msg = IMPOSSIBLE_VERSION_ERROR.format(\n package=self.project_name,\n version_spec=[\n x.to_version_string() for x in self.dbt_version\n ]\n )\n raise DbtProjectError(msg)\n\n if not versions_compatible(installed, *self.dbt_version):\n msg = INVALID_VERSION_ERROR.format(\n package=self.project_name,\n installed=installed.to_version_string(),\n version_spec=[\n x.to_version_string() for x in self.dbt_version\n ]\n )\n raise DbtProjectError(msg)", "def test_valid_version():\n v_curr = parse_version(pypkgla01.__version__)\n v_orig = parse_version(\"0.1.0-dev\")\n assert v_curr >= v_orig", "def check_legitimate_ver(version):\n return re.match(\"^[0-9.]+$\", version)", "def test_check_version():\n # Test valid version formats.\n for version in ['12.4.5', '12.4']:\n assert UserConfig._check_version(version)\n\n # Test not valid version formats.\n for version in ['0.1.5.dev0', 12, '12', '1.3.4.2', (0, 1, 3)]:\n with pytest.raises(ValueError):\n UserConfig._check_version(version)", "def version_check(self, app_version):\n return True", "def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. \"\n \"Validation cannot be performed.\"\n ],\n )", "def validate_project_version(config: Dict[str, Any]) -> None:\n spacy_version = config.get(\"spacy_version\", None)\n if spacy_version and not is_compatible_version(about.__version__, spacy_version):\n err = (\n f\"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) \"\n f\"that's not compatible with the version of spaCy you're running \"\n f\"({about.__version__}). 
You can edit version requirement in the \"\n f\"{PROJECT_FILE} to load it, but the project may not run as expected.\"\n )\n msg.fail(err, exits=1)", "def test_valid_hh_version():\n # TODO: Basically only enforcing correct main segment, since not using `re.fullmatch`\n # TODO: Probably want `re.fullmatch` here - Currently ignoring any potentially invalid suffix\n version_pattern = r\"^[0-9]+\\.[0-9]+\\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])\"\n res = re.match(version_pattern, hh.__version__)\n assert res is not None", "def checkBakefileVersion(version):\n vcur = mk.vars['BAKEFILE_VERSION'].split('.')\n vreq = version.split('.')\n return vcur >= vreq", "def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion", "def test_check_version():\n assert check_version('0.9.4-1', '0.9.4', '>=')\n assert check_version('3.0.0rc1', '3.0.0', '<')\n assert check_version('1.0', '1.0b2', '>')", "def rpn_version_check(self):", "def test_version_not_unknown():\n import lstchain\n assert lstchain.__version__ != 'unknown'", "def validate_version_pragma(version_str: str, start: ParserPosition) -> None:\n from vyper import __version__\n\n version_arr = version_str.split(\"@version\")\n\n raw_file_version = version_arr[1].strip()\n strict_file_version = _convert_version_str(raw_file_version)\n strict_compiler_version = Version(_convert_version_str(__version__))\n\n if len(strict_file_version) == 0:\n raise VersionException(\n \"Version specification cannot be empty\", start,\n )\n\n try:\n npm_spec = NpmSpec(strict_file_version)\n except ValueError:\n raise VersionException(\n f'Version specification \"{raw_file_version}\" is not a valid NPM semantic '\n f\"version specification\",\n start,\n )\n\n if not npm_spec.match(strict_compiler_version):\n raise VersionException(\n f'Version specification \"{raw_file_version}\" is not compatible '\n f'with compiler version \"{__version__}\"',\n start,\n )", "def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
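The version-consistency rule above reduces to a simple comparison; a dependency-free sketch (hypothetical function name, taking the installed version as an argument rather than querying pkg_resources):

def check_version_consistency(installed_version, required_version):
    """Raise if a twine file pins a library version that differs from the installed one."""
    if required_version is not None and installed_version != required_version:
        raise ValueError(
            f"Version conflict: the twine file requires {required_version} "
            f"but {installed_version} is installed."
        )


check_version_consistency("0.5.0", None)     # no pin in the twine file -> passes
check_version_consistency("0.5.0", "0.5.0")  # matching pin -> passes
# check_version_consistency("0.5.0", "0.4.2")  # would raise ValueError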
Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest.
def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
    # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
    manifest_schema = getattr(self, manifest_kind)

    for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
        if expected_dataset_name in manifest["datasets"]:
            continue

        if expected_dataset_schema.get("optional", False):
            continue

        raise exceptions.invalid_contents_map[manifest_kind](
            f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
        )
[ "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def check_manifest_entities(manifest):\n def blueprint_exists(name, nodepath):\n if not cache.get_blueprint(name=name):\n msg = 'Blueprint `{0}` does not exist.'.format(name)\n raise validate.ValidationError(msg, nodepath)\n return True\n\n def image_exists(name, nodepath):\n if not cache.get_image(name=name):\n msg = 'Image `{0}` does not exist.'.format(name)\n raise validate.ValidationError(msg, nodepath)\n return True\n\n def can_load_class(name):\n return bool(util.load_class(name))\n\n def check(path, check):\n try:\n validate.validate_node(manifest, path, check)\n except validate.ValidationError as e:\n path = validate.pathref(e[1])\n error.raise_error('{0}:{1}: {2}', manifest['_filename'], path, e[0])\n\n check('/applications/*/blueprint', blueprint_exists)\n check('/applications/*/vms/*/image', image_exists)\n check('/applications/*/tasks/*/class', can_load_class)\n check('/defaults/vms/tasks/*/class', can_load_class)\n check('/languages/*/vms/tasks/*/class', can_load_class)", "def _validate_manifest_download(expected_objs, download_results):\n downloaded_objs = {\n r['object']\n for r in download_results\n if r['success'] and r['action'] in ('download_object',)\n }\n return set(expected_objs).issubset(downloaded_objs)", "def _warn_for_missing_datasets(self, datasets: set[str]):\n any_missing = False\n for ds in datasets:\n if not self.frames.has_dataset(ds):\n any_missing = True\n logger.warn(f'dataset \"{ds}\" is not in the database')\n if any_missing:\n logger.warn(f\"datasets in the 
databse: {self.all_datasets()}\")", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def checkMetadataEntries(metadataSizes):\n for e in EXPECTED_METADATA_ENTRIES:\n if e not in metadataSizes:\n print 'WARNING: Metadata entry \"%s\" not found' % e", "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }", "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. 
Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True", "def check_allowed_dataset(self, dataset_name):\n\n if self.include_datasets is not None:\n for pattern in self.include_datasets:\n if pattern.match(dataset_name):\n break\n else:\n # no pattern matched\n LOG.debug('Dataset %s is not in include list.', dataset_name)\n return False\n\n if self.exclude_datasets is not None:\n for pattern in self.exclude_datasets:\n if pattern.match(dataset_name):\n LOG.debug('Dataset %s is in exclude list.', dataset_name)\n return False\n\n return True", "def _check_dataset_exists(self):\n error_filenames=[]\n for fn in self.dataset:\n if not os.path.exists(gpi_utils.expand_path(self.input_path+os.sep+fn)) : error_filenames.append(fn)\n if error_filenames != []:\n raise IOError(\"File(s) %s do not exist.\" % \", \".join(error_filenames))", "def check(self):\n manifest_count = int(self.get_manifest_count())\n manifest_actual_count = len(self.message_tree.findall(self.manifest_tag + \"/itk:manifestitem\", self.namespaces))\n\n if manifest_count != manifest_actual_count:\n logging.warning(\"Manifest count did not equal number of instances: (expected : found) - (%i : %i)\",\n manifest_count, manifest_actual_count)\n return True, \"The number of manifest instances does not match the manifest count specified\"\n\n return False, None", "def test_sanity(tmpdir, manifest_file, manifest):\n _file = tmpdir.join('manifest.yaml')\n _file.write(manifest_file)\n assert get_manifest_from_path(str(_file)).contents == manifest.contents", "def _has_datasets(self) -> bool:\n pass", "def test_manifest(self, quiet=False):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.items():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n if not quiet:\n print(\"Tested %s, %s names\" % (self.nuke_node.name(), len(manifest)))\n print(\" \", len(errors), \"non-matching IDs between python and c++.\")\n print(\" \", len(collisions), \"hash collisions in manifest.\")\n\n return errors, collisions", "def test_theme_manifest(err, xpi_package=None):\n\n # Don't even both with the test(s) if there's no chrome.manifest.\n chrome = err.get_resource('chrome.manifest')\n if not chrome:\n return\n\n for triple in chrome.triples:\n subject = triple['subject']\n # Test to make sure that the triple's subject is valid\n if subject not in ('skin', 'style'):\n err.warning(\n err_id=('themes', 'test_theme_manifest',\n 'invalid_chrome_manifest_subject'),\n warning='Invalid chrome.manifest subject',\n description=('chrome.manifest files for full themes are only '\n \"allowed to have 'skin' and 'style' items. 
\"\n 'Other types of items are disallowed for '\n 'security reasons.',\n 'Invalid subject: %s' % subject),\n filename=triple['filename'],\n line=triple['line'],\n context=triple['context'])", "def manifest_xml_not_empty(self, input_zipfile):\n manifest = None\n try:\n manifest = input_zipfile.read(\"manifest.xml\")\n except KeyError:\n return False\n\n if manifest:\n if len(str(manifest)) > 0:\n # Has some content\n return True\n else:\n return False\n else:\n return False\n\n # Default return\n return None", "def _check_file_manifest(self, path_prefix, file_manifest, file_manifest_name):\n\n for artifact_file in file_manifest.files:\n if artifact_file.ftype != \"file\":\n continue\n\n chksums.check_artifact_file(path_prefix=path_prefix, artifact_file=artifact_file)\n\n # check the extract archive for any extra files.\n filewalker = FileWalker(collection_path=path_prefix)\n prefix = path_prefix + \"/\"\n found_file_set = set([string_utils.removeprefix(fp, prefix) for fp in filewalker.walk()])\n\n file_manifest_file_set = set([artifact_file.name for artifact_file in file_manifest.files])\n # The artifact contains MANIFEST.json and FILES.JSON, but they aren't\n # in file list in FILES.json so add them so we match expected.\n file_manifest_file_set.add(\"MANIFEST.json\")\n file_manifest_file_set.add(file_manifest_name)\n\n difference = sorted(list(found_file_set.difference(file_manifest_file_set)))\n\n if difference:\n err_msg = f\"Files in the artifact but not the file manifest: {difference}\"\n raise exc.FileNotInFileManifestError(unexpected_files=difference, msg=err_msg)\n\n return True", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
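The required-versus-optional dataset check above, reduced to plain dictionaries; names are illustrative, and the kind-specific exceptions from invalid_contents_map are replaced with a generic KeyError:

def check_expected_datasets(manifest_schema, manifest):
    """Raise if any non-optional dataset named in the schema is absent from the manifest."""
    for name, dataset_schema in manifest_schema["datasets"].items():
        if name in manifest["datasets"] or dataset_schema.get("optional", False):
            continue
        raise KeyError(f"A dataset named {name!r} is expected but missing.")


schema = {"datasets": {"wind_speeds": {}, "extra_diagnostics": {"optional": True}}}
check_expected_datasets(schema, {"datasets": {"wind_speeds": {}}})  # passes: only the optional dataset is absent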
Get the names of strands that are found in this twine.
def available_strands(self): return self._available_strands
[ "def iter_strands(self):\n return iter(self.strand_list)", "def station_names(self):\n return self.stations.keys()", "def names(self):\n for wool in self.wools.items():\n yield wool.name", "def stations_names_list(self):\n return [station[\"station_name\"] for station in self.__stations_objects_list]", "def list_available_strains(self):\n return [strain for strain in self.sample_dict]", "def wells_list(self, wtype='all'):\n list_names = []\n for well_data in self.wells:\n if wtype == 'all':\n list_names.append(well_data.drawdown.name)\n elif wtype == well_data._type - 2:\n list_names.append(well_data.drawdown.name)\n return(list_names)", "def findStrands(self, data):\n if len(data) == 0: return data\n else:\n for sheet in data:\n strandsBreak = []\n strandData = []\n for i in range(1,len(sheet[6])):\n if i != 1 and \\\n int(sheet[6][i].number) - int(sheet[6][i-1].number)!=1:\n strandsBreak.append(i)\n\n if len(strandsBreak) == 0:\n strandData = sheet\n else:\n i = 0\n strandData.append(sheet[0],sheet[1],sheet[2],\n sheet[3],sheet[4],sheet[5],\n sheet[6][:strandsBreak[i]])\n i = i+1\n while i!= len(strandsBreak):\n strandData.append(sheet[0],sheet[1],sheet[2],\n sheet[3],sheet[4],sheet[5],\n sheet[6][strandsBreak[i-1]:\n strandsBreak[i]])\n i = i+1\n\n strandData.append(sheet[0],sheet[1],sheet[2],\n sheet[3],sheet[4],sheet[5],\n sheet[6][strandsBreak[i-1]:])\n return strandData", "def stl_names(self):\n return [stl.member.get_full_name() for stl in self.stls.all()]", "def bandNames(self):\n return self.metadata[\"bands\"]", "def names(self) -> List[str]:\n return list(self.duties.keys()) + list(self.aliases.keys())", "def _getAllWorklistNames(self):\n log.debug(\"Finding all worklists mentioned in this statemachine.\")\n worklists = {}\n names = [s.getTaggedValue('worklist')\n for s in self.sm.getStates(no_duplicates = 1)\n if s.getTaggedValue('worklist')]\n for name in names:\n worklists[name] = 'just filtering out doubles'\n result = worklists.keys()\n log.debug(\"Found the following worklists: %r.\", result)\n return result", "def get_basis_names(self) -> List[str]:\n pass", "def getSchemataNames():", "def bandNames(self, reference='all', renamed=False):\n if reference == 'all':\n if not renamed:\n bands = [band.id for band in self.bands]\n else:\n bands = [band.name for band in self.bands]\n else:\n if not renamed:\n bands = [band.id for band in self.bands if band.reference == reference]\n else:\n bands = [band.name for band in self.bands if band.reference == reference]\n return bands", "def speciesNames(self):\n nsp = self.nSpecies()\n return map(self.speciesName,range(nsp))", "def get_station_names(self):\n station_names = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n station_name = ' '.join(wrapper.find(\"h3\").text.split(' ')[:-1])\n station_names.append(station_name)\n return np.array(station_names).T", "def bosons(self) -> Set[str]:\n return self._taxonomy_dict['boson']", "def get_words_from_sysets(synset):\n synlist = []\n for s in synset:\n syns = s.lemmas()[0].name()\n synlist.append(syns)\n return synlist", "def getrcubandnames(self):\n return list(self.rcumode_passbands.values())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the names of the manifest strands that are found in this twine.
def available_manifest_strands(self): return self._available_manifest_strands
[ "def available_strands(self):\n return self._available_strands", "def manifest(self):\n return self._client[\"manifests\"]", "def list_manifests():\n import enaml\n with enaml.imports():\n from .pulses.manifest import PulsesManagerManifest\n from .tasks.manifest import PulsesTasksManifest\n from .measure.manifest import PulsesMeasureManifest\n return [PulsesManagerManifest, PulsesTasksManifest, PulsesMeasureManifest]", "def smooth_manifests(self):\n # type: () -> list[EncodingOutputPathsSmoothManifest]\n return self._smooth_manifests", "def dash_manifests(self):\n # type: () -> list[EncodingOutputPathsDashManifest]\n return self._dash_manifests", "def getAtomNames(self):\n return self._raw_data['ATOM_NAME']", "def names(self):\n for wool in self.wools.items():\n yield wool.name", "def hls_manifests(self):\n # type: () -> list[EncodingOutputPathsHlsManifest]\n return self._hls_manifests", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def fqns(self):\n return [fqn for fqn in self.runinfos]", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def get_names(self):\n name_list = []\n for name in self.output_name_list:\n name_list += glob.glob(name)\n name_list = sorted(list(set(name_list)))\n return name_list", "def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def abandoned_arm_names(self) -> Set[str]:\n return set(self._abandoned_arms_metadata.keys())", "def names(self) -> List[str]:\n return list(self.duties.keys()) + list(self.aliases.keys())", "def onboot_names(self):\n ext_names = []\n for ext in self.extensions.values():\n if not ext.onboot:\n continue\n ext_names.append(ext.name)\n return ', '.join(sorted(ext_names))", "def get_raftnames_run(run):\n raftnames = get_sflat_files_run(run).keys()\n return sorted(raftnames)", "def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, self.get_app_ids()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate that the children values, passed as either a file or a json string, are correct.
def validate_children(self, source, **kwargs):
    # TODO cache this loaded data keyed on a hashed version of kwargs
    children = self._load_json("children", source, **kwargs)
    self._validate_against_schema("children", children)

    strand = getattr(self, "children", [])

    # Loop the children and accumulate values so we have an O(1) check
    children_keys = {}
    for child in children:
        children_keys[child["key"]] = children_keys.get(child["key"], 0) + 1

    # Check there is at least one child for each item described in the strand
    # TODO add max, min num specs to the strand schema and check here
    for item in strand:
        strand_key = item["key"]
        if children_keys.get(strand_key, 0) <= 0:
            raise exceptions.InvalidValuesContents(f"No children found matching the key {strand_key}")

    # Loop the strand and add unique keys to dict so we have an O(1) check
    strand_keys = {}
    for item in strand:
        strand_keys[item["key"]] = True

    # Check that each child has a key which is described in the strand
    for child in children:
        child_key = child["key"]
        if not strand_keys.get(child_key, False):
            raise exceptions.InvalidValuesContents(
                f"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine."
            )

    # TODO Additional validation that the children match what is set as required in the Twine
    return children
[ "def check_dict_child(_json_data):\n nb_child = 0\n for test_dict in _json_data:\n if type(_json_data[test_dict]) is dict:\n nb_child += 1\n return nb_child", "def check_children_attributes(self, branch):\n attributes = branch.get_attributes()\n for attr in attributes:\n if not isinstance(attributes[attr], str) and not isinstance(attributes[attr], list) :\n print('Attribute '+str(attr)+' of '+ branch.__class__.__name__ + ' should be str or list')\n self.assertTrue(False)\n children = branch.get_children()\n for child in children:\n self.check_children_attributes(child)", "def verify_json(self, incoming_json, respond_obj, file=False):\n if isinstance(incoming_json, str):\n incoming_json = json.loads(incoming_json)\n if file:\n for expect_json_file in respond_obj[\"request_verify_data\"]:\n expect_json_file = getAbsPath(expect_json_file, getDirName(self.datafile))\n expect_json = json.load(open(expect_json_file))\n if sorted(incoming_json.items()) == sorted(expect_json.items()):\n return True\n return False\n else:\n for json_pair in respond_obj[\"request_verify\"]:\n json_keys = json_pair.split(\",\")[0][4:].split(\"[\")\n # Since datafile is xml and it only have string\n # must have a way to process different object type in json\n json_value = literal_eval(json_pair.split(\",\")[1][6:])\n # travesing to get the child element value\n incoming_json_index = incoming_json\n for json_key in json_keys:\n json_key = json_key.replace(\"]\", \"\")\n if json_key not in incoming_json_index:\n return False\n else:\n incoming_json_index = incoming_json_index[json_key]\n if incoming_json_index != json_value:\n return False\n return True", "def validate_strict_JSON_serializability(arg, /) -> None:\n if (arg is None) or type(arg) in [int, bool, str]:\n return\n elif type(arg) is list: # No sub-typing! 
Also, fail on tuples.\n for item in arg:\n validate_strict_JSON_serializability(item)\n return\n elif type(arg) is dict:\n for key, value in arg.items():\n if type(key) is not str:\n raise ValueError(\"Invalid Config: non-string dict key\")\n validate_strict_JSON_serializability(value)\n return\n else:\n raise ValueError(\"Invalid Config: Contains non-allowed python type\")", "def _check_format(file_path, content):\n # TODO: replace with JSON schema validation\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n raise exceptions.FileFormatError(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)\n raise exceptions.FileFormatError(err_msg)", "def test_children(self):\n self.assertIn(self.ts, self.nwbfile.children)\n self.assertIn(self.ts2, self.nwbfile.children)\n self.assertIn(self.mod, self.nwbfile.children)\n self.assertIn(self.ts3, self.mod.children)", "def _assert_valid_deep(value):\n if isinstance(value, dict):\n for v in value.itervalues():\n _assert_valid_deep(v)\n elif isinstance(value, list):\n for v in value:\n _assert_valid_deep(v)\n else:\n if hasattr(value, \"assert_valid\"):\n value.assert_valid()", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def _check_allowed_values(self, key: str, value: Any):\n allowedValues = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedValues\", None)\n if allowedValues is not None and value not in allowedValues:\n raise Exception(\n f\"Value '{value}' is not an allowed value for '{key}'. Allowed values are: {', '.join(allowedValues)}\"\n )", "def _check_children(self):\n def froze_list(l):\n return frozenset(frozenset(child) for child in l)\n children, values = self._get_children()\n if froze_list(children) != froze_list(self.children) or frozenset(values) != frozenset(self.values):\n self._children_watcher()", "def _check_data_type(self, key: str, value: Any):\n allowedDataType = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"allowedDataType\", None)\n if allowedDataType is not None and not isinstance(value, allowedDataType):\n raise Exception(\n f\"Value '{value}' is not of the correct type. 
The allowed data type is: {allowedDataType.__name__}\"\n )", "def __is_json_of_downloaded_file_info_in_correct_format(self, json_data: List[dict]):\n\n \"\"\"\n The format of the JSON should be like :\n [\n {\n 'name': 'ipg150519.tar.gz', # (str)\n 'size': 11134012, # Bytes (int)\n 'modified_date': 'Mar 01 2018' # (str)\n },\n ...\n ]\n \"\"\"\n\n for file_info in iter(json_data):\n if (not isinstance( file_info.get('name', ''), str) or\n not isinstance( file_info.get('size', 0), int) or\n not isinstance( file_info.get('modified_date', ''), str)\n ):\n return False\n\n return True", "def is_valid_child(self, child):\n return isinstance(child, baseobject.PBXBaseObject) \\\n and child.isa in self.allow_children_types()", "def test_bad_files_object(self):\n # \"current\":null should be disallowed by the schema\n worm_file_text1 = (\n ('{\"files\":{\"current\":null, \"prev\":null, \"next\":[\"_1\", \"_2\"]},'\n '\"units\":{\"t\":\"s\",\"x\":\"mm\",\"y\":\"mm\"},'\n '\"data\":[{\"id\":\"3\", \"t\":[1.3], '\n '\"x\":[[3,4]], \"y\":[[5.4,3]]}]}'))\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n WCONWorms.load(StringIO(worm_file_text1))\n\n # missing \"current\" should be disallowed by the schema\n worm_file_text2 = (\n ('{\"files\":{\"prev\":null, \"next\":[\"_1\", \"_2\"]},'\n '\"units\":{\"t\":\"s\",\"x\":\"mm\",\"y\":\"mm\"},'\n '\"data\":[{\"id\":\"3\", \"t\":[1.3], '\n '\"x\":[[3,4]], \"y\":[[5.4,3]]}]}'))\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n WCONWorms.load(StringIO(worm_file_text2))", "def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)", "def _CheckJson(input_api, output_api):\n for affected_file in input_api.AffectedFiles(include_deletes=False):\n filename = affected_file.AbsoluteLocalPath()\n if os.path.splitext(filename)[1] != '.json':\n continue\n try:\n input_api.json.load(open(filename))\n except ValueError:\n return [output_api.PresubmitError('Error parsing JSON in %s!' 
% filename)]\n return []", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file content is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n\n elif not isinstance(content, (list, dict)):\n # testcase file content does not match testcase format\n err_msg = u\"Testcase file content format invalid: {}\".format(file_path)\n logger.log_error(err_msg)", "def validate_fields(self, tree):\n # Check fields\n fields = list(tree.keys())\n for k in self.fields:\n assert (k in fields)", "def _validate_file(\n instance: typing.Dict[str, typing.Any],\n schema: typing.Dict[str, typing.Any],\n path: typing.List[str],\n file_names_by_id: typing.Optional[typing.Dict[int, str]] = None\n) -> None:\n if not isinstance(instance, dict):\n raise ValidationError('instance must be dict', path)\n valid_keys = {'_type', 'file_id'}\n required_keys = valid_keys\n schema_keys = set(instance.keys())\n invalid_keys = schema_keys - valid_keys - OPT_IMPORT_KEYS\n if invalid_keys:\n raise ValidationError(f'unexpected keys in schema: {invalid_keys}', path)\n missing_keys = required_keys - schema_keys\n if missing_keys:\n raise ValidationError(f'missing keys in schema: {missing_keys}', path)\n if instance['_type'] != 'file':\n raise ValidationError('expected _type \"file\"', path)\n if not isinstance(instance['file_id'], int):\n raise ValidationError('file_id must be int', path)\n if file_names_by_id is not None:\n if instance['file_id'] not in file_names_by_id:\n raise ValidationError('file does not exist', path)\n file_name = file_names_by_id[instance['file_id']]\n if 'extensions' in schema:\n for extension in schema['extensions']:\n if file_name.lower().endswith(extension.lower()):\n break\n else:\n raise ValidationError(f'file name should have one of these extensions: {\", \".join(schema[\"extensions\"])}', path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
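The two O(1) cross-checks in validate_children can be illustrated with plain lists of dicts; this sketch skips the schema-validation step and uses generic exceptions, and the sample data is made up:

def cross_check_children(strand, children):
    """Check every strand key has at least one child and every child key is described in the strand."""
    child_counts = {}
    for child in children:
        child_counts[child["key"]] = child_counts.get(child["key"], 0) + 1

    for item in strand:
        if child_counts.get(item["key"], 0) <= 0:
            raise ValueError(f"No children found matching the key {item['key']!r}")

    strand_keys = {item["key"] for item in strand}
    for child in children:
        if child["key"] not in strand_keys:
            raise ValueError(f"Child with key {child['key']!r} not described in the strand.")


strand = [{"key": "wind_turbine"}]
cross_check_children(strand, [{"key": "wind_turbine", "id": "abc123"}])  # passes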
Validate that all credentials required by the twine are present. Credentials must be set as environment variables, or defined in a '.env' file. If stored remotely in a secrets manager (e.g. Google Cloud Secrets), they must be loaded into the environment before validating the credentials strand. If not present in the environment, validate_credentials will check for variables in a .env file (if present) and populate the environment with them. Typically a .env file resides at the root of your application (the working directory) although a specific path may be set using the `dotenv_path` argument. .env files should never be committed to git or any other version control system.
def validate_credentials(self, *args, dotenv_path=None, **kwargs):
    if not hasattr(self, "credentials"):
        return set()

    # Load any variables from the .env file into the environment.
    dotenv_path = dotenv_path or os.path.join(".", ".env")
    load_dotenv(dotenv_path)

    for credential in self.credentials:
        if credential["name"] not in os.environ:
            raise exceptions.CredentialNotFound(
                f"Credential {credential['name']!r} missing from environment or .env file."
            )

    return self.credentials
[ "def validate_credentials(self, credentials_to_validate):\n\n #TO-DO\n pass", "def load_credentials():\n\n # Get the config file path\n path = os.path.expanduser('~/.stirplate/config')\n\n # Default the credentials to NoneType\n user_id = None\n key = None\n secret = None\n location = None\n\n # Read the config data\n if os.path.exists(path):\n with open(path, 'r') as f_h:\n config = json.load(f_h)\n\n # Get the credentials from the config files\n if 'access_key' in config and 'access_secret' in config:\n key = config['access_key']\n secret = config['access_secret']\n user_id = config['id']\n location = config['location']\n\n if all(v is None for v in [user_id, key, secret, location]):\n raise EnvironmentError('Please install your Stirplate credentials.')\n\n return user_id, key, secret, location", "def _check_credentials():\n try:\n faculty.config.resolve_profile()\n except faculty.config.CredentialsError:\n _ensure_creds_file_present()\n _check_creds_file_perms()", "def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok", "def check_credentials(credentials):", "def _find_credentials(self) -> None:\n if self.project_id is None or self.project_api_key is None:\n if self.cfg_file is None:\n raise ValueError(\n \"Provide project_id and project_api_key via arguments or config file!\"\n )\n\n # Source credentials from config file.\n try:\n with open(self.cfg_file) as src:\n config = json.load(src)\n try:\n self.project_id = config[\"project_id\"]\n self.project_api_key = config[\"project_api_key\"]\n except KeyError as e:\n raise ValueError(\n \"Provided config file does not contain project_id and \"\n \"project_api_key!\"\n ) from e\n logger.info(\"Got credentials from config file.\")\n except FileNotFoundError as e:\n raise ValueError(\"Selected config file does not exist!\") from e\n\n elif all(\n v is not None\n for v in [self.cfg_file, self.project_id, self.project_api_key]\n ):\n logger.info(\n \"Credentials are provided via arguments and config file, \"\n \"now using the argument credentials.\"\n )", "def _validate_creds_file(self, verbose=False):\n oauth1 = False\n oauth1_keys = [\"app_key\", \"app_secret\", \"oauth_token\", \"oauth_token_secret\"]\n oauth2 = False\n oauth2_keys = [\"app_key\", \"app_secret\", \"access_token\"]\n if all(k in self.oauth for k in oauth1_keys):\n oauth1 = True\n elif all(k in self.oauth for k in oauth2_keys):\n oauth2 = True\n\n if not (oauth1 or oauth2):\n msg = f\"Missing or incorrect entries in {self.creds_file}\\n\"\n msg += pprint.pformat(self.oauth)\n raise ValueError(msg)\n elif verbose:\n print(f'Credentials file \"{self.creds_file}\" looks good')", "def check_credentials():\n if environ.get('REPO_USER') and environ.get('REPO_PASS'):\n return {'username': environ['REPO_USER'], 'password': environ['REPO_PASS']}\n return {}", "def __get_credentials():\n username = os.getenv('username')\n password = os.getenv('password')\n if not username or not password:\n raise ValueError('Username or Password value is not set in .env file')\n return username, password", "def test_validate_google_application_credentials(self):\n\n with CliRunner().isolated_filesystem():\n # Make google application 
credentials\n credentials_file_path = os.path.join(pathlib.Path().absolute(), \"google_application_credentials.json\")\n with open(credentials_file_path, \"w\") as f:\n f.write(\"\")\n validator = ObservatoryConfigValidator(self.schema)\n\n # google_application_credentials tag and existing file\n validator.validate({\"google_cloud\": {\"credentials\": credentials_file_path}})\n self.assertEqual(len(validator.errors), 0)\n\n # google_application_credentials tag and non-existing file\n validator.validate({\"google_cloud\": {\"credentials\": \"missing_file.json\"}})\n self.assertEqual(len(validator.errors), 1)", "def check_for_credential_file(self):\r\n if 'AWS_CREDENTIAL_FILE' in os.environ:\r\n path = os.environ['AWS_CREDENTIAL_FILE']\r\n path = os.path.expanduser(path)\r\n path = os.path.expandvars(path)\r\n if os.path.isfile(path):\r\n fp = open(path)\r\n lines = fp.readlines()\r\n fp.close()\r\n for line in lines:\r\n if line[0] != '#':\r\n if '=' in line:\r\n name, value = line.split('=', 1)\r\n if name.strip() == 'AWSAccessKeyId':\r\n if 'aws_access_key_id' not in self.args:\r\n value = value.strip()\r\n self.args['aws_access_key_id'] = value\r\n elif name.strip() == 'AWSSecretKey':\r\n if 'aws_secret_access_key' not in self.args:\r\n value = value.strip()\r\n self.args['aws_secret_access_key'] = value\r\n else:\r\n print 'Warning: unable to read AWS_CREDENTIAL_FILE'", "def verify_credentials(cls):\r\n\r\n if not cls.username:\r\n raise ValueError(\"Username is empty.\")\r\n if not cls.password:\r\n raise ValueError(\"Password is empty.\")\r\n return True", "def test_missing_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with self.assertRaises(exceptions.CredentialNotFound):\n twine.validate_credentials()", "def test_validate_credentials(self):\n pass", "def test_getcredentials_from_env(netrc):\n netrc.side_effect = FileNotFoundError(\"\")\n server = KasServer()\n assert server._username == USERNAME\n assert server._password == PASSWORD", "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or\n credentials.access_token_expired):\n return None\n return credentials", "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if credentials.invalid or credentials.access_token_expired:\n return None\n return credentials", "def are_credentials_valid(self):\n try:\n conn = self.conn\n return True\n except (libcloud.types.InvalidCredsError, AttributeError):\n return False", "def _validate_client_credentials(self):\r\n self._validate_access_credentials()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate monitor message against the monitor message schema strand.
def validate_monitor_message(self, source, **kwargs): return self._validate_values(kind="monitor_message", source=source, **kwargs)
[ "def validate(self):\n\n # Check if motherboard record exists\n motherboard_record_exists = False\n board_info_records = self.groups[constants.RecordType.BASEBOARD_RECORD]\n for handle_id in board_info_records:\n record = self.records[handle_id]\n if 'Type' in record.props and record.props['Type'].val == 'Motherboard':\n motherboard_record_exists = True\n break\n if not motherboard_record_exists:\n self.err_msgs['Motherboard SMBIOS record is missing.'] = (\n 'There should be at least one structure defining the motherboard '\n '(Board Type: 0xA).')\n\n return self.err_msgs", "def validate_message(self, state_id, msg):\n pass", "def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise", "def validation_event(self, message):", "def message_error_validator():\n\n return validator.MessageErrorSchema()", "def Validate(self, schema, allow_extra_fields=False):\r\n assert schema, \"A schema must be provided in order to validate.\"\r\n try:\r\n validictory.validate(self.dict, schema)\r\n if not allow_extra_fields:\r\n self._FindExtraFields(self.dict, schema, True)\r\n self.schema = schema\r\n except Exception as e:\r\n raise BadMessageException(e.message), None, sys.exc_info()[2]", "def test_well_structured_message(self):\n m = self._create_message(self.rsp_k21)\n self.assertTrue(m.validate())", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def validate_create_message(cls, message: MessageAPI) -> None:\n # this method does not become relevant until the Shanghai hard fork\n pass", "def validate_message_payload(payload):\n return objects_module.messages.validate_message_payload(payload)", "def notify_message_is_valid(message: object) -> bool:\n schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"title\": \"Notify elements.\",\n \"description\": \"Elements that one client can send to one or many other clients.\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"projectors\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"integer\"},\n },\n \"reply_channels\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"users\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"integer\"},\n }\n }\n },\n \"minItems\": 1,\n }\n try:\n jsonschema.validate(message, schema)\n except jsonschema.ValidationError:\n return False\n else:\n return True", "def test_wrong_datatype_field(self):\n msg = self._create_message(self.rsp_k21)\n msh_9 = Field('MSH_9', datatype='ST')\n msh_9.msh_9_1 = 'RSP_K21'\n msg.msh.msh_9 = msh_9\n self.assertRaises(ValidationError, msg.validate, report_file=self.report_file)\n self._test_report_file('ERROR')", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n 
Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def valid_message(msg: str) -> bool:\n return bool(msg.replace(' ', '')) and len(msg) < 100", "def validate(self):\n if self.data.Header.PacketType != type(self).TYPE:\n raise BadPeer(\"bad packet type: \", self.data.Header.PacketType)\n #TODO: test data 0xdeadbeef should be changed to other value to pass this validation\n #if self.data.Time > 0x7fffffff:\n # raise BadPeer(\"Time field range is invalid\")", "def validate(self):\n\n if not re.fullmatch(r'\\d{10}', self.phone_number):\n raise ValidationError({'phone_number': \"Phone number must be 10 digits\"})\n\n valid_statuses = [ status[0] for status in MessengerSubscriber.STATUS_CHOICES ]\n if self.status not in valid_statuses:\n raise ValidationError({'status': \"Valid status values are {}\".format(valid_statuses)})\n\n if not is_address_valid(street_address=self.address):\n raise ValidationError({'address': \"Address '{}'' is not specific enough\".format(self.address)})\n\n if not (self.latitude and self.longitude):\n raise ValidationError({'address': \"Address '{}'' did not map to a latitude or longitude\".format(self.address)})\n\n valid_languages = [ choice[0] for choice in MessengerSubscriber.LANG_CHOICES ]\n if self.lang not in valid_languages:\n raise ValidationError({'lang': \"Valid languages are {}\".format(valid_languages)})\n\n if not (self.created_at and self.last_status_update) or self.created_at > self.last_status_update:\n raise ValidationError({'created_at': \"Subscriber timestamps do not appear valid\"})", "def validateMessage(self):\n assert self.validation_class is not None, (f'{self.__class__.__name__}'\n ' must include a validation'\n '_attribute or override '\n 'validateMessage method.')\n\n validation_class = self.validation_class\n registry = validation_class(data=self.message, context={'request': None})\n\n if registry.is_valid():\n self.is_valid = True\n self.registry = registry\n else:\n self.result = registry.errors\n super().finishTask(failed=True)\n\n return self.is_valid", "def validate_message(self, message):\n if len(message) > 140:\n raise Exception(\"Mensagem inválida: excede 140 caracteres\")", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise 
ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate the output manifest, passed as either a file or a json string.
def validate_output_manifest(self, source, **kwargs): return self._validate_manifest("output_manifest", source, **kwargs)
[ "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def perform_valid_manifest_post(context, manifest, url):\n filename = \"data/{manifest}\".format(manifest=manifest)\n files = {'manifest[]': open(filename, 'rb')}\n endpoint = \"{coreapi_url}{url}\".format(coreapi_url=context.coreapi_url, url=url)\n response = requests.post(endpoint, files=files)\n response.raise_for_status()\n context.response = response.json()\n print(response.json())", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. 
Exiting.\"\n exit(-1)\n return False", "def _parse_and_validate_manifest(manifest_filename):\n\n # Strip comments while keeping line numbers.\n s = \"\"\n with open(manifest_filename, \"r\") as f_in:\n for line in f_in:\n comment_pos = line.find(\"//\")\n s += line[:comment_pos] + \"\\n\"\n\n\n manifest = json.loads(s)\n manifest_val = ArgumentsValidator(manifest, \"Dataset manifest\")\n with manifest_val:\n\n compression_type = manifest_val.get(\"compression\", [ATYPE_NONE, ATYPE_STRING], True)\n if compression_type is not None:\n compression_type = compression_type.upper()\n if compression_type not in [\"ZLIB\", \"GZIP\"]:\n raise ValueError(\"Unsupported compression type: %s\" % compression_type)\n\n allow_var_len = manifest_val.get(\"allow_var_len\", ATYPE_BOOL, True)\n features_list = manifest_val.get(\"features\", ATYPE_DICTS_LIST, True)\n\n\n\n # Validate each feature and create parser objects.\n feat_parsers = {}\n feat_shapes = {}\n feat_dtypes = {}\n\n for feat in features_list:\n\n feat_val = ArgumentsValidator(feat, \"Dataset feature\")\n with feat_val:\n name = feat_val.get(\"name\", ATYPE_STRING, True)\n dtype = tf.as_dtype(feat_val.get(\"dtype\", ATYPE_STRING, True))\n shape = feat_val.get(\"shape\", ATYPE_INTS_LIST, True)\n deserialize_type = feat_val.get(\"deserialize_type\", ATYPE_STRING, True)\n deserialize_args = feat_val.get(\"deserialize_args\", ATYPE_DICT, False, default={})\n var_len = feat_val.get(\"var_len\", ATYPE_BOOL, allow_var_len, default=False)\n\n if var_len and not allow_var_len:\n raise ValueError(\"Variable length features not allowed for this dataset.\")\n\n try:\n shape = [int(x) for x in list(shape)]\n except:\n raise ValueError(\"Invalid shape for feature `%s`: %s\" % (name, shape))\n\n \n try:\n feat_parsers[name] = _PARSERS[deserialize_type](shape, dtype, deserialize_args, var_len)\n except KeyError:\n raise ValueError(\"Unsupported deserialization type: %s\" % deserialize_type)\n\n\n if var_len:\n feat_shapes[name] = [-1] + shape\n else:\n feat_shapes[name] = shape\n\n feat_dtypes[name] = dtype\n\n\n return compression_type, allow_var_len, feat_parsers, feat_shapes, feat_dtypes", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def validate(args):\n validation_script = os.path.join(\n \"bin\", \"buddy\", \"buddy\", \"validate_file_contents.py\"\n )\n subprocess.run([\"python\", validation_script] + args.yaml)", "def validate(self):\n self.failedTests = []\n ret = True\n @testCase\n def testZip(self): self.zipfile.testzip()\n ret = testZip(self) and ret\n\n @testCase\n def validateManifest(self):\n jsonschema.validate(self.manifest, Aptofile.SCHEMA,\n Aptofile.VALIDATOR,\n format_checker = jsonschema.FormatChecker())\n ret = validateManifest(self) and ret\n\n @testCase\n def fileDate(self): self._checkTimestamp(self.manifest['date'])\n ret = fileDate(self) and ret\n\n return ret", "def check_exe(self, input_filename):\n if pathlib.Path(input_filename).suffix != '.json':\n return\n output_name = str(pathlib.Path(self.output_path, pathlib.Path(input_filename).stem))\n cmd = [self.check_exe_path, input_filename, output_name]\n print(cmd)\n process = Popen(cmd, shell=True)\n process.communicate()", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == 
\"\"", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def test_sanity(tmpdir, manifest_file, manifest):\n _file = tmpdir.join('manifest.yaml')\n _file.write(manifest_file)\n assert get_manifest_from_path(str(_file)).contents == manifest.contents", "def valid_and_export(template, dashname):\n\n if not json_validation(template):\n print('Bad json format for ' + dashname + ' grafana dashboard')\n else:\n if export_file(template, dashname + '.json'):\n print('Successfully generated dashboard: ' + dashname)\n else:\n print('Error during export dashboard: ' + dashname)", "def schema_validate_kubernetes_output(validate_data, cache_dir):\n (kind, version), validate_files = validate_data\n KubernetesManifestValidator(cache_dir).validate(validate_files, kind=kind, version=version)", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = 
self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def eval_manifest(manifest_file: str, opts: argparse.Namespace) -> None:\n mf = ShExManifest(manifest_file, fmt=opts.manifest_format)\n if not opts.manifest_entry: # All entries\n for k, v in sorted(mf.entries.items(), key=lambda x: x[0]):\n eval_manifest_entry(k, v, opts)\n else: # Single entry\n if opts.manifest_entry in mf.entries:\n eval_manifest_entry(opts.manifest_entry, mf.entries[opts.manifest_entry], opts)\n else:\n print(\"%s not found in manifest\" % opts.manifest_entry)", "def valid_is_json(self):\n return self.file_name.endswith('.json')", "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getter that will return cls[name] if cls is a dict, or cls otherwise.
def _get_cls(name, cls): return cls.get(name, None) if isinstance(cls, dict) else cls
[ "def getInstacefromcls(cls, clsname, valuedict=None):\n for i in range(len(clslist)):\n if clsname == clslist[i]:\n return clslist[i](valuedict)\n return None", "def lookup_by_class(dict_,class_):\n v = None\n for c in classlist(class_)[::-1]:\n if c in dict_:\n v = dict_[c]\n break\n return v", "def lookup(self, cls, name, mode):\n mro = [el.__name__ for el in cls.mro()]\n registry = self.method_registry if mode=='method' else self.type_registry\n\n for class_name in mro:\n entries = registry[class_name]\n if name in entries:\n return entries[name]\n raise KeyError(\"Could not find method named %r.\"\n \" Please ensure classes using component decorators\"\n \" are decorated with the Model.definition\"\n \" class decorator.\" % name)", "def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]", "def get_class(cls, name_or_sl):\n try:\n return cls.sl_map[int(name_or_sl)]\n\n except TypeError as e:\n raise TypeError(\"Bad name or sl: {} : {}\".format(name_or_sl, e))\n except ValueError:\n try:\n return cls.class_map[name_or_sl.lower()]\n except (KeyError, ValueError):\n raise NotASummaryName(\"Value '{}' is not a valid summary level\".format(name_or_sl))", "def by_name(name, cls=None):\n\n if cls is None:\n cls = base.Point\n\n if cls.__name__ == name:\n return cls\n\n for c in cls.__subclasses__():\n cc = by_name(name, c)\n if cc is not None:\n return cc\n\n return None", "def __getitem__(self, name):\n ikEl = self.infoKinds.get(name, None)\n if ikEl:\n return ikEl.toDict(self)\n return None", "def get(self, name_or_klass):\n if not isinstance(name_or_klass, str):\n name_or_klass = name_or_klass.__name__\n return self._modes[name_or_klass]", "def _get_child_by_name(cls, class_name=str) -> \"Base\":\n class_family = [cls] + cls.__subclasses__()\n matched_cls = [\n el for el in class_family if el.__name__.split(\".\")[-1] == class_name\n ]\n if len(matched_cls) != 1:\n raise KeyError(\n f\"Could not find specialization {class_name}\"\n + f\" for base class {cls}.\"\n + \"\\nPossible options:\\n\\t\"\n + \"\\n\\t\".join([el.__name__.split(\".\")[-1] for el in class_family])\n )\n else:\n expected_cls = matched_cls[0]\n\n return expected_cls", "def get(self, cls, id):\n if cls not in classes.values():\n return None\n\n all_of_class = models.storage.all(cls)\n for item in all_of_class.values():\n if item.id == id:\n return item\n\n return None", "def get(self, cls=None, id=None):\n # if parameters not specified, returns None\n if cls is None or id is None:\n return None\n # call all method with specified class to get dictionary\n # of all objects of that class in current MySQL session\n all_objs = self.all(cls)\n # checks for matching id in class objects\n if all_objs is not {}:\n for obj in all_objs.values():\n # if found matching id, return the retrieved object\n if id == obj.id:\n return obj\n # if no matching object was found in MySQL session, return None\n return None", "def _get_optimised(self, cls: Type[RV]) -> Type[RV]:\n try:\n return self._optimised[cls]\n except KeyError:\n pass\n\n # Check if the class comes from psycopg.types and there is a class\n # with the same name in psycopg_c._psycopg.\n from psycopg import types\n\n if cls.__module__.startswith(types.__name__):\n new = cast(Type[RV], getattr(_psycopg, cls.__name__, 
None))\n if new:\n self._optimised[cls] = new\n return new\n\n self._optimised[cls] = cls\n return cls", "def get(self, name):\n if name in self.java_classes:\n return self.java_classes[name]\n else:\n return self.load(name)", "def get_single(self, cls_or_name, id=None, strict=False):\r\n if strict or isinstance(cls_or_name, str):\r\n results = self.stack.get(cls_or_name, [])\r\n else:\r\n results = chain(\r\n *map(\r\n itemgetter(1),\r\n filter(matching_class0(cls_or_name), self.stack.items()),\r\n )\r\n )\r\n\r\n if id:\r\n results = [obj for obj in results if obj.id == id]\r\n else:\r\n results = list(results)\r\n\r\n return None if len(results) != 1 else results[0]", "def get_class(name):\n try:\n cls, constructor = registry[name]\n except KeyError:\n raise UnregisteredClassError(\"'%s' is not a registered \"\n \"JSONAlizable class name\" % name, name)\n if constructor is not None:\n return constructor\n return cls", "def __getattr__(self, name: str) -> Any:\n return getattr(self, name)", "def __getattr__(cls, name):\n name = name.lower()\n\n try:\n if cls.section is None:\n return _CONFIG_YAML[name]\n elif cls.subsection is None:\n return _CONFIG_YAML[cls.section][name]\n else:\n return _CONFIG_YAML[cls.section][cls.subsection][name]\n except KeyError as e:\n # If one of the handler lists isn't defined, return an empty list.\n log.warning(f\"{name} is not defined in the config.yaml file -- returning an falsy value.\")\n if cls._get_annotation(name) == list:\n return []\n elif cls._get_annotation(name) == dict:\n return {}\n else:\n return None", "def getContainerfromCls(cls, clsname):\n for i in range(len(clslist)):\n if clsname == clslist[i]:\n return clslist[i]().getcontainer()\n return None", "def get_class(self, name):\n return self.host.get_class(name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a single strand by name.
def validate_strand(self, name, source, **kwargs): return self.validate({name: source}, **kwargs)[name]
[ "def _validate_from_name(self, name):\n return name[:15]", "def testValidName(self, name: unicode, isPath: bool) -> None:\n ...", "def validate_name(self, name):\n\n lesson = Lessons.query.filter_by(id=self.lesson.data).first()\n academy = Academy.query.filter_by(name=self.academy.data).first()\n student = Student.query.filter_by(academy_id=academy.id).filter_by(name=self.name.data).first()\n\n if student is not None:\n raise ValidationError('Student name is already in the system with this Academy.')", "def validate_name(name, reserved_names=()):", "def is_valid_name(name: str) -> bool:\n return bool(re.fullmatch(pattern=r\"\\w{4,16}\", string=name))", "def test_validate_name_valid(benchmark_name):\n assert benchmark_utils.validate_name(benchmark_name)", "def check_schema_name(name: str):\n if not is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")", "def check_name(name):\n if len(name) > WorkflowCRD.NAME_MAX_LENGTH:\n raise ValueError(\n \"Name is too long. Max length: {}, now: {}\"\n \"\".format(WorkflowCRD.NAME_MAX_LENGTH, len(name))\n )\n if \".\" in name:\n raise ValueError(\"Name cannot include dot.\")\n if \"_\" in name:\n raise ValueError(\"Name cannot include underscore.\")\n\n match_obj = re.match(WorkflowCRD.NAME_PATTERN, name)\n if not match_obj:\n raise ValueError(\n \"Name is invalid. Regex used for validation is %s\"\n % WorkflowCRD.NAME_PATTERN\n )", "def validate_name(name: str) -> None:\n\n # Disallow empty.\n if not name:\n raise CleanError('Feature set name cannot be empty.')\n\n # Require starting with a letter.\n if not name[0].isalpha():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - names must start with a letter.'\n )\n\n # Require only letters, numbers, and underscores.\n if not name.replace('_', '').isalnum():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only letters, numbers, and underscores are allowed.'\n )\n\n # Require all lowercase.\n if not name.islower():\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - only lowercase letters are allowed.'\n )\n\n # Disallow leading, trailing, or consecutive underscores.\n # (these will result in a '' in the split results which evals to False)\n if not all(name.split('_')):\n raise CleanError(\n f\"Invalid feature set name '{name}'\"\n ' - leading, trailing, and consecutive underscores are'\n ' not allowed.'\n )", "def is_valid_name(name):\n return bool(STANDARD_NAME_REGEX.match(name))", "def validateResourceName(name):\n\n illegalChar = _resourceRe.search(name)\n if illegalChar:\n raise WrongNameError(illegalChar.start(), name[illegalChar.start()])\n if not _resourceFirstRe.match(name):\n if len(name) > 0:\n raise WrongNameError(0, name[0])\n else:\n raise WrongNameError(0, 0)", "def check_name(name):\n if not isinstance(name, str):\n raise TypeError('Donor Name must be a string.')\n if name == \"\":\n raise ValueError('Name is required for every donor.')", "def validate_name(name:str) -> bool:\r\n return name.isalpha() and name.count(\" \") == 0 and len(name) >= 2", "def checkName(self, event=None):\r\n self.Validate()", "def validate_species(self, name):\n accepted_species = Species.objects.values_list('name', flat=True)\n if name not in accepted_species:\n raise serializers.ValidationError(\n 'Species {0} is not allowed.'.format(name))\n else:\n return name", "def isValidDataTypeName(name: unicode) -> bool:\n ...", "def is_valid_name_error(name: str) -> Optional[GraphQLError]:\n if not isinstance(name, str):\n 
raise TypeError(\"Expected name to be a string.\")\n if name.startswith(\"__\"):\n return GraphQLError(\n f\"Name {name!r} must not begin with '__',\"\n \" which is reserved by GraphQL introspection.\"\n )\n if not re_name.match(name):\n return GraphQLError(\n f\"Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but {name!r} does not.\"\n )\n return None", "def verify_spec_name(spec_name):\n if not isinstance(spec_name, text_type):\n raise ValueError(\n \"expected spec name of string type, but got '{0}' of type '{1}'\".\n format(spec_name, to_str(type(spec_name))))", "def test_args_valid_name(self):\n output = addr.args_valid(name='someBox', addr=None, component=None)\n\n self.assertTrue(output)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that the nonce is correct, less than one hour old, and not more than two minutes in the future. Callers should also store used nonces and reject messages with previously-used ones.
def verify_and_burn_nonce(nonce): ret = re.match(r'^001[2-9][0-9]{3}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])' r'T([01][0-9]|2[0-3])(:[0-5][0-9]){2}Z[A-Za-z0-9]{6}$', nonce) if ret: date = parser.parse(nonce[3:-6]) now = datetime.utcnow().replace(tzinfo=tz.tzutc()) ret = date < (now + timedelta(minutes=2)) and date > (now + timedelta(hours=-1)) return ret # TODO: keep a record (at least for the last hour) of burned nonces
[ "def is_valid_nonce(prev_nonce, nonce):\n guess = '{0}{1}'.format(prev_nonce, nonce).encode()\n guess_hash = sha256(guess).hexdigest()\n\n return guess_hash[:5] == '00000'", "def check_and_redeem_nonce(cls, actor_id, nonce_id, level):\n def _transaction(nonces):\n \"\"\"\n This function can be passed to nonce_store.within_transaction() to atomically check \n whether a nonce is expired and, if not, redeem a use. The parameter, nonces, should\n be the value under the key `actor_id` associated with the nonce.\n \"\"\"\n # first pull the nonce from the nonces parameter\n try:\n nonce = nonces[nonce_id]\n except KeyError:\n raise PermissionError(\"Nonce does not exist.\")\n # check if the nonce level is sufficient\n try:\n if PermissionLevel(nonce['level']) < level:\n raise PermissionError(\"Nonce does not have sufficient permissions level.\")\n except KeyError:\n raise PermissionError(\"Nonce did not have an associated level.\")\n\n # check if there are remaining uses\n try:\n if nonce['remaining_uses'] == -1:\n logger.debug(\"nonce has infinite uses. updating nonce.\")\n nonce['current_uses'] += 1\n nonce['last_use_time'] = get_current_utc_time()\n nonce_store.update(actor_id, nonce_id, nonce)\n elif nonce['remaining_uses'] > 0:\n logger.debug(\"nonce still has uses remaining. updating nonce.\")\n nonce['current_uses'] += 1\n nonce['remaining_uses'] -= 1\n nonce_store.update(actor_id, nonce_id, nonce)\n else:\n logger.debug(\"nonce did not have at least 1 use remaining.\")\n raise PermissionError(\"No remaining uses left for this nonce.\")\n except KeyError:\n logger.debug(\"nonce did not have a remaining_uses attribute.\")\n raise PermissionError(\"No remaining uses left for this nonce.\")\n\n # first, make sure the nonce exists for the actor id:\n try:\n nonce_store[actor_id][nonce_id]\n except KeyError:\n raise PermissionError(\"Nonce does not exist.\")\n # atomically, check if the nonce is still valid and add a use if so:\n nonce_store.within_transaction(_transaction, actor_id)", "def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()", "def verify_nonce(self, new_nonce):\n return new_nonce == self.current_nonce + 1", "def check_nonce(self, id, timestamp, nonce):\n # We want to fetch the recorded clock skew for this id, along with\n # any existing cache entry for the provided nonce.\n key_skew = self._key(id, \"skew\")\n key_nonce = self._key(id, \"nonce\", str(timestamp), nonce)\n # Use get_multi to fetch both keys in a single request.\n # If the data appears to be corrupted then fail out for safety.\n try:\n cached = self.mcclient.get_multi([key_skew, key_nonce])\n except ValueError:\n return False\n # If the nonce appears in the cache, it must be stale.\n if key_nonce in cached:\n return False\n # If we've never recorded a clock skew for this id, record it now.\n try:\n skew = cached[key_skew]\n except KeyError:\n skew = int(time.time() - timestamp)\n self.mcclient.add(key_skew, skew, time=self.id_ttl)\n # If the adjusted timestamp is too old or too new, it is stale.\n # XXX TODO: we should use a monotonic clock here.\n if abs(timestamp + skew - time.time()) >= self.nonce_ttl:\n return False\n # The nonce is fresh, add it into the cache.\n self.mcclient.add(key_nonce, True, time=self.nonce_ttl)\n return True", "def useNonce(self, nonce):\r\n query = datastore.Query('Nonce')\r\n query['nonce ='] = nonce\r\n query['created >='] = (datetime.datetime.now() -\r\n 
datetime.timedelta(hours=6))\r\n\r\n results = query.Get(1)\r\n if results:\r\n datastore.Delete(results[0].key())\r\n return True\r\n else:\r\n return False", "def generate_nonce():\n return int(time.time() + 100)", "def nonceVerification(nonce, decryptedNonce):\r\n if (nonce == decryptedNonce):\r\n status = \"150 OK\"\r\n else:\r\n status = \"400 Error\"\r\n return status", "def _validate_token(self):\n expire = datetime.strptime(self.access.expire, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n if expire > datetime.utcnow():\n return True\n else:\n return False", "def nonce_length(self):\n return 5, 50", "def validate_nonce(self) -> bool:\n return pulumi.get(self, \"validate_nonce\")", "def _nonce(self):\n return str(int(round(time.time() * 10000)))", "def __check_token(self) -> bool:\r\n\r\n now = datetime.now(self.__tz)\r\n\r\n if (self.__token_expiration_date - now).total_seconds() < 0:\r\n log.debug('Token needs update!')\r\n return self.__update_token()\r\n return False", "def useNonce(self, server_url, timestamp, salt):\n\n if is_nonce_old(timestamp):\n return False\n\n try:\n mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,\n timestamp=timestamp)\n except me.DoesNotExist:\n mist_nonces = []\n\n if len(mist_nonces) == 0:\n print(\"Timestamp = %s\" % timestamp)\n MistNonce(\n server_url=server_url, salt=salt, timestamp=timestamp\n ).save()\n return True\n\n return False", "def default_nonce_duration():\n return now() + timedelta(hours=4)", "def test_blind_sig_expiration(self):\n signer_obj = ECCBlind(year=2020, month=1)\n point_r = signer_obj.signer_init()\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n msg = os.urandom(64)\n msg_blinded = requester_obj.create_signing_request(point_r, msg)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n verifier_obj = ECCBlind(pubkey=signer_obj.pubkey())\n self.assertFalse(verifier_obj.verify(msg, signature))", "def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,\n request, request_token=None,\n access_token=None):\n logger.debug('called')\n tn = TimestampAndNonce.objects.filter(\n consumer__key=client_key,\n timestamp=timestamp,\n nonce=nonce).count()\n if tn > 0:\n return False\n else:\n try:\n c = Consumer.objects.get(key=client_key)\n except Consumer.DoesNotExist:\n logger.debug('wrong consumer key')\n return False\n t = TimestampAndNonce(\n consumer=c,\n timestamp=timestamp,\n nonce=nonce)\n t.save()\n return True", "def test_nonce(self):\n # GIVEN\n new_nonce = self.chain.get_new_nonce_by_address(\"ABC\")\n util.logger.spam(f\"test_blockchain:test_nonce new_nonce({new_nonce})\")\n\n # WHEN\n verify_result = self.chain.verify_nonce_by_address(\"ABC\", new_nonce)\n util.logger.spam(f\"test_blockchain:test_nonce verify_result({verify_result})\")\n self.assertTrue(verify_result)\n set_result = self.chain._BlockChain__set_nonce_by_address(\"ABC\", new_nonce)\n self.assertTrue(set_result)\n\n # THEN\n next_new_nonce = self.chain.get_new_nonce_by_address(\"ABC\")\n util.logger.spam(f\"test_blockchain:test_nonce new_nonce({next_new_nonce})\")\n self.assertEqual(hex(int(new_nonce, 16) + 1), next_new_nonce)", "def test_oneMinute(self):\n self.assertEqual(common.Token.validity.total_seconds(), 60)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Map Juniper SRX Policy Object into xml config element
def to_xml(self): policy_element = create_element('policy') create_element('name', text=self.name, parent=policy_element) match_element = create_element('match', parent=policy_element) for s in self.src_addresses: create_element('source-address', text=s.name, parent=match_element) for d in self.dst_addresses: create_element('destination-address', text=d.name, parent=match_element) then_element = create_element('then', parent=policy_element) create_element(JuniperSRXPolicy.ActionMap[self.action], parent=then_element) log_element = create_element('log', parent=then_element) for log_type in self.logging: create_element(JuniperSRXPolicy.LoggingMap[log_type], parent=log_element) return policy_element
[ "def policy_settings():\n liquid_header = \"{% if jwt.typ== \\'Bearer\\' %}{{ jwt.exp }};{{ jwt.iat }};{{ jwt.iss }};\" \\\n \"{{ jwt.aud }};{{ jwt.typ }};{{ jwt.azp }}{% else %}invalid{% endif %}\"\n\n return rawobj.PolicyConfig(\"headers\", {\n \"response\": [{\"op\": \"set\", \"header\": \"X-RESPONSE-CUSTOM-JWT\", \"value\": liquid_header, \"value_type\": \"liquid\"}],\n \"request\": [{\"op\": \"set\", \"header\": \"X-REQUEST-CUSTOM-JWT\", \"value\": liquid_header, \"value_type\": \"liquid\"}],\n })", "def _wrap_policy(policy_doc):\n return {\"IAMPolicy\": policy_doc}", "def get_config(self):\n config = super(Sc2Policy, self).get_config()\n config['eps'] = self.eps\n config['testing'] = self.testing\n return config", "def get_config(self):\n config = super(LinearAnnealedPolicy, self).get_config()\n config['attr'] = self.attr\n config['value_max'] = self.value_max\n config['value_min'] = self.value_min\n config['value_test'] = self.value_test\n config['nb_steps'] = self.nb_steps\n config['inner_policy'] = get_object_config(self.inner_policy)\n return config", "def policy_settings(certificate):\n embedded_cert = embedded(certificate.certificate, \"tls.crt\", \"pkix-cert\")\n embedded_key = embedded(certificate.key, \"tls.key\", \"x-iwork-keynote-sffkey\")\n return rawobj.PolicyConfig(\"upstream_mtls\", {\"certificate_type\": \"embedded\",\n \"certificate_key_type\": \"embedded\",\n \"certificate\": embedded_cert,\n \"certificate_key\": embedded_key})", "def translate_policy(policy: dict):\n if 'PolicyName' in policy:\n # This is a normal policy that should not be expanded\n return policy\n template_name = next(iter(policy))\n template_parameters = policy[template_name]\n try:\n # 'convert' will return a list of policy statements\n policy_document = processor.convert(template_name, template_parameters)\n except InsufficientParameterValues as e:\n # Exception's message will give lot of specific details\n raise ValueError(str(e))\n except InvalidParameterValues:\n raise ValueError(\"Must specify valid parameter values for policy template '{}'\".format(template_name))\n return {\n \"PolicyName\": template_name + '-' + str(uuid.uuid4()),\n \"PolicyDocument\": policy_document\n }", "def module_config_template():\n\n template = {\n 'gwms_to_aws_data': {\n 'module': 'modules.glideinwms.t_gwms_to_aws_config',\n 'name': 'AWSFactoryEntryDataPublisher',\n 'parameters': {\n 'data_file': '/path/to/aws_instance_limits.csv',\n 'spot_occupancy_config': '/path/to/spot_occupancy_config.py'\n }\n }\n }\n print('Entry in channel configuration')\n pprint.pprint(template)", "def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config", "def create_policies(data):", "def create_export_policy():\n config = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return config", "def module_config_template():\n\n d = {\"AWSPricePerformancePublisher\": {\n \"module\": \"modules.AWS.publishers.AWS_price_performance\",\n \"name\": \"AWSPricePerformancePublisher\",\n }, }\n print(\"Entry in channel cofiguration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")\n print(\"\\t publish_to_graphite - publish to 
graphite if True\")\n print(\"\\t graphite_host - graphite host name\")", "def test_create_hyperflex_ucsm_config_policy(self):\n pass", "def test_create_hyperflex_sys_config_policy(self):\n pass", "def _config(self):\n tmpl = self._template_interface\n for p in tmpl._params:\n setattr(self, p._name, p.get_value())", "def setPolicyOnObject(obj, policy_in=None, policy_below=None):\n placeful_workflow = getToolByName(obj, 'portal_placeful_workflow')\n if not base_hasattr(obj, '.wf_policy_config'):\n cmfpw = 'CMFPlacefulWorkflow'\n obj.manage_addProduct[cmfpw].manage_addWorkflowPolicyConfig()\n config = placeful_workflow.getWorkflowPolicyConfig(obj)\n if policy_in is not None:\n config.setPolicyIn(policy=policy_in)\n if policy_below is not None:\n config.setPolicyBelow(policy=policy_below)", "def generate_puppet_resource(self):\n\t\tself.puppet_resource = Template(\"\"\"\n# -- BEGIN \"$domain\" --\napache::vhost::enable { \"$user\": }\n# -- END \"$domain\" --\n\"\"\").safe_substitute({\n\t\t\t\t\"user\": self.argv.get('user'),\n\t\t})", "def extend_l2_policy_dict(self, session, result):\n pass", "def policy_repr(self, policy):\n return policy.__repr__()", "def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new ColumnInfo and updates the size
def update(self, size) -> 'ColumnInfo': return ColumnInfo( size, self.directive, self.period )
[ "def SetColumn(self, column, info):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n w = self._columns[column].GetWidth()\r\n self._columns[column] = info\r\n \r\n if w != info.GetWidth():\r\n self._total_col_width += info.GetWidth() - w\r\n self._owner.AdjustMyScrollbars()\r\n \r\n self._owner._dirty = True", "def _update_width(self, is_commit_in_existing_columns):\n max_cols = self.num_columns + self.num_parents\n\n # Even if the current commit has no parents to be printed, it still\n # takes up a column for itself.\n if self.num_parents < 1:\n max_cols += 1\n\n # We added a column for the current commit as part of self.num_parents.\n # If the current commit was already in self.columns, then we have double\n # counted it.\n if is_commit_in_existing_columns:\n max_cols -= 1\n\n # Each column takes up 2 spaces\n self.width = max_cols * 2", "def system_update_column_family(self, cf_def):\r\n pass", "def UpdateColumns(self):\r\n data = self.data\r\n columns = data.getParam('columns',data.tankColumns[:])\r\n col_name = data.getParam('colNames',{})\r\n col_width = data.getParam('colWidths',{})\r\n col_align = data.getParam('colAligns',{})\r\n for index,column in enumerate(columns):\r\n name = col_name.get(column,_(column))\r\n width = col_width.get(column,30)\r\n align = wxListAligns[col_align.get(column,'LEFT')]\r\n self.gList.InsertColumn(index,name,align)\r\n self.gList.SetColumnWidth(index, width)", "async def create_column(db_session, grid_guid, column):\n err = \"marketplace_schema -> create_column\"\n with ax_model.try_catch(db_session, err) as db_session:\n existing_column = db_session.query(AxColumn).filter(\n AxColumn.guid == ax_misc.guid_or_none(column['guid'])\n ).first()\n new_column = None\n\n if existing_column:\n existing_column.position = int(column['position'] or 0)\n existing_column.options_json = column['options_json']\n existing_column.column_type = column['column_type']\n existing_column.aggregation_type = column['aggregation_type']\n else:\n new_column = AxColumn()\n new_column.guid = ax_misc.guid_or_none(column['guid'])\n new_column.position = int(column['position'] or 0)\n new_column.options_json = column['options_json']\n new_column.field_guid = ax_misc.guid_or_none(column['field_guid'])\n new_column.grid_guid = ax_misc.guid_or_none(grid_guid)\n new_column.column_type = column['column_type']\n new_column.aggregation_type = column['aggregation_type']\n db_session.add(new_column)", "def adjustColumns(self):\n for col in [COL_ID, COL_TYPE]:\n self.resizeColumnToContents(col)", "def add_info_table_cols(self, new_cols):\n\n cols = set([x.header for x in self.info_table.columns])\n missing = set(new_cols) - cols\n if len(missing) == 0:\n return\n\n # iterate on new_cols since they are in order\n for c in new_cols:\n if c in missing:\n self.info_table.add_column(c)", "def _set_size(self):\n if 'Size' in self.anno_df.columns: return\n w_label = (self.anno_df.XMax - self.anno_df.XMin).values\n h_label = (self.anno_df.YMax - self.anno_df.YMin).values\n self.anno_df['Size'] = w_label * h_label", "def test_modify_column_data(self):\n self.df_squisher.modify_column_data()\n squished_dataframe = self.df_squisher.squished_dataframe\n for index, column in enumerate(squished_dataframe.columns):\n self.assertEqual(\n max_column_width(squished_dataframe[column]),\n [*self.requested_column_size.values()][index]\n )", "def create_column(self, name, type, **kwargs):\n name = self._get_column_name(name)\n if 
self.has_column(name):\n log.debug(\"Column exists: %s\" % name)\n return\n self._sync_table((Column(name, type, **kwargs),))", "def __store_column_width(self):\n self.header_width = []\n for i in range(0, self.view.header().count()):\n self.header_width.append(self.view.columnWidth(i))", "def system_add_column_family(self, cf_def):\r\n pass", "def addColumn(self, id, header = id):", "def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta", "def _addColumn(self, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + TABLE_NAME + \" ADD COLUMN \" + str(column) + \" \" + self.getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def create_column(self, new_column, dtype):\n self.logger.debug(\"[%u] Ready to add column %s\" %\n (os.getpid(), new_column))\n ddl = \"\"\"\n ALTER TABLE {schema}.{table}\n ADD COLUMN IF NOT EXISTS {col} {type}\n \"\"\"\n # TODO Replace by execute_ddl func and test it\n with get_sink_connection_string(self) as conn:\n with conn.cursor() as cursor:\n cursor.execute(ddl.format(schema=self.config['schema'],\n table=self.config['table'],\n col=new_column,\n type=dtype))\n self.logger.debug(\"[%u] Column %s has been added\" %\n (os.getpid(), new_column))", "def add_column_definition(self, c):\n pass", "def OnColumnResize(self,event):\r\n iColumn = event.GetColumn()\r\n column = self.data.getParam('columns')[iColumn]\r\n self.data.updateParam('colWidths')[column] = self.gList.GetColumnWidth(iColumn)", "def add_column(self, column_name, data_type=\"int\", not_null=False, data_target=\"name\"):\n\n self.columns[column_name] = Column(\n\n # add column properties\n column_name= column_name,\n data_type= data_type,\n ai= False,\n not_null= not_null,\n data_target=data_target,\n\n # auto add table properties\n n_rows=self._n_rows,\n table_objet=self\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simply copy metadata from source to target
def copy_stock_metas( meta_source, target, copy_columns_info=True, ) -> None: set_attr( target, KEY_ALIAS_MAP, copy(getattr(meta_source, KEY_ALIAS_MAP)) ) if copy_columns_info: set_attr( target, KEY_COLUMNS_INFO_MAP, deepcopy(getattr(meta_source, KEY_COLUMNS_INFO_MAP)) ) else: set_attr(target, KEY_COLUMNS_INFO_MAP, {})
[ "def _copy_metadata(from_dir, to_dir):\n if not FLAGS.dry_run:\n tf.io.gfile.makedirs(to_dir)\n for fname in tfds.core.utils.list_info_files(from_dir):\n from_path = os.path.join(from_dir, fname)\n to_path = os.path.join(to_dir, fname)\n logging.info('cp %s %s', from_path, to_path)\n if not FLAGS.dry_run:\n tf.io.gfile.copy(from_path, to_path, overwrite=True)", "def test_metadata(self):\n assert self.copy.metadata == self.orig.metadata, 'Copied metadata should be equivalent'", "def transcode_metadata(self):\r\n source = self.source_pattern.extras\r\n dest = self.destination_pattern.extras\r\n dest.update(source)", "def __transferMetadata(self, metadataToTransfer, currentPath):\n if metadataToTransfer is not None:\n sourceID = metadataToTransfer.get(\"sourceID\",None)\n if sourceID is not None:\n # search for restrt file\n sourcePath = os.path.join(currentPath,\"../\",sourceID)\n self.__copyRestartFile(sourcePath, currentPath)\n else:\n raise IOError('the only metadtaToTransfer that is available in RELAP5 is \"sourceID\". Got instad: '+', '.join(metadataToTransfer.keys()))", "def copy_metadata():\n\n common_root = os.path.join(os.path.dirname(__file__), \"wsdotroute\", \"esri\")\n src = os.path.join(common_root, \"toolboxes\")\n dest = os.path.join(common_root, \"help\", \"gp\", \"toolboxes\")\n\n if os.path.exists(dest):\n shutil.rmtree(dest)\n\n shutil.copytree(src, dest, ignore=shutil.ignore_patterns(\"*.pyt\"))\n\n print(\"Completed copying metadata XML files\")", "def copy(self, src_path: str, tgt_path: str) -> None:", "def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):\n source_structure = self._lookup_course(source_course_key).structure\n with self.bulk_operations(dest_course_key):\n original_structure = self._lookup_course(dest_course_key).structure\n index_entry = self._get_index_if_valid(dest_course_key)\n new_structure = self.version_structure(dest_course_key, original_structure, user_id)\n\n new_structure['assets'] = source_structure.get('assets', {})\n new_structure['thumbnails'] = source_structure.get('thumbnails', [])\n\n # update index if appropriate and structures\n self.update_structure(dest_course_key, new_structure)\n\n if index_entry is not None:\n # update the index entry if appropriate\n self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])", "def copy(self, source_host, dest_host, filename):", "def copy(self, other, exif=True, iptc=True, xmp=True, comment=True):\n if comment:\n other.setComment(self.getComment())\n if exif:\n for metadata in [ 'Exif.Image.Make', 'Exif.Image.Model', 'Exif.Photo.DateTimeOriginal',\n 'Exif.Photo.ExposureTime', 'Exif.Photo.FNumber', 'Exif.Photo.ExposureBiasValue',\n 'Exif.Photo.Flash', 'Exif.Photo.FocalLength', 'Exif.Photo.ISOSpeedRatings',\n \"Exif.Image.Orientation\", \"Exif.Photo.UserComment\"\n ]:\n if metadata in self.getExifKeys():\n try:\n other[metadata] = self[metadata]\n except:\n print(\"Unable to copying metadata %s in file %s, value: %s\" % (metadata, self.filename, self.exif[metadata]))", "def _load_meta(self, db, metadata, source_name) -> None:\n db.metadata.put_item(Item={\n 'src_name': source_name,\n 'data_license': metadata.data_license,\n 'data_license_url': metadata.data_license_url,\n 'version': metadata.version,\n 'data_url': metadata.data_url,\n 'rdp_url': metadata.rdp_url,\n 'data_license_attributes': metadata.data_license_attributes,\n 'genome_assemblies': metadata.genome_assemblies\n })", "def generate_metadata(self):\n self.metadata = {\n 
'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }", "def copy_metadata():\n\n committish = config[\"committish\"]\n output_dir = config[\"output_dir\"]\n\n bitbake_dir = \"MONTAVISTA/bitbake\"\n bitbake_dir_ref = \"%s:%s\" % (committish, bitbake_dir)\n\n try:\n\tcall([git, \"rev-parse\", bitbake_dir_ref], stdout=None, stderr=None)\n except:\n\tsys.stderr.write(\"Directory %s not found in %s\\n\" %\n\t\t\t (bitbake_dir, committish))\n\tsys.exit(1)\n\n repo_dir = os.getcwd()\n\n try:\n\tos.chdir(output_dir)\n except:\n\tsys.stderr.write(\"failed: chdir %s\\n\" % output_dir)\n\tusage()\n\n cmd = [git, 'archive', '--remote=%s' % repo_dir, bitbake_dir_ref]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n\n tar = tarfile.open(fileobj=p.stdout,mode='r|tar')\n file = tar.next()\n #tar.extractall()\n while file != None:\n \ttar.extract(file)\n\tfile = tar.next()\n tar.close()\n\n git_exit_code = p.wait()\n if git_exit_code:\n\traise Exception('%s returned %d\\n' % (' '.join(cmd), git_exit_code))", "def copy_metadata_from(self, o_game_container):\n\n # Modification of data\n self.u_name = o_game_container.u_name\n self.u_description = o_game_container.u_description\n self.u_version = o_game_container.u_version\n self.u_comment = o_game_container.u_comment\n self.u_type = o_game_container.u_type\n self.u_author = o_game_container.u_author\n\n u_log_message = u''\n u_log_message += u'Metadata copied: '\n u_log_message += u'u_name=\"%s\" ' % self.u_name\n u_log_message += u'u_desc=\"%s\" ' % self.u_description\n u_log_message += u'u_version=\"%s\" ' % self.u_version\n u_log_message += u'u_comment=\"%s\" ' % self.u_comment\n u_log_message += u'u_type=\"%s\" ' % self.u_type\n u_log_message += u'u_author=\"%s\" ' % self.u_author", "def _copy_output(src: Graph, dst: Graph):\n for n_src, n_dst in zip(src.nodes, dst.nodes):\n if n_src.op == 'output':\n n_dst.meta = n_src.meta", "def _migrate_data(schema,source_config,target_config,migration_config):\n # create database connections\n source_engine = connect_to_source(source_config)\n target_engine = connect_to_target(target_config,target_config['database'])\n \n # load the schema metadata profile\n source_metadata = sqlalchemy.MetaData(source_engine)\n source_metadata.reflect(schema=schema)\n\n # iterate the tables, loading the data\n for t in source_metadata.sorted_tables:\n _copy_data(source_engine,schema,target_engine,t,migration_config['batchsize'],\n migration_config['logged'],trialrun=migration_config['trialrun'])", "def copy_object_metadata(self, bucket_name, src_object_name, dst_object_name):\n\n return h3lib.copy_object_metadata(self._handle, bucket_name, src_object_name, dst_object_name, self._user_id)", "def _copy_target_attributes(self):\n return ['provides']", "def _copy_file ( self, source, dest ):\n return", "def copyattr( self, source, destination, stats=None ):\n\t\ts_stat = stats or os.lstat(source)\n\t\tshutil.copystat(source, destination, follow_symlinks=False)\n\t\tos.chown(destination, s_stat[stat.ST_GID], s_stat[stat.ST_UID], follow_symlinks=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of all public modules relative to a path.
def get_public_modules(path, base_package=None): result = [] for subdir, _, files in os.walk(path): # Skip folders that start with _. if any([part.startswith('_') for part in subdir.split(os.path.sep)]): continue _, rel_dir = subdir.split(path) rel_dir = rel_dir.lstrip(os.path.sep) for filename in files: if is_valid_module(filename): mod_name, _ = os.path.splitext(filename) rel_path = os.path.join(rel_dir, mod_name) if base_package is not None: rel_path = os.path.join(base_package, rel_path) # Turn into a Python module rather than a file path. result.append(rel_path.replace(os.path.sep, '.')) return result
[ "def module_list(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n from zipimport import zipimporter\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [\n p for p in folder_list\n if (os.path.exists(os.path.join(path, p, '__init__.py')) or\n p[-3:] in {'.py', '.so'} or\n p[-4:] in {'.pyc', '.pyo', '.pyd'})]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list", "def moduleList(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [p for p in folder_list \\\n if os.path.exists(os.path.join(path, p,'__init__.py'))\\\n or p[-3:] in ('.py','.so')\\\n or p[-4:] in ('.pyc','.pyo','.pyd')]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list", "def dir_list(subname, p_name, pypath=None, static=None):\n ret = []\n for path in pypath:\n mod = importlib.import_module(path)\n for m_path in mod.__path__:\n # If we are inside of an executable the path will be different\n ret.append(m_path)\n ret.extend(static)\n return ret", "def list_modules(path):\n modules = []\n for root, dirs, files in os.walk(path): # pylint: disable=unused-variable\n for file in files:\n if file.endswith(\".js\"):\n with open(os.path.join(path, file), 'r') as modfile:\n content = modfile.readlines()\n module_re = r\"/\\*\\* @module +([\\w.]+) +\\*/\"\n m = re.search(module_re, content[0])\n # test if its supposed to be a module\n if m and m.group(1):\n # great its a module ! 
lets see its content\n logger.debug(\"Module detected %s\" % m.group(1))\n modules.append((m.group(1), content))\n return modules", "def retrieve_module_list():\n\n current_dir = getcwd()\n mod_list = []\n\n for item in listdir(current_dir):\n\n if item.endswith('db'):\n\n mod_list.append(item)\n\n return mod_list", "def modules(self):\n for modpath in self.modpaths():\n yield modpath.module", "def module_list(self):\n return self.mod.modules()", "def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]", "def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name", "def list_dir(self, path):", "def _get_path_module_entries(\n self, path: Path, subpath: str | Path, modules: list[tuple[Path, Path]]\n ) -> None:\n try:\n # Special case: let's save some time and skip the whole 'babase'\n # package since we know it doesn't contain any meta tags.\n fullpath = Path(path, subpath)\n entries = [\n (path, Path(subpath, name))\n for name in os.listdir(fullpath)\n # Actually scratch that for now; trying to avoid special cases.\n # if name != 'babase'\n ]\n except PermissionError:\n # Expected sometimes.\n entries = []\n except Exception:\n # Unexpected; report this.\n logging.exception('metascan: Error in _get_path_module_entries.')\n self.results.announce_errors_occurred = True\n entries = []\n\n # Now identify python packages/modules out of what we found.\n for entry in entries:\n if entry[1].name.endswith('.py'):\n modules.append(entry)\n elif (\n Path(entry[0], entry[1]).is_dir()\n and Path(entry[0], entry[1], '__init__.py').is_file()\n ):\n modules.append(entry)", "def listdir(path):\r\n return os.listdir(path)", "def modpaths(self):\n for module_info in pkgutil.iter_modules([self.directory]):\n bits = self.relbits + [module_info[1]]\n yield Modulepath(\".\".join(bits), self.basedir)\n\n if module_info[2]: # module is a package because index 2 is True\n submodules = Dirpath(os.sep.join(bits), self.basedir)\n for submodule in submodules.modpaths():\n #subbits = [module_info[1]] + submodule.relbits\n #yield Modulepath(u\".\".join(subbits), self.basedir)\n yield submodule", "def list_path(self, path):\n return LocalResources(\"\").list_path(path)", "def get_module_list(app_name):\n\treturn get_file_items(get_app_path(app_name, \"modules.txt\"))", "def listModules():\n\n modules = []\n\n for _, name, _ in pkgutil.iter_modules([\"modules\"]):\n modules.append(name)\n\n return modules", "def get_modules_list(self):\n list = []\n for mod in self.modules:\n list.append(mod.__name__)\n return list", "def list_submodules(package):\n\n return list_file_submodules(package.__path__)", "def modules(self):\n return self._modules.keys()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the losses of the last computation, if they exist.
def get_losses(self): if self.loss is not None: return [self.loss] else: return []
[ "def last_loss(self):\n return self._internal.get_last_loss()", "def losses(self):\n pass", "def loss(self) -> Optional[float]:\n return self._last_loss_value", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def build_loss(self):\n if self.mode != \"encode\":\n total_loss = tf.losses.get_total_loss()\n tf.summary.scalar(\"losses/total\", total_loss)\n\n self.total_loss = total_loss", "def get_internal_loss(self):\n loss = 0\n c = 0\n losses = self.get_named_internal_losses()\n for loss_name, loss_values in losses.items():\n loss += torch.mean(torch.stack(loss_values))\n c += 1\n if c == 0:\n return loss\n else:\n return loss / c", "def losses(self):\n losses = []\n for layer in self.layers:\n losses += layer.losses\n if context.in_eager_mode():\n return losses\n\n relevant_inputs = self.inputs or []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,\n losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))", "def get_loss(self, batch: List[Query], backpropagate_loss: Callable) -> float:\n total_loss = 0\n result = self.model.solve(batch)\n for r in result:\n self.timing[0] += r.ground_time / len(batch)\n self.timing[1] += r.compile_time / len(batch)\n self.timing[2] += r.eval_time / len(batch)\n result = [\n (result[i], batch[i]) for i in range(len(batch)) if len(result[i]) > 0\n ]\n for r, q in result:\n total_loss += backpropagate_loss(\n r, q.p, weight=1 / len(result), q=q.substitute().query\n )\n return total_loss", "def get_loss():\n ##################\n # YOUR CODE HERE #\n ##################", "def get_loss(self):\n return self.loss / self.cnt", "def calc_loss(batch):\n result = Variable(torch.FloatTensor(1).zero_())\n for exps in batch:\n R = 0.0\n for exp in reversed(exps):\n R *= GAMMA\n R += exp.reward\n\n # here we take first experience in the chain\n state = Variable(torch.from_numpy(np.array([exps[0].state], dtype=np.float32)))\n q_vals = model(state)[0]\n q_val = q_vals[exps[0].action]\n result += (q_val - R) ** 2\n return result / len(batch)", "def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)", "def losses(self):\n return {\n \"loss_cls\": self.drop_loss(),\n \"loss_box_reg\": self.smooth_l1_loss(),\n }", "def loss(self):\n return self._loss", "def _build_losses(self):\n self.loss_recon = 0.\n\n if self.lambda_b:\n with tf.variable_scope('reconstruction_mmd'):\n self._build_reconstruction_loss_mmd(self.reconstructed, self.x)\n with tf.variable_scope('batchcorrection'):\n self._build_reg_b()\n\n else:\n with tf.variable_scope('reconstruction'):\n self._build_reconstruction_loss(self.reconstructed, self.x)\n\n if self.lambda_c:\n with tf.variable_scope('clustering'):\n self.loss_c = 0\n\n act = tbn('layer_c:0')\n act = act / tf.reduce_max(act)\n\n self._build_reg_c(act)\n\n if self.lambda_d:\n with 
tf.variable_scope('intracluster_distances'):\n self._build_reg_d(act)\n\n self._build_total_loss()", "def get_module_loss(self):\n return self.loss, self.gradients, self.grad_placeholder, self.train_step", "def losses(self):\n loss = {\n \"fastrcnn_loss_cls\": self.softmax_cross_entropy_loss(),\n \"fastrcnn_loss_box_reg\": self.smooth_l1_loss(),\n }\n if self.pred_iou_logits is not None:\n loss['fastrcnn_loss_iou'] = self.get_pred_iou_loss()\n\n return loss", "def calculate_loss(self, output, batch, training_context, last_activation=None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute any branch of the stable or unstable submanifolds of a saddle. Accepts fixed point instances of class fixedpoint_2D.
def find_saddle_manifolds(fp, xname, ds=None, ds_gamma=None, ds_perp=None, tmax=None, max_arclen=None, ic=None, eps=None, ev_dirn=1, ic_ds=None, max_pts=1000, directions=(1,-1), which=('s', 'u'), other_pts=None, rel_scale=None, ds_perp_fac=0.75, verboselevel=0, fignum=None): if verboselevel > 1: figure_name, layer_name = plotter.active_layer _, layer_struct = plotter.active_layer_structs assert layer_struct is not None assert fp.classification == 'saddle' and not fp.degenerate if fp.evals[0] < 0: eval_s = fp.evals[0] eval_u = fp.evals[1] evec_s = fp.evecs[0] evec_u = fp.evecs[1] else: eval_s = fp.evals[1] eval_u = fp.evals[0] evec_s = fp.evecs[1] evec_u = fp.evecs[0] gen = fp.gen assert 'Gamma_out_plus' in gen.eventstruct, "Detection event surface(s) not present" assert 'Gamma_out_minus' in gen.eventstruct, "Detection event surface(s) not present" if eps is None: # Dividing fixed point's inherited epsilon tolerance by 100 eps = fp.eps / 100 ds_perp_eps = 1e-12 if ds_perp_fac >= 1 or ds_perp_fac <= 0: raise ValueError("ds_perp_fac must be between 0 and 1") normord = fp.normord if rel_scale is None: rel_scale = (1,1) dsscaled = dx_scaled_2D(ds, rel_scale) if isinstance(ds_gamma, dict): assert len(ds_gamma) == 2, "Invalid value for ds_gamma" assert remain(list(ds_gamma.keys()), [1,-1]) == [], \ "Invalid value for ds_gamma" else: try: ds_gamma = {1: ds_gamma, -1: ds_gamma} except: raise TypeError("Invalid type for ds_gamma") try: xcoord_ix = fp.point.coordnames.index(xname) except ValueError: raise ValueError("Invalid x coordinate name '%s'"%xname) else: # x coordinate index is either 0 or 1 for this 2D system # y coordinate index is therefore 1-xcoord_ix ycoord_ix = 1-xcoord_ix yname = fp.point.coordnames[ycoord_ix] if verboselevel>1: # validate coord names xn, yn = layer_struct.axes_vars if xname != xn and yname != yn: raise ValueError("x and y name mismatch with Plotter") def test_fn(x, dircode): if verboselevel>1: dm.log.msg("Integrate from test point", x=x[xname], y=x[yname], direction=dircode) gen.set(ics=x) try: test = gen.compute('test', dirn=dircode) except KeyboardInterrupt: raise except: raise RuntimeError("Integration failed") events = gen.getEvents() if verboselevel>1: pts=test.sample(coords=x.coordnames) # only show first 25 points unless Gamma bd not met plotter.add_data((pts[xname][:25],pts[yname][:25]), style='b-', layer=layer_name, name=dm.get_unique_name('test_traj_first25_')) if events['Gamma_out_plus'] is None: if events['Gamma_out_minus'] is None: if verboselevel>1: pts = test.sample(coords=x.coordnames) dm.log.msg("Error", err_msg="Did not reach Gamma surfaces", status="fail", last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) plotter.add_data((pts[xname],pts[yname]), style='b-', layer=layer_name, name=dm.get_unique_name('test_traj_full'), log=dm.log) raise RuntimeError("Did not reach Gamma surfaces") else: # hit Gamma_out_minus if verboselevel>1: dm.log.msg("Reached Gamma minus", t=events['Gamma_out_minus']['t'][0], last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) sgn = -1 else: if events['Gamma_out_minus'] is None: # hit Gamma_out_plus if verboselevel>1: dm.log.msg("Reached Gamma plus", t=events['Gamma_out_plus']['t'][0], last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) sgn = 1 else: # both were non-None, i.e. both events happened: impossibru! 
if verboselevel>1: pts = test.sample(coords=x.coordnames) dm.log.msg("Error", err_msg="Both Gamma surfaces reached", status="fail", last_computed_point=pts[-1], last_computed_time=pts['t'][-1]) plotter.add_data((pts[xname],pts[yname]), style='b-', layer=layer_name, name=dm.get_unique_name('universe_fail'), log=dm.log) raise RuntimeError("Both Gamma surfaces reached, impossibly") return sgn def onto_manifold(x_ic, dn, normal_dir, dircode='f'): try: return bisection(test_fn, x_ic+dn*normal_dir, x_ic-dn*normal_dir, args=(dircode,), xtol=eps, maxiter=100, normord=normord) except AssertionError: if verboselevel>1: xp = x_ic+dn*normal_dir xm = x_ic-dn*normal_dir dm.log.msg("Error", err_msg="onto_manifold bisection fail", status="fail", point_p=xp, point_m=xm) plotter.add_data([xp[xname],xp[yname]], style='gx', layer=layer_name, name=dm.get_unique_name('xp'), log=dm.log) plotter.add_data([xm[xname],xm[yname]], style='gx', layer=layer_name, name=dm.get_unique_name('xm'), log=dm.log) plotter.show() raise RuntimeError("ds_perp too small? +/- initial displacement did not straddle manifold") except RuntimeError: if verboselevel>1: xp = x_ic+dn*normal_dir xm = x_ic-dn*normal_dir dm.log.msg("Error", err_msg="onto_manifold bisection fail", status="fail", point_p=xp, point_m=xm) plotter.add_data([xp[xname],xp[yname]], style='gx', layer=layer_struct.name, name=dm.get_unique_name('xp'), log=dm.log) plotter.add_data([xm[xname],xm[yname]], style='gx', layer=layer_struct.name, name=dm.get_unique_name('xm'), log=dm.log) plotter.show() raise gen.eventstruct['Gamma_out_plus'].activeFlag=True # terminal gen.eventstruct['Gamma_out_minus'].activeFlag=True # terminal assert tmax > 0 manifold = {'s': {1: None, -1: None}, 'u': {1: None, -1: None}} man_names = {'s': 'stable', 'u': 'unstable'} for w in which: # w = 's' => stable branch # w = 'u' => unstable branch if verboselevel>0: print("Starting %s branch" % man_names[w]) if w == 's': col = 'g' w_sgn = -1 integ_dircode = 'f' evec = evec_u evec_other = evec_s elif w == 'u': col = 'r' w_sgn = 1 integ_dircode = 'b' evec = evec_s evec_other = evec_u # set Gamma_out surfaces on "outgoing" branch # (polarity is arbitrary) p0_plus = fp.point + ds_gamma[1]*evec p0_minus = fp.point - ds_gamma[-1]*evec evec_perp = get_perp(evec) gen.eventstruct.setEventDir('Gamma_out_plus', ev_dirn) gen.eventstruct.setEventDir('Gamma_out_minus', -ev_dirn) gen.set(pars={'Gamma_out_plus_p_'+xname: p0_plus[xname], 'Gamma_out_plus_p_'+yname: p0_plus[yname], 'Gamma_out_plus_dp_'+xname: evec_perp[xname], 'Gamma_out_plus_dp_'+yname: evec_perp[yname], 'Gamma_out_minus_p_'+xname: p0_minus[xname], 'Gamma_out_minus_p_'+yname: p0_minus[yname], 'Gamma_out_minus_dp_'+xname: evec_perp[xname], 'Gamma_out_minus_dp_'+yname: evec_perp[yname], ## 'fp_'+xname: fp.point[xname], 'fp_'+yname: fp.point[yname] }, tdata = [0,tmax]) if verboselevel>1: if fignum is None: fignum=figure() else: figure(fignum) # plot event surfaces for gamma plus and minus exit events # ISSUE: Convert to plotter.add_data plot([p0_plus[xname]-dsscaled*evec_perp[xname],p0_plus[xname]+dsscaled*evec_perp[xname]], [p0_plus[yname]-dsscaled*evec_perp[yname],p0_plus[yname]+dsscaled*evec_perp[yname]], 'k-', linewidth=2) plot([p0_minus[xname]-dsscaled*evec_perp[xname],p0_minus[xname]+dsscaled*evec_perp[xname]], [p0_minus[yname]-dsscaled*evec_perp[yname],p0_minus[yname]+dsscaled*evec_perp[yname]], 'k-', linewidth=2) draw() check_other_pts = other_pts is not None if ic_ds is None: ic_ds = dsscaled else: ic_ds = dx_scaled_2D(ic_ds, rel_scale) if ic is 
None: ic = fp.point f_ic = -w_sgn * evec_other dirn_fix = 1 # not used for this case if verboselevel>0: # ISSUE: Convert to log entry print("f_ic from evec_other") print("evec_other " + str(evec_other)) print("f_ic = " + str(f_ic)) curve_len = 0 # initial estimate x0 = a point close to f.p. along manifold with # opposite stability else: # initial curve length from previous independent variable, if present # otherwise, assume zero if isinstance(ic, Pointset): assert len(ic) == 1, "Only pass a length-1 pointset" # (guarantee curve_len > 0) # BUG: for direction=-1 case, arc_len will be negative # and index 0 will have the smallest arc_len, not the # largest. Better not to use ic as Pointset option and # fix arc_len outside of call curve_len = abs(ic['arc_len'][0]) ic = ic[0] else: curve_len = 0 # ensure correct sign relative to starting point (if ic is None) sgns_orig = sign(-w_sgn * evec_other) f_ic_alpha = gen.Rhs(0, ic, gen.pars) # array in alpha order # f_ic here isn't normalized to length 1 like the case above that uses # evec_other (which is already normalized) f_ic = Point({xname: f_ic_alpha[xcoord_ix], yname: f_ic_alpha[ycoord_ix]}) sgns_f_ic = sign(f_ic) if any(sgns_orig != sgns_f_ic): dirn_fix = -1 f_ic = -f_ic else: dirn_fix = 1 if verboselevel>0: # ISSUE: Convert to log entry print("f_ic = " + str(f_ic)) for sgn in directions: piece = {} if verboselevel>0: # ISSUE: Convert to log entry print("Starting direction", sgn) # PREDICTION x0_ic = ic+w_sgn*sgn*ic_ds*f_ic/norm(f_ic, normord) if verboselevel>1: figure(fignum) # show starting point (initial estimate) as green circle # ISSUE: Convert to plotter.add_data plot(x0_ic[xname], x0_ic[yname], 'go', linewidth=1) # put x0 initial estimate onto stable manifold f_alpha = dirn_fix * gen.Rhs(0, x0_ic, gen.pars) # array in alpha order f = Point({xname: f_alpha[xcoord_ix], yname: f_alpha[ycoord_ix]}) normf = norm(f, normord) norm_to_flow = get_perp(f/normf) if verboselevel>1: # show flow direction from IC as solid red line plotter.add_data(([x0_ic[xname], x0_ic[xname]+dsscaled*f[xname]/normf], [x0_ic[yname], x0_ic[yname]+dsscaled*f[yname]/normf]), style='r-', name=dm.get_unique_name('flow_fwd'), log=dm.log) # show normal to flow direction from IC as dotted red line plotter.add_data(([x0_ic[xname], x0_ic[xname]+dsscaled*norm_to_flow[xname]], [x0_ic[yname], x0_ic[yname]+dsscaled*norm_to_flow[yname]]), style='r:', name=dm.get_unique_name('flow_perp'), log=dm.log) ds_perp_default = ds_perp # CORRECTION while ds_perp > ds_perp_eps: try: x = onto_manifold(x0_ic, ds_perp, norm_to_flow, dircode=integ_dircode) except RuntimeError as e: ds_perp *= ds_perp_fac else: break if ds_perp <= ds_perp_eps: # RuntimeError was raised and could not continue reducing ds_perp print("ds_perp reached lower tolerance =", ds_perp_eps) print(e) raise RuntimeError("Initial point did not converge") else: curve_len += norm(x-ic, normord) piece[sgn*curve_len] = x num_pts = 1 last_x = x if verboselevel>0: print("Initial point converged to (%.6f, %.6f)\n" % \ (x[xname], x[yname])) ds_perp = ds_perp_default last_f = f_ic # step backwards along local linear flow to predict next starting # position on manifold while curve_len < max_arclen and num_pts < max_pts: if verboselevel>0: # ISSUE: Convert to plotter.add_data figure(fignum) plot(last_x[xname], last_x[yname], col+'.', linewidth=1) if check_other_pts and sometrue([norm(last_x - pt, normord) < ds \ for pt in other_pts]): # we've hit a different fixed point (or other feature), so stop break f_alpha = dirn_fix * gen.Rhs(0, 
last_x, gen.pars) # array f = Point({xname: f_alpha[xcoord_ix], yname: f_alpha[ycoord_ix]}) if all(sign(f) != sign(last_f)): f = -f # on other side of manifold so must keep stepping in the # same direction, therefore switch signs! # PREDICTION x_ic = last_x + w_sgn*sgn*dsscaled*f/norm(f,normord) last_f = f if verboselevel>1: print("\nStarting from point ", last_x) delta = w_sgn*sgn*dsscaled*f/norm(f,normord) print("Trying point ", x_ic, "in direction (%.6f, %.6f)\n" % (delta[xname], delta[yname])) ds_perp = ds_perp_default # CORRECTION while ds_perp > ds_perp_eps: try: x = onto_manifold(x_ic, ds_perp, get_perp(f/norm(f,normord)), dircode=integ_dircode) except RuntimeError as e: ds_perp *= 0.75 else: break if ds_perp <= ds_perp_eps: # RuntimeError was raised and could not continue reducing ds_perp print("ds_perp reached lower tolerance =", ds_perp_eps) print(e) break # end while search else: curve_len += norm(x-last_x, normord) piece[sgn*curve_len] = x last_x = x num_pts += 1 if verboselevel>1: print("\nManifold has %i points" % num_pts) elif verboselevel>0: print(".", end=' ') sys.stdout.flush() indepvar, piece_sorted = sortedDictLists(piece, byvalue=False) manifold[w][sgn] = pointsToPointset(piece_sorted, indepvarname='arc_len', indepvararray=indepvar, norm=normord) if verboselevel>0: # finish the line on stdout print(" ") gen.eventstruct['Gamma_out_plus'].activeFlag=False gen.eventstruct['Gamma_out_minus'].activeFlag=False ## gen.eventstruct['fp_closest'].activeFlag=False return manifold
[ "def exact_saddle(V,X,Y,Z,dim,Z0=None):\n #from all_functions import find_saddle,sum_of_e_field\n if dim==3:\n print \"here\"\n print find_saddle(V,X,Y,Z,3)\n [I,J,K]=find_saddle(V,X,Y,Z,3) # guess saddle point; Z0 not needed\n print I,J,K\n r0=[X[I],Y[J],Z[K]]\n if I<2 or I>V.shape[0]-2: \n print('exact_saddle.py: Saddle point out of bounds in radial direction.')\n return r0\n if J<2 or J>V.shape[1]-2:\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.')\n return r0\n if K<2 or K>V.shape[2]-2:\n print('exact_saddle.py: Saddle point out of bounds in axial direction.')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # change grid vectors as well\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n #################################### Minimize\n r=spo.minimize(sum_of_e_field,r0,args=(Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],r[2] \n ################################################################################################# \n if dim==2: \n if len(V.shape)==3:\n K=0 # in case there is no saddle\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n K=i-1\n Vs = V.shape\n if K>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,K-1] # potential to left\n v2=V[:,:,K] # potential to right (actually right at estimate; K+1 to be actually to right)\n V2=v1+(v2-v1)*(Z0-Z[K-1])/(Z[K]-Z[K-1]) # averaged potential around given coordinate\n [I,J,K0]=find_saddle(V,X,Y,Z,2,Z0) \n r0=X[I],Y[J]\n print 1\n if (I<2 or I>V.shape[0]-2): \n print('exact_saddle.py: Saddle point out of bounds in radial direction.\\n')\n return r0\n if (J<2 or J>V.shape[1]-1):\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.\\n')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # Matlab 4, not 2\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n ################################## Minimize\n r=spo.minimize(sum_of_e_field_2d,r0,args=(Z0,Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],Z0\n print Xs\n print Ys\n print Zs\n return [Xs,Ys,Zs]", "def layer_sweep(self):\n for fixed_id, fixed_layer in enumerate(self.layers):\n if fixed_id + 1 == len(self.layers):\n break\n moving_layer = self.layers[fixed_id + 1]\n for node in moving_layer.nodes:\n self.find_neighbors(node)\n if len(node.neighbors) > 0:\n self.calculate_barycenter(node)\n else:\n node.barycenter = 0 #1000\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.barycenter, reverse=False)\n for slot, node in enumerate(sorted_nodes):\n node.slot = slot + 1\n barys = set([n.barycenter for n in sorted_nodes])\n bary_nodes = [list(filter(lambda x: x.barycenter == b, sorted_nodes)) for b in barys]\n for b in bary_nodes:\n if len(b) > 1:\n for node in b:\n if len(node.sl_neighbors) == 1:\n n_slot = node.sl_neighbors[0].slot\n if n_slot > node.slot:\n other_node = max(b, key=lambda s: s.slot)\n elif n_slot < node.slot:\n other_node = min(b, key=lambda s: s.slot)\n temp = node.slot\n node.slot = other_node.slot\n other_node.slot = temp\n sorted_nodes = 
sorted(moving_layer.nodes, key=lambda n: n.slot, reverse=False)\n moving_layer.nodes = sorted_nodes", "def compute_overlaps_in_parallel( step ):\n s_sd = step3.mapping.ovlp_mat_arb( sd_states_reindexed_sorted[step], sd_states_reindexed_sorted[step], S_ks[0][step], use_minimal=False )\n s_sd = data_conv.MATRIX2nparray(s_sd)\n return s_sd", "def solve_step(ball_list, step,borders,obstacle=None):\n ball_list = step1(ball_list, step,borders,obstacle)\n ball_list = step2(ball_list, step)", "def branch_competetive(state, time, d):\n\n th0 = state[0] \n th1 = state[1:(d[\"alpha1\"]+d[\"alpha1_p\"])]\n th2 = state[(d[\"alpha1\"]+d[\"alpha1_p\"]):]\n \n #print(len(state), len(th1))\n ### get all cytokine secreting cells \n th1_all = np.sum(th1[-d[\"alpha1_p\"]:])\n th2_all = np.sum(th2[-d[\"alpha2_p\"]:])\n ### calculate cytokine concentrations\n cyto_1 = d[\"beta_cyto_1\"]*th1_all + d[\"ifn_ext\"]\n cyto_2 = d[\"beta_cyto_2\"]*th2_all + d[\"il21_ext\"]\n ### calculate cytokine effect on rate\n fb1 = d[\"fb_rate1\"]*cyto_1**3/(cyto_1**3+d[\"K_1\"]**3)\n fb2 = d[\"fb_rate2\"]*cyto_2**3/(cyto_2**3+d[\"K_2\"]**3)\n ### update differantiation rate\n beta1 = d[\"beta1\"]*(1+fb1)\n beta2 = d[\"beta2\"]*(1+fb2)\n \n ### differentiate effectors th1 \n alpha = d[\"alpha1\"]\n p = 1.\n dt_th1 = diff_effector2(th1, th0, alpha, beta1, d[\"beta1_p\"], p, d)\n ### differentiate effectors th2\n alpha = d[\"alpha2\"]\n p = 1.\n dt_th2 = diff_effector2(th2, th0, alpha, beta2, d[\"beta2_p\"], p, d)\n \n ### combine all cells\n dt_th0 = -(beta1+beta2)*th0\n dt_state = np.concatenate(([dt_th0], dt_th1, dt_th2))\n\n return dt_state", "def __detect_branching_haghverdi16(\n self, Dseg: np.ndarray, tips: np.ndarray\n ) -> np.ndarray:\n # sort distance from first tip point\n # then the sequence of distances Dseg[tips[0]][idcs] increases\n idcs = np.argsort(Dseg[tips[0]])\n # consider now the sequence of distances from the other\n # two tip points, which only increase when being close to `tips[0]`\n # where they become correlated\n # at the point where this happens, we define a branching point\n if True:\n imax = self.kendall_tau_split(\n Dseg[tips[1]][idcs],\n Dseg[tips[2]][idcs],\n )\n if False:\n # if we were in euclidian space, the following should work\n # as well, but here, it doesn't because the scales in Dseg are\n # highly different, one would need to write the following equation\n # in terms of an ordering, such as exploited by the kendall\n # correlation method above\n imax = np.argmin(\n Dseg[tips[0]][idcs] + Dseg[tips[1]][idcs] + Dseg[tips[2]][idcs]\n )\n # init list to store new segments\n ssegs = [] # noqa: F841 # TODO Look into this\n # first new segment: all points until, but excluding the branching point\n # increasing the following slightly from imax is a more conservative choice\n # as the criterion based on normalized distances, which follows below,\n # is less stable\n if imax > 0.95 * len(idcs) and self.allow_kendall_tau_shift:\n # if \"everything\" is correlated (very large value of imax), a more\n # conservative choice amounts to reducing this\n logg.warning(\n 'shifting branching point away from maximal kendall-tau '\n 'correlation (suppress this with `allow_kendall_tau_shift=False`)'\n )\n ibranch = int(0.95 * imax)\n else:\n # otherwise, a more conservative choice is the following\n ibranch = imax + 1\n return idcs[:ibranch]", "def classify_fixedpoints(fps, scale):\n\n x_directions = []\n scale = scale\n for fp in fps:\n\n trace = np.matrix.trace(fp['jac'])\n det = np.linalg.det(fp['jac'])\n if 
det > 0 and trace == 0:\n print('center has been found. Watch out for limit cycles')\n elif trace**2 - 4 * det == 0:\n print(\"star nodes has been found.\")\n elif trace**2 - 4 * det < 0:\n print(\"spiral has been found\")\n e_val, e_vecs = np.linalg.eig(fp['jac'])\n ids = np.argwhere(np.real(e_val) > 0)\n countgreaterzero = np.sum(e_val > 0)\n if countgreaterzero == 0:\n print('stable fixed point was found.')\n fp['fp_stability'] = 'stable fixed point'\n elif countgreaterzero > 0:\n print('saddle point was found.')\n fp['fp_stability'] = 'saddle point'\n for id in ids:\n x_plus = fp['x'] + scale * e_val[id] * np.real(e_vecs[:, id].transpose())\n x_minus = fp['x'] - scale * e_val[id] * np.real(e_vecs[:, id].transpose())\n x_direction = np.vstack((x_plus, fp['x'], x_minus))\n x_directions.append(np.real(x_direction))\n\n return fps, x_directions", "def solve_step(particle_list, step, size):\r\n \r\n # Detect edge-hitting and collision of every particle\r\n for i in range(len(particle_list)):\r\n particle_list[i].compute_refl(step,size)\r\n for j in range(i+1,len(particle_list)):\r\n particle_list[i].compute_coll(particle_list[j],step) \r\n\r\n \r\n # Compute position of every particle \r\n for particle in particle_list:\r\n particle.compute_step(step)", "def optimize_cobra_model(sbml, bound=INF):\n\n cobra = convert_sbml_to_cobra(sbml, bound)\n\n N, L, U = cobra['S'], list(cobra['lb']), list(cobra['ub'])\n f, b = list(cobra['c']), list(cobra['b'])\n v_sol, f_opt, conv = easy_lp(f, N, b, L, U, one=True)\n\n return v_sol, f_opt", "def eval_top_down(\n root: Node,\n x: np.ndarray,\n lls: np.ndarray,\n leaf_func: Callable[[Leaf, np.ndarray, Any], np.ndarray],\n sum_func: Callable[[Sum, np.ndarray, Any], np.ndarray],\n leaf_func_kwargs: Optional[dict] = None,\n sum_func_kwargs: Optional[dict] = None,\n inplace: bool = False,\n n_jobs: int = 0\n) -> np.ndarray:\n if leaf_func_kwargs is None:\n leaf_func_kwargs = dict()\n if sum_func_kwargs is None:\n sum_func_kwargs = dict()\n\n # Check the SPN\n check_spn(root, labeled=True, smooth=True, decomposable=True)\n\n # Copy the input array, if not inplace mode\n if not inplace:\n x = np.copy(x)\n\n def eval_backward(n):\n if isinstance(n, Leaf):\n mask = np.ix_(masks[n.id], n.scope)\n x[mask] = leaf_func(n, x[mask], **leaf_func_kwargs)\n elif isinstance(n, Product):\n for c in n.children:\n masks[c.id] |= masks[n.id]\n elif isinstance(n, Sum):\n children_lls = np.stack([lls[c.id] for c in n.children], axis=1)\n branch = sum_func(n, children_lls, **sum_func_kwargs)\n for i, c in enumerate(n.children):\n masks[c.id] |= masks[n.id] & (branch == i)\n else:\n raise NotImplementedError(f\"Top down evaluation not implemented for node of type {n.__class__.__name__}\")\n\n if n_jobs == 0:\n # Compute the topological ordering\n ordering = topological_order(root)\n if ordering is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n n_nodes, n_samples = len(ordering), len(x)\n\n # Build the array consisting of top-down path masks\n masks = np.zeros(shape=(n_nodes, n_samples), dtype=np.bool_)\n masks[root.id] = True\n for node in ordering:\n eval_backward(node)\n else:\n # Compute the layered topological ordering\n layers = topological_order_layered(root)\n if layers is None:\n raise ValueError(\"SPN structure is not a directed acyclic graph (DAG)\")\n n_nodes, n_samples = sum(map(len, layers)), len(x)\n\n # Build the array consisting of top-down path masks\n masks = np.zeros(shape=(n_nodes, n_samples), dtype=np.bool_)\n 
masks[root.id] = True\n parallel_layerwise_eval(layers, eval_backward, reverse=False, n_jobs=n_jobs)\n\n return x", "def make_saddle_point_problem(objective_function, equality_constraint):\n def _lagrangian(x, multipliers):\n return objective_function(x) - np.dot(multipliers, equality_constraint(x))\n\n def _f(a, b):\n return _lagrangian(a, b)\n\n def _g(a, b):\n return -_f(a, b)\n\n return (_f, _g)", "def findStableState(L, boundaryConditions, Minv = None):\n\tn = L.shape[0]\n\tm = len(boundaryConditions)\n\tVb = np.zeros(m)\n\tpositions = {}\n\tfor i in range(m):\n\t\tcondition = boundaryConditions[i]\n\t\tVb[i] = condition[0]\n\t\tpositions[condition[0]] = condition[1]\n\tVb = np.sort(Vb)\n\tBPrime = np.zeros((m, n))\n\tYPrime = np.zeros((m, 3))\n\tfor i in range(m):\n\t\tBPrime[i][int(Vb[i])] = 1\n\t\tYPrime[i] = positions[Vb[i]]\n\n\tif Minv is None:\n\t\tzeroCorner = np.zeros((m, m))\n\t\tM = np.array(np.bmat([[L, -BPrime.T], [BPrime, zeroCorner]]))\n\t\tMinv = np.linalg.inv(M)\n\n\tXT = np.zeros((3, n))\n\t# find x coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[0]\n\tx = np.dot(Minv, y)\n\tXT[0] = x[:n]\n\t# find y coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[1]\n\tx = np.dot(Minv, y)\n\tXT[1] = x[:n]\n\t# find z coordinates\n\ty = np.zeros(n + m)\n\ty[n:] = YPrime.T[2]\n\tx = np.dot(Minv, y)\n\tXT[2] = x[:n]\n\n\treturn XT.T", "def extract_1d_boundaries(xy, NL, KL, BL, PVx, PVy, check=False):\n if PVx is None and PVy is None:\n raise RuntimeError('Not designed to allow openBC networks.')\n # PVx = np.zeros_like(KL, dtype=float)\n # PVy = np.zeros_like(KL, dtype=float)\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n # If no dangling bonds, no need to translate indices at the end\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n boundaries = []\n for boundaryloc in ['top', 'bottom']:\n # Initialize the boundary list to be as long as possible (will truncate later)\n bb = np.zeros(2 * len(xy), dtype=int)\n if boundaryloc == 'top':\n # Start with the topmost point, which is guaranteed to be\n # at the convex hull and thus also at the top outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.max(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n else:\n # Start with the bottom most point, which is guaranteed to be\n # at the convex hull and thus also at the bottom outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.min(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_1d_boundaries(): Found extremal pt: ', rightIND\n print 'le.extract_1d_boundaries(): with neighbors: ', NL[rightIND]\n print 'le.extract_1d_boundaries(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n # print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n connect = np.argwhere(np.abs(KL[rightIND]).ravel()).ravel()\n neighbors = NL[rightIND, 
connect]\n if check:\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1] + PVy[rightIND, connect],\n xy[neighbors, 0] - xy[rightIND, 0] + PVx[rightIND, connect]).ravel(),\n 2 * np.pi)\n if check:\n print 'le.extract_1d_boundaries(): KL[rightIND] = ', KL[rightIND]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] = ', KL[rightIND, 0]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles = ', angles\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[rightIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[rightIND, connect[angles == min(angles)][0]]\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n connect = np.argwhere(np.abs(KL[nextIND]).ravel())\n n_tmp = NL[nextIND, connect]\n\n # Get position in row of NL where NL == bb[dmyi - 1] (the previous boundary particle/site)\n # and where the PVx and PVy are opposite of the last used PVx and PVy values (to make sure we\n # are looking backwards along the boundary). We will use this to get the 'backward angle' -- the\n # angle of the previous bond in the boundary\n # Note that bb[dmyi - 1] may have been index 0, so there could be multiple matches\n nlpos = np.where(np.logical_and(NL[nextIND] == bb[dmyi - 1],\n np.abs(KL[nextIND]).ravel().astype(bool)))[0]\n if len(nlpos) > 1:\n # There is more than one connection to the previous particle. 
Check for where PVx and PVy\n # values are opposite the previously used values.\n ind_nlpos = np.where(np.logical_and(PVx[nextIND, nlpos] == -pvx_prev,\n PVy[nextIND, nlpos] == -pvy_prev))[0]\n print 'ind_nlpos = ', ind_nlpos\n nlpos = nlpos[ind_nlpos]\n\n # Exclude previous boundary particle (the copy of that particle in the nlpos position)\n # from the neighbors array, UNLESS IT IS THE ONLY ONE,\n # since its angle with itself is zero!\n\n # Used to remove previous particle, but this assumes that boundary is more than 2\n # particles long, which might not be true for periodic_strip bcs\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle.'\n neighbors = n_tmp\n else:\n print 'n_tmp = ', n_tmp\n neighbors = np.delete(n_tmp, nlpos)\n connect = np.delete(connect, nlpos)\n print 'n_tmp = ', n_tmp\n print 'neighbors = ', neighbors\n\n # print 'le: nlpos = ', nlpos\n forward_angles = np.arctan2(xy[neighbors, 1] - xy[nextIND, 1] + PVy[nextIND, connect],\n xy[neighbors, 0] - xy[nextIND, 0] + PVx[nextIND, connect]).ravel()\n backward_angle = np.arctan2(xy[bb[dmyi - 1], 1] - xy[nextIND, 1] + PVy[nextIND, nlpos],\n xy[bb[dmyi - 1], 0] - xy[nextIND, 0] + PVx[nextIND, nlpos]).ravel()\n if check:\n print 'le: connect = ', connect\n print 'le: forward_angles = ', forward_angles\n print 'le: backward_angle = ', backward_angle\n\n angles = np.mod(forward_angles - backward_angle, 2 * np.pi)\n if check:\n print 'le: angles = ', angles\n print 'le: angles==min--> ', angles == min(angles)\n print 'le: neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles==min--> ', angles == min(angles)\n print 'le.extract_1d_boundaries(): neighbors[angles == min(angles)] --> ', neighbors[angles == min(angles)]\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[nextIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[nextIND, connect[angles == min(angles)][0]]\n # Redefine nextIND to be the new boundary index\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n boundaries.append(boundary)\n\n return tuple(boundaries)", "def _detect_branching(\n self,\n Dseg: np.ndarray,\n tips: np.ndarray,\n seg_reference=None,\n ) -> Tuple[\n List[np.ndarray],\n List[np.ndarray],\n List[List[int]],\n List[List[int]],\n int,\n ]:\n if self.flavor == 'haghverdi16':\n ssegs = self._detect_branching_single_haghverdi16(Dseg, tips)\n elif self.flavor == 'wolf17_tri':\n ssegs = self._detect_branching_single_wolf17_tri(Dseg, tips)\n elif self.flavor == 'wolf17_bi' or self.flavor == 'wolf17_bi_un':\n ssegs = self._detect_branching_single_wolf17_bi(Dseg, tips)\n else:\n raise ValueError(\n '`flavor` needs to be in {\"haghverdi16\", \"wolf17_tri\", 
\"wolf17_bi\"}.'\n )\n # make sure that each data point has a unique association with a segment\n masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)\n for iseg, seg in enumerate(ssegs):\n masks[iseg][seg] = True\n nonunique = np.sum(masks, axis=0) > 1\n ssegs = []\n for iseg, mask in enumerate(masks):\n mask[nonunique] = False\n ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])\n # compute new tips within new segments\n ssegs_tips = []\n for inewseg, newseg in enumerate(ssegs):\n if len(np.flatnonzero(newseg)) <= 1:\n logg.warning(f'detected group with only {np.flatnonzero(newseg)} cells')\n secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]\n ssegs_tips.append([tips[inewseg], secondtip])\n undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]\n if len(undecided_cells) > 0:\n ssegs.append(undecided_cells)\n # establish the connecting points with the other segments\n ssegs_connects = [[], [], [], []]\n for inewseg, newseg_tips in enumerate(ssegs_tips):\n reference_point = newseg_tips[0]\n # closest cell to the new segment within undecided cells\n closest_cell = undecided_cells[\n np.argmin(Dseg[reference_point][undecided_cells])\n ]\n ssegs_connects[inewseg].append(closest_cell)\n # closest cell to the undecided cells within new segment\n closest_cell = ssegs[inewseg][\n np.argmin(Dseg[closest_cell][ssegs[inewseg]])\n ]\n ssegs_connects[-1].append(closest_cell)\n # also compute tips for the undecided cells\n tip_0 = undecided_cells[\n np.argmax(Dseg[undecided_cells[0]][undecided_cells])\n ]\n tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]\n ssegs_tips.append([tip_0, tip_1])\n ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]\n trunk = 3\n elif len(ssegs) == 3:\n reference_point = np.zeros(3, dtype=int)\n reference_point[0] = ssegs_tips[0][0]\n reference_point[1] = ssegs_tips[1][0]\n reference_point[2] = ssegs_tips[2][0]\n closest_points = np.zeros((3, 3), dtype=int)\n # this is another strategy than for the undecided_cells\n # here it's possible to use the more symmetric procedure\n # shouldn't make much of a difference\n closest_points[0, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[0]][ssegs[1]])\n ]\n closest_points[1, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[1]][ssegs[0]])\n ]\n closest_points[0, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[0]][ssegs[2]])\n ]\n closest_points[2, 0] = ssegs[0][\n np.argmin(Dseg[reference_point[2]][ssegs[0]])\n ]\n closest_points[1, 2] = ssegs[2][\n np.argmin(Dseg[reference_point[1]][ssegs[2]])\n ]\n closest_points[2, 1] = ssegs[1][\n np.argmin(Dseg[reference_point[2]][ssegs[1]])\n ]\n added_dist = np.zeros(3)\n added_dist[0] = (\n Dseg[closest_points[1, 0], closest_points[0, 1]]\n + Dseg[closest_points[2, 0], closest_points[0, 2]]\n )\n added_dist[1] = (\n Dseg[closest_points[0, 1], closest_points[1, 0]]\n + Dseg[closest_points[2, 1], closest_points[1, 2]]\n )\n added_dist[2] = (\n Dseg[closest_points[1, 2], closest_points[2, 1]]\n + Dseg[closest_points[0, 2], closest_points[2, 0]]\n )\n trunk = np.argmin(added_dist)\n ssegs_adjacency = [\n [trunk] if i != trunk else [j for j in range(3) if j != trunk]\n for i in range(3)\n ]\n ssegs_connects = [\n [closest_points[i, trunk]]\n if i != trunk\n else [closest_points[trunk, j] for j in range(3) if j != trunk]\n for i in range(3)\n ]\n else:\n trunk = 0\n ssegs_adjacency = [[1], [0]]\n reference_point_in_0 = ssegs_tips[0][0]\n closest_point_in_1 = ssegs[1][\n np.argmin(Dseg[reference_point_in_0][ssegs[1]])\n ]\n reference_point_in_1 = 
closest_point_in_1 # ssegs_tips[1][0]\n closest_point_in_0 = ssegs[0][\n np.argmin(Dseg[reference_point_in_1][ssegs[0]])\n ]\n ssegs_connects = [[closest_point_in_1], [closest_point_in_0]]\n return ssegs, ssegs_tips, ssegs_adjacency, ssegs_connects, trunk", "def identify_lipid_leaflets_legacy(pts,vec,monolayer_cutoff,\n\tmonolayer_cutoff_retry=True,max_count_asymmetry=0.05,pbc_rewrap=True,\n\ttopologize_tolerance=None,topologize_time_limit=30):\n\t#---previous default was somewhat high, but typically came in from specs, and we reduced it incrementally\n\tif monolayer_cutoff==None: monolayer_cutoff = 2.0\n\t#---time limit on the tolerance checker\n\ttry:\n\t\twith time_limit(topologize_time_limit): \n\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t**({'tol':topologize_tolerance} if topologize_tolerance else {}))\n\texcept TimeoutException: \n\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\twrapper = np.zeros((len(pts),3))\n\tfindframe = pts + wrapper*np.array(vec)\n\tstatus('this step is somewhat slow. it uses scipy.spatial.pdist.',tag='warning')\n\tpd = [scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(findframe[:,d:d+1])) \n\t\tfor d in range(3)]\n\tif pbc_rewrap:\n\t\tpd3pbc = np.sqrt(np.sum(np.array([pd[d]-(pd[d]>vec[d]/2.)*vec[d]+(pd[d]<-1*vec[d]/2.)*vec[d] \n\t\t\tfor d in range(3)])**2,axis=0))\n\telse: pd3pbc = pd\n\tnbors = np.transpose(np.where(pd3pbc<monolayer_cutoff))\n\tnlipids = len(pts)\n\timono = np.zeros(nlipids)\n\tnlist = []\n\tfor i in range(nlipids):\n\t\tstatus('cataloging lipids',i=i,looplen=nlipids,tag='compute')\n\t\tnlist.append(nbors[np.where(nbors[:,0]==i)[0],1])\n\tiref = 0\n\tmono = np.zeros(nlipids)\n\tsearched = np.zeros(nlipids)\n\timono[iref],searched[iref] = 1,1\n\timono[nlist[iref]] = 1\n\twhile np.any(np.all((imono==1,searched==0),axis=0)):\n\t\tfor iref in np.where(np.all((imono==1,searched==0),axis=0))[0]: \n\t\t\timono[nlist[iref]] = 1\n\t\t\tsearched[iref] = 1\n\t#---check that the leaflets were properly distinguished by looking at the number in each monolayer\n\tif np.mean(imono)==0.5: \n\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\treturn imono\n\telif (monolayer_cutoff_retry and (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry)):\n\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\tstatus('[COMPUTE] leaflets = '+str(np.sum(imono))+'/'+str(len(imono)))\n\t\tstatus('[WARNING] previous monolayer_cutoff = '+str(monolayer_cutoff))\n\t\traise Exception(\n\t\t\t'[ERROR] failed to identify leaflets so we are returning an exception to the LeafletFinder')\n\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\treturn imono", "def find_saddle(V,X,Y,Z,dim,Z0=None):\n debug=False # internal code only; typically False\n from project_parameters import scale\n if (dim==2 and Z0==None):\n return 'z0 needed for evaluation'\n if dim==3:\n if len(V.shape)!=3:\n return('Problem with find_saddle.m dimensionalities.')\n f=V/float(np.amax(V)) # Normalize field\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale) # grid spacing is automatically consistent thanks to BEM-solver\n E=np.sqrt(Ex**2+Ey**2+Ez**2) 
# magnitude of gradient (E field)\n m=E[1,1,1]\n origin=[1,1,1]\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n for k in range(E.shape[2]):\n if E[i,j,k]<m:\n m=E[i,j,k]\n origin=[i,j,k] \n if debug:\n print('DEBUGGING...')\n fig=plt.figure()\n e=np.reshape(E,(1,E.shape[0]*E.shape[1]*E.shape[2]))\n ind,e=np.argsort(e),np.sort(e)\n e=e[0]\n ind=ind[0] #Sort V by the same indexing.\n v=np.reshape(V,(1,V.shape[0]*V.shape[1]*V.shape[2]))\n v=v[0]\n plt.plot(e/float(np.amax(e)))\n def index_sort(v,e):\n \"\"\"Takes in two lists of the same length and returns the first sorted by the indexing of i sorted.\"\"\"\n es=np.sort(e)\n ix=np.argsort(e)\n vs=np.ones(len(v)) #Sorted by the sorting defined by f being sorted. \n # If v==e, this returns es.\n for i in range(len(v)):\n j=ix[i]\n vs[i]=v[j]\n return vs\n v=index_sort(v,e) # Is it supposed to look like this?\n plt.plot(v/float(np.amax(v)))\n plt.title('Debugging: blue is sorted gradient, green is potential sorted by gradient')\n plt.show() #f is blue and smooth, v is green and fuzzy.\n if origin[0]==(1 or V.shape[0]):\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[1]):\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[2]): \n print('find_saddle: Saddle out of bounds in z (k) direction.\\n')\n return origin\n #################################################################################################\n if dim==2: # Extrapolate to the values of A at z0.\n V2=V\n if len(V.shape)==3:\n Ks=0 # in case there is no saddle point\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n Ks=i-1\n if Z0<1:\n Ks+=1\n Vs=V.shape\n if Ks>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,Ks] \n v2=V[:,:,Ks+1]\n V2=v1+(v2-v1)*(Z0-Z[Ks])/(Z[Ks+1]-Z[Ks])\n V2s=V2.shape\n if len(V2s)!=2: # Old: What is this supposed to check? Matlab code: (size(size(A2),2) ~= 2)\n return('Problem with find_saddle.py dimensionalities. 
It is {}.'.format(V2s))\n f=V2/float(np.max(abs(V2)))\n [Ex,Ey]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]))\n E=np.sqrt(Ex**2+Ey**2)\n m=float(np.min(E))\n if m>1e-4: # This requires a grid with step size 0.01, not just 0.1.\n if debug:\n Is,Js=np.NaN,np.NaN\n print('Warning, there seems to be no saddle point.')\n mr=E[0,0]\n Is,Js=1,1 # in case there is no saddle\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n if E[i,j]<mr:\n mr=E[i,j]\n Is,Js=i,j\n origin=[Is,Js,Ks]\n if Is==1 or Is==V.shape[0]:\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if Js==1 or Js==V.shape[1]:\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n return origin", "def between_vec(df, switch): \n gauss1_idx, gauss2_idx = gauss_idx_func(CC_scaled)\n nga_dict = {key:list() for key in NGAs}\n \n slopes = [] \n \n def slope(a, b):\n \"\"\" find slope given two points \"\"\"\n a1, a2 = PC_matrix[:, 0][a], PC_matrix[:, 1][a]\n b1, b2 = PC_matrix[:, 0][b], PC_matrix[:, 1][b]\n \n return b1-a1, b2-a2\n \n # compute flow vector for each nga \n for nga in NGAs:\n nga_idx = df.index[df['NGA'] == nga].tolist()\n\n gauss1 = [i for i in nga_idx if i in gauss1_idx]\n gauss2 = [j for j in nga_idx if j in gauss2_idx]\n\n # use the last point in the first cluster and the first point in the second cluster\n if switch == 1: \n \n try:\n a, b = gauss1[-1], gauss2[0]\n x, y = slope(a, b)\n slopes.append((x, y))\n\n except: # lies only in one of the two clusters \n pass \n \n # use the very first time points make a transition from the first to the second\n elif switch == 2:\n \n for idx in range(len(nga_idx)-1):\n \n if nga_idx[idx] in gauss1 and nga_idx[idx+1] in gauss2:\n \n a, b = nga_idx[idx], nga_idx[idx+1]\n x, y = slope(a, b)\n slopes.append((x, y))\n \n break \n \n # take all transitions\n elif switch == 3:\n \n for idx in range(len(nga_idx)-1):\n \n if nga_idx[idx] in gauss1 and nga_idx[idx+1] in gauss2:\n \n a, b = nga_idx[idx], nga_idx[idx+1]\n x, y = slope(a, b)\n slopes.append((x, y))\n \n return slopes", "def our_own_bvp_solve(f, a, b, n, y0, dim, bc, tol=1e-2):\n\n # interpolate the initial guess function y0 on Chebyshev points of the first kind\n cf0 = []\n for y0_i in y0:\n for thing in np.polynomial.chebyshev.Chebyshev(np.zeros(n), (a, b)).interpolate(y0_i, n, (a, b)):\n cf0.append(thing)\n\n solution = root(lambda u: fun(u, a, b, dim, n, f, bc), cf0, method='lm', tol=tol)\n if not solution.success:\n print('root finding failed')\n\n cf = solution.x\n cf = cf.reshape((dim, cf.size // dim))\n\n return [np.polynomial.chebyshev.Chebyshev(cf[i], (a, b)) for i in range(dim)]", "def group_centers_phase1_and_2(self) -> None:\n self.rotate_U_to_U()\n self.rotate_F_to_F()\n\n if self.centers_staged():\n return\n\n original_state = self.state[:]\n original_solution = self.solution[:]\n tmp_solution_len = len(self.solution)\n\n # find multiple phase1 solutions\n phase1_solutions = self.lt_LR_centers_stage.solutions_via_c(solution_count=100)\n pt_state_indexes = []\n pt_state_indexes_LR_centers_special = []\n phase2_pt_state_indexes_to_phase1_solution = {}\n logger.info(f\"found {len(phase1_solutions)} phase1 solutions\")\n\n # find the phase2 solution for each phase1 solution\n for phase1_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) in phase1_solutions:\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n for step in phase1_solution:\n self.rotate(step)\n\n # stage the LR centers\n 
phase2_pt_state_indexes = tuple([pt.state_index() for pt in self.lt_FB_centers_stage.prune_tables])\n pt_state_indexes.append(phase2_pt_state_indexes)\n phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution\n\n # stage the LR centers and put them into one of 495 states solveable with L L' R R'\n phase2_pt_state_indexes = tuple(\n [pt.state_index() for pt in self.lt_FB_centers_stage_LR_centers_special.prune_tables]\n )\n pt_state_indexes_LR_centers_special.append(phase2_pt_state_indexes)\n phase2_pt_state_indexes_to_phase1_solution[phase2_pt_state_indexes] = phase1_solution\n\n self.state = original_state[:]\n self.solution = original_solution[:]\n\n # stage the FB centers\n phase2_solutions = self.lt_FB_centers_stage.solutions_via_c(pt_states=pt_state_indexes, solution_count=1)\n phase2_solution = phase2_solutions[0][0]\n\n # stage the FB centers and put LR centers into one of 495 states solveable with L L' R R'\n phase2_solutions_lr_centers_special = self.lt_FB_centers_stage_LR_centers_special.solutions_via_c(\n pt_states=pt_state_indexes_LR_centers_special, solution_count=1\n )\n phase2_solution_lr_centers_special = phase2_solutions_lr_centers_special[0][0]\n\n # if we can put the LR centers into one of 495 states without adding to the move count, make it so\n if len(phase2_solution_lr_centers_special) <= len(phase2_solution):\n min_phase2_solution, (\n pt0_state,\n pt1_state,\n pt2_state,\n pt3_state,\n pt4_state,\n ) = phase2_solutions_lr_centers_special[0]\n min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state, pt2_state]\n else:\n min_phase2_solution, (pt0_state, pt1_state, pt2_state, pt3_state, pt4_state) = phase2_solutions[0]\n min_phase1_solution = phase2_pt_state_indexes_to_phase1_solution[pt0_state, pt1_state]\n\n logger.info(\n f\"phase2 solution length {len(phase2_solution)}, phase2_lr_centers_special solution length {len(phase2_solution_lr_centers_special)}\"\n )\n\n for step in min_phase1_solution:\n self.rotate(step)\n\n self.print_cube_add_comment(\"LR centers staged\", tmp_solution_len)\n\n tmp_solution_len = len(self.solution)\n for step in min_phase2_solution:\n self.rotate(step)\n\n self.print_cube_add_comment(\"UD FB centers staged\", tmp_solution_len)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The returned data frame should have trading_pair as its index and include USD volume, baseAsset and quoteAsset
async def get_active_exchange_markets(cls) -> pd.DataFrame: async with aiohttp.ClientSession() as client: trading_pairs_response = await client.get(ASSET_PAIRS_URL) trading_pairs_response: aiohttp.ClientResponse = trading_pairs_response if trading_pairs_response.status != 200: raise IOError(f"Error fetching Kraken trading pairs. " f"HTTP status is {trading_pairs_response.status}.") trading_pairs_data: Dict[str, Any] = await trading_pairs_response.json() trading_pairs_data["result"] = { pair: details for pair, details in trading_pairs_data["result"].items() if "." not in pair} wsname_dict: Dict[str, str] = {pair: details["wsname"] for pair, details in trading_pairs_data["result"].items()} trading_pairs: Dict[str, Any] = {pair: {"baseAsset": wsname_dict[pair].split("/")[0], "quoteAsset": wsname_dict[pair].split("/")[1], "wsname": wsname_dict[pair]} for pair in trading_pairs_data["result"]} trading_pairs_str: str = ','.join(trading_pairs.keys()) market_response = await client.get(f"{TICKER_URL}?pair={trading_pairs_str}") market_response: aiohttp.ClientResponse = market_response if market_response.status != 200: raise IOError(f"Error fetching Kraken markets information. " f"HTTP status is {market_response.status}.") market_data = await market_response.json() market_data: List[Dict[str, Any]] = [{"pair": pair, **market_data["result"][pair], **trading_pairs[pair]} for pair in market_data["result"] if pair in trading_pairs] # Build the data frame. all_markets: pd.DataFrame = pd.DataFrame.from_records(data=market_data, index="pair") all_markets["lastPrice"] = all_markets.c.map(lambda x: x[0]).astype("float") all_markets.loc[:, "volume"] = all_markets.v.map(lambda x: x[1]).astype("float") price_dict: Dict[str, float] = await cls.get_prices_from_df(all_markets) usd_volume: List[float] = [ ( baseVolume * price_dict[baseAsset] if baseAsset in price_dict else -1 ) for baseAsset, baseVolume in zip(all_markets.baseAsset, all_markets.volume)] all_markets.loc[:, "USDVolume"] = usd_volume return all_markets.sort_values("USDVolume", ascending=False)
[ "def calculate_asset_delta_from_trades(current_strategy_name: str,\n market_trading_pair_tuples: List[MarketTradingPairTuple],\n raw_queried_trades: List[TradeFill],\n ) -> Dict[MarketTradingPairTuple, Dict[str, Decimal]]:\n market_trading_pair_stats: Dict[MarketTradingPairTuple, Dict[str, Decimal]] = {}\n for market_trading_pair_tuple in market_trading_pair_tuples:\n asset_stats: Dict[str, Dict[str, Decimal]] = defaultdict(\n lambda: {\"spent\": s_decimal_0, \"acquired\": s_decimal_0}\n )\n asset_stats[market_trading_pair_tuple.base_asset.upper()] = {\"spent\": s_decimal_0, \"acquired\": s_decimal_0}\n asset_stats[market_trading_pair_tuple.quote_asset.upper()] = {\"spent\": s_decimal_0, \"acquired\": s_decimal_0}\n\n if raw_queried_trades is not None:\n queried_trades: List[TradeFill] = [t for t in raw_queried_trades if (\n t.strategy == current_strategy_name\n and t.market == market_trading_pair_tuple.market.display_name\n and t.symbol == market_trading_pair_tuple.trading_pair\n )]\n else:\n queried_trades = []\n\n if not queried_trades:\n market_trading_pair_stats[market_trading_pair_tuple] = {\n \"starting_quote_rate\": market_trading_pair_tuple.get_mid_price(),\n \"asset\": asset_stats,\n \"trade_count\": 0\n }\n continue\n\n for trade in queried_trades:\n # For each trade, calculate the spent and acquired amount of the corresponding base and quote asset\n trade_side: str = trade.trade_type\n base_asset: str = trade.base_asset.upper()\n quote_asset: str = trade.quote_asset.upper()\n base_delta, quote_delta = calculate_trade_asset_delta_with_fees(trade)\n if trade_side == TradeType.SELL.name:\n asset_stats[base_asset][\"spent\"] += base_delta\n asset_stats[quote_asset][\"acquired\"] += quote_delta\n elif trade_side == TradeType.BUY.name:\n asset_stats[base_asset][\"acquired\"] += base_delta\n asset_stats[quote_asset][\"spent\"] += quote_delta\n\n market_trading_pair_stats[market_trading_pair_tuple] = {\n \"starting_quote_rate\": Decimal(repr(queried_trades[0].price)),\n \"asset\": asset_stats,\n \"trade_count\": len(queried_trades)\n }\n\n return market_trading_pair_stats", "def _market_trading_pair_tuple(self,\n connector_name: str,\n trading_pair: str) -> MarketTradingPairTuple:\n base, quote = split_hb_trading_pair(trading_pair)\n return MarketTradingPairTuple(self.connectors[connector_name], trading_pair, base, quote)", "def prices(self, quotes: pd.DataFrame) -> pd.DataFrame:\n #df = quotes[['ask', 'bid']][-1]\n return quotes", "def join_data(df_trade, df_stock):\n raise NotImplemented('Not yet implemented')", "def get_trade_data(pair: str, year: str, path: str = \"accumulation_opportunity/data\"):\n\n dtypes = {\n \"PriceMillionths\": int,\n \"Side\": int,\n \"SizeBillionths\": int,\n \"timestamp_utc_nanoseconds\": int,\n }\n\n filename = f\"trades_narrow_{pair}_{year}.delim.gz\"\n delimiter = {\"2018\": \"|\", \"2021\": \"\\t\"}[year]\n\n with gzip.open(f\"{path}/{filename}\") as f:\n df = pd.read_csv(f, delimiter=delimiter, usecols=dtypes.keys(), dtype=dtypes)\n\n df.timestamp_utc_nanoseconds = pd.to_datetime(df.timestamp_utc_nanoseconds)\n\n return df.set_index(\"timestamp_utc_nanoseconds\")", "def account_df(self, typ='trades', improve=False):\n cols = ['date_open', 'date_close', 'symbol', 'style', 'volume', 'price_open', 'price_stop', 'price_limit', 'price_close', 'comment', 'magic', 'order_id_master', 'order_id_stop', 'order_id_limit', 'direction', 'price_diff', 'price_diff', 'price_diff_d', 'price_diff_rel', 'price_diff_rel_d', 'MAE', 'MFE', 'MAE_rel', 'MFE_rel', 
'price_trailing_diff', 'profit']\n d = self._d_orders[typ]\n if len(d)>0:\n df = pd.DataFrame(d.values(), index=d.keys())\n df = df.rename(columns={0: 'bo'})\n df['date_created'] = df['bo'].map(lambda o: o.date_created)\n df['date_open'] = df['bo'].map(lambda o: o.date_open)\n df['date_close'] = df['bo'].map(lambda o: o.date_close)\n df['date_closed'] = df['bo'].map(lambda o: o.date_closed)\n df['symbol'] = df['bo'].map(lambda o: o.symbol)\n #df['style'] = df['bo'].map(lambda o: o.style)\n df['volume'] = df['bo'].map(lambda o: o.volume)\n df['price_open'] = df['bo'].map(lambda o: o.price_open)\n df['price_stop'] = df['bo'].map(lambda o: o.price_stop)\n df['price_limit'] = df['bo'].map(lambda o: o.price_limit)\n df['price_close'] = df['bo'].map(lambda o: o.price_close)\n df['comment'] = df['bo'].map(lambda o: o.comment)\n df['magic'] = df['bo'].map(lambda o: o.magic)\n #df['order_id_master'] = df['bo'].map(lambda o: o.order_id_master)\n #df['order_id_stop'] = df['bo'].map(lambda o: o.order_id_stop)\n #df['order_id_limit'] = df['bo'].map(lambda o: o.order_id_limit)\n\n df['direction'] = df['bo'].map(lambda o: o.direction)\n\n df['price_diff'] = df['bo'].map(lambda o: o.price_diff)\n df['price_diff_d'] = df['bo'].map(lambda o: o.price_diff_d)\n df['price_diff_rel'] = df['bo'].map(lambda o: o.price_diff_rel)\n df['price_diff_rel_d'] = df['bo'].map(lambda o: o.price_diff_rel_d)\n \n df['MAE'] = df['bo'].map(lambda o: o.MAE)\n df['MFE'] = df['bo'].map(lambda o: o.MFE)\n \n #df['MAE_rel'] = df['MAE'] / df['price_open']\n #df['MFE_rel'] = df['MFE'] / df['price_open']\n df['MAE_rel'] = df['bo'].map(lambda o: o.MAE_rel)\n df['MFE_rel'] = df['bo'].map(lambda o: o.MFE_rel)\n \n\n #df['profit'] = df['volume'] * df['price_diff'].fillna(0)\n df['profit'] = df['bo'].map(lambda o: o.profit)\n #df['profit_rel'] = df['bo'].map(lambda o: o.profit_rel)\n \n if improve:\n try:\n df = improve_account_df_with_additional_data(df)\n except Exception as e:\n log.error(\"Can't improve account df with additional data\")\n log.error(\"Reason: %s\" % str(e))\n \n #del df['bo'] \n \n return(df)\n else:\n return(pd.DataFrame(columns=cols))", "def fetch_coinpairs():\n client = config.client\n \n # get and process price information\n df = pd.DataFrame(client.get_all_tickers()).dropna()\n df[\"price\"] = df[\"price\"].astype(float) \n relevant_coinpairs = [\"USDT\"] \n irrelevant_coinpairs = [\"STORMUSDT\", \"LENDUSDT\", \"USDTUAH\", \"USDTBRL\", \"DOWN\", \"UP\", \"BULL\", \n \"USDTBIDR\",\"BEAR\", \"BCHSVUSDT\", \"USDTGYEN\", \"BCCUSDT\", \"BCHSVUSDT\",\n \"ERDUSDT\", \"USDTIDRT\", \"DAIUSDT\", \"AUDUSDT\", \"USDTNGN\", \"USDTZAR\",\n \"PAXUSDT\", \"USDSBUSDT\", \"PAXUSDT\", \"USDTDAI\", \"EPSUSDT\"]\n \n # remove irrelevant coins\n for coinpair in relevant_coinpairs:\n df = df[df[\"symbol\"].apply(lambda x: True if coinpair in x else False)]\n \n for coin_pair in df[\"symbol\"]:\n # if irrelevant coin pair in coin pair, remove from df\n for ir_coin_pair in irrelevant_coinpairs:\n if ir_coin_pair in coin_pair:\n df = df[df[\"symbol\"]!=coin_pair]\n\n # reset index and return \n return df.reset_index(drop=True)", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def trading_pair(self, trading_pair):\n\n self._trading_pair = trading_pair", "def exchanges_df(self) -> pd.DataFrame:\n mid_price = self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)\n maker_buy_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n maker_sell_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n taker_buy_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n taker_sell_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n maker_buy_spread_bps = (maker_buy_result.result_price - taker_buy_result.result_price) / mid_price * 10000\n maker_sell_spread_bps = (taker_sell_result.result_price - maker_sell_result.result_price) / mid_price * 10000\n columns = [\"Exchange\", \"Market\", \"Mid Price\", \"Buy Price\", \"Sell Price\", \"Buy Spread\", \"Sell Spread\"]\n data = []\n data.append([\n self.maker_exchange,\n self.maker_pair,\n float(self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)),\n float(maker_buy_result.result_price),\n float(maker_sell_result.result_price),\n int(maker_buy_spread_bps),\n int(maker_sell_spread_bps)\n ])\n data.append([\n self.taker_exchange,\n self.taker_pair,\n float(self.connectors[self.taker_exchange].get_mid_price(self.maker_pair)),\n float(taker_buy_result.result_price),\n float(taker_sell_result.result_price),\n int(-maker_buy_spread_bps),\n int(-maker_sell_spread_bps)\n ])\n df = pd.DataFrame(data=data, columns=columns)\n return df", "def match_alg(quote_data, trade_data):\n \n # To not replace on original dataframe create tmp_quote and mess with its timestamps #\n tmp_quote = quote_data.copy(deep = True)\n \n tmp_quote['price'] = np.nan\n tmp_quote['tradesize'] = np.nan\n \n for ind, ns in trade_data.iterrows():\n low_id, up_id = find_neighbours(quote_data, ns['utcsec'])\n \n # Continue if lower id was not found #\n if np.isnan(low_id) == True:\n continue\n \n tmp_quote.loc[low_id, 'utcsec'] = ns['utcsec']\n tmp_quote.loc[low_id, 'price'] = ns['price']\n tmp_quote.loc[low_id, 'tradesize'] = ns['volume']\n \n # Drop observations that have no match. \n \n tmp_quote = tmp_quote.dropna(axis = 0)\n \n return tmp_quote", "def map_to_trade(self, raw_trade: HitbtcRawTradeModel) -> HitbtcTradeModel:\n\n id_ = int(raw_trade[\"id\"])\n price = Decimal(raw_trade[\"price\"])\n quantity = Decimal(raw_trade[\"quantity\"])\n side = raw_trade[\"side\"]\n timestamp = raw_trade[\"timestamp\"]\n\n trade = HitbtcTradeModel(\n id=id_,\n price=price,\n quantity=quantity,\n side=side,\n timestamp=timestamp)\n\n return trade", "def _deserialize_trade(self, raw_result: list[Any]) -> Trade:\n amount = deserialize_asset_amount(raw_result[4])\n trade_type = TradeType.BUY if amount >= ZERO else TradeType.SELL\n bfx_pair = self._process_bfx_pair(raw_result[1])\n if bfx_pair in self.pair_bfx_symbols_map:\n bfx_base_asset_symbol, bfx_quote_asset_symbol = self.pair_bfx_symbols_map[bfx_pair]\n elif len(bfx_pair) == 6:\n # Could not see it in the listed pairs. Probably delisted. Gotta try and figure it out\n # TODO: The whole pair logic in bitfinex seems complicated. Simplify!\n bfx_base_asset_symbol = bfx_pair[:3]\n bfx_quote_asset_symbol = bfx_pair[3:]\n else:\n raise DeserializationError(\n f'Could not deserialize bitfinex trade pair {raw_result[1]}. 
'\n f'Raw trade: {raw_result}',\n )\n\n base_asset = asset_from_bitfinex(\n bitfinex_name=bfx_base_asset_symbol,\n currency_map=self.currency_map,\n )\n quote_asset = asset_from_bitfinex(\n bitfinex_name=bfx_quote_asset_symbol,\n currency_map=self.currency_map,\n )\n fee_asset = asset_from_bitfinex(\n bitfinex_name=raw_result[10],\n currency_map=self.currency_map,\n )\n\n trade = Trade(\n timestamp=Timestamp(int(raw_result[2] / 1000)),\n location=Location.BITFINEX,\n base_asset=base_asset,\n quote_asset=quote_asset,\n trade_type=trade_type,\n amount=AssetAmount(abs(amount)),\n rate=deserialize_price(raw_result[5]),\n fee=Fee(abs(deserialize_fee(raw_result[9]))),\n fee_currency=fee_asset,\n link=str(raw_result[0]),\n notes='',\n )\n return trade", "def calculate_vol_adjusted_index_from_prices(self, prices_df, br):\n\n tsc = TimeSeriesCalcs()\n\n returns_df, leverage_df = self.calculate_vol_adjusted_returns(prices_df, br, returns = False)\n\n return tsc.create_mult_index(returns_df)", "def create_pair_differences(self):\n\n # Create an empty dataframe of pair differences, we will append this later.\n pair_string_names = []\n pair_price_diff = []\n\n for pair in self.__pairs_data:\n # Choose both stocks from each pair\n stock_symbol_1 = pair[0]\n stock_symbol_2 = pair[1]\n\n # Create a string that symbolizes the pair and add it to a list of strings\n pair_string = str(stock_symbol_1) + '-' + str(stock_symbol_2)\n pair_string_names.append(pair_string)\n\n # Get both stock prices from the price dataset\n stock_price1 = self.__price_data[stock_symbol_1]\n stock_price2 = self.__price_data[stock_symbol_2]\n pair_diff = stock_price2 - stock_price1\n pair_price_diff.append(pair_diff)\n\n # Concat all the pairs into the pair differences attribute in class and set column names\n self.__pair_diff = pd.concat([pd.Series(pair_prices) for pair_prices in pair_price_diff], axis=1)\n self.__pair_diff.columns = pair_string_names\n\n return self.__pair_diff", "def coin_pair_selector(df, trade_margin, amount = 10):\n\n df[\"up_by\"] = df[\"price\"] * config.ROI # trade margin (or ROI) is the percentage by which the price should go up\n df[\"target_price\"] = df[\"up_by\"] + df[\"price\"]\n\n df[\"#coins\"] = config.USDT_amount / df[\"price\"] \n df[\"avg_chg_minute\"] = df[\"symbol\"].apply(avg_change_minute)\n df[\"Δ1\"] = abs((df[\"avg_chg_minute\"] - df[\"up_by\"]) / df[\"avg_chg_minute\"])\n\n df = df.sort_values(by=\"Δ1\", ascending=True)\n #df = df[df[\"Δ1\"] < 10]\n \n return df", "async def get_trading_table(self):\n if self.trading_table is None:\n self.trading_table = {}\n wikitext = await Controller.get_wikitext('Trading')\n for match in re.finditer(r\"===='''([^']+)'''====\\n({\\|[^\\n]*\\n(?:[^\\n]*\\n)+?\\|})\", wikitext):\n place = match.group(1)\n trade_list = {'into':{}, 'from':{}}\n for row in match.group(2).strip().split('|-'):\n if len(row) < 5:\n continue\n trade = re.search(r'\\|([0-9,.]+)\\|\\| \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]\\|\\|→\\n\\|align\\=right\\|([0-9,.]+)\\|\\| \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]', row)\n if not trade:\n trade = re.search(r'\\| ?([0-9,.]+) \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]\\|\\| ?([0-9,.]+) \\[\\[(?:[^|\\]]+\\|)?([^\\]]+)\\]\\]', row)\n if not trade:\n logging.warn(f'No trade row in `{row}`')\n continue\n from_amt = int(trade.group(1).replace(',', ''))\n from_itm = trade.group(2).lower()\n to_amt = int(trade.group(3).replace(',', ''))\n to_itm = trade.group(4).lower()\n if from_itm not in trade_list['from']:\n trade_list['from'][from_itm] = []\n if 
to_itm not in trade_list['into']:\n trade_list['into'][to_itm] = []\n trade_list['from'][from_itm].append((to_itm, from_amt, to_amt))\n trade_list['into'][to_itm].append((from_itm, to_amt, from_amt))\n if '(' in place:\n # Gorenichi (Kiev), Magnitogorsk (trader), Magnitogorsk (fitter)\n if place[0] == 'G':\n place = 'Kiev'\n self.trading_table[place.lower()] = trade_list\n return self.trading_table", "def get_data(self):\n\n engine = sql.create_engine(\"mysql+mysqldb://root:Slimjoewilly12@localhost:3306/price_data\")\n pair_frames = {}\n for p in self.pairs:\n select = sql.select(['id']).where(\"ticker = '%s'\" % p).select_from('symbols')\n ticker_id = engine.execute(select).fetchone()[0]\n query = (\"SELECT symbols.ticker, ticks.date_time, ticks.bid, ticks.ask FROM ticks INNER JOIN symbols on ticks.symbol_id = symbols.id \"\n \"WHERE (symbol_id = '%s') AND (DATE(date_time) BETWEEN '%s' AND '%s')\" % (str(ticker_id), self.start_date, self.end_date))\n pair_frames[p] = pd.read_sql_query(query, con=engine, index_col=\"date_time\")\n return pd.concat(pair_frames.values()).sort().iterrows()", "def aggregate_historical_trades(self, pair: list):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
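A minimal usage sketch of the USD-volume ranking step from the get_active_exchange_markets entry above, reproduced on a hand-built pandas DataFrame; the trading pairs and prices below are invented for illustration only.

import pandas as pd

all_markets = pd.DataFrame.from_records(
    data=[
        {"pair": "XXBTZUSD", "baseAsset": "XBT", "quoteAsset": "USD", "volume": 120.0},
        {"pair": "XETHZUSD", "baseAsset": "ETH", "quoteAsset": "USD", "volume": 900.0},
        {"pair": "ADAUSD", "baseAsset": "ADA", "quoteAsset": "USD", "volume": 50000.0},
    ],
    index="pair",
)
price_dict = {"XBT": 40000.0, "ETH": 2500.0}  # ADA deliberately missing from the price map

# Same pattern as the document: bases without a known USD price get a sentinel volume of -1.
usd_volume = [
    base_volume * price_dict[base_asset] if base_asset in price_dict else -1
    for base_asset, base_volume in zip(all_markets.baseAsset, all_markets.volume)
]
all_markets.loc[:, "USDVolume"] = usd_volume
print(all_markets.sort_values("USDVolume", ascending=False))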
Read a string from standard input, but prompt to standard error. The trailing newline is stripped.
def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str: # pragma: no cover if file is sys.stdout: return input(prompt) try: stdin = sys.stdin except AttributeError: raise RuntimeError("stderr_input: lost sys.stdin") file.write(prompt) try: flush = file.flush except AttributeError: pass else: flush() try: file.softspace = 0 # type: ignore except (AttributeError, TypeError): pass line = stdin.readline() if not line: # inputting an empty line gives line == '\n' raise EOFError elif line[-1] == '\n': return line[:-1] return line
[ "def safeRawInput(in_string = None):\r\n \r\n try:\r\n if in_string == None:\r\n return raw_input()\r\n \r\n return raw_input(in_string)\r\n \r\n except (EOFError, KeyboardInterrupt):\r\n print\r\n print \"User Interrupt: Quitting script...\"\r\n sys.exit()", "def raw_input(prompt=\"\"):\n\t\n\tsys.stderr.flush()\n\t\n\ttty = STDIN.is_a_TTY() and STDOUT.is_a_TTY()\n\t\n\tif RETURN_UNICODE:\n\t\tif tty:\n\t\t\tline_bytes = readline(prompt)\n\t\t\tline = stdin_decode(line_bytes)\n\t\telse:\n\t\t\tline = stdio_readline(prompt)\n\t\t\n\telse:\n\t\tif tty:\n\t\t\tline = readline(prompt)\n\t\telse:\n\t\t\tline_unicode = stdio_readline(prompt)\n\t\t\tline = stdin_encode(line_unicode)\n\t\n\tif line:\n\t\treturn line[:-1] # strip strailing \"\\n\"\n\telse:\n\t\traise EOFError", "def input_or_error(stream=sys.stdin):\n line = readline_strip(stream)\n if not line: raise EOFError(\"End of input\")\n return line", "def prompt_str_input(prompt_name: str, get_user_input: GetInputFunc) -> str:\n try:\n return str(get_user_input(f\"type in {prompt_name}:\"))\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))", "def rlinput(prompt: str, prefill: str = \"\"):\n readline.set_startup_hook(lambda: readline.insert_text(prefill))\n try:\n return input(prompt)\n finally:\n readline.set_startup_hook()", "def safe_input():\n try:\n input(\"Please enter something: \")\n except EOFError:\n return None\n except KeyboardInterrupt:\n return None", "def safe_input(s: str, block: int = 10) -> str:\n\n try:\n return input(s)\n except EOFError:\n sleep(block) # can be interrupted by KeyboardInterrupt\n raise", "def read_line(prompt=\"\"):\n exp = raw_input(prompt)\n return exp", "def input_helper(prompt):\n if version_info[0] == 2:\n # python2 input is scary - we want raw_input\n return raw_input(prompt)\n else:\n return input(prompt)", "def _prompt(letters='yn', default=None):\n\n import sys\n while True:\n try:\n inputstr = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if inputstr and inputstr in letters:\n return inputstr\n if default is not None and inputstr == '':\n return default\n print 'Come again?'", "def input_with_timeout(prompt: Optional[str] = None, timeout: float = 36000.0) -> str:\n # use of sys.stdin and sys.stdout to mimic the builtin input based on\n # https://github.com/python/cpython/blob/baf7bb30a02aabde260143136bdf5b3738a1d409/Lib/getpass.py#L129\n if prompt:\n sys.stdout.write(prompt)\n sys.stdout.flush()\n\n line = misc.readline_with_timeout(timeout, prompt)\n\n if not line:\n raise EOFError\n return line.rstrip('\\n')", "def input(prompt: str, default=\"y\"):\n import sys\n\n try:\n if sys.stdin.isatty():\n return _system_input(prompt)\n else:\n print(f\"Not connected to a console, so having to use \"\n f\"the default ({default})\")\n return default\n except Exception as e:\n print(f\"Unable to get the input: {e.__class__} {e}\")\n print(f\"Using the default ({default}) instead\")\n return default", "def get_input(prompt):\n return input(prompt)", "def read_password(prompt=\"Password:\"):\r\n setTerminalEcho(False)\r\n try:\r\n password = raw_input(prompt)\r\n except ValueError:\r\n raise EOFError\r\n sys.stdout.write(\"\\n\")\r\n setTerminalEcho(True)\r\n return password", "def getstr(message):\r\n try:\r\n str = raw_input(message)\r\n except ValueError, e:\r\n warning(e)\r\n raise e\r\n return str", "def input(prompt=\"\"):\n\t\n\tstring = stdin_decode(raw_input(prompt))\n\t\n\tcaller_frame = sys._getframe(1)\n\tglobals = 
caller_frame.f_globals\n\tlocals = caller_frame.f_locals\n\t\n\treturn eval(string, globals, locals)", "def input_with_timeout(prompt=None, timeout=36000.0):\n # use of sys.stdin and sys.stdout to mimic six.moves.input based on\n # https://github.com/python/cpython/blob/baf7bb30a02aabde260143136bdf5b3738a1d409/Lib/getpass.py#L129\n if prompt:\n sys.stdout.write(prompt)\n sys.stdout.flush()\n\n line = misc.readline_with_timeout(timeout, prompt)\n\n if not line:\n raise EOFError\n return line.rstrip('\\n')", "def ask(prompt:str, guess:str=\"\", insist=True) -> str:\n readline.set_startup_hook(lambda: readline.insert_text(guess))\n try:\n retval = ''\n while retval == '':\n retval = input(prompt)\n if not insist:\n break\n finally:\n readline.set_startup_hook()\n return retval", "def _getInput(self, prompt):\n try:\n return raw_input(prompt)\n except (EOFError, KeyboardInterrupt):\n raise HaltLoop" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
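A small sketch of the same prompt-on-one-stream, read-on-another idea as the stderr_input entry above, exercised non-interactively by pointing sys.stdin at an in-memory buffer; the helper below is a simplified stand-in, not the documented function itself.

import io
import sys

def prompt_elsewhere(prompt: str = "") -> str:
    # Write the prompt to stderr so stdout stays clean, then read one line from stdin.
    sys.stderr.write(prompt)
    sys.stderr.flush()
    line = sys.stdin.readline()
    if not line:  # empty string means EOF; an empty input line would be "\n"
        raise EOFError
    return line[:-1] if line.endswith("\n") else line

old_stdin = sys.stdin
sys.stdin = io.StringIO("Ada Lovelace\n")
try:
    assert prompt_elsewhere("name> ") == "Ada Lovelace"
finally:
    sys.stdin = old_stdin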
Return canonical form for control state.
def canonical_ctrl_state(ctrl_state, num_qubits): if not num_qubits: return '' if isinstance(ctrl_state, CtrlAll): if ctrl_state == CtrlAll.One: return '1' * num_qubits return '0' * num_qubits if isinstance(ctrl_state, int): # If the user inputs an integer, convert it to binary bit string converted_str = f'{ctrl_state:b}'.zfill(num_qubits)[::-1] if len(converted_str) != num_qubits: raise ValueError( f'Control state specified as {ctrl_state} ({converted_str}) is higher than maximum for {num_qubits} ' f'qubits: {2 ** num_qubits - 1}' ) return converted_str if isinstance(ctrl_state, str): # If the user inputs bit string, directly use it if len(ctrl_state) != num_qubits: raise ValueError( f'Control state {ctrl_state} has different length than the number of control qubits {num_qubits}' ) if not set(ctrl_state).issubset({'0', '1'}): raise ValueError(f'Control state {ctrl_state} has string other than 1 and 0') return ctrl_state raise TypeError('Input must be a string, an integer or an enum value of class State')
[ "def state_raw(self):\n return self._state_raw", "def normalize_state(self):\n self.state = 2 * (self.state - 0.5)", "def inverse(self):\n return CZGate(ctrl_state=self.ctrl_state) # self-inverse", "def get_state_s(self, lower = True):\r\n\r\n state_s = STATE_STRINGS[self._state - 1]\r\n state_s = state_s.lower() if lower else state_s\r\n return state_s", "def __getstate__(self, include_cats=True):\n state = {\n 'visible': self.visible,\n 'inputs': self.inputs.__getstate__()\n }\n if include_cats:\n state['cats'] = [cat.__getstate__() for cat in self.cats]\n return state", "def get_human_state(self):\n return ReferralState(self.state).label", "def getStateCode(self,state):\n return np.dot(state-self.minvalues,self.statecode)", "def _TransformPreservedState(instance):\n preserved_state_value = ''\n if ('preservedStateFromPolicy' in instance and\n instance['preservedStateFromPolicy']):\n preserved_state_value += 'POLICY,'\n if ('preservedStateFromConfig' in instance and\n instance['preservedStateFromConfig']):\n preserved_state_value += 'CONFIG'\n if preserved_state_value.endswith(','):\n preserved_state_value = preserved_state_value[:-1]\n return preserved_state_value", "def canonicalize(self):\n return _libsbml.ASTNode_canonicalize(self)", "def canonical(self):\n return self.composed_unit.canonical()", "def initial_state(self) -> str:\n return self._initial_state", "def __str__(self):\n\t\treturn str(self.states)", "def states(self) -> List[str]:\n return [self._initial_state] + \\\n sorted(self._states - {self._initial_state})", "def get_state(self, state):\n return state", "def inverse(self):\n return CCZGate(ctrl_state=self.ctrl_state) # self-inverse", "def state(self) -> str:\n try:\n state_bytes: bytes | None = self._redis.get(self._namespace(\"state\"))\n except RedisError:\n self.logger.error(\n \"RedisError: falling back to default circuit state\", exc_info=True\n )\n return self._fallback_circuit_state\n\n state = self._fallback_circuit_state\n if state_bytes is not None:\n state = state_bytes.decode(\"utf-8\")\n else:\n # state retrieved from redis was missing, so we re-initialize\n # the circuit breaker state on redis\n self._initialize_redis_state(self._fallback_circuit_state)\n\n return state", "def get_state_input_format(self):\n return self._s_input_format", "def canonical(gra):\n can_key_dct = canonical_keys(gra, backbone_only=False)\n return relabel(gra, can_key_dct)", "def correct_state(self, state, diff=True):\n return state" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
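A short sketch isolating the integer branch of canonical_ctrl_state above: the bit string is zero-filled to the qubit count and then reversed, so index 0 of the result corresponds to the first control qubit. The helper name is an assumption for illustration.

def int_to_ctrl_state(ctrl_state: int, num_qubits: int) -> str:
    # f'{n:b}' gives big-endian binary digits; zfill pads, [::-1] makes it little-endian.
    converted = f"{ctrl_state:b}".zfill(num_qubits)[::-1]
    if len(converted) != num_qubits:
        raise ValueError("control state does not fit in the given number of qubits")
    return converted

assert int_to_ctrl_state(1, 3) == "100"  # only the first control qubit must be |1>
assert int_to_ctrl_state(6, 3) == "011"
assert int_to_ctrl_state(0, 2) == "00"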
Return True if command cmd has a compute/uncompute tag.
def _has_compute_uncompute_tag(cmd): for tag in cmd.tags: if tag in [UncomputeTag(), ComputeTag()]: return True return False
[ "def _has_compute_uncompute_tag(self, cmd):\n for t in cmd.tags:\n if t in [UncomputeTag(), ComputeTag()]:\n return True\n return False", "def on_compute_node(self):\n return self.args.subparser_name == 'run'", "def has_extra(cmd):\n if cmd.instr == CQC_CMD_SEND:\n return True\n if cmd.instr == CQC_CMD_EPR:\n return True\n if cmd.instr == CQC_CMD_CNOT:\n return True\n if cmd.instr == CQC_CMD_CPHASE:\n return True\n if cmd.instr == CQC_CMD_ROT_X:\n return True\n if cmd.instr == CQC_CMD_ROT_Y:\n return True\n if cmd.instr == CQC_CMD_ROT_Z:\n return True\n if cmd.action:\n return True\n\n return False", "def is_attached_compute(self) -> bool:\n return pulumi.get(self, \"is_attached_compute\")", "def is_compute_name(self, args):\n return args['objinst']['nova_object.name'] == 'ComputeNode'", "def _isCmdStandalone(tgen):\n features = getattr(tgen, 'features', [])\n otherFeatures = set(features) - set(('runcmd', ))\n return not otherFeatures and getattr(tgen, 'rule', None) is None", "def isOp(self):\n return True", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def _verify_command(self, cmd):\n all_cmds = [\n MissionCommands.CALIBRATE,\n MissionCommands.LASERS,\n MissionCommands.LIGHTS,\n MissionCommands.LOAD,\n MissionCommands.PAN,\n MissionCommands.RUN,\n MissionCommands.SAMPLE,\n MissionCommands.TILT,\n MissionCommands.WAIT,\n MissionCommands.ZOOM]\n\n if cmd not in all_cmds: return False\n \n return True", "def is_instruction(self):\n return False", "def is_compute(self, nb_iterations):\n return nb_iterations % self.nb_iterations_between_compute == 0", "def has_command(name):\n return (len([ cmd for cmd in all_commands() if cmd.name == name]) > 0)", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def is_device_command(self):\n if len(self.params) == 0:\n return False\n\n first_param = self.params[0]\n # See: https://cgit.freedesktop.org/mesa/mesa/tree/src/intel/vulkan/anv_entrypoints_gen.py#n434\n return first_param.type.type in ('VkDevice', 'VkCommandBuffer', 'VkQueue')", "def has_negative_control(cmd):\n return get_control_count(cmd) > 0 and '0' in cmd.control_state", "def nfvi_compute_plugin_disabled():\n return (_compute_plugin is None)", "def has_openstack_compute(labels):\n if not labels:\n return False\n\n for label in labels:\n if label.label_key == helm_common.LABEL_COMPUTE_LABEL and label.label_value:\n return helm_common.LABEL_VALUE_ENABLED == label.label_value.lower()\n\n # We haven't found the openstack compute node key. Return False\n return False", "def fingertip_no_recompute(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_recompute\", False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
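A sketch of the tag check above using minimal stand-in classes. The entry appears to come from ProjectQ, where ComputeTag/UncomputeTag are real classes; the stand-ins below only mimic their equality behaviour and are invented for illustration.

class ComputeTag:
    def __eq__(self, other):
        return isinstance(other, ComputeTag)

class UncomputeTag:
    def __eq__(self, other):
        return isinstance(other, UncomputeTag)

class FakeCommand:
    def __init__(self, tags):
        self.tags = tags

def has_compute_uncompute_tag(cmd) -> bool:
    # A command is part of a compute/uncompute section if any of its tags matches.
    return any(tag in (UncomputeTag(), ComputeTag()) for tag in cmd.tags)

assert has_compute_uncompute_tag(FakeCommand([ComputeTag()]))
assert has_compute_uncompute_tag(FakeCommand(["noise", UncomputeTag()]))
assert not has_compute_uncompute_tag(FakeCommand(["noise"]))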
Receive a list of commands.
def receive(self, command_list): for cmd in command_list: self._handle_command(cmd)
[ "def receive(self, command_list):\n if self.save_commands:\n self.received_commands.extend(command_list)\n if not self.is_last_engine:\n self.send(command_list)", "def receive(self, command_list):\n for cmd in command_list:\n if not cmd.gate == FlushGate():\n self._add_cmd(cmd)\n\n # (try to) send on\n if not self.is_last_engine:\n if self._is_rotation(cmd):\n orig_cmd = cmd\n sequence = self._rotations.pop(0)\n for elem in sequence:\n self.send([elem])\n else:\n self.send([cmd])", "def send(self, command_list):\n self.next_engine.receive(command_list)", "def receive_command(client):\n global input_buffer\n input_buffer += client.recv(1024)\n commands = input_buffer.split(\"\\n\")\n input_buffer = commands[-1] # Yank off last element which is an incomplete command\n commands = commands[:-1] # Strip down to complete commands\n for command in commands:\n handle_command(client, command)", "def receive(self, command_list):\n for cmd in command_list:\n if isinstance(cmd.gate, FlushGate):\n while self._stored_commands:\n self._run()\n self.send([cmd])\n else:\n self._stored_commands.append(cmd)\n # Storage is full: Create new map and send some gates away:\n if len(self._stored_commands) >= self.storage:\n self._run()", "def get_my_commands(self) -> List[BotCommand]:\n return self._make_request()", "def list_commands():\n print(' ')\n print('Chat Client Commands')\n print('-----------------------')\n print(\"Whisper: Send a online user a private message: /w username (message)\")\n print('Current Users: Get a list of all current online users: /users')\n print('File Transfer (Upload): Transfer a file to the server: /file (file path)')\n print('File Transfer (Download): Prints out the contents of a file: /file_download (file name)')\n print('File List: Lists all files currently stored on a server: /file_list')\n print('Save Username: Save your current username to the server to auto login at this ip address: /save')\n print('Exit: Close the client: quit or exit')\n print('Commands: Lists all commands for the Client: /help')\n print('Feed: Redisplay all messages: /feed')\n print('-----------------------')\n print(' ')", "def receive(self, command_list):\n for cmd in command_list:\n if not cmd.gate == FlushGate():\n self.cache_cmd(cmd)\n if not self.is_last_engine:\n self.send(command_list)", "def list(ctx):\n socket_command = ctx.obj['socket_command']\n if ctx.invoked_subcommand is None:\n socket_command.list()", "def processCommandList():\n\n pass", "def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []", "def retrieve_commands(transport):\n # this is actually just receive_stream...\n while True:\n command = transport.receive()\n if command == Command.END:\n break\n assert isinstance(command, Command)\n command.transport = transport\n yield command\n transport.send(True)\n transport.close()", "def get_commands(self):\n return []", "async def send_commands(ans: Message):\n await ans.answer(all_commands)", 
"async def listcommands(self, ctx):\n\t\twith open('custom_commands.json', 'r') as f:\n\t\t\tcommands = json.load(f)\n\t\t\toutput = \", \".join([*commands])\n\t\t\tawait ctx.send(f\"```List of custom commands:\\n{output}```\")", "def _parse_cmds(self):\n lst = self.inbuffer.split('\\n')\n # leave trailing text (not terminated by \\n) in inbuffer\n self.inbuffer = lst.pop(-1)\n if lst:\n for cmd in lst:\n self.cmds.append(cmd)", "async def _c_list(self, ctx):\n command_list = self.database.get_guild_commands(ctx.guild.id)\n if len(command_list) == 0:\n await ctx.send(\"This server has no custom commands\")\n return\n out = \"```\\nServer Commands:\\n\"\n for command in command_list:\n out += f\"{command.name}: {command.text}\\n\"\n out += \"```\"\n await ctx.send(out)", "def __command_handler__(self, commands, handler):\n message_set = self.event.text.split(u' ')\n for command in commands:\n if command in message_set:\n handler(self.event, self.vk)\n break", "def sendCMDlist(self): \n\n if self.cmdlist:\n for cmd in self.cmdlist:\n try:\n tmp = self.FixLineEndingsForWindows(cmd)\n charssent= self.leicasocket.send(tmp)\n # we actually need to make sure\n # we sent the whole string by comparing charssent.\n if charssent != len(tmp):\n print \"Error sending commands\"\n raise CAMSendCharsError\n except:\n print \"error sending command\", cmd\n return False\n time.sleep(self.delay) # wait some time between sending each line\n self.emptyCMDlist()\n time.sleep(self.delay)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
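A tiny sketch of the dispatch pattern in the receive entry above, with an invented engine class and plain strings standing in for command objects.

class RecordingEngine:
    def __init__(self):
        self.handled = []

    def _handle_command(self, cmd) -> None:
        self.handled.append(cmd)

    def receive(self, command_list) -> None:
        # Same shape as the documented method: dispatch each command in order.
        for cmd in command_list:
            self._handle_command(cmd)

engine = RecordingEngine()
engine.receive(["H 0", "CX 0 1", "MEASURE 0"])
assert engine.handled == ["H 0", "CX 0 1", "MEASURE 0"]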
Return the number of control qubits of the command object cmd.
def get_control_count(cmd): return len(cmd.control_qubits)
[ "def num_ctrl_qubits(self):\n return self._num_ctrl_qubits", "def command_count(self):\n return len(self.commands)", "def count(self):\n return len(self.commands)", "def num_commands(self):\n return len(self.commands)", "def count(self):\n return len(self._commands)", "def nr_commands(self) -> int:\n return sum(module.nr_commands for module in self._modules)", "def getCommandQueueSize(self, REQUEST=None):\r\n size = len(self._commands)\r\n return size", "def getCommandQueueSize(self):\r\n return self._commands.qsize()", "def number_jobs(self):\n if self.array:\n return len(self.commands)\n else:\n return 1", "def get_count_of_controls(self, recurse: bool) -> int:\n return len(list(self.get_all_controls(recurse)))", "def get_count_of_controls_in_dict(self) -> int:\n return len(self._control_dict.keys())", "def call_count(self, command: COMMAND) -> int:\n if not isinstance(command, Command):\n command_instance = Command(command)\n return len(tuple(filter(lambda elem: elem == command_instance, self.calls)))", "def num_controls(self):\n return len(self._controls)", "def number_of_qubits(self):\n return self._number_of_qubits", "def count_qubits(operator):\n # Handle FermionOperator.\n if isinstance(operator, FermionOperator):\n num_qubits = 0\n for term in operator.terms:\n for ladder_operator in term:\n if ladder_operator[0] + 1 > num_qubits:\n num_qubits = ladder_operator[0] + 1\n return num_qubits\n\n # Handle QubitOperator.\n elif isinstance(operator, QubitOperator):\n num_qubits = 0\n for term in operator.terms:\n if term:\n if term[-1][0] + 1 > num_qubits:\n num_qubits = term[-1][0] + 1\n return num_qubits\n\n # Handle MajoranaOperator.\n if isinstance(operator, MajoranaOperator):\n num_qubits = 0\n for term in operator.terms:\n for majorana_index in term:\n if numpy.ceil((majorana_index + 1) / 2) > num_qubits:\n num_qubits = int(numpy.ceil((majorana_index + 1) / 2))\n return num_qubits\n\n # Handle DiagonalCoulombHamiltonian\n elif isinstance(operator, DiagonalCoulombHamiltonian):\n return operator.one_body.shape[0]\n\n # Handle PolynomialTensor\n elif isinstance(operator, PolynomialTensor):\n return operator.n_qubits\n\n # Raise for other classes.\n else:\n raise TypeError('Operator of invalid type.')", "def _command_length(self, cmd, opcode, program_counter):\n if opcode == data_spec_constants.DSG_END_SPEC:\n return -1\n\n cmd_len = (cmd >> 28) & 0xF\n\n if cmd_len == 0xF:\n cmd_len = self.spec_strm[program_counter+1]\n\n return cmd_len", "def num_qubits(self) -> int:\n return int(self._job['qubits'])", "def steps_stored (self):\n result = 0\n for command in self._commands[1:-1]:\n result+=len(command)\n return result", "def subsystem_count(self):\n return len(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
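A sketch of get_control_count on a stand-in command object; in the real framework control_qubits is populated by the engine, so the dataclass below is only for illustration.

from dataclasses import dataclass, field
from typing import List

@dataclass
class FakeCommand:
    control_qubits: List[int] = field(default_factory=list)

def get_control_count(cmd) -> int:
    return len(cmd.control_qubits)

assert get_control_count(FakeCommand([2, 5])) == 2
assert get_control_count(FakeCommand()) == 0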
Return whether a command has negatively controlled qubits.
def has_negative_control(cmd): return get_control_count(cmd) > 0 and '0' in cmd.control_state
[ "def is_use_qps(self) -> bool:\n if self.qps > 0 and self.second > 0:\n return True\n else:\n return False", "def has_non_cat_verb(self):\n return self.non_cat_verbs.all().count() > 0", "def is_qword(self):\n return ida_bytes.is_qword(self.flags)", "def has_commands(self) -> bool:\n return len(self.commands) > 0", "def is_non_exclusive(self, variable):\n non_exclusive = False\n for sub_effect in self._sub_effects:\n if sub_effect.get_variable() == variable:\n if not sub_effect.is_exclusive():\n non_exclusive = True\n elif len(sub_effect.get_value()) > 0 and not sub_effect.is_negated():\n return False\n return non_exclusive", "def command_disabled(self, cmd):\n return cmd in self._options['disabled']", "def is_non_terminal(self):\n return not self.state.game_over()", "def hasPower(self):\n return self.power != 0", "def qtilde(self) -> bool:\n return self._qtilde", "def is_question(self):\n return (\n any(cycle.question is not None for cycle in self.cycles)\n if self.cycles\n else None\n )", "def is_negative_operator(self):\n\n if not self.is_operator():\n return False\n\n return self.get_subtype() == _mexp_operators.OPERATOR_NEGATIVE_ID", "def __bool__(self):\n return not self.undefine", "def has_extra(cmd):\n if cmd.instr == CQC_CMD_SEND:\n return True\n if cmd.instr == CQC_CMD_EPR:\n return True\n if cmd.instr == CQC_CMD_CNOT:\n return True\n if cmd.instr == CQC_CMD_CPHASE:\n return True\n if cmd.instr == CQC_CMD_ROT_X:\n return True\n if cmd.instr == CQC_CMD_ROT_Y:\n return True\n if cmd.instr == CQC_CMD_ROT_Z:\n return True\n if cmd.action:\n return True\n\n return False", "def is_market(self):\n return(not self.is_pending)", "def is_disabled_command(*commands):\n\n if len(disabled_commands) == 0:\n return False\n\n for command in commands:\n if command in disabled_commands:\n return True\n\n return False", "def is_minus_operator(self):\n\n if not self.is_operator():\n return False\n\n return self.get_subtype() == _mexp_operators.OPERATOR_MINUS_ID", "def still_has_questions(self):\n return self.question_number < len(self.question_list) #returns boolean value", "def terminal_test(self, state):\n return state.utility != 0 or len(state.moves) == 0", "def nonNegative(self) -> bool:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
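A sketch combining the two helpers above: a command counts as negatively controlled when it has at least one control qubit and its control_state string contains a '0'. The command stand-in is invented for illustration.

from dataclasses import dataclass
from typing import List

@dataclass
class FakeCommand:
    control_qubits: List[int]
    control_state: str

def has_negative_control(cmd) -> bool:
    return len(cmd.control_qubits) > 0 and "0" in cmd.control_state

assert has_negative_control(FakeCommand([1, 2], "01"))      # one control on |0>
assert not has_negative_control(FakeCommand([1, 2], "11"))  # all controls on |1>
assert not has_negative_control(FakeCommand([], ""))        # no controls at all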
Creates a starboard. A starboard is a channel that collects messages which have received stars. To configure this starboard (such as max age and threshold, which default to 7 days and 5 stars), use starconfig's subcommands. See the help for details.
async def starboard(self, ctx): if self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?",(ctx.guild.id,)).fetchone(): return await ctx.say("star.already") async with ctx.typing(): await ctx.channel.edit( topic=TOPIC.format(mention=self.bot.user.mention, threshold=5, age=7), # yeah can't be localized nsfw=False, reason="Starboard preparation" ) await ctx.channel.set_permissions(ctx.guild.me, read_messages=True, send_messages=True, add_reactions=True, manage_messages=True, embed_links=True, attach_files=True, read_message_history=True, manage_roles=True, manage_channels=True ) await ctx.channel.set_permissions(ctx.guild.default_role, read_messages=True, send_messages=False, add_reactions=True, read_message_history=True ) tutorial = await ctx.say("star.done", STAR_EMOJI) try: await tutorial.pin() except discord.HTTPException: pass self.bot.db.execute("INSERT INTO starboards(guild_id, channel_id,threshold,age,enabled) VALUES (?, ?,5,7,1)", (ctx.guild.id, ctx.channel.id)) starboard_id = self.bot.db.execute("SELECT starboard_id FROM starboards WHERE channel_id = ?", (ctx.channel.id,)).fetchone()["starboard_id"] self.bot.db.execute("UPDATE guilds SET starboard_id = ? WHERE guild_id = ?", (starboard_id, ctx.guild.id))
[ "def create_star(ai_settings, screen, stars, star_x, star_y):\n star = Star(ai_settings, screen)\n star.rect.x = star_x\n star.rect.y = star_y\n stars.add(star)", "async def stars(self, ctx: commands.Context, stars: int):\n self.stars = stars\n await self._update_db()\n\n await ctx.send(\n f\"Done.Now this server needs `{stars}` :star: to appear on the starboard channel.\"\n )", "def create_star_background(ai_settings, screen, stars):\n star = Star(ai_settings, screen)\n star_y = 0\n max_number_stars_x = get_max_number_stars_x(ai_settings, star.rect.width)\n max_number_stars_y = get_max_number_stars_y(ai_settings, star.rect.height)\n #Generates a random number of rows to appear in the background\n number_rows = randint(5, max_number_stars_y)\n for row_number in range(number_rows):\n star_y += (ai_settings.screen_height) / number_rows\n #Generates a random number of stars to appear in each row\n number_stars_x = randint(4, max_number_stars_x)\n star_x = 0\n for star_number in range(number_stars_x):\n star_x += (ai_settings.screen_width) / number_stars_x\n create_star(ai_settings, screen, stars, star_x, star_y)", "def create_fleet(rk_settings, screen, rock, stars):\r\n\t# Create a star and find the number of stars in a row.\r\n\tstar = Star(rk_settings, screen)\r\n\tnumber_stars_x = get_number_stars_x(rk_settings, star.rect.width)\r\n\tnumber_rows = get_number_rows(rk_settings, rock.rect.height, \r\n\t\t\t\t\t\t\t\t\tstar.rect.height)\r\n\t\t\t\t\t\t\t\t\t\r\n\t# Create the first row of stars.\r\n\tfor row_number in range(number_rows):\r\n\t\tfor star_number in range(number_stars_x):\r\n\t\t\tcreate_star(rk_settings, screen, stars, star_number,\r\n\t\t\t\t\t\trow_number)", "def board_stars(self):\r\n return BoardStars(self)", "def board_star(self, board_star_id):\r\n return BoardStar(self, board_star_id)", "def _create_stars(self, stars_number, row_number):\n star = Star(self)\n stars_width, stars_height = star.rect.size\n star.x = stars_width + 2 * stars_width * stars_number\n star.rect.x = star.x\n star.rect.y = star.rect.height + 2 * star.rect.height * row_number\n self.stars.add(star)", "def showstars(self, verbose=True):\n try:\n import f2n\n except ImportError:\n print(\"Couldn't import f2n -- install it !\")\n return\n\n if verbose:\n print(\"Writing png ...\")\n myimage = f2n.fromfits(self.filepath, verbose=False)\n # myimage.rebin(int(myimage.xb/1000.0))\n myimage.setzscale(\"auto\", \"auto\")\n myimage.makepilimage(\"log\", negative=False)\n # myimage.upsample()\n myimage.drawstarlist(self.starlist, r=8, autocolour=\"flux\")\n myimage.writetitle(os.path.basename(self.filepath))\n # myimage.writeinfo([\"This is a demo\", \"of some possibilities\",\n # \"of f2n.py\"], colour=(255,100,0))\n if not os.path.isdir(\"alipy_visu\"):\n os.makedirs(\"alipy_visu\")\n myimage.tonet(os.path.join(\"alipy_visu\", self.name + \"_stars.png\"))", "async def list_starboard(self, ctx):\n entries = []\n guild_starboards = await self.starboards_collection.find_one({\"_id\": ctx.guild.id})\n\n if guild_starboards is None or guild_starboards.get(\"starboards\") is None:\n entries.append((\"No Starboards\", \"This guild has no starboards setup\"))\n return await StarboardPages(ctx, entries=entries).paginate()\n starboards = guild_starboards.get(\"starboards\")\n\n entries.append((\"Guild Starboard Status\", f\"Activated: `{guild_starboards.get('activated')}`\"))\n\n for starboard in starboards:\n entries.append((f\"Starboard #{starboard.get('_id')}\", f\"Channel: <#{starboard.get('channel')}>\\n\"\n 
f\"Emotes: {' '.join(starboard.get('emotes'))}\\n\"\n f\"Threshold: `{starboard.get('threshold')}`\\n\"\n f\"Created: `{starboard.get('created').strftime('%b %d %Y %H:%M:%S')}`\\n\"\n f\"Activated: `{starboard.get('activated')}`\"))\n\n pages = StarboardPages(ctx, entries=entries)\n await pages.paginate()", "def _create_galaxy(self):\n # Make a star.\n star = Star(self)\n stars_width, stars_height = star.rect.size\n # Fill galaxy across the screen\n available_space_x = self.settings.screen_width - (2 * stars_width)\n number_stars_x = available_space_x // (2 * stars_width)\n # Determine the number of rows of stars that fit on the screen.\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - (3 * stars_height) - ship_height)\n number_rows = available_space_y // (2 * stars_height)\n # Create the full galaxy of stars.\n for row_number in range(number_rows):\n # Create the first row of stars.\n for stars_number in range(number_stars_x):\n self._create_stars(stars_number, row_number)", "def __init__(self, screen, max):\n self._stars = [Star(screen) for x in range(max)]", "async def starred(self, ctx: Message):\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tawait self.send(\n\t\t f\"Starred Message: {starred}ㅤ|ㅤMessage Creator: @{starredauthor}\")", "def create_new_board():\n\n board = Board()\n board.print_board()", "def test_stars(self):\n self.stars.empty()\n for i in range(random.randint(8, 16)):\n self.make_star(\"random\")", "def create_board():\n db.session.add(\n Board(\n name='board_name',\n url=f\"https://trello.com/b/{default_board_id}\",\n trello_board_id=default_board_id\n )\n )", "async def starboard_current(self, ctx):\n starboard_settings = self.bot.cache.starboard_settings.get(str(ctx.guild.id))\n if not starboard_settings:\n raise exceptions.Warning(\"Nothing has been configured on this server yet!\")\n\n (\n is_enabled,\n board_channel_id,\n required_reaction_count,\n emoji_name,\n emoji_id,\n emoji_type,\n log_channel_id,\n ) = starboard_settings\n\n if emoji_type == \"custom\":\n emoji = self.bot.get_emoji(emoji_id)\n else:\n emoji = emoji_name\n\n blacklisted_channels = await self.bot.db.execute(\n \"\"\"\n SELECT channel_id FROM starboard_blacklist WHERE guild_id = %s\n \"\"\",\n ctx.guild.id,\n as_list=True,\n )\n\n content = discord.Embed(title=\":star: Current starboard settings\", color=int(\"ffac33\", 16))\n content.add_field(\n name=\"State\", value=\":white_check_mark: Enabled\" if is_enabled else \":x: Disabled\"\n )\n content.add_field(name=\"Emoji\", value=emoji)\n content.add_field(name=\"Reactions required\", value=required_reaction_count)\n content.add_field(\n name=\"Board channel\",\n value=f\"<#{board_channel_id}>\" if board_channel_id is not None else None,\n )\n content.add_field(\n name=\"Log channel\",\n value=f\"<#{log_channel_id}>\" if log_channel_id is not None else None,\n )\n content.add_field(\n name=\"Blacklisted channels\",\n value=\" \".join(f\"<#{cid}>\" for cid in blacklisted_channels)\n if blacklisted_channels\n else None,\n )\n\n await ctx.send(embed=content)", "def makeStars(self, logger=None):\n import galsim\n import piff\n\n stars = []\n if logger:\n if len(self.cats) == 1:\n logger.debug(\"Making star list from catalog %s\", self.cat_files[0])\n else:\n logger.debug(\"Making star list from %d catalogs\", len(self.cats))\n for i in range(len(self.images)):\n image = self.images[i]\n wt = self.weight[i]\n cat = self.cats[i]\n chipnum = self.chipnums[i]\n fname = self.cat_files[i]\n if logger:\n 
logger.info(\"Processing catalog %s with %d stars\",fname,len(cat))\n nstars_in_image = 0\n for k in range(len(cat)):\n x = cat[self.x_col][k]\n y = cat[self.y_col][k]\n icen = int(x+0.5)\n jcen = int(y+0.5)\n half_size = self.stamp_size // 2\n bounds = galsim.BoundsI(icen+half_size-self.stamp_size+1, icen+half_size,\n jcen+half_size-self.stamp_size+1, jcen+half_size)\n if not image.bounds.includes(bounds):\n bounds = bounds & image.bounds\n if not bounds.isDefined():\n if logger:\n logger.warning(\"Star at position %f,%f is off the edge of the image.\"%(x,y))\n logger.warning(\"Skipping this star.\")\n continue\n if logger:\n logger.info(\"Star at position %f,%f is near the edge of the image.\"%(x,y))\n logger.info(\"Using smaller than the full stamp size: %s\"%bounds)\n stamp = image[bounds]\n props = { 'chipnum' : chipnum }\n sky = None\n if self.sky_col is not None:\n sky = cat[self.sky_col][k]\n elif self.sky is not None:\n if type(self.sky) in [float, int]:\n sky = float(self.sky)\n elif str(self.sky) != self.sky:\n raise ValueError(\"Unable to parse input sky: %s\"%self.sky)\n else:\n file_name = self.image_files[0]\n fits = fitsio.FITS(file_name)\n hdu = 1 if file_name.endswith('.fz') else 0\n header = fits[hdu].read_header()\n sky = float(header[self.sky])\n if sky is not None:\n if logger:\n logger.debug(\"Subtracting off sky = %f\", sky)\n stamp = stamp - sky # Don't change the original!\n props['sky'] = sky\n wt_stamp = wt[bounds]\n # if a star is totally masked, then don't add it!\n if np.all(wt_stamp.array == 0):\n if logger:\n logger.warning(\"Star at position %f,%f is completely masked.\"%(x,y))\n logger.warning(\"Skipping this star.\")\n continue\n pos = galsim.PositionD(x,y)\n data = piff.StarData(stamp, pos, weight=wt_stamp, pointing=self.pointing,\n properties=props)\n stars.append(piff.Star(data, None))\n\n nstars_in_image += 1\n if self.nstars is not None and nstars_in_image >= self.nstars:\n if logger:\n logger.info(\"Reached limit of %d stars in image %d\",self.nstars,i)\n break\n if logger:\n logger.warning(\"Read a total of %d stars from %d image%s\",len(stars),len(self.images),\n \"s\" if len(self.images) > 1 else \"\")\n\n return stars", "async def channel(ctx, channel: typing.Optional[discord.TextChannel]):\n\n if channel != None:\n ctx.bot.config[ctx.guild.id][\"starboard_id\"] = channel.id\n await ctx.send(f\"Set channel to {channel.mention}!\")\n else:\n starboard_id = ctx.bot.config[ctx.guild.id]['starboard_id']\n starboard_mention = f\"<#{starboard_id}>\" if starboard_id != None else \"None\"\n await ctx.send(f\"Starboard channel: {starboard_mention}\")", "def move_and_draw_stars(screen):\n global stars\n for star in stars:\n star[1] += STAR_SPEED\n if star[1] >= screen.get_height():\n star[1] = 0\n star[0] = randrange(0,639)\n \n screen.set_at(star,(255,255,255))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
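A sketch of the database bookkeeping half of the starboard command above, with the Discord calls stripped out; the in-memory sqlite3 database and trimmed-down schema are invented for illustration.

import sqlite3

db = sqlite3.connect(":memory:")
db.row_factory = sqlite3.Row
db.execute("CREATE TABLE starboards (starboard_id INTEGER PRIMARY KEY, guild_id INTEGER,"
           " channel_id INTEGER, threshold INTEGER, age INTEGER, enabled INTEGER)")
db.execute("CREATE TABLE guilds (guild_id INTEGER PRIMARY KEY, starboard_id INTEGER)")

guild_id, channel_id = 1234, 5678
db.execute("INSERT INTO guilds (guild_id) VALUES (?)", (guild_id,))

# Register the channel as a starboard with the docstring defaults: 5 stars, 7 days, enabled.
db.execute("INSERT INTO starboards (guild_id, channel_id, threshold, age, enabled)"
           " VALUES (?, ?, 5, 7, 1)", (guild_id, channel_id))
starboard_id = db.execute("SELECT starboard_id FROM starboards WHERE channel_id = ?",
                          (channel_id,)).fetchone()["starboard_id"]
db.execute("UPDATE guilds SET starboard_id = ? WHERE guild_id = ?", (starboard_id, guild_id))

assert db.execute("SELECT starboard_id FROM guilds WHERE guild_id = ?",
                  (guild_id,)).fetchone()["starboard_id"] == starboard_id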
Enables a disabled starboard.
async def enable(self, ctx): self.bot.db.execute("UPDATE starboards SET enabled = 1 WHERE channel_id = ?", (ctx.channel.id,)) await ctx.say("star.enabled")
[ "def enable(self):\n self.enabled = True", "async def starboard_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"starboard_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Starboard is now **enabled**\")\n else:\n await util.send_success(ctx, \"Starboard is now **disabled**\")\n await self.bot.cache.cache_starboard_settings()", "def setEcnEnabled(enabled):\n val = 'true' if enabled else 'false'\n for src in xrange(1, NUM_RACKS + 1):\n for dst in xrange(1, NUM_RACKS + 1):\n clickWriteHandler('hybrid_switch/q{}{}/q'.format(src, dst),\n 'marking_enabled', val)\n time.sleep(0.1)", "def enabled(self, enable):\n #ic()\n pass", "def enable(self):\n self.switch.enable()\n self._enabled = True", "def set_enabled(self, enabled):\n self.widget.setEnabled(enabled)", "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "def _enable_elem(self, i):\n if self.is_disabled():\n return\n self.widgets[i]['symbol'].config(state='normal')\n self.widgets[i]['fill'].config(state='normal')\n self.widgets[i]['z'].config(state='normal')\n self.widgets[i]['w'].config(state='normal')\n self.widgets[i]['stoich'].config(state='normal')\n self.widgets[i]['disp'].config(state='normal')", "def enable_button(self, index):\n if index != 0:\n self.roll_dem_bones.setEnabled(True)", "def enable():\n ret = _LIB.led_matrix_click_enable()\n if ret < 0:\n raise Exception(\"led matrix click enable failed\")", "def setupenabled(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.vspins[self.numcols - 1].setReadOnly(False)\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.pcspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(False)\n self.vspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.pcspins[self.numcols - 1].setReadOnly(False)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setEnabled(True)\n self.nsspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(False)\n self.vspins[n].setEnabled(False)\n self.nsspins[self.numcols - 1].setReadOnly(False)\n else:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(True)\n self.nsspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(True)\n self.nsspins[n].setReadOnly(True)\n self.vspins[self.numcols - 1].setReadOnly(False)\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.pcspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(False)\n self.vspins[n].setEnabled(True)\n self.nsspins[n].setEnabled(True)\n self.vspins[n].setReadOnly(True)\n self.nsspins[n].setReadOnly(True)\n self.pcspins[self.numcols - 1].setReadOnly(False)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setEnabled(True)\n self.nsspins[n].setReadOnly(False)\n self.pcspins[n].setEnabled(True)\n self.vspins[n].setEnabled(True)\n self.pcspins[n].setReadOnly(True)\n self.vspins[n].setReadOnly(True)\n self.nsspins[self.numcols - 1].setReadOnly(False)\n\n for n in range(self.numcols, len(self.vspins)):\n self.vspins[n].setEnabled(False)\n self.nsspins[n].setEnabled(False)\n self.pcspins[n].setEnabled(False)", "async def star_dm(self, ctx, enable: bool = None):\n if enable is None:\n result = self.bot.db.execute(\"SELECT starboard_dm FROM users WHERE user_id = ?\", 
(ctx.author.id,)).fetchone()\n enabled = result[\"starboard_dm\"] if result else 0\n status_str = ctx._(f\"star.dm{['Disabled', 'Enabled'][enabled]}\")\n return await ctx.say(\"star.dmCurrent\", status_str)\n self.bot.db.execute(\"UPDATE users SET starboard_dm = ? WHERE user_id = ?\",(\n int(enable),\n ctx.author.id\n ))\n status_str = ctx._(f\"star.dm{['Disabled', 'Enabled'][enable]}\")\n return await ctx.say(\"star.dmCurrent\", status_str)", "def enable_moves(self):\r\n if self.board is not None:\r\n self.board.enable_moves()", "def rf_enableIcon(self):\n if self._item._node.nodeIsEnabled:\n self.pbEnable.setIcon(self.mainUi.graphZone.enabledIcon)\n else:\n self.pbEnable.setIcon(self.mainUi.graphZone.disabledIcon)\n self.lNodeName.setEnabled(self.isActive)", "def enable(self):\n self.colour_combo.config(state=tk.NORMAL)\n self.game_name_entry.config(state=tk.NORMAL)\n self.num_tickets_entry.config(state=tk.NORMAL)", "def _disable(self):\n self.enabled = False", "def set_enabled(self, *, enabled: bool = True) -> None:\n lib.wlr_scene_node_set_enabled(self._ptr, enabled)", "def set_disabled(self, disabled):\n if disabled:\n self.__button_new_game.configure(state=DISABLED, text=\"Playing...\")\n else:\n self.__button_new_game.configure(state=ACTIVE, text=\"New Game\")", "def reenable(*args):\n self.controls.disabled = False\n self.disabled = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets "max age" for the starboard messages. If a message is older than the specified days, the message is ignored. Note that existing messages are not affected. Defaults to 7 (one week).
async def maxage(self, ctx, age: int):
    if age > 0:
        self.bot.db.execute("UPDATE starboards SET age = ? WHERE channel_id = ?", (age, ctx.channel.id))
        await ctx.say("star.age", age)
        await self.set_topic(ctx.channel.id)
    else:
        await ctx.say("star.unsigned", age)
[ "def max_age(self, max_age):\n self._max_age = max_age", "def max_age(self, max_age):\n\n self._max_age = max_age", "def set_maxdays(name, maxdays):\n pre_info = info(name)\n if maxdays == pre_info[\"max\"]:\n return True\n cmd = \"passwd -x {} {}\".format(maxdays, name)\n __salt__[\"cmd.run\"](cmd, python_shell=False)\n post_info = info(name)\n if post_info[\"max\"] != pre_info[\"max\"]:\n return post_info[\"max\"] == maxdays", "def get_max_age(self) -> int:\n return self.__max_age", "def max_age(self):\n api_option = timedelta(seconds=int(self.options.batcher_check_interval))\n computation_time = timedelta(seconds=30)\n return api_option + computation_time", "def max_age(self):\n return self._max_age", "def max_message_count(self, max_message_count):\n\n self._max_message_count = max_message_count", "async def days(self, ctx: commands.Context, days: int = None):\n if days < 1:\n days = 0\n await self.config.guild(ctx.guild).daythreshold.set(days)\n await ctx.maybe_send_embed(\n _(\"Accounts older than {days} will bypass auto moderation\").format(days=days)\n )", "def max_jobs_age(self):\n return int(self.__get_option('max_jobs_age'))", "def message_retention_in_days(self) -> Optional[int]:\n return pulumi.get(self, \"message_retention_in_days\")", "def message_retention_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"message_retention_in_days\")", "def max_backups(self, max_backups):\n self._max_backups = max_backups", "def max_age_rule(self) -> Optional[pulumi.Input['ApplicationMaxAgeRuleArgs']]:\n return pulumi.get(self, \"max_age_rule\")", "def max_users(self, max_users):\n\n self._max_users = max_users", "def show_max_age_label(self):\n self.draw_max_age = True", "def _send_maximum(self):\n content = {'maximum': self.maximum.isoformat()}\n self.send_action('set_maximum', content)", "def is_older_than_days(time_message, max_days):\n time_now = time.time()\n if time_message > time_now:\n vprint(\"warning: message has date in the future\")\n return False\n secs_old_max = (max_days * 24 * 60 * 60)\n days_old = (time_now - time_message) / 24 / 60 / 60\n vprint(\"message is %.2f days old\" % days_old)\n if ((time_message + secs_old_max) < time_now):\n return True\n return False", "def set_max(self, max_value):\n self._max = max_value", "def set_margin_max_leverage(self, **params):\n return self._request_margin_api('post', 'margin/max-leverage', signed=True, data=params)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets "threshold" for the starboard messages. The specified number of stars are required to put the message on the starboard. Note that existing messages are not affected. Defaults to 5.
async def threshold(self, ctx, threshold: int):
    if threshold > 0:
        self.bot.db.execute("UPDATE starboards SET threshold = ? WHERE channel_id = ?", (threshold, ctx.channel.id))
        await ctx.say("star.threshold", threshold)
        await self.set_topic(ctx.channel.id)
    else:
        await ctx.say("star.unsigned", threshold)
[ "async def set_reaction_threshold(ctx: MessageContext, threshold: int) -> None:\n ctx.guild.reaction_threshold = threshold\n await ctx.session.commit()\n await ctx.channel.send(f\"Set the reaction threshold to {threshold}\")", "def setThreshold(self, newThreshold, frame):\n self.threshold = newThreshold\n text = tk.Label(frame, text = '\\nThreshold was set to %i\\n' %newThreshold)\n text.pack()\n self.save()", "async def _msgvote_threshold(self, ctx, threshold: int):\n\n if threshold < 0:\n await self.bot.say(\"Invalid threshold. Must be a positive \"\n \"integer, or 0 to disable.\")\n elif threshold == 0:\n self.settings[\"threshold\"] = threshold\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Message deletion disabled.\")\n else:\n self.settings[\"threshold\"] = threshold\n dataIO.save_json(self.settings_path, self.settings)\n await self.bot.say(\"Messages will be deleted if [downvotes - \"\n \"upvotes] reaches {}.\".format(threshold))", "def update_threshold(self):\n sch = self.mpq.get()\n self.mpq.put(sch)\n new_threshold = -1*sch[0]\n if new_threshold < self.threshold:\n print \"New Threshold: {}\".format(new_threshold)\n self.threshold = new_threshold", "def update_threshold(self, threshold):\n self.mf.set_threshold(self.cm.estimate)", "def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass", "def setThreshold(self, thresh):\n\t\tif thresh <= 0:\n\t\t\traise ValueError(\"Threshold value must be > 0\")\n\t\t\n\t\tself.threshold = thresh", "def threshold(self, threshold):\n\n self._threshold = threshold", "def set_threshold(self, threshold):\n self._threshold = check_value_positive('threshold', threshold)", "def setThreshold(self, threshold):\n self.t = threshold", "def set_match_threshold(self, match_threshold) -> None:\n self.match_threshold = match_threshold", "def OnBitmapButton5StarButton(self, event):\r\n\t\tself._configtmp[\"userrating\"] = 5\r\n\t\tself.SetStars()", "def above_threshold(self, board, score):\n if score[1] == \"M\": # This a mate score !\n return self.threshold.above_threshold(normalize(board, 128)) # A mate value is 128\n else:\n return self.threshold.above_threshold(normalize(board, float(score[1:])))", "def thresh(self, thresh: int):\n\n self._thresh = thresh", "def setContactMinThresholdValues(self,contactMinThreshold):\n contactMinThreshold.setLabel(\"Surface Detect threshold\")\n contactMinThreshold.setDefaultValue(60)\n contactMinThreshold.setMin(0)\n contactMinThreshold.setMax(65535)\n contactMinThreshold.setDescription(\"The minimum contact size measurement for persistent contact tracking. Contact size is the sum of neighbouring keys' touch deltas forming the touch contact.\")", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)", "def setThreshold1(self, trsh):\n\t\tself.edgeThreshold1 = trsh\n\t\tself.edgeThreshold2 = trsh * 2.5", "def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)", "def blank_threshold(self, value):\n self._internal.set_blank_threshold(float(value))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows a starboard item. The argument can be either the original message ID or the starboard item ID.
async def star_show(self, ctx, item: Star):
    board = self.bot.db.execute("SELECT * FROM starboards WHERE guild_id = ?", (ctx.guild.id,)).fetchone()
    try:
        board_msg = await self.bot.get_channel(board["channel_id"]).fetch_message(item["item_id"])
    except discord.NotFound:
        return await self.destroy_item(board["channel_id"], item["item_id"])
    else:
        await ctx.send(board_msg.content, embed=board_msg.embeds[0])
[ "def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()", "def show_item(self):\n code = input(\"\\nEnter the items code: \")\n index = StockTracker.get_index(self, code)\n try:\n print(\"\\n{:<6}{:^64}{:>6}\".format(\"Code\", \"Description\", \"Amount\"))\n print(\"----------------------------------------------------------------------------\")\n print(self.item_list[index].item_info())\n except TypeError:\n print(\"\\nERROR: THERE IS NO ITEM WITH THAT ITEM-CODE!\")", "def view_item(request, id_item):\n item = get_object_or_404(Item, id=id_item)\n return render(request, 'inventory/item.html', {item: item})", "def show_item(self, show_item):\n\n self._show_item = show_item", "def show(self, args=[]):\r\n\r\n self.set_inventory(args[0])\r\n super().show()", "def showItem(category_item_id):\n return render_template('item.html', item=db.findItem(id=category_item_id))", "def show_item(category_name, item_style):\n category = session.query(\n Category).filter_by(name=category_name).one_or_none()\n item = session.query(Item).filter_by(\n category=category, style=item_style).one_or_none()\n return render_template('item_detail.html', category=category, item=item)", "def show_item(request, itemID):\n\ttry:\n\t\titem = get_object_or_404(Item, itemID = itemID)\n\n\t# Handle when the given itemID is not UUID\n\texcept ValidationError:\n\t\traise Http404\n\n\tcontext_dict = {}\n\tsearch_form = Search_bar()\n\tcontext_dict['search_bar'] = search_form\n\tcontext_dict['item'] = item\n\tcontext_dict['seller_rating'] = range(int(round(item.seller.rating, 1)))\n\n\trelated = Item.objects.filter(category = item.category).exclude(itemID = item.itemID)\n\t\n\tif len(related) > 3:\n\t\tcontext_dict['trendingItems'] = related[0:3]\n\telse:\n\t\tcontext_dict['trendingItems'] = related\n\n\tresponse = render(request, 'tailored/product.html', context_dict)\n\t\n\tif first_visit(request, response, str(item.itemID)):\n\t\titem.dailyVisits += 1\n\t\titem.save()\n\t\t\n\tcontext_dict['itemID'] = item.itemID\n\n\tif item.seller.user != request.user:\n\t\treturn response\n\n\tsold_form = SoldItemForm()\n\n\tif request.method == 'POST':\n\t\tsold_form = SoldItemForm(request.POST, request.FILES)\n\n\t\tif sold_form.is_valid():\n\t\t\tuser_query = User.objects.filter(username = sold_form.cleaned_data['sold_to'])\n\t\t\tif not user_query:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\n\t\t\telif user_query[0] != request.user:\n\t\t\t\ttry:\n\t\t\t\t\titem.sold_to = UserProfile.objects.get(user = user_query[0])\n\t\t\t\t\titem.save()\n\t\t\t\texcept UserProfile.DoesNotExist:\n\t\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\telse:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError(\"You can't sell an item to yourself.\"))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\titem.save()\n\t\t\treturn HttpResponseRedirect(reverse('tailored:index'))\n\n\tcontext_dict['form'] = sold_form\n\treturn render(request, 'tailored/product.html', context_dict)", "def showTile(self, event):\n clicked = event.widget\n if 
clicked.isInPlay():\n self.changeSmile(1)\n returned = clicked.show()\n if returned == 1 and clicked.isZero():\n returned += self.cascadeShow(clicked)\n self.checkEnd(returned)", "async def info(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'I need embed_links permission to answer in this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n canonical = await Controller.canonical_title(item)\n if canonical:\n item = canonical\n page_url = Controller.link_from_title(item)\n try:\n wikitext = await Controller.get_wikitext(item)\n except ValueError as e:\n # Means the page is not found\n await msg.channel.send(**{\n 'content': f'No page found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n contents = []\n template_names = []\n for template in WTP.parse(wikitext).templates:\n template_names.append(template.name.strip())\n if self.is_infobox(template.name):\n args = template.arguments\n title = item\n entries = {}\n for arg in args:\n k, v = arg.string.strip(' |\\n').split('=')\n k = k.strip()\n v = v.strip()\n if k.lower() in ['title1', 'name']:\n # Set this as the item name\n title = v\n elif k.lower() in ['image1', 'image'] or not v:\n # Skip images and empty values\n continue\n else:\n entries[k] = v.replace('\\n\\n', '\\n').replace('\\n', '\\n\\t')\n entries = [f'{k} = {v}' for k, v in entries.items()]\n entries = '• '+'\\n• '.join(entries)\n content = f'## **{title}** ##\\nSource: {page_url}\\n{template.name.strip()}\\n{entries}'\n contents.append(content)\n logging.info(f'Templates at {item}: '+', '.join(template_names))\n if not contents:\n await msg.channel.send(**{\n 'content': f'No infobox found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n await msg.channel.send(**{\n 'content': '\\n===\\n'.join(contents),\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })", "def toggle_item_starred(self):\n self.get_selected()\n if not self.selected_item:\n return\n was_starred = self.selected_item.starred\n message = 'Starred flag is now ON'\n if was_starred:\n message = 'Starred flag is now OFF'\n self.trigger_item_starred(not was_starred)\n self.controller.display_message(message)", "def item_popup(request, item_id):\r\n try:\r\n item_id = int(item_id)\r\n ni = NewsItem.objects.get(id=item_id)\r\n except:\r\n return HttpResponse(status=404)\r\n\r\n schema = ni.schema\r\n template_list = ['richmaps/newsitem_popup_%s.html' % schema.slug,\r\n 'richmaps/newsitem_popup.html',\r\n ]\r\n current_template = select_template(template_list)\r\n html = current_template.render(template.Context({'newsitem': ni, 'schema': schema, }))\r\n response = HttpResponse(html)\r\n patch_response_headers(response, cache_timeout=3600)\r\n return response", "async def item(self, ctx: commands.Context, *args):\n item_name = InputParser(args).concat()\n dct = self.__dbItem.get_item(str(ctx.guild.id), item_name)\n if dct == None:\n dct = self.__dbHeader.entity_not_found(str(ctx.guild.id), 'item_not_found')\n foooter = self.__dbHeader.get_footer(str(ctx.guild.id), 'general_footer')\n embed = CommonEmbed(dct, foooter, ctx)\n await ctx.send(embed=embed.notFound())\n\n else:\n headers = self.__dbHeader.get_headers(str(ctx.guild.id), ctx.invoked_with)\n thumbnail_file = 
discord.File(self._item_img_route+dct['icon'], filename=dct['icon'])\n embed = ItemEmbed(dct, headers)\n embed_main, maps_embeds = embed.main()\n\n if len(maps_embeds) == 0:\n await ctx.send(embed = embed_main, file=thumbnail_file)\n else:\n message = await ctx.send(embed = embed_main, file=thumbnail_file)\n valid_reactions = []\n for k in range(0,len(maps_embeds)):\n await message.add_reaction(number_to_emoji(k+1))\n valid_reactions.append(number_to_emoji(k+1))\n \n def check(reaction, user):\n return user == ctx.author\n\n reaction = None\n reaction_used = []\n while True:\n if str(reaction) in valid_reactions and str(reaction) not in reaction_used:\n i = emoji_to_number(str(reaction))\n reaction_used.append(str(reaction))\n map_file = discord.File(self._map_img_route+maps_embeds[i-1]['map-img'], \n filename=maps_embeds[i-1]['map-img'])\n await ctx.send(embed=maps_embeds[i-1]['embed'], file=map_file)\n try:\n reaction, user = await self._bot.wait_for(event='reaction_add', timeout = 60.0, check = check)\n await message.remove_reaction(reaction, user)\n except:\n break\n\n await message.clear_reactions()", "def show_item_by_effect(plugin, item_id, effect_plugin, effect_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page({\n 'effect': effect_plugin + ':' + effect_id,\n 'id': plugin + ':' + item_id\n })", "def item_route(item_id):\n target_item = get_item(item_id)\n\n if target_item is None:\n abort(404)\n\n return render_template('item.html', page={\n 'title': 'Item ' + target_item.name,\n 'has_sidebar': True\n }, user=user_info(), content={\n 'categories': get_categories(),\n 'item': target_item\n })", "def item_starred(self, item):\n self.update_item(item)", "def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)", "def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)", "async def starred(self, ctx: Message):\n\t\tglobal starred\n\t\tglobal starredauthor\n\t\tawait self.send(\n\t\t f\"Starred Message: {starred}ㅤ|ㅤMessage Creator: @{starredauthor}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables/disables receiving a DM when your message is starred. If the parameter is not given, this returns the current status. Can be used anywhere, including DM.
async def star_dm(self, ctx, enable: bool = None):
    if enable is None:
        result = self.bot.db.execute("SELECT starboard_dm FROM users WHERE user_id = ?", (ctx.author.id,)).fetchone()
        enabled = result["starboard_dm"] if result else 0
        status_str = ctx._(f"star.dm{['Disabled', 'Enabled'][enabled]}")
        return await ctx.say("star.dmCurrent", status_str)
    self.bot.db.execute("UPDATE users SET starboard_dm = ? WHERE user_id = ?", (
        int(enable),
        ctx.author.id
    ))
    status_str = ctx._(f"star.dm{['Disabled', 'Enabled'][enable]}")
    return await ctx.say("star.dmCurrent", status_str)
[ "async def moderation(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"ban_kick_mute\")\n\n if new_value:\n message = \":white_check_mark: You will now receive DMs when you get muted, kicked or banned by me.\"\n else:\n message = \":white_check_mark: You will no longer receive DMs when you get muted, kicked or banned.\"\n\n await ctx.send(message)", "async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")", "def dev_status(self):\n self.debug_stream(\"In dev_status()\")\n argout = \"\"\n #----- PROTECTED REGION ID(SynchroMotorDS.Status) ENABLED START -----#\n self.argout = \"Status is ON\"\n #----- PROTECTED REGION END -----#\t//\tSynchroMotorDS.Status\n self.set_status(self.argout)\n self.__status = PyTango.Device_4Impl.dev_status(self)\n return self.__status", "async def channeltoggle(self, ctx):\n redis = self.bot.db.get_storage(ctx.message.server)\n\n current_setting = await redis.get(\"channel:{0}:disabled\".format(ctx.message.channel.id))\n\n #True == disabled\n #False == enabled\n\n if current_setting == \"True\":\n #Disable commands\n await redis.delete(\"channel:{0}:disabled\".format(ctx.message.channel.id))\n\n await self.bot.say(\"Enabled commands for this channel.\")\n else:\n await redis.set(\"channel:{0}:disabled\".format(ctx.message.channel.id), \"True\")\n\n await self.bot.say(\"Disabled commands for this channel.\")", "def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "async def greeter_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"greeter_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Greeter is now **enabled**\")\n else:\n await util.send_success(ctx, \"Greeter is now **disabled**\")", "async def async_turn_on(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()", "def request_relays_status() -> str:\n return \"SMR\"", "def do(self):\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = \"\"\n property_val = this_server.read_property(\"SdpMasterFQDN\")[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(\n const.CMD_Disable, None, self.disable_cmd_ended_cb\n )\n self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)\n this_server.write_attr(\n 
\"activityMessage\", const.STR_DISABLE_CMS_SUCCESS, False\n )\n\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f\"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}\"\n tango.Except.re_throw_exception(\n dev_failed,\n const.ERR_INVOKING_CMD,\n log_msg,\n \"SdpMasterLeafNode.DisableCommand()\",\n tango.ErrSeverity.ERR,\n )", "def protect_status(self):\n return self._protect_status", "def getLEDStatus(self):\n self.querier.setMsgHandler(LEDStatusMsgHandler(\"led level\"))\n self.querier.querysd(0x19, 0x01)", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def sms_disabled(self):\n return self._sms_disabled", "def lock_status(self) -> Dict[str, str]:\n self.__logger.debug('Eva.lock_status called')\n return self.__http_client.lock_status()", "def enable(self):\n return self._enable", "def enable_sync(self) -> None:\n self.is_enabled = True", "def do_monitor_mss_check_enable(client, args):\n item = client.msscheck.perform_action(args.id, 'enable')\n utils.print_dict(item)", "async def toggle(self, ctx):\r\n server = ctx.guild\r\n if self._logs[str(server.id)][\"toggle\"] == True:\r\n self._logs[str(server.id)][\"toggle\"] = False\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(\"Modlogs are now disabled.\")\r\n return\r\n if self._logs[str(server.id)][\"toggle\"] == False:\r\n self._logs[str(server.id)][\"toggle\"] = True\r\n dataIO.save_json(self._logs_file, self._logs)\r\n await ctx.send(f\"Modlogs are now enabled {self.bot.get_emoji(470063310386233344)}\")\r\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the position that a value of weight "weight" would fall in the weight_list, where weight_list is sorted from smallest to largest. Newer inputs win in ties.
def find_pos(weight, weight_list):
    bool_list = [weight >= x for x in weight_list]
    pos = bool_list.count(True) - 1
    return pos
[ "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]", "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][3]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][3]\n return list[itemId]", "def get_rank(weight):\n weight = min(1.0, max(weight, 0.0))\n ranks = [x for x in ALL_RANKS if weight >= x.min_weight]\n ranks.sort(key=lambda x: x.min_weight)\n return ranks.pop()", "def weighted_choice(list_, weights=None):\n size = len(list_)\n if weights is not None:\n assert size == len(weights)\n\n if weights is None:\n probs = np.array([1 / float(size) for i in range(size)])\n else:\n probs = np.array(weights) / sum(weights) # just in case\n\n rand = np.random.random()\n\n _sum = 0\n for i in range(size):\n if _sum <= rand < _sum + probs[i]:\n choice = i\n break\n else:\n _sum += probs[i]\n\n return list_[choice]", "def extract(vertices_list, weight_list):\n m = 0\n minimum = weight_list[0]\n for i in range(len(weight_list)):\n if weight_list[i] < minimum:\n minimum = weight_list[i]\n m = i\n return m, vertices_list[m]", "def _dominantWeights(self, weight):\n keyStore = tuple(weight)\n if keyStore in self._dominantWeightsStore:\n return self._dominantWeightsStore[keyStore]\n # convert the weight\n weight = np.array([weight], dtype=int)\n listw = [weight]\n counter = 1\n while counter <= len(listw):\n aux = [listw[counter - 1] - self.proots[i] for i in range(len(self.proots))]\n aux = [el for el in aux if np.all(el == abs(el))]\n listw = listw + aux\n tp = []\n listw = [self._nptokey(el) for el in listw]\n for el in listw:\n if not (el) in tp:\n tp.append(el)\n listw = [np.array([el], dtype=int) for el in tp]\n counter += 1\n\n # need to sort listw\n def sortList(a, b):\n tp1 = list(np.dot(-(a - b), self.ncminv)[0])\n return self._cmp(tp1, [0] * a.shape[1])\n\n listw.sort(key=cmp_to_key(sortList))\n\n functionaux = {self._nptokey(listw[0]): 1}\n result = [[listw[0], 1]]\n for j in range(2, len(listw) + 1):\n for i in range(1, len(self.proots) + 1):\n k = 1\n aux1 = self._indic(functionaux,\n tuple(self._dominantConjugate(k * self.proots[i - 1] + listw[j - 1])[0]))\n key = self._nptokey(listw[j - 1])\n while aux1 != 0:\n aux2 = k * self.proots[i - 1] + listw[j - 1]\n if key in functionaux:\n functionaux[key] += 2 * aux1 * self._simpleProduct(aux2, [self.proots[i - 1]], self._cmID)\n else:\n functionaux[key] = 2 * aux1 * self._simpleProduct(aux2, [self.proots[i - 1]], self._cmID)\n k += 1\n # update aux1 value\n kkey = tuple(self._dominantConjugate(k * self.proots[i - 1] + listw[j - 1])[0])\n if kkey in functionaux:\n aux1 = functionaux[kkey]\n else:\n aux1 = 0\n functionaux[key] /= self._simpleProduct(listw[0] + listw[j - 1] + self._deltaTimes2,\n listw[0] - listw[j - 1], self._cmID)\n result.append([listw[j - 1], self._indic(functionaux, self._nptokey(listw[j - 1]))])\n self._dominantWeightsStore[keyStore] = result\n return result", "def weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i", "def mwis(weights: List[int]) -> [int, List[int]]:\n n = len(weights)\n solutions = [0] * n\n solutions[0] = weights[0]\n solutions[1] = max(solutions[0], weights[1])\n for i in range(2, n):\n solutions[i] = max(solutions[i - 1], solutions[i - 2] + weights[i])\n max_weight = 
solutions[-1]\n vertices = []\n i = n - 1\n while i >= 2:\n if solutions[i - 1] >= solutions[i - 2] + weights[i]:\n i -= 1\n else:\n vertices.append(i + 1)\n i -= 2\n if i < 2:\n vertices.append(i + 1)\n return max_weight, sorted(vertices)", "def select(weights):\n r = random.random() * sum(weights)\n s = 0.0\n for k,w in enumerate(weights):\n s += w\n if r <= s:\n return k\n raise RuntimeError(\"select WTF from %s\" % weights)", "def best_value(i: int, w: int) -> int:\n if i == 0:\n return 0\n\n value, weight = items[i - 1]\n\n if weight > w:\n return best_value(i - 1, w)\n\n return max(best_value(i - 1, w), best_value(i - 1, w - weight) + value)", "def sample_from(self, weights):\n total = sum(weights)\n rnd = total * random.random() # uniform between 0 and total\n for i, w in enumerate(weights):\n rnd -= w # return the smallest i such that\n if rnd <= 0:\n return i # weights[0] + ... + weights[i] >= rnd", "def weight_check_neighbor(inp_node, connection_list, time_label_list, weights):\n temp_label_hold = []\n sum_weights = {}\n for neighbors in connection_list[inp_node]:\n if time_label_list[neighbors] == -1: # only count \"real\" labels\n pass\n else:\n try:\n sum_weights[time_label_list[neighbors]] += weights[neighbors]\n except:\n sum_weights[time_label_list[neighbors]] = weights[neighbors]\n try:\n max_value = max(sum_weights.values())\n max_value = list({key for key, value in sum_weights.items()\n if value == max_value})\n except:\n return -1\n print(\"these are in top_labels \", max_value)\n # a set of the most popular labels\n return random.choice(max_value) # this is the label", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def dp_make_weight(egg_weights, target_weight, memo = {}):\r\n \"\"\"\r\n # Not a dynamic programming method, but a greedy algorithm method\r\n if target_weight <= 0:\r\n return sum(memo.values())\r\n\r\n chosen_weight_list = [k for k in egg_weights if k <= target_weight]\r\n\r\n if chosen_weight_list:\r\n chosen_weight = max(chosen_weight_list)\r\n if chosen_weight in memo:\r\n memo[chosen_weight] += 1\r\n else:\r\n memo[chosen_weight] = 1\r\n\r\n left = target_weight - chosen_weight\r\n\r\n return dp_make_weight(egg_weights, left, memo)\r\n \"\"\"\r\n # memo is used to store the minimum numbers for target_weight\r\n min_nums = target_weight\r\n if target_weight <= 0:\r\n return 1\r\n # Look up memo to check out if there is already a best solution\r\n elif target_weight in memo:\r\n return memo[target_weight]\r\n else:\r\n for weight in [k for k in egg_weights if k <= target_weight]:\r\n # Divide the problem into several subproblems\r\n num_eggs = 1 + dp_make_weight(egg_weights, target_weight-weight, memo)\r\n if num_eggs < min_nums:\r\n min_nums = num_eggs\r\n memo[target_weight] = min_nums\r\n\r\n return min_nums", "def randpck(elements, rand_function):\n\n # First, we compute the total weight (for example 10)\n total_weight = 0\n for e in elements:\n assert e[1] >= 0\n total_weight += e[1]\n\n # Then we generate a random number multiplied by the total weight (e.g. 
0.4218 * 10 = 42.18)\n random_weight = rand_function() * total_weight\n\n # Lastly, we run through the list to find which one matches with the generated weight\n current_weight = 0\n for e in elements:\n current_weight += e[1]\n if random_weight < current_weight:\n return e[0]\n\n return None", "def weighted_random_item(items, weight):\n if not items:\n return None\n\n weight_sum = sum(weight(item) for item in items)\n if weight_sum <= 0:\n return None\n\n choice = random.random() * weight_sum\n for item in items:\n choice -= weight(item)\n if choice < 0:\n return item, weight(item) / weight_sum\n return items[-1], -1 # floating-point rounding error", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def weighted_choice(choices, weight):\n\t# requirements = random\n\tweights = []\n\t# get weight values for each of the choices\n\tfor choice in choices:\n\t\tchoice_weight = weight(choice)\n\t\tif not (isinstance(choice_weight, int) and choice_weight > 0):\n\t\t\traise TypeError('weight results must be positive integers')\n\t\tweights.append(choice_weight)\n\n\t# make a selection within the acceptable range\n\tselection = random.randint(0, sum(weights) - 1)\n\n\t# find and return the corresponding choice\n\tfor idx, choice in enumerate(choices):\n\t\tif selection < sum(weights[:idx + 1]):\n\t\t\treturn choice\n\traise RuntimeError('no selection could be made')", "def get_indices_of_item_weights(weights, length, limit):\n # Your code here\n\n #where does weights[x] + weights[y] == limit?\n\n # make a lookup\n\n weight_dict = {}\n\n if length == 1:\n return None\n else:\n for i in range(len(weights)):\n \n item = weights[i]\n #if item in weight_dict:\n #print(i)\n #print(item)\n #weight_dict[item].insert(0,i)\n #else:\n weight_dict[item] = i\n # print(weight_dict)\n\n for item in weights:\n \n if (limit - item) in weight_dict:\n #print(weight_dict[limit-item])\n #print('FOUND ONE')\n return_value = [weight_dict[limit-item], weights.index(item)]\n return return_value\n\n\n\n # start over\n # else:\n # for item in weights:\n # for second_item in weights:\n # print(item, second_item)\n # if item + second_item == limit:\n # if second_item >= item:\n # print('wtf')\n # print([weights.index(second_item), weights.index(item)])\n # return [weights.index(second_item), weights.index(item)]\n # else:\n # print(item, second_item)\n # #print([weights.index(item), weights.index(second_item)])\n # return [weights.index(item), weights.index(second_item)]\n # # else:\n # # return None\n\n \n \n #return None\n\n\n # if x + y == limit:\n # return x, y\n # else:\n # return None\n\n\n # two_items = (weights[x], weights[y])\n # return two_items" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adjusts the top10 list, kept in ascending order, by inserting a new item in the appropriate place and shifting the entries below it down one slot.
def adjust_top10(value, pos, weight, top10, top10weights):
    # Create new top10 to be adjusted
    newtop10 = top10
    newtop10weights = top10weights
    # Keep higher ones, shift lower ones left one
    newtop10[0:pos] = top10[1:pos + 1]
    newtop10weights[0:pos] = top10weights[1:pos + 1]
    # add new ones
    newtop10[pos] = value
    newtop10weights[pos] = weight
    return (newtop10, newtop10weights)
[ "def top10(self, top10: List[Word]):\n\n self._top10 = top10", "def sort_list_10(li):\n new_list = []\n\n li.sort()\n\n for item in li:\n new_list.append(item * 10)\n \n return new_list", "def move_top ( self ):\n list, index = self.get_info()\n self.value = [ list[index] ] + list[:index] + list[index+1:]", "def getTop10(group, element):\n \n group = add(group, element)\n \n # each element in a group is a [airlineid, depdelay]\n group.sort(key=itemgetter(1))\n \n # remove all elements after the 10 index. When reducing, two lists could be evaluated\n if len(group) > 10:\n group = group[:10]\n \n return group", "def display_top_ten_also_likes(self,list_of_docs):\n #Create readtime header for table\n self.e = Entry(self.tab, width=45, fg='blue')\n self.e.grid(column=1, row=4)\n self.e.insert(END, \"Top ten also liked documents\") \n \n total_rows = 10\n total_cols = 1\n \n #Create table entries with top\n for i in range(total_rows): \n for j in range(total_cols): \n \n self.e = Entry(self.tab, width=45, fg='blue')\n self.e.grid(column=j+1, row=i+5)\n try: \n self.e.insert(END, list_of_docs[i]) \n except IndexError:\n self.e.insert(END,'')", "def reorder(items, before):", "def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)", "def set_ranking_place(list_of_objects: list) -> list:\n place = 1\n for obj in list_of_objects:\n obj.ranking_place = place\n place += 1\n return list_of_objects", "def top_n(items, n):\n\n for i in range(n):\n for j in range(len(items)-1-i):\n\n if items[j] > items[j+1]:\n items[j], items[j+1] = items[j+1], items[j]\n \n top_n = items[-n:]\n\n return top_n[::-1]", "def topn(self, topn):\n self._topn = topn", "def sort_list(self):\n\n previous_item_index = False\n previous_item_value = None\n\n for item_index, item_value in enumerate(self.list):\n\n if previous_item_index != False and item_value < previous_item_value:\n self.display_list_item_shuffling_message(item_value, previous_item_value)\n self.list.insert(previous_item_index, self.list.pop(item_index))\n self.list_items_shuffled_count += 1 \n\n previous_item_index = item_index\n previous_item_value = item_value", "def reorder(self):\n\n self.resultlist = [r for r in self.resultlist if r.col is not None and r.row is not None]\n self.resultlist.sort(key=lambda x: x.adus + int(x.istarget) * 1e50, reverse=True)", "def pull_to_top(self, food):\n # find the index of food\n index = self.food_list.index(food)\n # loop and pull all the other foods down towards the zero end\n for i in range(index, len(self.food_list) - 1):\n self.food_list[i] = self.food_list[i + 1]\n # put this food to the right-side/top of the food list\n self.food_list[len(self.food_list) - 1] = food", "def add_top_pairs(dry_run=False, pair_now=False):\n top = ratings.top_n(15)\n new_pairs = []\n for idx, t in enumerate(top[:10]):\n new_pairs += [[t[0], o[0]] for o in top[idx+1:idx+5]]\n\n if dry_run:\n print(new_pairs)\n return\n\n if pair_now:\n maybe_enqueue(new_pairs)\n else:\n _append_pairs(new_pairs)", "def update_top_dict(d, key, value, n = 10):\n if value > min_value(d):\n d[key] = value\n if len(d) > n:\n d.pop(min_key(d), None)\n return d", "def getTop10Popup(self):\n if len(self.ranklist) >= 10:\n top10 = self.ranklist[0:10]\n else:\n top10 = 
self.ranklist # Potential referencing issue\n if popuplib.exists(\"ultirank_top10\"):\n popuplib.delete(\"ultirank_top10\")\n rankPopup = popuplib.create(\"ultirank_top10\")\n rankPopup.addline(\"[UltiRank %s] Top 10\" % info.version)\n rankPopup.addline(\"=================================\")\n for i in range(len(top10)):\n rankPopup.addline(\"%d. %s: %d points (KDR: %s)\" % (i+1, top10[i][3], top10[i][0], self.getKDR(top10[i][1], top10[i][2])))\n rankPopup.addline(\" \")\n rankPopup.addline(\"0. Close\")\n return rankPopup", "def top_ten(self):\n \n query = ListItem.all();\n query.order('-count')\n if self.class_name() is not 'ListItem':\n query.filter('class=%s' % self.class_name())\n \n \n return self.all().fetch(10)", "def find_top_unique(self, list_of_entries, top_n):\n\n\n if len(list_of_entries) < top_n:\n self.top_n_too_large_label = Label(self.main_frame,\n fg=\"red\",\n text=\"Max N = %s\" % len(list_of_entries))\n if type(list_of_entries[0]) is AudioEntry:\n self.top_n_too_large_label.grid(row=13, column=4)\n if type(list_of_entries[0]) is VideoEntry:\n self.top_n_too_large_label.grid(row=13, column=5)\n raise Exception(\"N is larger than the total number of words\")\n\n if self.top_n_too_large_label is not None:\n self.top_n_too_large_label.grid_remove()\n\n sorted_by_count = sorted(list_of_entries, key=self.get_count, reverse=True)\n #self.top_n_too_large_label = Label(self.main_frame, fg=\"red\", text=\"Max N = %s\" % len(list_of_entries))\n unique_entries = [[] for i in range(top_n)]\n\n curr_rank = 0\n prev_count = None\n curr_count = None\n\n for entry in sorted_by_count:\n\n if entry.word in self.general_parser.words:\n entry.in_general = True\n else:\n entry.in_general = False\n\n curr_count = entry.count\n\n if prev_count is None:\n if entry.word not in self.specific_month_words:\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = 1\n continue\n\n\n if curr_rank >= top_n:\n break\n\n\n if entry.word not in self.specific_month_words:\n # increment rank if current entry has a different count\n # (the last set of entries having this count are all filled\n # into the unique_entries[])\n if curr_count != prev_count:\n curr_rank = curr_rank + 1\n if curr_rank >= top_n:\n break\n unique_entries[curr_rank].append(entry)\n prev_count = entry.count\n entry.rank = curr_rank + 1\n continue\n unique_entries[curr_rank].append(entry)\n entry.rank = curr_rank + 1\n\n\n\n return unique_entries[0:curr_rank + 1]", "def top_n(items, n):\n \n return sorted(items, key = lambda x: x[1], reverse = True)[:n]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the correction factor for ambient air temperature and relative humidity, based on the linearization of the temperature dependency curve below and above 20 degrees Celsius, assuming a linear dependency on humidity.
def get_correction_factor(self, temperature, humidity):
    if temperature < 20:
        return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity - 33.) * self.CORD
    return self.CORE * temperature + self.CORF * humidity + self.CORG
[ "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def linear_scaling_correction(str_path_calibration_forcing, str_path_input_hist_forcing, str_path_input_forcing):\n \n calibration_forcing = nc.Dataset(str_path_calibration_forcing)\n input_hist_forcing = nc.Dataset(str_path_input_hist_forcing)\n input_forcing = nc.Dataset(str_path_input_forcing)\n\n calibration_forcing = generate_forcing_from_NETCDF(calibration_forcing)\n calibration_forcing.index = pd.to_datetime(calibration_forcing.index)\n calibration_forcing = calibration_forcing.loc[calibration_forcing.index.year <= calibration_forcing.index.year[-1]]\n\n input_hist_forcing = generate_forcing_from_NETCDF(input_hist_forcing)\n input_hist_forcing.index = pd.to_datetime(input_hist_forcing.index)\n input_hist_forcing = input_hist_forcing.loc[(input_hist_forcing.index.year >= calibration_forcing.index.year[0]) & (input_hist_forcing.index.year <= calibration_forcing.index.year[-1])]\n\n daily_mean_prec_input = input_hist_forcing.groupby(input_hist_forcing.index.strftime(\"%m\")).mean() #.prec.transform('mean')\n daily_mean_prec_calibration = calibration_forcing.groupby(calibration_forcing.index.strftime(\"%m\")).mean() #.prec.transform('mean')\n \n prec_correction_factor = daily_mean_prec_calibration.prec / daily_mean_prec_input.prec\n prec_correction_factor.index = np.arange(1,13)\n \n temp_correction_factor = daily_mean_prec_calibration.temp - daily_mean_prec_input.temp\n temp_correction_factor.index = np.arange(1,13)\n\n \n input_forcing = generate_forcing_from_NETCDF(input_forcing)\n input_forcing = nc.Dataset(str_path_input_forcing)\n input_forcing = generate_forcing_from_NETCDF(input_forcing)\n input_forcing.index = pd.to_datetime(input_forcing.index)\n\n for m in range(len(prec_correction_factor)):\n input_forcing.loc[input_forcing.index.month == m+1, 'prec'] = input_forcing.loc[input_forcing.index.month == m+1].prec * prec_correction_factor.values[m]\n input_forcing.loc[input_forcing.index.month == m+1, 'temp'] = input_forcing.loc[input_forcing.index.month == m+1].temp + temp_correction_factor.values[m]\n\n\n \n return input_forcing", "def fRwTemperatureCorrected(Rw_Temp1, Temp1, Temp2):\n\treturn Rw_Temp1 * ((Temp1 + 21.5) / (Temp2 + 21.5))", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def antenna_temperature(flux,effective_area):\n return flux*Jy*effective_area/Physics.k/2", "def calcTemperature(resistance, a = 9.6564e-4, b = 2.1069e-4, c = 8.5826e-8):\n temp = a + b * ln(resistance) + (c*(ln(resistance)**3))\n temp = 1/temp\n temp -= 273.15 #convert from kelvin to celcius\n if args.fahrenheit: #convert to Fahrenheit\n temp = temp*1.8+32\n return temp", "def temperatur_correction(self):\n pass", "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_corrected_temp(sense):\n temp = sense.get_temperature()\n cpu_temp = get_cpu_temp()\n # Uses magic factor to correct measured temp\n # Source: 
github.com/initialstate/wunderground-sensehat/wiki/Part-3.-Sense-HAT-Temperature-Correction\n return temp - ((cpu_temp - temp)/5.466)", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def thermal_expansivity_of_air(self) -> float:\n\n return 1 / self.ambient_temperature", "def temp_dependence(temperature, T0, theta_0, width):\n\ttheta = -width*(temperature-T0)*(temperature-T0) + theta_0\n\tif theta < 0:\n\t\ttheta = 0\n\n\treturn theta", "def dielectric_constant_water(temperature=298.15):\n tabulated_data = np.array([[263.15, 92.10],\n [268.15, 89.96],\n [273.15, 87.90],\n [278.15, 85.90],\n [283.15, 83.96],\n [288.15, 82.06],\n [293.15, 80.20],\n [298.15, 78.38],\n [303.15, 76.60],\n [308.15, 74.86],\n [313.15, 73.17],\n [318.15, 71.50],\n [323.15, 69.88],\n [328.15, 68.29],\n [333.15, 66.74],\n [338.15, 65.22],\n [343.15, 63.73],\n [348.15, 62.28],\n [353.15, 60.87],\n [358.15, 59.48],\n [363.15, 58.13],\n [368.15, 56.81],\n [373.15, 55.51]])\n polynomal_degree = 5\n fitdata = np.polyfit(tabulated_data[:, 0], tabulated_data[:, 1],\n polynomal_degree)\n fitfunction = np.poly1d(fitdata)\n return fitfunction(temperature)", "def calculate_calibration_coefficients(self, event):\n\n ped_data = event.mon.tel[self.tel_id].pedestal\n ff_data = event.mon.tel[self.tel_id].flatfield\n status_data = event.mon.tel[self.tel_id].pixel_status\n calib_data = event.mon.tel[self.tel_id].calibration\n\n # find unusable pixel from pedestal and flat-field data\n unusable_pixels = np.logical_or(status_data.pedestal_failing_pixels,\n status_data.flatfield_failing_pixels)\n \n signal = ff_data.charge_median - ped_data.charge_median\n\n # Extract calibration coefficients with F-factor method\n # Assume fixed excess noise factor must be known from elsewhere\n numerator = ff_data.charge_std ** 2 - ped_data.charge_std ** 2\n denominator = self.squared_excess_noise_factor * signal\n gain = np.divide(numerator, denominator, out=np.zeros_like(numerator), where=denominator != 0)\n\n # correct for the quadratic term (which is zero if not given)\n systematic_correction = self.quadratic_term**2 * signal / self.squared_excess_noise_factor\n gain -= systematic_correction\n\n # calculate photon-electrons\n numerator = signal\n denominator = gain\n\n n_pe = np.divide(numerator, denominator, out=np.zeros_like(numerator), where=denominator != 0)\n\n # fill WaveformCalibrationContainer\n calib_data.time = ff_data.sample_time\n calib_data.time_min = ff_data.sample_time_min\n calib_data.time_max = ff_data.sample_time_max\n calib_data.n_pe = n_pe\n\n # find signal median of good pixels over the camera (FF factor=<npe>/npe)\n masked_npe = np.ma.array(n_pe, mask=unusable_pixels)\n npe_median = np.ma.median(masked_npe, axis=1)\n\n # flat-fielded calibration coefficients\n numerator = npe_median[:,np.newaxis]\n denominator = signal\n calib_data.dc_to_pe = np.divide(numerator, denominator, out=np.zeros_like(denominator), where=denominator != 0)\n\n # flat-field time corrections\n calib_data.time_correction = -ff_data.relative_time_median\n\n calib_data.pedestal_per_sample = ped_data.charge_median / self.pedestal.extractor.window_width.tel[self.tel_id]\n\n # define unusables on number of estimated pe\n npe_deviation = calib_data.n_pe - npe_median[:,np.newaxis]\n\n # cut on the 
base of pe statistical uncertainty (adding a 7% spread due to different detection QE among PMs) \n tot_std = np.sqrt(npe_median + (self.relative_qe_dispersion * npe_median)**2)\n\n npe_outliers = (\n np.logical_or(npe_deviation < self.npe_median_cut_outliers[0] * tot_std[:,np.newaxis],\n npe_deviation > self.npe_median_cut_outliers[1] * tot_std[:,np.newaxis]))\n\n # calibration unusable pixels are an OR of all masks\n calib_data.unusable_pixels = np.logical_or(unusable_pixels, npe_outliers).filled(True)\n \n # give to the unusable pixels the median camera value for the dc_to_pe and pedestal\n # (these are the starting data for the Cat-B calibration) \n dc_to_pe_masked = np.ma.array(calib_data.dc_to_pe, mask=calib_data.unusable_pixels)\n median_dc_to_pe = np.ma.median(dc_to_pe_masked, axis=1)[:,np.newaxis]\n fill_array = np.ones((constants.N_GAINS, constants.N_PIXELS)) * median_dc_to_pe\n calib_data.dc_to_pe = np.ma.filled(dc_to_pe_masked, fill_array)\n \n pedestal_per_sample_masked = np.ma.array(calib_data.pedestal_per_sample, mask=calib_data.unusable_pixels)\n median_pedestal_per_sample = np.ma.median(pedestal_per_sample_masked, axis=1)[:,np.newaxis]\n fill_array = np.ones((constants.N_GAINS, constants.N_PIXELS)) * median_pedestal_per_sample\n calib_data.pedestal_per_sample = np.ma.filled(pedestal_per_sample_masked, fill_array)\n \n # set to zero time corrections of unusable pixels\n time_correction_masked = np.ma.array(calib_data.time_correction, mask=calib_data.unusable_pixels)\n calib_data.time_correction = time_correction_masked.filled(0)\n\n # in the case FF intensity is not sufficiently high, better to scale low gain calibration from high gain results\n if self.use_scaled_low_gain:\n calib_data.unusable_pixels[constants.LOW_GAIN] = calib_data.unusable_pixels[constants.HIGH_GAIN]\n calib_data.dc_to_pe[constants.LOW_GAIN] = calib_data.dc_to_pe[constants.HIGH_GAIN] * self.hg_lg_ratio\n calib_data.time_correction[constants.LOW_GAIN] = calib_data.time_correction[constants.HIGH_GAIN]\n \n # eliminate inf values id any (still necessary?)\n calib_data.dc_to_pe[np.isinf(calib_data.dc_to_pe)] = 0", "def compute_dewpoint(temperature, humidity):\n\n temp_C = (temperature - 32) * 5 / 9 # Convert temperature from deg F to deg C\n rh = humidity / 100\n\n b = 18.678\n c = 257.14 # deg C\n\n gamma = math.log(rh) + (b * temp_C) / (c + temp_C)\n tdp = c * gamma / (b -gamma)\n\n tdp_F = 9 / 5 * tdp + 32 # Convert temperature from deg C to deg F\n return tdp_F;", "def surfRelHumidity( d2, t2):\n#\n# As of June 2020, specific humidity is not available for retrieval from \n# the ERA Interim data server (see note by Coquart Laure, Jan 28 2020, \n# available at:\n# https://confluence.ecmwf.int/pages/viewpage.action?pageId=171411214).\n#\n# To compute relative humidity we use the formula:\n#\n# RH = 100 * es(Td)/es(T), where Td and T are the 2 m dew point\n# and air temperatures, and es is the saturation water vapour pressure \n# (documentation of the IFS for CY41R2, Ch. 7, sec. 7.2.1b, eq. 7.5 ):\n#\n# es(T) = a1 * exp[a3*(T-T0)/(T-a4)],\n# with a1=611.21 Pa, a3=17.502, a4=32.19 K and T0=273.16 K. 
\n\n# Set parameters\n\n T0 = 273.16\n\n a1 = 611.21\n a3 = 17.502\n a4 = 32.19\n\n# Saturation water vapour pressure\n\n esat_Td = a1 * np.exp(a3 * (d2 - T0)/(d2 - a4))\n\n esat_T = a1 * np.exp(a3 * (t2 - T0)/(t2 - a4))\n\n# Relative humidity\n\n relh = esat_Td/esat_T\n\n return relh", "def correct_temp(temp_tab):\n output = subprocess.check_output(\"cat /sys/class/thermal/thermal_zone0/temp\", shell=True)\n cpu_temp = int(output)/1000\n temp_calibrated = temp_tab - ((cpu_temp - temp_tab)/1.5)\n return temp_calibrated", "def calculate_dew_point(self, temperature, humidity):\n if temperature > 0:\n tn = 243.12\n m = 17.62\n else:\n tn = 272.62\n m = 22.46\n return tn * (math.log(humidity / 100.0) + (m * temperature) / (tn + temperature)) / \\\n (m - math.log(humidity / 100.0) - m * temperature / (tn + temperature))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the resistance of the sensor in kOhms, or -1 if no value could be read from the pin.
def get_resistance(self):
    adc = ADC(self.pin)
    value = adc.read()
    if value == 0:
        return -1
    return (4095./value - 1.) * self.RLOAD
[ "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_distance():\n \n GPIO.output(pinTrigger, False) # pulse off\n time.sleep(0.2)\n\n GPIO.output(pinTrigger,True) # send 10us pulse\n time.sleep(10e-6)\n GPIO.output(pinTrigger,False)\n\n StartTime = time.time() # start timer\n\n while GPIO.input(pinEcho)==0: # keep timer reset\n StartTime = time.time()\n\n while GPIO.input(pinEcho) == 1:\n StopTime = time.time()\n\n if StopTime - StartTime >= 0.04:\n print(\"Too close!!!\")\n StopTime = StartTime\n break\n\n ElapsedTime = StopTime - StartTime\n\n distance = (ElapsedTime * 34326)/2\n\n print('{:2.1f} cm'.format(distance))\n #dots = int(distance/2)\n #print('.'*dots)\n\n return(distance)", "def humidity(self):\n self.pi.i2c_write_device(self._h, [0xF5]) # RH no hold\n time.sleep(0.1)\n c, rh = self.pi.i2c_read_device(self._h, 3) # msb, lsb, checksum\n if self._crc(rh) == 0:\n rh_val = (rh[0]<<8) + rh[1]\n RH = ((125.0 * rh_val)/65536.0) - 6.0\n else:\n RH = 999\n return RH", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def distance_cm(self):\n GPIO.output(self.GPIO_TRIGGER, True)\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n start = time.time()\n stop = time.time()\n\n while GPIO.input(self.GPIO_ECHO) == 0:\n start = time.time()\n\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop = time.time()\n\n # Convert to inches:\n return ((stop - start) * 34300)/2", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def sensor_value(self):\n\n sensor_value = self.sensor.read_adc(0, gain=ADC_GAIN)\n\n if sensor_value < int(config['dye_sensor']['min_sensor_value']):\n logging.debug('dye sensor under threshold: {0}'.format(\n sensor_value))\n return 0\n else:\n return sensor_value", "def min_humidity(self):\n return 0", "def distance_inch(self):\n GPIO.output(self.GPIO_TRIGGER, True)\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n start = time.time()\n stop = time.time()\n\n while GPIO.input(self.GPIO_ECHO) == 0:\n start = time.time()\n\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop = time.time()\n\n # Convert to inches:\n return (((stop - start) * 34300)/2)*0.393701", "def input_resistance(self):\n return None", "def get_sensor_value_from_pin(pin):\n try:\n response=mybolt.analogRead(pin)\n data=json.loads(response)\n if data[\"success\"]!=1:\n print(\"Request Unsuccessful\")\n print(\"Response is ->\", data)\n return -999\n sensor_value=int(data[\"value\"])\n return sensor_value\n except Exception as e:\n print(\"Something went wrong when returning sensor value\")\n print(e)\n return -999", "def get_resistance(self):\n if self._integration_time > 1 :\n \n channel = self.get_channel()\n time.sleep(0.1)\n\n for i in range(10):\n # query the event status register, with 000 the bridge should be settled enough\n ESR = int(self.ask(\"*ESR?\"))\n if int(ESR):\n time.sleep(1)\n else:\n break\n # the ESR register was not cleared ... return nothing sensful\n if i == 9: return 0\n\n # Corresponding command: <ohm value>[term] = RDGR? <channel>[term]\n logging.debug('Get resistance of channel {:d}.'.format(channel))\n resistance = float(self.ask('RDGR? {:d}'.format(channel)))\n else:\n # 'fast mode'\n logging.debug('Get resistance of channel {:d}.'.format(self._channel))\n resistance = float(self.ask('RDGR? 
{:d}'.format(self._channel)))\n\n return resistance", "def read_odometer(self):\n print(\"el total de kilometros recorridos es de: \"+str(self.odometer))", "def get_sonar_distance(unit: int = SONAR_CM, timeout_us: int = 30000) -> float:\n # trigger\n pin8.write_digital(0)\n sleep_us(5)\n pin8.write_digital(1)\n sleep_us(10)\n pin8.write_digital(0)\n\n # catch echo\n echo_time = time_pulse_us(pin12, 1, timeout_us)\n if echo_time < 0:\n return echo_time\n if unit == SONAR_CM:\n return (echo_time / 2) / 29.1\n elif unit == SONAR_IN:\n return float((echo_time / 2) / 74)\n return -1.0", "def get_consumption(self):\n if not (self.sensors and 'cmpfreq' in self.sensors\n and self.controls and 'shum' in self.controls):\n return float('NaN')\n shum = self.controls['shum']\n shum = int(shum) if shum.isnumeric() else 0\n cmpfreq = int(self.sensors['cmpfreq'])\n return cmpfreq * 20 + (200 if shum > 0 else 0)", "def moisture(self):\n if self.moisture_sensor is None:\n return None\n else:\n return self.moisture_sensor.percent", "def calc_cond_resistance(self):\n return log(self.outer_dia / self.inner_dia) / (2 * pi * self.conductivity * self.length)", "async def get_outdoor_temperature(self) -> Optional[float]:\n return mapper.map_outdoor_temp(await self._call_api(self.urls.system_status))", "def get_on_resistance(self):\n is_nchannel = True\n stack = 4\n is_cell = False\n return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the resistance of the sensor corrected for temperature/humidity
def get_corrected_resistance(self, temperature, humidity):
    return self.get_resistance() / self.get_correction_factor(temperature, humidity)
[ "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def get_res(self,channel):\r\n\r\n global a\r\n try:\r\n raw_output=self._device.query('RDGR?'+str(channel))\r\n out=float(raw_output[0:len(raw_output)-1])\r\n return out\r\n except:\r\n print \"Failed to read Resistance\"\r\n return NaN", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def humidity(self):\n self.pi.i2c_write_device(self._h, [0xF5]) # RH no hold\n time.sleep(0.1)\n c, rh = self.pi.i2c_read_device(self._h, 3) # msb, lsb, checksum\n if self._crc(rh) == 0:\n rh_val = (rh[0]<<8) + rh[1]\n RH = ((125.0 * rh_val)/65536.0) - 6.0\n else:\n RH = 999\n return RH", "def relative_humidity(self):\n _, raw_humidity = self._data()\n return 100 * (raw_humidity / 65523)", "def relative_humidity(self):\n self._perform_measurement()\n self._humidity = (self._buf[1] << 12) | (self._buf[2] << 4) | (self._buf[3] >> 4)\n self._humidity = (self._humidity * 100) / 0x100000\n return self._humidity", "def read_gas_resistance(self):\n #Declare global variables\n global calAmbTemp\n\n self._force_read(True)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n gasResADC = (self._read_register_1ubyte(self.BME680_GAS_R_MSB) << 2) | (self._read_register_1ubyte(self.BME680_GAS_R_LSB) >> 6)\n gasRange = self._read_register_1ubyte(self.BME680_GAS_R_LSB) & 0x0F\n\n calAmbTemp = self._compensate_temperature(tempADC)\n val = self._calculate_gas_resistance(gasResADC, gasRange)\n\n return float(val)", "async def _get_temperature_internal(self) -> float:\n\n self._device._update_temperature()\n return self._device._temperature[\"CCD\"]", "def get_resistance(self):\n if self._integration_time > 1 :\n \n channel = self.get_channel()\n time.sleep(0.1)\n\n for i in range(10):\n # query the event status register, with 000 the bridge should be settled enough\n ESR = int(self.ask(\"*ESR?\"))\n if int(ESR):\n time.sleep(1)\n else:\n break\n # the ESR register was not cleared ... return nothing sensful\n if i == 9: return 0\n\n # Corresponding command: <ohm value>[term] = RDGR? <channel>[term]\n logging.debug('Get resistance of channel {:d}.'.format(channel))\n resistance = float(self.ask('RDGR? {:d}'.format(channel)))\n else:\n # 'fast mode'\n logging.debug('Get resistance of channel {:d}.'.format(self._channel))\n resistance = float(self.ask('RDGR? 
{:d}'.format(self._channel)))\n\n return resistance", "def get_corrected_temp(sense):\n temp = sense.get_temperature()\n cpu_temp = get_cpu_temp()\n # Uses magic factor to correct measured temp\n # Source: github.com/initialstate/wunderground-sensehat/wiki/Part-3.-Sense-HAT-Temperature-Correction\n return temp - ((cpu_temp - temp)/5.466)", "def get_temperature(self):\n temp_cpu = get_cpu_temp()\n # Calculates the real temperature compensating CPU heating.\n temp_avg = (self.get_temperature_from_humidity() + self.get_temperature_from_humidity()) / 2\n calibrated = temp_avg - ((temp_cpu - temp_avg) / 1.2)\n calibrated = get_smooth(calibrated)\n return calibrated", "def get_ambient_temperature(self) -> float:\n return self.query(WeatherCommand.GET_SENSOR_TEMP) / 100.", "def _calculate_heater_resistance(self, target_temp):\n if target_temp > 400: #Maximum temperature\n target_temp = 400\n\n var1 = (calGH1 / 16.0) + 49.0\n var2 = ((calGH2 / 32768.0) * 0.0005) + 0.00235\n var3 = calGH3 / 1024.0\n var4 = var1 * (1.0 + (var2 * target_temp))\n var5 = var4 + (var3 * self.calAmbTemp)\n res_heat = 3.4 * ((var5 * (4 / (4 + calResHeatRange)) * (1 / (1 + (calResHeatVal * 0.002)))) - 25)\n\n return int(res_heat)", "def io_temp_sensor():\n\ttemperature_bus.read_byte_data(SENSOR_ADDR, SENSOR_INPUT)\n\t# discard the first one as it is likely a wrong one (that's how the ADC works)\n\ttime.sleep(0.1)\n\tval = temperature_bus.read_byte_data(SENSOR_ADDR, SENSOR_INPUT)\n\t# we get val==0 for 0 degrees Celsius or less, as the sensor gives out 0 Volts or a negative number which the ADC ignores\n\t# max value from the ADC is 255 and it happens whenever the sensor outputs 3,3V, which is equivalent to 330 degress (never happens)\n\tcelsius = float(val)*100.*3.3/255.\n\t# the smallest change in the read-out is therefore 100*3.3/255 = 1.29 degrees, and this limits our accuracy to >1 degree\n\t# therefore not to fool anyone we cut the digits after the decimal point:\n\treturn int(celsius)", "def humidity(self):\n\n \"\"\"\n C1 = -4.0 # for 12 Bit\n C2 = 0.0405 # for 12 Bit\n C3 = -0.0000028 # for 12 Bit\n T1 = 0.01 # for 14 Bit @ 5V\n T2 = 0.00008 # for 14 Bit @ 5V\n \"\"\"\n C1 = -2.0468 # for 12 Bit\n C2 = 0.0367 # for 12 Bit\n C3 = -0.0000015955 # for 12 Bit\n T1 = 0.01 # for 14 Bit @ 5V\n T2 = 0.00008 # for 14 Bit @ 5V\n\n self._sht_command(0x05)\n self._wait_sht()\n val = self._get_data_sht()\n self._skip_crc()\n linear_humidity = C1 + C2 * val + C3 * val * val\n return((self.temperature() - 25.0 ) * (T1 + T2 * val) + linear_humidity)", "def humidity(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"humidity\"))\r\n return round(self._humidity * 100)", "def relative_humidity(self):\n humidity_string = self._current_observation['relative_humidity']\n return float(humidity_string.strip('%'))", "def humidity_sensor():\n return _get_sensor(\"Humidity\", \"humidity\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the resistance RZero of the sensor (in kOhms) for calibration purposes
def get_rzero(self): return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))
[ "def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def r(self) -> float:\n return self._ohms.real", "def get_resistance(self):\n if self._integration_time > 1 :\n \n channel = self.get_channel()\n time.sleep(0.1)\n\n for i in range(10):\n # query the event status register, with 000 the bridge should be settled enough\n ESR = int(self.ask(\"*ESR?\"))\n if int(ESR):\n time.sleep(1)\n else:\n break\n # the ESR register was not cleared ... return nothing sensful\n if i == 9: return 0\n\n # Corresponding command: <ohm value>[term] = RDGR? <channel>[term]\n logging.debug('Get resistance of channel {:d}.'.format(channel))\n resistance = float(self.ask('RDGR? {:d}'.format(channel)))\n else:\n # 'fast mode'\n logging.debug('Get resistance of channel {:d}.'.format(self._channel))\n resistance = float(self.ask('RDGR? {:d}'.format(self._channel)))\n\n return resistance", "def get_res(self,channel):\r\n\r\n global a\r\n try:\r\n raw_output=self._device.query('RDGR?'+str(channel))\r\n out=float(raw_output[0:len(raw_output)-1])\r\n return out\r\n except:\r\n print \"Failed to read Resistance\"\r\n return NaN", "def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def humidity(self):\n self.pi.i2c_write_device(self._h, [0xF5]) # RH no hold\n time.sleep(0.1)\n c, rh = self.pi.i2c_read_device(self._h, 3) # msb, lsb, checksum\n if self._crc(rh) == 0:\n rh_val = (rh[0]<<8) + rh[1]\n RH = ((125.0 * rh_val)/65536.0) - 6.0\n else:\n RH = 999\n return RH", "def r0(self):\n return self._r0", "def getAccuracy(self):\r\n if len(self.R_times) < 1:\r\n return None \r\n return float(self.RTTs[-1]/2.0)", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def read_odometer(self):\n print(\"el total de kilometros recorridos es de: \"+str(self.odometer))", "def r0_500(self):\n return self._r0*(self._lam/500.)**(-1.2)", "def residual(us):\n return self.h_S(z0, us) - h_P", "def get_radiation():\n sun_pos = get_sun_position()\n if sun_pos <= POSITION_MIN or sun_pos >= POSITION_MAX:\n return 0\n else:\n # Calculate a new delta.\n delta = random.randint(0, RADIATION_DELTA)\n if random.random() > 0.5:\n delta = -1 * delta\n # Calculate the radiation based on the sun position.\n new_radiation = round(-0.1279 * pow(sun_pos, 2) + 46.05 * sun_pos - 3100)\n # Apply the delta and return the value.\n return new_radiation + delta", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def FRET_radius(efficiency, R0):\n return R0 * np.power(1 / efficiency - 1, 1 / 6)", "def estimate_roofline(self):\r\n self.layer['data_roof'] = self.layer['data_bytes'] / self.bandwidth\r\n self.layer['time_ms'] = np.max([self.layer['data_roof']])*1000 # to milliseconds\r\n return self.layer['time_ms']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the resistance RZero of the sensor (in kOhms) for calibration purposes corrected for temperature/humidity
def get_corrected_rzero(self, temperature, humidity): return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))
[ "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD", "def get_resistance(self):\n if self._integration_time > 1 :\n \n channel = self.get_channel()\n time.sleep(0.1)\n\n for i in range(10):\n # query the event status register, with 000 the bridge should be settled enough\n ESR = int(self.ask(\"*ESR?\"))\n if int(ESR):\n time.sleep(1)\n else:\n break\n # the ESR register was not cleared ... return nothing sensful\n if i == 9: return 0\n\n # Corresponding command: <ohm value>[term] = RDGR? <channel>[term]\n logging.debug('Get resistance of channel {:d}.'.format(channel))\n resistance = float(self.ask('RDGR? {:d}'.format(channel)))\n else:\n # 'fast mode'\n logging.debug('Get resistance of channel {:d}.'.format(self._channel))\n resistance = float(self.ask('RDGR? {:d}'.format(self._channel)))\n\n return resistance", "def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)", "def _calculate_r0(self):\n\n self.r0 = self.coherence_cell_size * (np.cos(np.deg2rad(self.zenith_angle)))**(3/5)", "def humidity(self):\n self.pi.i2c_write_device(self._h, [0xF5]) # RH no hold\n time.sleep(0.1)\n c, rh = self.pi.i2c_read_device(self._h, 3) # msb, lsb, checksum\n if self._crc(rh) == 0:\n rh_val = (rh[0]<<8) + rh[1]\n RH = ((125.0 * rh_val)/65536.0) - 6.0\n else:\n RH = 999\n return RH", "def r(self) -> float:\n return self._ohms.real", "def get_res(self,channel):\r\n\r\n global a\r\n try:\r\n raw_output=self._device.query('RDGR?'+str(channel))\r\n out=float(raw_output[0:len(raw_output)-1])\r\n return out\r\n except:\r\n print \"Failed to read Resistance\"\r\n return NaN", "def humidity_calibration(self):\r\n\t\t\r\n\t\t# Read data back from HTS221_H0_RH_X2(0x30), 1 byte\r\n\t\tval = bus.read_byte_data(HTS221_DEFAULT_ADDRESS, HTS221_H0_RH_X2)\r\n\t\tself.H0 = val / 2\r\n\t\t\r\n\t\t# Read data back from HTS221_H1_RH_X2(0x31), 1 byte\r\n\t\tval = bus.read_byte_data(HTS221_DEFAULT_ADDRESS, HTS221_H1_RH_X2)\r\n\t\tself.H1 = val /2\r\n\t\t\r\n\t\t# Read data back from HTS221_H0_T0_OUT_L(0x36), 2 bytes\r\n\t\tval0 = bus.read_byte_data(HTS221_DEFAULT_ADDRESS, HTS221_H0_T0_OUT_L)\r\n\t\tval1 = bus.read_byte_data(HTS221_DEFAULT_ADDRESS, HTS221_H0_T0_OUT_H)\r\n\t\tself.H2 = ((val1 & 0xFF) * 256) + (val0 & 0xFF)\r\n\t\t\r\n\t\t# Read data back from HTS221_H1_T0_OUT_L(0x3A), 2 bytes\r\n\t\tval0 = bus.read_byte_data(HTS221_DEFAULT_ADDRESS, HTS221_H1_T0_OUT_L)\r\n\t\tval1 = bus.read_byte_data(HTS221_DEFAULT_ADDRESS, HTS221_H1_T0_OUT_H)\r\n\t\tself.H3 = ((val1 & 0xFF) * 256) + (val0 & 0xFF)", "def meas_resistance(instrument):\n return float(instrument.query('MEAS:RESistance?'))", "def getAccuracy(self):\r\n if len(self.R_times) < 1:\r\n return None \r\n return float(self.RTTs[-1]/2.0)", "def read_calibrated(self):\n\n self.read_sensors()\n\n print(\"uncalibrated readings\")\n self.print_sensor_values(self.sensorValues)\n\n for i in range(0, self.NUM_SENSORS):\n denominator = self.calibratedMax[i] - self.calibratedMin[i]\n val = 0\n if denominator != 0:\n val = (self.sensorValues[i] - self.calibratedMin[i]) * 1000 / denominator\n if val < 0:\n val = 0\n elif 
val > 1000:\n val = 1000\n self.sensorValues[i] = val\n\n print(\"calibrated readings\")\n self.print_sensor_values(self.sensorValues)", "def calibrate_decide(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage < 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n else:\n\n # get rho using full range coeffs\n XKw = coeffs['path_len'] * coeffs['Kwf']\n logV0 = np.log(coeffs['V0f'])\n rho_temp = (np.log(voltage) - logV0) / XKw\n\n # determine new coeffs based on the \"temporary\" values\n if np.mean(rho_temp) > 9:\n if verbose:\n print('high')\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n else:\n if verbose:\n print('low')\n XKw = coeffs['path_len'] * coeffs['Kwl']\n logV0 = np.log(coeffs['V0l'])\n # re-calculate rho with these coefficients\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def r0(self):\n return self._r0", "def residual(us):\n return self.h_S(z0, us) - h_P", "def ultrasonic_sensor_error(raw_sensor_value):\n\treturn raw_sensor_value * 1.1", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def get_temperature(self):\n temp_cpu = get_cpu_temp()\n # Calculates the real temperature compensating CPU heating.\n temp_avg = (self.get_temperature_from_humidity() + self.get_temperature_from_humidity()) / 2\n calibrated = temp_avg - ((temp_cpu - temp_avg) / 1.2)\n calibrated = get_smooth(calibrated)\n return calibrated" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find and create a configuration for Boost. prefix Where to find sofiasip, should sofiasip/sip.h.
def __init__(self, prefix = None): # Compute the search path. if prefix is None: test = [Path('/usr'), Path('/usr/local')] else: test = [Path(prefix)] self.__prefix = self._search_all('include/sofia-sip-1.12/sofia-sip/sip.h', test)[0] self.__config = drake.cxx.Config() self.__config.add_system_include_path(self.__prefix / 'include/sofia-sip-1.12') self.__config.lib_path(self.__prefix / 'lib')
[ "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def getapxs_location():\n return getconfigure_option(\"APXS\")", "def get_confed_prefix(path=None):\n setup_dir = find_parent_containing('setup.cfg', path=path, check='isfile')\n if not setup_dir:\n return\n parser = get_configparser()\n parser.read(os.path.join(setup_dir, 'setup.cfg'))\n return parser.get('versioning', 'tag_prefix')", "def check_and_configure_bap():\n if not config.get('bap_executable_path'):\n path = ask_user(bap.find())\n if path and len(path) > 0:\n config.set('bap_executable_path', path)", "def test_replace_namespaced_build_config(self):\n pass", "def find_boost(hint=None, verbose=True):\n\n search_dirs = [] if hint is None else hint\n\n if \"BOOST_DIR\" in os.environ:\n search_dirs.append(os.path.join(os.environ[\"BOOST_DIR\"], \"include\"))\n\n if \"CONDA_PREFIX\" in os.environ:\n search_dirs.append(os.path.join(os.environ[\"CONDA_PREFIX\"], \"include\"))\n\n search_dirs += [\n \"/usr/include\",\n \"/usr/local/include\",\n \"/usr/local/homebrew/include\",\n \"/opt/local/include\",\n \"/data/sljg2/software/boost/include\",\n ]\n\n for d in search_dirs:\n path = os.path.join(d, \"boost\", \"geometry\", \"index\", \"rtree.hpp\")\n if os.path.exists(path):\n vf = os.path.join(d, \"boost\", \"version.hpp\")\n src = open(vf, \"r\").read()\n v = re.findall('#define BOOST_LIB_VERSION \"(.+)\"', src)\n if not len(v):\n continue\n v = v[0]\n if verbose:\n print(\"Found Boost version {0} in: {1}\".format(v, d))\n return d\n return None", "def __init__(self, prefix = None):\n # Compute the search path.\n if prefix is None:\n test = [Path('/usr'), Path('/usr/local')]\n else:\n test = [Path(prefix)]\n for i in range(len(test)):\n if not test[i].absolute:\n test[i] = srctree() / test[i]\n self.__prefix = self._search_all('include/curl/curl.h', test)[0]\n self.__config = drake.cxx.Config()\n self.__config.add_system_include_path(self.__prefix / 'include')\n self.__config.lib_path(self.__prefix / 'lib')", "def config_locator():\n print(pkgrs.resource_filename('latools', 'latools.cfg'))\n return", "def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))", "def get_config(app):\n items = app.config.items()\n prefix = 'WIKI_'\n\n def strip_prefix(tup):\n return (tup[0].replace('WIKI_', ''), tup[1])\n\n return dict([strip_prefix(i) for i in items if i[0].startswith(prefix)])", "def samtoolsLocation():", "def make_config_finder(self):\n if self.config_finder is None:\n extra_config_files = utils.normalize_paths(\n self.prelim_opts.append_config)\n self.config_finder = config.ConfigFileFinder(\n self.option_manager.program_name,\n self.prelim_args,\n extra_config_files,\n )", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def get_navicli_config(self):\r\n self.add_copy_specs([\r\n \"/etc/Navisphere/agent.config\",\r\n \"/etc/Navisphere/Navimon.cfg\",\r\n \"/etc/Navisphere/Quietmode.cfg\",\r\n 
\"/etc/Navisphere/messages/[a-z]*\",\r\n \"/etc/Navisphere/log/[a-z]*\"])", "def getSystemPrefixPath():\n\n global _the_sys_prefix # Cached result, pylint: disable=global-statement\n if _the_sys_prefix is None:\n sys_prefix = getattr(\n sys, \"real_prefix\", getattr(sys, \"base_prefix\", sys.prefix)\n )\n sys_prefix = os.path.abspath(sys_prefix)\n\n # Some virtualenv contain the \"orig-prefix.txt\" as a textual link to the\n # target, this is often on Windows with virtualenv. There are two places to\n # look for.\n for candidate in (\n \"Lib/orig-prefix.txt\",\n \"lib/python%s/orig-prefix.txt\" % python_version_str,\n ):\n candidate = os.path.join(sys_prefix, candidate)\n if os.path.exists(candidate):\n # Cannot use FileOperations.getFileContents() here, because of circular dependency.\n # pylint: disable=unspecified-encoding\n with open(candidate) as f:\n sys_prefix = f.read()\n\n # Trailing spaces in the python prefix, please not.\n assert sys_prefix == sys_prefix.strip()\n\n # This is another for of virtualenv references:\n if os.name != \"nt\" and os.path.islink(os.path.join(sys_prefix, \".Python\")):\n sys_prefix = os.path.normpath(\n os.path.join(os.readlink(os.path.join(sys_prefix, \".Python\")), \"..\")\n )\n\n # Some virtualenv created by \"venv\" seem to have a different structure, where\n # library and include files are outside of it.\n if (\n os.name != \"nt\"\n and python_version >= 0x330\n and os.path.exists(os.path.join(sys_prefix, \"bin/activate\"))\n ):\n python_binary = os.path.join(sys_prefix, \"bin\", \"python\")\n python_binary = os.path.realpath(python_binary)\n\n sys_prefix = os.path.normpath(os.path.join(python_binary, \"..\", \"..\"))\n\n # Resolve symlinks on Windows manually.\n if os.name == \"nt\":\n from nuitka.utils.FileOperations import getDirectoryRealPath\n\n sys_prefix = getDirectoryRealPath(sys_prefix)\n\n _the_sys_prefix = sys_prefix\n\n return _the_sys_prefix", "def find_software() -> SloppyTree:\n locations = params[socket.gethostname().split('.')[0]].locations\n pass", "def setup_nagios_cfgs():\n put(\n \"{0}/nagios/contacts.cfg\".format(env.CONFIG.NAGIOS_CFG_DIR),\n \"/usr/local/nagios/etc/objects/contacts.cfg\",\n use_sudo=True\n )\n put(\n \"{0}/nagios/cgi.cfg\".format(env.CONFIG.NAGIOS_CFG_DIR),\n \"/usr/local/nagios/etc/cgi.cfg\",\n use_sudo=True\n )\n put(\n \"{0}/nagios/commands.cfg\".format(env.CONFIG.NAGIOS_CFG_DIR),\n \"/usr/local/nagios/etc/objects/commands.cfg\",\n use_sudo=True\n )\n put(\n \"{0}/nagios/nagios.cfg\".format(env.CONFIG.NAGIOS_CFG_DIR),\n \"/usr/local/nagios/etc/nagios.cfg\",\n use_sudo=True\n )\n sudo(\"mkdir -p /usr/local/nagios/etc/servers\")\n for server in glob.glob(\n env.CONFIG.NAGIOS_CFG_DIR + \"/nagios/servers/*\"):\n put(\n server,\n \"/usr/local/nagios/etc/servers/\",\n use_sudo=True\n )\n\n sudo(\n \"\"\"\n chown -R nagios:nagios /usr/local/nagios/etc\n \"\"\"\n )\n sudo(\"service nagios restart\")", "def configuration_key_prefix():\n return \"foo\"", "def get_sip_dir_flags(config):\n try:\n sip_dir = config.pyqt_sip_dir\n sip_flags = config.pyqt_sip_flags\n return sip_dir, sip_flags\n except AttributeError:\n # sipconfig.Configuration does not have a pyqt_sip_dir or pyqt_sip_flags AttributeError\n sip_flags = QtCore.PYQT_CONFIGURATION['sip_flags']\n\n # Archlinux installs sip files here by default\n default_sip_dir = os.path.join(PyQt5.__path__[0], 'bindings')\n if os.path.exists(default_sip_dir):\n return default_sip_dir, sip_flags\n\n # sip4 installs here by default\n default_sip_dir = 
os.path.join(sipconfig._pkg_config['default_sip_dir'], 'PyQt5')\n if os.path.exists(default_sip_dir):\n return default_sip_dir, sip_flags\n\n # Homebrew installs sip files here by default\n default_sip_dir = os.path.join(sipconfig._pkg_config['default_sip_dir'], 'Qt5')\n if os.path.exists(default_sip_dir):\n return default_sip_dir, sip_flags\n raise FileNotFoundError('The sip directory for PyQt5 could not be located. Please ensure' +\n ' that PyQt5 is installed')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transliterate and clean username by removing any unsupported character
def clean_username(value): if NO_ASCII_REGEX.search(value): value = unidecode(value) value = NO_ASCII_REGEX.sub('', value) value = NO_SPECIAL_REGEX.sub('', value) return value
[ "def clean_username(username):\n bad_characters = \" !#$%^&*()[]'\\\"\"\n\n for char in bad_characters:\n if char in username:\n username = username.replace(char, '')\n\n return username", "def sanitizeUserName(username):\n if username.startswith(\"@\"):\n username = username[1:] # remove \"@\"\n return username.lower()", "def clean_username(self, username):\n return username.lower()", "def normalize_username(self, username):\n #username = username.lower()\n username_map = self.get_username_map()\n username = username_map.get(username, username)\n return username", "def normalize_usernames(self):\n find_username = re.compile(\"^@\\w+\")\n dummy = \"_USER_\"\n self.normalize(find_username, dummy)", "def get_clean_username(email):\n dot = '.'\n under_score = '_'\n if not email is None:\n username = get_username_from_email(email)\n if username:\n if dot in username:\n return username.replace(dot,under_score).strip()\n return username.strip()\n else:\n pass", "def clean_username(self):\n username = self.cleaned_data.get('username', '')\n badness = re.findall(r'[^a-zA-Z0-9\\._@\\-]', username)\n if len(badness):\n raise forms.ValidationError('Please keep user names to alphanumeric characters and the characters [. - _ @] only.')\n return username", "def normalize_username(self, key, username):\n if not username:\n return\n\n return username.strip().lower()", "def force_valid_username(name):\n resulting_username = re.sub(\"[^A-Za-z0-9_-]\", \"_\", name)\n if resulting_username is None:\n resulting_username = name\n if len(resulting_username) > 30:\n resulting_username = resulting_username[:30]\n return resulting_username.lower()", "def raw_username(username):\n sitewide_domain = settings.HQ_ACCOUNT_ROOT\n username = str(username or '')\n username = username.lower()\n try:\n u, d = username.split(\"@\")\n except Exception:\n return username\n if d.endswith('.' + sitewide_domain):\n return u\n else:\n return username", "def remove_usernames(text):\r\n clean = re.compile(r'@\\w*')\r\n\r\n return re.sub(clean, '', text)", "def make_valid_nickname(nickname):\n return re.sub('[^a-zA-Z0-9_\\.]', '', nickname)", "def clean_username(self):\n return self.cleaned_data['username'].lower()", "def clean_name(name):\n return re.compile('[^a-zA-Z]').sub('', name)", "def username(u):\n return u.replace(\" \", \"\").lower().split(\"u/\")[-1]", "def clean_username(self, username, request):\n backend_str = request.session[auth.BACKEND_SESSION_KEY]\n backend = auth.load_backend(backend_str)\n try:\n LOGGER.debug('calling the backend %s clean_username with %s',\n backend,\n username)\n username = backend.clean_username(username)\n LOGGER.debug('cleaned username is %s', username)\n except AttributeError: # Backend has no clean_username method.\n pass\n return username", "def remove_special_characters(self, txt: str) -> str:", "def _username_from_name(self, name):\r\n return name.replace(' ', '_')", "def artist_name_cleaner(txt): \n \n # here I deal with numbers, special charachters and other noise\n step1 = re.sub(\"[0-9]\",\"\", txt).replace(\"Dezember\", \"\")\n step2 = re.sub(\"\"\"['\"\\/;:.(),-]\"\"\", \"\", step1) # here 'bad' charachters\n t = \"_\".join(step2.strip().lower().split())\n \n # here I remove the letters with accents\n text=unicodedata.normalize('NFD', t).encode('ascii', 'ignore').decode(\"utf-8\")\n\n return str(text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replacement of ore.alchemist.container.stringKey The difference is that here the primary_key is not determined by sqlalchemy.orm.mapper.primary_key_from_instance(obj) but by doing the logically equivalent (but a little more laborious) [ getattr(instance, c.name) for c in mapper.primary_key ]. This is because, in some hardtodebug cases, the previous was returning None to all pk values e.g. for objects on which checkPermission() has not been called. Using this version, the primary_key is correctly determined irrespective of whether checkPermission() had previously been called on the object.
def stringKey(obj): unproxied = proxy.removeSecurityProxy(obj) mapper = orm.object_mapper(unproxied) #primary_key = mapper.primary_key_from_instance(unproxied) identity_values = [ getattr(unproxied, c.name) for c in mapper.primary_key ] identity_key = "-".join(map(str, identity_values)) return "obj-%s" % (identity_key)
[ "def _to_primary_key(self, value):\n if value is None:\n return None\n if isinstance(value, self.base_class):\n if not value._is_loaded:\n raise Exception('Record must be loaded.')\n return value._primary_key\n\n return self.base_class._to_primary_key(value)\n # return self.base_class._check_pk_type(value)", "def _get_obj_pk(self, obj):\n if self.use_natural_keys and hasattr(obj, 'natural_key'):\n raw_nat_key = obj.natural_key()\n obj_pk = smart_text(NATURAL_KEY_JOINER.join(raw_nat_key))\n keytype = 'natural'\n else:\n obj_pk = obj._get_pk_val()\n keytype = 'pk'\n\n return obj_pk, keytype", "def primary_key_name(model_or_instance):\n its_a_model = isinstance(model_or_instance, type)\n model = model_or_instance if its_a_model else model_or_instance.__class__\n pk_names = primary_key_names(model)\n return 'id' if 'id' in pk_names else pk_names[0]", "def primary_key(cls):\n has_multiple_pk = len(class_keys(cls)) > 1\n\n if has_multiple_pk:\n # guess the pk\n pk = cls.__name__.lower() + '_id'\n else:\n for key in class_keys(cls):\n pk = key\n break\n\n if not pk in cls.__dict__:\n # could not find pk field in class, now check\n # whether it has been explicitly specified\n if 'pk_field' in cls.__dict__:\n pk = cls.__dict__['pk_field']\n else:\n raise KeyNotFoundException(\"Could not figure out primary key field\"\n \"for %s model. Tried to first use %s as\"\n \" field name,and then looked for\"\n \" pk_field attr which was also missing\"\n % (cls.__name__, pk))\n\n return pk", "def get_primary_key(cls) -> str:\n return inspect(cls).primary_key[0].name", "def primary_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"primary_key\")", "def primary_key(cls):\n\n if cls.__from_class__:\n cls = cls.__from_class__\n return cls.__table__.primary_key.columns.values()[0].name", "def get_key_value(self):\n return getattr(self, self.__class__._meta.primary_key.name)", "def get_key_id(self):", "def test_primary_key(self):\r\n\r\n # This should just work.\r\n class AutoFieldKey(models.Model):\r\n key = models.AutoField(primary_key=True)\r\n AutoFieldKey.objects.create()\r\n\r\n # This one can be exactly represented.\r\n class CharKey(models.Model):\r\n id = models.CharField(primary_key=True, max_length=10)\r\n CharKey.objects.create(id='a')\r\n\r\n # Some rely on unstable assumptions or have other quirks and\r\n # should warn.\r\n\r\n# # TODO: Warning with a range limitation.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class IntegerKey(models.Model):\r\n# id = models.IntegerField(primary_key=True)\r\n# IntegerKey.objects.create(id=1)\r\n\r\n# # TODO: date/times could be resonably encoded / decoded as\r\n# # strings (in a reversible manner) for key usage, but\r\n# # would need special handling and continue to raise an\r\n# # exception for now\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DateKey(models.Model):\r\n# id = models.DateField(primary_key=True, auto_now=True)\r\n# DateKey.objects.create()\r\n\r\n# # TODO: There is a db.Email field that would be better to\r\n# # store emails, but that may prevent them from being\r\n# # used as keys.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class EmailKey(models.Model):\r\n# id = models.EmailField(primary_key=True)\r\n# EmailKey.objects.create(id='aaa@example.com')\r\n\r\n# # TODO: Warn that changing field parameters breaks sorting.\r\n# # This applies to any DecimalField, so should belong to\r\n# # the docs.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DecimalKey(models.Model):\r\n# id = models.DecimalField(primary_key=True, 
decimal_places=2,\r\n# max_digits=5)\r\n# DecimalKey.objects.create(id=1)\r\n\r\n # Some cannot be reasonably represented (e.g. binary or string\r\n # encoding would prevent comparisons to work as expected).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class FloatKey(models.Model):\r\n id = models.FloatField(primary_key=True)\r\n FloatKey.objects.create(id=1.0)\r\n\r\n # TODO: Better fail during validation or creation than\r\n # sometimes when filtering (False = 0 is a wrong key value).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class BooleanKey(models.Model):\r\n id = models.BooleanField(primary_key=True)\r\n BooleanKey.objects.create(id=True)\r\n len(BooleanKey.objects.filter(id=False))", "def primary_key(cls, row):\n if isinstance(cls.pkey, tuple):\n return tuple([row[k] for k in cls.pkey])\n return row[cls.pkey]", "def object_pk(self):\n\n if self._wrapped not in (None, empty):\n return str(self._wrapped.pk)\n\n if '_object_pk' in self.__dict__:\n return self.__dict__['_object_pk']\n\n identifier = self._get_identifier()\n if identifier:\n # noinspection PyBroadException\n try:\n object_pk = identifier.split('.', 2)[-1]\n if object_pk == 'None':\n object_pk = None\n self.__dict__['_object_pk'] = object_pk\n return object_pk\n except Exception:\n pass\n\n raise AttributeError()", "def primary(self):\n primary_k = self.__class__.get_primary()\n return getattr(self, primary_k)", "def get_datastore_key(model, pk):\n\n kind = get_top_concrete_parent(model)._meta.db_table\n return Key.from_path(kind, pk)", "def _get_key(key_or_id, key_cls):\n return (\n key_cls.from_string(key_or_id)\n if isinstance(key_or_id, str)\n else key_or_id\n )", "def get_primary_id(self):", "def pk(self, ctx):\n\n #if (self._pk == False):\n if True:\n pk_cols = []\n for col in self.columns:\n if col.pk:\n pk_cols.append(col)\n\n if (len(pk_cols) > 1):\n raise Exception(\"Table %s has multiple primary keys: %s\" % (self.name, pk_cols))\n elif (len(pk_cols) == 1):\n self._pk = pk_cols[0]\n else:\n self._pk = None\n\n return self._pk", "def _get_raw_key(self, key_id):", "def test_primary_key_coercing(self):\r\n CharKey.objects.create(id=1)\r\n CharKey.objects.create(id='a')\r\n CharKey.objects.create(id=1.1)\r\n CharKey.objects.get(id='1')\r\n CharKey.objects.get(id='a')\r\n CharKey.objects.get(id='1.1')\r\n\r\n IntegerKey.objects.create(id=1)\r\n with self.assertRaises(ValueError):\r\n IntegerKey.objects.create(id='a')\r\n IntegerKey.objects.create(id=1.1)\r\n IntegerKey.objects.get(id='1')\r\n with self.assertRaises(ValueError):\r\n IntegerKey.objects.get(id='a')\r\n IntegerKey.objects.get(id=1.1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the puzzle state based on the provided move string
def update_puzzle(self, move_string): zero_row, zero_col = self.current_position(0, 0) for direction in move_string: if direction == "l": assert zero_col > 0, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1] self._grid[zero_row][zero_col - 1] = 0 zero_col -= 1 elif direction == "r": assert zero_col < self._width - 1, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1] self._grid[zero_row][zero_col + 1] = 0 zero_col += 1 elif direction == "u": assert zero_row > 0, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col] self._grid[zero_row - 1][zero_col] = 0 zero_row -= 1 elif direction == "d": assert zero_row < self._height - 1, "move off grid: " + direction self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col] self._grid[zero_row + 1][zero_col] = 0 zero_row += 1 else: assert False, "invalid direction: " + direction
[ "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n if DEBUG_UP:\n print self\n assert False, \"invalid direction: \" + direction", "def update_state(self, state, imove):\n pass", "def update_state(self, move):\n\n self.state = self.next_state(self.state, move)", "def str_to_move(self, move: str) -> Any:\n raise NotImplementedError", "def make_move(state: str, section_num: int, move: str) -> str:\n if move == wf.CHECK:\n check_result = wf.check_section(state, section_num)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = wf.change_state(state, section_num, move) \n return state", "def setBoard( self, moveString ): \n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = \"O\"\n else:\n next_side = \"X\"", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def solve(self):\n moves = 0\n self.print_state()\n prev_state = self.state\n\n while not self.in_goal_state():\n new_state = self.move(self.state)\n\n moves += 1\n print(\"\\nMove #{}:\\n\".format(moves))\n self.print_state(new_state)\n\n if not self.check_move(prev_state, new_state):\n raise Exception(\"Invalid move executed.\")\n self.state = new_state\n\n time.sleep(self.delay)\n if prev_state == self.state:\n print(\"No state change after move, exiting\")\n return\n prev_state = new_state\n print(\"Puzzle solved, congratulations!\")", "def set_board(self, move_string):\r\n next_checker = 'X' # we starten door een 'X' te spelen\r\n for col_char in move_string:\r\n col = int(col_char)\r\n if 0 <= col <= self.width:\r\n self.add_move(col, next_checker)\r\n if next_checker == 'X':\r\n next_checker = 'O'\r\n else:\r\n next_checker = 'X'", "def set_board(self, move_string):\n next_checker = 'X' # we starten door een 'X' te spelen\n for col_char in move_string:\n col = int(col_char)\n\n if 0 <= col <= self.width:\n self.add_move(col, next_checker)\n\n if next_checker == 'X':\n next_checker = 'O'\n else:\n next_checker = 'X'", "def str_to_move(self, move: str) -> str:\n return move", "def 
move_executed(self, old_game_state, move, new_game_state):\n pass", "def str_to_move(self, move: str) -> int:\n return int(move)", "def result(self, move):\n row, col = self.blankLocation\n if(move == 'up'):\n newrow = row - 1\n newcol = col\n elif(move == 'down'):\n newrow = row + 1\n newcol = col\n elif(move == 'left'):\n newrow = row\n newcol = col - 1\n elif(move == 'right'):\n newrow = row\n newcol = col + 1\n else:\n raise \"Illegal Move\"\n\n # Create a copy of the current eightPuzzle\n newPuzzle = EightPuzzleState([0, 0, 0, 0, 0, 0, 0, 0, 0])\n newPuzzle.cells = [values[:] for values in self.cells]\n # And update it to reflect the move\n newPuzzle.cells[row][col] = self.cells[newrow][newcol]\n newPuzzle.cells[newrow][newcol] = self.cells[row][col]\n newPuzzle.blankLocation = newrow, newcol\n newPuzzle.parent = self\n newPuzzle.move = move\n return newPuzzle", "def apply_move(self, move):\n\n # declare the status fields using default parameters\n fields = ['w', 'KQkq', '-', 0, 1]\n # move = self._translate(move)\n\n start = Game.xy2i(move[:2])\n end = Game.xy2i(move[2:4])\n piece = self.board.get_piece(start)\n target = self.board.get_piece(end)\n\n if self.validate and move not in self.get_moves(idx_list=[start]):\n raise InvalidMove(\"\\nIllegal move: {}\\nfen: {}\".format(move,\n str(self)))\n\n # toggle the active player\n fields[0] = {'w': 'b', 'b': 'w'}[self.state.player]\n\n # modify castling rights - the set of castling rights that *might*\n # be voided by a move is uniquely determined by the starting index\n # of the move - regardless of what piece moves from that position\n # (excluding chess variants like chess960).\n rights_map = {0: 'q', 4: 'kq', 7: 'k',\n 56: 'Q', 60: 'KQ', 63: 'K'}\n void_set = ''.join([rights_map.get(start, ''),\n rights_map.get(end, '')])\n new_rights = [r for r in self.state.rights if r not in void_set]\n fields[1] = ''.join(new_rights) or '-'\n\n # set en passant target square when a pawn advances two spaces\n if piece.lower() == 'p' and abs(start - end) == 16:\n fields[2] = Game.i2xy((start + end) // 2)\n\n # reset the half move counter when a pawn moves or is captured\n fields[3] = self.state.ply + 1\n if piece.lower() == 'p' or target.lower() != ' ':\n fields[3] = 0\n\n # Increment the turn counter when the next move is from white, i.e.,\n # the current player is black\n fields[4] = self.state.turn\n if self.state.player == 'b':\n fields[4] = self.state.turn + 1\n\n # check for pawn promotion\n if len(move) == 5:\n piece = move[4]\n if self.state.player == 'w':\n piece = piece.upper()\n\n # record the move in the game history and apply it to the board\n self.move_history.append(move)\n self.board.move_piece(start, end, piece)\n\n # move the rook to the other side of the king in case of castling\n c_type = {62: 'K', 58: 'Q', 6: 'k', 2: 'q'}.get(end, None)\n if piece.lower() == 'k' and c_type and c_type in self.state.rights:\n coords = {'K': (63, 61), 'Q': (56, 59),\n 'k': (7, 5), 'q': (0, 3)}[c_type]\n r_piece = self.board.get_piece(coords[0])\n self.board.move_piece(coords[0], coords[1], r_piece)\n\n # in en passant remove the piece that is captured\n if piece.lower() == 'p' and self.state.en_passant != '-' \\\n and Game.xy2i(self.state.en_passant) == end:\n ep_tgt = Game.xy2i(self.state.en_passant)\n if ep_tgt < 24:\n self.board.move_piece(end + 8, end + 8, ' ')\n elif ep_tgt > 32:\n self.board.move_piece(end - 8, end - 8, ' ')\n\n # state update must happen after castling\n self.set_fen(' '.join(str(x) for x in [self.board] + list(fields)))", 
"def _ai_move(self):\n move = self.AI_MOVES[self.game_board.get_string_board()][0]\n self.game_board.move_pieces(start=move[\"start\"], end=move[\"end\"])\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self.selected_move = -1\n\n self._sync_gui()", "def update_game_state(db_cursor: sqlite3.Cursor,\n move_id: int,\n command: Command,\n gridpoint_row: int,\n gridpoint_col: int,\n tweet_id: str) -> None:\n db_cursor.execute(\n _UPDATE_STATE_QUERY,\n (move_id, command.name, gridpoint_row, gridpoint_col, tweet_id)\n )", "def apply_move(self, move):\n next_board = copy.deepcopy(self.board)\n next_board.remove(self.next_player, move.start)\n next_board.place(self.next_player, move.end)\n return GameState(next_board, self.next_player.other, move)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the puzzle satisfies the row zero invariant at the given column (col > 1) Returns a boolean
def row0_invariant(self, target_col): result = True if self._grid[0][target_col] != 0: result = False if self._grid[1][target_col] != (target_col + self._width * 1): result = False for row in range(2, self._height): for col in range(self._width): solved_value = (col + self._width * row) if solved_value != self._grid[row][col]: result = False for row in (0, 1): for col in range(target_col+1, self._width): solved_value = (col + self._width * row) if solved_value != self._grid[row][col]: result = False return result
[ "def row0_invariant(self, target_col):\n # replace with your code\n boolean = True\n \n if self.get_number(0, target_col) != 0:\n boolean = False\n \n for row in range(2, self._height):\n for col in range(self._width):\n if self.current_position(row, col) != (row, col):\n boolean = False\n \n for col in range(target_col+1, self._width):\n for row in range(2):\n if self.current_position(row, col) != (row,col):\n boolean = False\n \n if self.current_position(1,target_col)!= (1, target_col):\n boolean = False\n \n return boolean", "def row1_invariant(self, target_col):\n # replace with your code\n target_row = 1\n \n #check whether tile zero is at (1,j)\n if self._grid[target_row][target_col] != 0:\n return False\n \n #check whether all positions to the right of this position are solved\n for col in range(target_col + 1, self._width):\n if self.current_position(0, col) != (0, col):\n return False\n \n #check whether all positions below of this position are solved \n if not self.lower_row_invariant(target_row, target_col):\n return False\n \n return True", "def row1_invariant(self, target_col):\n result = True\n if self._grid[1][target_col] != 0:\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result", "def check_zeros(board):\n for row in board:\n if 0 in row:\n return False\n\n return True", "def iscomplete(grid):\n \n for i in range(0,9):\n for j in range(0,9):\n if grid[i][j] == 0:\n return False\n return True", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def is_solved(puzzle):\n for row in puzzle:\n for item in row:\n if item==1:\n return False\n return True", "def row1_invariant(self, target_col):\n # replace with your code\n boolean = True\n \n if self.get_number(1, target_col) != 0:\n boolean = False\n \n for row in range(2, self._height):\n for col in range(self._width):\n if self.current_position(row, col) != (row, col):\n boolean = False\n \n for col in range(target_col+1, self._width):\n for row in range(2):\n if self.current_position(row, col) != (row,col):\n boolean = False\n \n return boolean", "def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def check_pivot_row(self, row):\r\n all_zeros = True\r\n for i in range(self.SIZE):\r\n if self.matrix[row][i] != 0:\r\n all_zeros = False\r\n break\r\n\r\n if all_zeros:\r\n self.check_solvability(0, self.matrix[row][-1])", "def is_true(self, ncol, index):\n nround = self.maxrows + 1\n n = index % nround / 2\n return n < nround / 2", "def checkMatrix(matrix):\r\n if (matrix[0][0] == -1):\r\n return False\r\n 
return True", "def check_on_board(cell):\n if cell[0] > 4 or cell[0] < -4 or cell[1] > 4 or cell[1] < -4:\n return False\n if cell[0] + cell[1] > 4 or cell[0] + cell[1] < -4:\n return False\n\n return True", "def check_tile_availability(self, row, col):\n return self.board[row][col] == 0", "def solved(grid):\n\n return all(len(vals) == 1 for vals in grid.cells.values())", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def is_row_echelon(a):\n n = len(a)\n p = -1\n for i in range(n):\n j = 0\n while j <= n:\n if j == n:\n p = n\n break\n elif a[i][j] == 0:\n j += 1\n elif j > p:\n p = j\n break\n else:\n return False\n return True", "def is_solved(self):\n for y in range(9):\n for x in range(9):\n if self.puzzle[y][x] == 0:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the puzzle satisfies the row one invariant at the given column (col > 1) Returns a boolean
def row1_invariant(self, target_col): result = True if self._grid[1][target_col] != 0: result = False for row in range(2, self._height): for col in range(self._width): solved_value = (col + self._width * row) if solved_value != self._grid[row][col]: result = False for row in (0, 1): for col in range(target_col+1, self._width): solved_value = (col + self._width * row) if solved_value != self._grid[row][col]: result = False return result
[ "def row1_invariant(self, target_col):\r\n # assert that row 1 is solved\r\n if not self.lower_row_invariant(1, target_col):\r\n return False\r\n # asserts that tile proceeded to (1,j), the grid below (1,j) and to the right is solved\r\n for dummy_j in range(0, self.get_width()):\r\n for dummy_i in range(2, self.get_height()):\r\n if not (dummy_i, dummy_j) == self.current_position(dummy_i, dummy_j):\r\n return False\r\n return True", "def row1_invariant(self, target_col):\n # replace with your code\n boolean = True\n \n if self.get_number(1, target_col) != 0:\n boolean = False\n \n for row in range(2, self._height):\n for col in range(self._width):\n if self.current_position(row, col) != (row, col):\n boolean = False\n \n for col in range(target_col+1, self._width):\n for row in range(2):\n if self.current_position(row, col) != (row,col):\n boolean = False\n \n return boolean", "def row1_invariant(self, target_col):\n # replace with your code\n target_row = 1\n \n #check whether tile zero is at (1,j)\n if self._grid[target_row][target_col] != 0:\n return False\n \n #check whether all positions to the right of this position are solved\n for col in range(target_col + 1, self._width):\n if self.current_position(0, col) != (0, col):\n return False\n \n #check whether all positions below of this position are solved \n if not self.lower_row_invariant(target_row, target_col):\n return False\n \n return True", "def is_solved(puzzle):\n for row in puzzle:\n for item in row:\n if item==1:\n return False\n return True", "def is_valid(columns, row, col):\n # `row` is the current row; check against all previous rows\n for r in range(row):\n c = columns[r]\n # Check column\n if c == col:\n return False\n # Check diagonal\n if abs(c - col) == row - r:\n return False\n return True", "def solved(grid):\n\n return all(len(vals) == 1 for vals in grid.cells.values())", "def is_true(self, ncol, index):\n nround = self.maxrows + 1\n n = index % nround / 2\n return n < nround / 2", "def row0_invariant(self, target_col):\n result = True\n if self._grid[0][target_col] != 0:\n result = False\n if self._grid[1][target_col] != (target_col + self._width * 1):\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def __isValidGrid(self, row, col, number):\n row_triplet = row//3\n col_triplet = col//3\n\n for row in range(3):\n for col in range(3):\n if number == self.board[row_triplet*3 + row][col_triplet*3 + col]:\n return False\n return True", "def check_row(sudoku):\r\n for row in range(len(sudoku)):\r\n for col in range(len(sudoku)):\r\n if sudoku[row].count(sudoku[row][col]) != 1:\r\n return True #returns True is there is more than two of the same numbers in a row\r", "def row0_invariant(self, target_col):\n # replace with your code\n boolean = True\n \n if self.get_number(0, target_col) != 0:\n boolean = False\n \n for row in range(2, self._height):\n for col in range(self._width):\n if self.current_position(row, col) != (row, col):\n boolean = False\n \n for col in range(target_col+1, self._width):\n for row in range(2):\n if self.current_position(row, col) != (row,col):\n boolean 
= False\n \n if self.current_position(1,target_col)!= (1, target_col):\n boolean = False\n \n return boolean", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def check_on_board(cell):\n if cell[0] > 4 or cell[0] < -4 or cell[1] > 4 or cell[1] < -4:\n return False\n if cell[0] + cell[1] > 4 or cell[0] + cell[1] < -4:\n return False\n\n return True", "def is_solved(sudoku_matrix):\n\n for i in range(9):\n for j in range(9):\n if len(sudoku_matrix[i][j]) > 1:\n return False\n return True", "def is_valid(row, col):\n\treturn ((row >= 0) and (row <= 9) and (col >= 0) and (col <= 9))", "def in_col(n: int, row: int, col: int, grid: List) -> bool:\n for x in range(9):\n if x!= row and n == grid[x][col]:\n return True\n return False", "def inBoard(self, row, col):\n return 0 <= row < self.rows and 0 <= col < self.cols", "def is_row_echelon(a):\n n = len(a)\n p = -1\n for i in range(n):\n j = 0\n while j <= n:\n if j == n:\n p = n\n break\n elif a[i][j] == 0:\n j += 1\n elif j > p:\n p = j\n break\n else:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a solution string for a puzzle Updates the puzzle and returns a move string
def solve_puzzle(self): cur0_row, cur0_col = self.current_position(0, 0) move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1) self.update_puzzle(move_str) for row in range(self._height-1, 1, -1): for col in range(self._width-1, -1, -1): assert self.lower_row_invariant(row, col) if col != 0: move_str += self.solve_interior_tile(row, col) else: move_str += self.solve_col0_tile(row) for col in range(self._width-1, 1, -1): assert self.row1_invariant(col) move_str += self.solve_row1_tile(col) assert self.row0_invariant(col) move_str += self.solve_row0_tile(col) move_str += self.solve_2x2() return move_str
[ "def solve_puzzle(self):\r\n move_str = \"\"\r\n \r\n puzzle_copy = self.clone()\r\n height = puzzle_copy.get_height()\r\n width = puzzle_copy.get_width()\r\n \r\n # move 0 tile to bottom right\r\n zero_pos = puzzle_copy.current_position(0, 0)\r\n row_diff = height - zero_pos[0] \r\n col_diff = width - zero_pos[1]\r\n move_str += \"r\" * (col_diff - 1)\r\n move_str += \"d\" * (row_diff - 1)\r\n puzzle_copy.update_puzzle(move_str)\r\n \r\n # solve rows after the first two rows\r\n for row in range(height - 1, 1, -1):\r\n for col in range(width - 1, -1, -1):\r\n if col == 0:\r\n move_str += puzzle_copy.solve_col0_tile(row)\r\n else:\r\n move_str += puzzle_copy.solve_interior_tile(row, col)\r\n \r\n # solve all except 2x2\r\n for col in range(width - 1, 1, -1):\t\r\n for row in range(1, -1, -1):\r\n if row == 0:\r\n move_str += puzzle_copy.solve_row0_tile(col)\r\n else:\r\n move_str += puzzle_copy.solve_row1_tile(col)\r\n \r\n move_str += puzzle_copy.solve_2x2()\r\n \r\n self.update_puzzle(move_str)\r\n return move_str", "def solve_puzzle(self):\n\n move_str = \"\"\n \n # Move zero tile to bottom right corner tile of puzzle.\n zero_pos = self.current_position(0,0) \n vert_dist = (self.get_height() - 1) - zero_pos[0]\n horiz_dist = (self.get_width() - 1) - zero_pos[1]\n move_str += ((\"d\" * vert_dist) + (\"r\" * horiz_dist))\n self.update_puzzle(move_str)\n \n # Solve lower rows\n if self.get_height() > 2:\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n \n # Solve top 2 rows\n if self.get_width() > 2:\n for col in range(self.get_width() - 1, 1, -1):\n move_str += self.solve_row1_tile(col)\n move_str += self.solve_row0_tile(col)\n \n # Solve 2x2\n move_str += self.solve_2x2()\n\n return move_str", "def solve_puzzle(self):\n # replace with your code\n return \"\"", "def solve_puzzle(self):\n # replace with your code\n boolean = True\n for row in range(self._height):\n for col in range(self._width):\n if self.current_position(row, col) != (row, col):\n boolean = False\n break\n \n if boolean == True:\n return \"\"\n \n string = \"\"\n string0 = (self._width-1-self.current_position(0,0)[1])*\"r\" + \\\n (self._height-1-self.current_position(0,0)[0])*\"d\"\n \n self.update_puzzle(string0)\n print \"0\",self.current_position(0,0), string0\n \n for row in range(self._height-1,1,-1):\n for col in range(self._width-1,-1,-1):\n print \"fix positon:\", row,col\n if col == 0:\n string += self.solve_col0_tile(row)\n else:\n string += self.solve_interior_tile(row, col)\n \n for col in range(self._width-1,1,-1):\n print \"fix positon:\", col\n\n string += self.solve_row1_tile(col)\n print \"fixed row 1\", self\n string += self.solve_row0_tile(col)\n \n print \"fixed row 0\", self\n\n string += self.solve_2x2()\n \n print \"fixed row 2x2\", self\n\n \n #self.update_puzzle(string)\n \n return string0+string", "def gen_string_sol(sudoku):\n sol =''\n for letter in sudoku.rows:\n for digit in sudoku.cols:\n sol+=str(sudoku.solved_board[letter+digit])\n return sol", "def gen_solve_to_text(self):\n\n count = 0\n self.url = \"scramble: \\n\"\n for move in self.scramble.split():\n self.url += \"{} \".format(move)\n self.url += \"\\n\\nsolve:\\n\"\n\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = 
\"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"\\n//{}\\n\".format(piece) + alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n self.url += \"{} \".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"//{}\\n\".format(piece) + alg\n\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n self.url += \"// {} \\n\".format(move[\"comment\"])", "def print_solution(path):\n rowCurrent = 0\n columnCurrent = 0\n rowNext = 0\n columnNext = 0\n for i in range(len(path)):\n currentPuzzle = path[i]\n print(currentPuzzle)\n if i < (len(path) - 1):\n nextPuzzle = path[i + 1]\n for row in range(nextPuzzle.size):\n for column in range(nextPuzzle.size):\n if currentPuzzle.array[row][column] == 0:\n rowCurrent = row\n columnCurrent = column\n elif nextPuzzle.array[row][column] == 0:\n rowNext = row\n columnNext = column\n\n if rowCurrent > rowNext:\n action = (\" \"\n + str(currentPuzzle.array[rowNext][columnNext])\n + \" ---> down \\n\")\n print(action)\n elif rowCurrent < rowNext:\n action = (\" \"\n + str(currentPuzzle.array[rowNext][columnNext])\n + \" ---> up \\n\")\n print(action)\n elif columnCurrent > columnNext:\n action = (\" \"\n + str(currentPuzzle.array[rowNext][columnNext])\n + \" ---> right \\n\")\n print(action)\n elif columnCurrent < columnNext:\n action = (\" \"\n + str(currentPuzzle.array[rowNext][columnNext])\n + \" ---> left \\n\")\n print(action)\n print(\"Number of movements made to reach goal state: \" + str(len(path)-1) + \"\\n\")\n print(\"######################################################\\n\")", "def to_puzzle_string(self):\n return \"\".join(str(x) for row in self.board for x in row)", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n if DEBUG_UP:\n print self\n assert False, \"invalid direction: \" + 
direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def solve_2x2(self):\n move=\"\"\n if self.row1_invariant(1):\n if self.get_row_col(1)==(0, 0):\n move +=\"ul\" \n elif self.get_row_col(1)==(1, 0):\n move +=\"uldrul\" \n elif self.get_row_col(1)==(0, 1):\n move +=\"uldruldrul\"\n \n self.update_puzzle(move) \n return move", "def result(self, move):\n row, col = self.blankLocation\n if(move == 'up'):\n newrow = row - 1\n newcol = col\n elif(move == 'down'):\n newrow = row + 1\n newcol = col\n elif(move == 'left'):\n newrow = row\n newcol = col - 1\n elif(move == 'right'):\n newrow = row\n newcol = col + 1\n else:\n raise \"Illegal Move\"\n\n # Create a copy of the current eightPuzzle\n newPuzzle = EightPuzzleState([0, 0, 0, 0, 0, 0, 0, 0, 0])\n newPuzzle.cells = [values[:] for values in self.cells]\n # And update it to reflect the move\n newPuzzle.cells[row][col] = self.cells[newrow][newcol]\n newPuzzle.cells[newrow][newcol] = self.cells[row][col]\n newPuzzle.blankLocation = newrow, newcol\n newPuzzle.parent = self\n newPuzzle.move = move\n return newPuzzle", "def solve(puzzle):\n print(\"Solving...\")\n array_puzzle = np.asarray(puzzle)\n array_puzzle.flags.writeable = False # Turn off writable flags to prevent data being ovewritten accidentally.\n goal_state = __generate_goal(len(array_puzzle[0]), len(array_puzzle))\n\n flat_puzzle = list(chain.from_iterable(puzzle)) # Flatten the list\n\n # If the puzzle doesn't contain 0, exit.\n try:\n flat_puzzle.remove(0) # Remove 0 from the list\n except:\n print(\"All puzzles must include an open tile (0).\")\n return None\n\n inversions = __count_inversions(flat_puzzle) # Count the inversions\n\n # width = len(array_puzzle[0]) # Get the width of the puzzle (columns)\n # length = len(array_puzzle) # Get the length of the puzzle (rows)\n\n oddEven = __odd_or_even(len(array_puzzle[0])) # Determine if the width is odd or even.\n start_position = __find_start(array_puzzle) # Find the start position's row\n solvable = __is_solvable(oddEven, inversions, len(array_puzzle), start_position) # Cleck if the puzzle is solvable.\n\n # If the puzzle is not solvable, return None.\n if(solvable == \"None\"):\n return None\n\n # If we cannot calculate a* (for example the given values are not all in sequential order (1-5) 4 is replaced by 6 (1,2,3,5,6))\n try:\n return __a_star(array_puzzle, goal_state)\n except:\n print(\"Please make sure there are no duplicate or skipped inputs.\")\n return None\n\n # This code was used in testing to print out the string.\n # solved = 
__a_star(array_puzzle, goal_state)\n # Return the moves needed to complete the puzzle.\n # return print(str(__build_string(solved)) + \" (\" + str(len(solved)) + \")\")", "def solve_row1_tile(self, target_col):\r\n moves_str = \"\"\r\n current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n moves_str += self.position_tile(zero_row, zero_col, current_row, current_col)\r\n moves_str += \"ur\"\r\n self.update_puzzle(moves_str)\r\n print \"solve_row1_tile\"\r\n print moves_str\r\n print self._grid\r\n return moves_str", "def solve(self):\n moves = 0\n self.print_state()\n prev_state = self.state\n\n while not self.in_goal_state():\n new_state = self.move(self.state)\n\n moves += 1\n print(\"\\nMove #{}:\\n\".format(moves))\n self.print_state(new_state)\n\n if not self.check_move(prev_state, new_state):\n raise Exception(\"Invalid move executed.\")\n self.state = new_state\n\n time.sleep(self.delay)\n if prev_state == self.state:\n print(\"No state change after move, exiting\")\n return\n prev_state = new_state\n print(\"Puzzle solved, congratulations!\")", "def shuffle_puzzle(solution: str) -> str:\r\n shuffled_solution = solution[:-1]\r\n\r\n # Do more shuffling for bigger puzzles.\r\n swaps = len(solution) * 2\r\n for _ in range(swaps):\r\n # Pick two indices in the puzzle randomly.\r\n index1, index2 = random.sample(range(len(shuffled_solution)), k=2)\r\n shuffled_solution = swap_position(shuffled_solution, index1, index2)\r\n\r\n return shuffled_solution + EMPTY", "def generateSolution(self): \n self.statusbar.showMessage(\"Generating Solution\")\n msg = \"Generating solution for cubestring: \"+self.cube.cubestring\n self.addLogEntry(msg)\n\n timer = QElapsedTimer()\n timer.start()\n solution = \"No Solution\"\n try:\n solution = kociemba.solve(self.cube.cubestring)\n except Exception as err:\n print(err)\n error_dialog = QErrorMessage()\n error_dialog.showMessage(err.args[0])\n error_dialog.exec_()\n solution = err.args[0]\n self.statusbar.showMessage(\"Solution generation failed!\")\n msg = \"Solution could not be calculated: \" + solution\n self.addLogEntry(msg)\n\n self.lineEdit_InOut.setText(solution)\n self.label_CurrentString.setText(\"Solution:\")\n self.statusbar.showMessage(\"Generated Solution\")\n msg = \"Solution calculation took: {} ms\".format(timer.nsecsElapsed()/1000000)\n self.addLogEntry(msg)\n \n # self.timer1ms.stop()\n pass", "def solution_to_string(self):\n solution_vector_index_format = [index+1 if elem == 1 else -index-1 for index, elem in enumerate(self.solution_vector)]\n return \" \".join(map(str, solution_vector_index_format))", "def get_sequence(self, output_maze: bool = False):\n if not self.solved:\n raise EnvironmentError('maze not solved yet, use solve() first')\n x = self.start_x\n y = self.start_y\n output = \"\"\n repr_version = self.maze.copy()\n while True:\n if self.maze[y][x] is EXIT:\n return repr_version if output_maze else output\n elif self.maze[y][x] not in (PATH, START, EXIT, VISITED, SOLUTION):\n raise ValueError('walked into tile ({}, {}) that is impassable'.format(x, y))\n else:\n try:\n if self.maze[y][x + 1] in (SOLUTION, EXIT):\n output += \">\"\n repr_version[y][x] = \">\"\n y, x = y, x + 1\n elif self.maze[y][x - 1] in (SOLUTION, EXIT):\n output += \"<\"\n repr_version[y][x] = \"<\"\n y, x = y, x - 1\n elif self.maze[y + 1][x] in (SOLUTION, EXIT):\n output += \"V\"\n repr_version[y][x] = \"V\"\n y, x = y + 1, x\n elif self.maze[y - 1][x] in (SOLUTION, EXIT):\n output += 
\"^\"\n repr_version[y][x] = \"^\"\n y, x = y - 1, x\n except IndexError:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a reaction and combine the products in a single string. Makes errors readable-ish.
def _reactAndSummarize(rxn_smarts, *smiles): rxn = rdChemReactions.ReactionFromSmarts(rxn_smarts) mols = [Chem.MolFromSmiles(s) for s in smiles] products = [] for prods in rxn.RunReactants(mols): products.append(' + '.join(map(_getProductCXSMILES, prods))) products = ' OR '.join(products) return products
[ "def reaction():\n return gr.Reaction({1, 2}, {3, 4})", "def reaction_str(self):\n\n def format(number):\n return str(number).rstrip(\".0\") + \" \"\n\n reactant_bits = []\n product_bits = []\n for met in sorted(self._metabolites, key=attrgetter(\"id\")):\n coefficient = self._metabolites[met]\n if coefficient >= 0:\n product_bits.append(format(coefficient) + met.id)\n else:\n reactant_bits.append(format(abs(coefficient)) + met.id)\n\n reaction_string = ' + '.join(reactant_bits)\n if self.gapfill_direction == '=':\n reaction_string += ' <=> '\n elif self.gapfill_direction == '<':\n reaction_string += ' <-- '\n elif self.gapfill_direction == '>':\n reaction_string += ' --> '\n reaction_string += ' + '.join(product_bits)\n return reaction_string", "def step2string(rxn):\n spcs = rxn[2]\n reactants = r''\n products = r''\n # define mappings for species keywords to string representation \n adsorbed_str = {True:'*', False:''}\n pe_str = {0.0:'', 1.0:' H+ + e-'}\n for spc in spcs:\n s = spc['formula'] + adsorbed_str[spc['adsorbed']] + pe_str[spc['pe']] + ' + '\n if spc['count'] == -1:\n reactants += s\n elif spc['count'] == 1:\n products += s\n return reactants[:-2] + ' -> ' + products[:-2]", "def generating_products_from_reaction_rules(molefile,cutoff):\n #morphine = Chem.MolFromMolFile(\"/Users/aasimwani/Downloads/MolData/morphine.mol\")\n ruleDF = pd.read_csv(\"retrorules_rr02_rp2_flat_retro.csv\")\n SMILE_rxn_rules = pd.read_csv(\"SMILE_reaction_rules.csv\")\n extracted_reaction_rules = reducing_reaction_rules(SMILE_rxn_rules,cutoff)\n rules = pd.DataFrame(pd.merge(extracted_reaction_rules.reset_index(),ruleDF.reset_index(),how = \"inner\",on = \"index\")[\"Rule\"])\n item = []\n negative_reactions = []\n for i in range(0,rules.shape[0]):\n from rdkit.Chem import rdChemReactions\n rxn = rdChemReactions.ReactionFromSmarts(rules[\"Rule\"][i])\n reacts = Chem.AddHs(Chem.MolFromSmiles(molefile))\n products = rxn.RunReactants((reacts,))\n item.append(products)\n if products == ():\n negative_reactions.append([molefile,rdChemReactions.ReactionFromSmarts(rules['Rule'][i])])\n formatted = list(itertools.chain(*item))\n let = []\n for i in range(0,len(formatted)):\n let.append(Chem.MolToSmiles(formatted[i][0]))\n a = pd.DataFrame(let)\n a.columns = [\"ar1\"]\n only_uniques = a[\"ar1\"].unique()\n return only_uniques, negative_reactions", "async def send_react(self, reactions, *args, **kwargs):\n message = await self.send(*args, **kwargs)\n if isinstance(reactions, str): # Handle two-character emojis\n reactions = (reactions,)\n for reaction in reactions:\n await self.add_reaction(message, reaction)\n return message", "def get_reaction(reaction_type):\n\n if reaction_type == \"neg\":\n speechcon = \"<say-as interpret-as='interjection'>\" \\\n + random.choice(NEG_SPEECHCONS) + \"</say-as>\"\n ans = random.choice(NEG_ANS)\n elif reaction_type == \"pos\":\n speechcon = random.choice(POS_SPEECHCONS)\n ans = random.choice(POS_ANS)\n else:\n raise ValueError\n\n return speechcon + ans", "def genReaction(resource, depletable=0):\n task = resource[3:-1].lower()\n name = resource[3:]\n return \"REACTION \" + name + \" \" + task + \" process:resource=\" + \\\n resource + \":value=\" + str(taskValDict[task]) + \":type=\" \\\n + rxnType + \":frac=\" + str(frac) + \":max=\" + str(resMax) + \\\n \":depletable=\" + str(int(depletable)) + \" requisite:max_count=\" \\\n + str(maxCount) + \"\\n\"", "def clean_reaction(rxn_block):\r\n reaction = rxn_block.split('\\n')\r\n clean_reaction = []\r\n clean_reaction[:5] = 
reaction[:5]\r\n line = reaction[4]\r\n num_reactants = int(line[:3])\r\n num_products = int(line[3:6])\r\n start_line = 5\r\n for n in range(0, num_reactants + num_products):\r\n clean_reaction.append(reaction[start_line]) # $MOL line\r\n # print('>>', reaction[start_line], num_reactants, num_products)\r\n start_line += 1\r\n end_line = find_end_of_molfile(reaction, start_line)\r\n clean_reaction.extend(clean_molecule(reaction[start_line:end_line]))\r\n start_line = end_line\r\n # print(len(clean_reaction)) \r\n return '\\n'.join(clean_reaction)", "def chain(self):\n commodity = self.commodity\n reactions = set()\n reaction_count = 0\n\n for comm in commodity:\n\n n = len(comm)\n repeated = r2_index(comm)\n inloop_r_count = 0\n\n for x in range(0, n - 1):\n\n if self.recombination == Recomb_1:\n\n i = x + 1\n\n if comm[x] != comm[x + 1]:\n reaction_count = reaction_count + 1\n inloop_r_count = inloop_r_count + 1\n\n if inloop_r_count == 1: # inital reaction\n left1 = [comm[x] for i in range(0, n)]\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n continue\n\n else:\n left1 = left2\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n reactions.add(r)\n\n\n elif self.recombination == Recomb_2:\n\n reaction_count = reaction_count + 1\n inloop_r_count = inloop_r_count + 1\n\n if inloop_r_count == 1: # inital reaction\n left1 = [repeated[0][0] for i in range(0, n)]\n right1 = [repeated[1][0] for i in range(0, n)]\n i = repeated[1][1]\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n continue\n\n else:\n if right2 == comm:\n break\n else:\n left1 = right2\n right1 = [repeated[inloop_r_count][0] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n i = repeated[inloop_r_count][1]\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n reactions.add(r)\n\n # all same char in comm\n elif comm == n * comm[0]:\n left1 = [comm[x] for i in range(0, n)]\n right1 = [comm[x + 1] for i in range(0, n)]\n # reaction_n = \"r{}\".format(reaction_count)\n r = self.recombination(reaction_count, join_str(left1), join_str(right1), i)\n left2, right2 = r.product()\n # react_str_.append(reaction_n, str(r))\n reactions.add(r)\n break\n\n # reaction_comm[comm] = reactions\n self.reactions = reactions\n return self.reactions", "def addReaction(\n self, reactants, products, expression, local_params=None, rxn_id=\"\"\n ):\n\n r1 = self.model.createReaction()\n self.check(r1, \"create reaction\")\n if len(rxn_id) == 0:\n rxn_id = \"v\" + str(self.model.getNumReactions())\n self.check(r1.setId(rxn_id), \"set reaction id\")\n self.check(r1.setReversible(False), \"set reaction reversibility flag\")\n self.check(r1.setFast(False), 'set reaction \"fast\" attribute')\n\n for re in reactants:\n if re is not None and \"$\" in re:\n re.translate(None, \"$\")\n re_split = re.split()\n if len(re_split) == 1:\n sto = 1.0\n re_id = re\n elif len(re_split) == 2 and re_split[0].isdigit():\n sto = float(re_split[0])\n re_id = re_split[1]\n else:\n err_msg = (\n \"Error: reactants must 
be listed in format 'S' or '(float)' S'\"\n )\n raise SystemExit(err_msg)\n s1 = self.model.getSpecies(re_id)\n species_ref1 = r1.createReactant()\n self.check(species_ref1, \"create reactant\")\n self.check(species_ref1.setSpecies(s1.getId()), \"assign reactant species\")\n self.check(\n species_ref1.setStoichiometry(sto), \"assign reactant stoichiometry\"\n )\n if self.document.getLevel() == 3:\n self.check(\n species_ref1.setConstant(True), 'set \"constant\" on species ref 1'\n )\n\n for pro in products:\n if pro is not None and \"$\" in pro:\n pro.translate(None, \"$\")\n pro_split = pro.split()\n if len(pro_split) == 1:\n sto = 1.0\n pro_id = pro\n elif len(pro_split) == 2:\n sto = float(pro_split[0])\n pro_id = pro_split[1]\n else:\n err_msg = \"Error: products must be listed in format 'S' or '(float)' S'\"\n raise SystemExit(err_msg)\n s2 = self.model.getSpecies(pro_id)\n species_ref2 = r1.createProduct()\n self.check(species_ref2, \"create product\")\n self.check(species_ref2.setSpecies(s2.getId()), \"assign product species\")\n self.check(species_ref2.setStoichiometry(sto), \"set product stoichiometry\")\n if self.document.getLevel() == 3:\n self.check(\n species_ref2.setConstant(True), 'set \"constant\" on species ref 2'\n )\n\n math_ast = libsbml.parseL3Formula(expression)\n self.check(math_ast, \"create AST for rate expression\")\n\n kinetic_law = r1.createKineticLaw()\n self.check(kinetic_law, \"create kinetic law\")\n self.check(kinetic_law.setMath(math_ast), \"set math on kinetic law\")\n if local_params is not None:\n for param in local_params.keys():\n val = local_params.get(param)\n if self.document.getLevel() == 3:\n p = kinetic_law.createLocalParameter()\n else:\n p = kinetic_law.createParameter()\n self.check(p, \"create local parameter\")\n self.check(p.setId(param), \"set id of local parameter\")\n self.check(p.setValue(val), \"set value of local parameter\")\n return r1", "def _add_reaction(self, reactants, rule_name, stereo_prods):\n # Hash reaction text\n rhash = rxn2hash(reactants, stereo_prods)\n # Generate unique hash from InChI keys of reactants and products\n inchi_rxn_hash, text_rxn = \\\n self._calculate_rxn_hash_and_text(reactants, stereo_prods)\n # Add reaction to reactions dictionary if not already there\n if rhash not in self.reactions:\n self.reactions[rhash] = {\"_id\": rhash,\n \"Reactants\": reactants,\n \"Products\": stereo_prods,\n \"InChI_hash\": inchi_rxn_hash,\n \"Operators\": {rule_name},\n \"Reaction_rules\": {rule_name},\n \"SMILES_rxn\": text_rxn,\n \"Generation\": self.generation}\n # Otherwise, update the operators and rules\n else:\n self.reactions[rhash]['Operators'].add(rule_name)\n self.reactions[rhash]['Reaction_rules'].add(rule_name)\n return text_rxn", "def _GetReactionSideString(side):\n sdata = []\n for c_w_coeff in side:\n if c_w_coeff.coeff == 1:\n sdata.append(c_w_coeff.GetName())\n else:\n sdata.append('%d %s' % (c_w_coeff.coeff,\n c_w_coeff.GetName()))\n return ' + '.join(sdata)", "def __str__(self):\n s = \"\"\n for e in self._sub_effects:\n s += str(e) + \" ^ \"\n return s[0:-3] if len(self._sub_effects) > 0 else \"Void\"", "def recipe(self,e):\r\n #e = list, effects\r\n d = self.data.create_list()\r\n print \"ALL INGREDIENTS WITH \" + str(e) + \" EFFECT(S):\\n\"\r\n for ingredient, effect in d.iteritems():\r\n for effects in e:\r\n if effects not in effect:\r\n break\r\n else:\r\n print ingredient\r\n return \"\\nEND OF INGREDIENTS WITH \" + str(e) + \" EFFECT(S)\"", "def test_react(self):\n procnum = 1\n\n spc_a = 
Species().from_smiles('[OH]')\n spcs = [Species().from_smiles('CC'), Species().from_smiles('[CH3]')]\n spc_tuples = [((spc_a, spc), ['H_Abstraction']) for spc in spcs]\n\n reaction_list = list(itertools.chain.from_iterable(react(spc_tuples, procnum)))\n self.assertIsNotNone(reaction_list)\n self.assertEqual(len(reaction_list), 3)\n self.assertTrue(all([isinstance(rxn, TemplateReaction) for rxn in reaction_list]))", "def genReactionAntString(self, revTag = \"RevRe__\",\n iRevTag = \"IrRevRe__\"):\n \n lines = self.antString.splitlines()\n lines = [line.split(\"#\")[0] for line in lines]\n rLines = [line.split(\":\") for line in lines if\n len(line.split(\":\"))==2]\n rLines = [[line[0]]+line[1].split(\";\") for line in rLines\n if len(line[1].split(\";\"))>=2]\n rLines = [[part.strip() for part in line] for line in rLines]\n rLines = [line for line in rLines if (\"->\" in line[1]) or\n (\"=>\" in line[1])]\n rLines = [[line[0], \"->\" in line[1], line[2]] for line in rLines]\n rLines = [[revTag+line[0], line[1], line[2]] if line[1] else\n [iRevTag+line[0], line[1], line[2]] for line in rLines]\n rLines = [line[0]+\" := \"+line[2]+\";\" for line in rLines]\n primed = False\n for i, line in zip(range(len(lines)),lines):\n if line.strip().startswith(\"model\"):\n primed = True\n if (line.strip() == \"end\") and primed:\n break\n print(\"line \"+str(i))\n indent = \"\"\n while indent == \"\" and i>0:\n i = i-1\n indent = re.search(r'^\\s*', lines[i]).group()\n rLines = [indent+line for line in rLines]\n self.reactionAntString = \"\\n\".join(lines[:i+1]+rLines+lines[i+1:])", "def format(self):\n def format_name(behaviour):\n \"\"\" format a name within the reaction as a string.\"\"\"\n if behaviour.stoichiometry == 1:\n species = behaviour.species\n else:\n species = (\"(\" + behaviour.species + \",\" +\n str(behaviour.stoichiometry) + \")\")\n if behaviour.role == \"(+)\":\n prefix = \"+\"\n elif behaviour.role == \"(-)\":\n prefix = \"-\"\n elif behaviour.role == \"(.)\":\n prefix = \".\"\n else:\n prefix = \"\"\n return prefix + species\n pre_arrows = itertools.chain(self.reactants, self.activators,\n self.inhibitors, self.modifiers)\n pre_arrow = \", \".join(format_name(b) for b in pre_arrows)\n post_arrow = \", \".join(format_name(b) for b in self.products)\n\n return \" \".join([self.name + \":\", pre_arrow, \"-->\", post_arrow])", "def generate_excuse() -> str:", "def tosmiles(products, reactants):\n \"\"\" pruducts, reactatnts are lists of tuples of regno,molfile \"\"\"\n prods = list()\n reacts = list()\n smiles = ''\n\n for regno,r in reactants:\n reactant_smiles = (regno, createSmiles(r)) \n\n # skip reaction if a component had an error\n if reactant_smiles[1] is None:\n return None\n\n reacts.append(reactant_smiles)\n if reactant_smiles[1] != '':\n smiles += reactant_smiles[1] + '.'\n\n smiles = smiles[:-1]\n smiles += '>>'\n\n for regno, p in products:\n product_smiles = (regno, createSmiles(p)) \n\n # skip reaction if a component had an error\n if product_smiles[1] is None:\n return None\n \n prods.append(product_smiles)\n if product_smiles[1] != '':\n smiles += product_smiles[1] + '.'\n\n smiles = smiles[:-1]\n\n return reacts, prods, smiles" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StereoGroup atoms are in the reaction, and the reaction destroys the specified chirality at the stereo centers -> invalidate stereo center, preserve the rest of the stereo group.
def test_reaction_destroys_stereo(self): reaction = '[C@:1]>>[C:1]' products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|') self.assertEqual(products, 'FC(Cl)Br') products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|') self.assertEqual(products, 'FC(Cl)Br') products = _reactAndSummarize(reaction, 'FC(Cl)Br') self.assertEqual(products, 'FC(Cl)Br') reaction = '[C@:1]F>>[C:1]F' # Reaction destroys stereo (but preserves unaffected group products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|') self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|') # Reaction destroys stereo (but preserves the rest of the group products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|') self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')
[ "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def cull_irrelevant_reactions(self):\n\n ## define a function f, that maps log10(rxn_cs) to chances of being removed\n ## it is a linear piecemeal function, below `a` it is guaranteed to be\n ## removed and above `b` is is guaranteed to remain. 
Inbetween values\n ## are interpreted linearly.\n \n # remove_below\n a = -12\n #keep_above\n b = -5\n\n m = -1.0 / (b-a)\n k = 1.0 - (m*a)\n\n def f(c):\n if c <= a:\n return 1.0\n if c >= b:\n return 0.0\n return m*c+k\n\n ## the concentration of all of the molecules\n ## relevant to each reaction in self.reactions\n for rxn in self.reactions:\n rxn_c = np.log10(sum([self.global_concentration(m)\n for m in rxn.all_molecules()]))\n p_remove = f(rxn_c)\n print(f'{rxn_c} removal chance: {p_remove}')\n if np.random.rand() < p_remove:\n print('REMOVED')\n self.reactions.remove(rxn)\n \n #print(rxn_c)\n # score = 1./(1E-15+c_of_all_mols)\n # score = score/sum(score)\n # print(score)\n #quit()\n # print(f'%{p} for total conc of: {total_c}')\n # TODO DECIDE ODDS OF REMOVING REACTION\n # if np.random.rand() < p :\n # print(f'Removing {rxn}')\n # self.reactions.remove(rxn)\n print(f'#m: {len(self.molecules)} \\t #r:{len(self.reactions)}')", "def epimerize(self, center_atom, substituent1, substituent2):\n\n self._check_atom_number(center_atom)\n self._check_atom_number(substituent1)\n self._check_atom_number(substituent2)\n\n assert self.bonds.number_of_edges() > 0, \"need a bond graph to perform this operation -- try calling self.assign_connectivity()!\"\n\n adj = self.get_adjacent_atoms(center_atom)\n assert len(adj) == 4, \"center atom must be making 4 bonds!\"\n assert substituent1 in adj, \"1st substituent is not bonded to center atom!\"\n assert substituent2 in adj, \"2nd substituent is not bonded to center atom!\"\n\n #### remove both substituents\n mol, group1, mmap1, gmap1 = cctk.Group.remove_group_from_molecule(self, center_atom, substituent1, return_mapping=True)\n mol, group2, mmap2, gmap2 = cctk.Group.remove_group_from_molecule(mol, mmap1[center_atom], mmap1[substituent2], return_mapping=True)\n\n h1 = mol.num_atoms() - 1\n h2 = mol.num_atoms()\n\n #### add them back in the opposite fashion\n mol, mmap3, gmap3 = cctk.Group.add_group_to_molecule(mol, group2, h1, return_mapping=True)\n mol = cctk.Group.add_group_to_molecule(mol, group1, mmap3[h2])\n\n #### relabel new graph to match original molecule\n which = top.get_stereogenic_centers(self)\n which.remove(center_atom)\n return mol.renumber_to_match(self, check_chirality=which)", "def collision_group(group1, group2, del_group1, del_group2):\n return pygame.sprite.groupcollide(group1, group2, del_group1, del_group2)", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = 
cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def flip_atoms_in_ncs_groups(hierarchy, ncs_restraints_group_list, mon_lib_srv=None):\n if mon_lib_srv is None:\n mon_lib_srv = mmtbx.monomer_library.server.server()\n for ncs_gr in ncs_restraints_group_list:\n master_isel = ncs_gr.master_iselection\n chains_master = hierarchy.select(master_isel).only_model().chains()\n for copy in ncs_gr.copies:\n copy_isel = copy.iselection\n chains_copy = hierarchy.select(copy_isel).only_model().chains()\n for ch_m, ch_c in zip(chains_master, chains_copy):\n for r_m, r_c in zip(ch_m.residues(), ch_c.residues()):\n # print \"working on \", r_m.id_str(), r_c.id_str()\n if should_be_flipped(r_m, r_c):\n flip_residue(r_c, mon_lib_srv)", "def remove_reaction(self, name):\n idx = self.dic_enzs.get(name)\n if idx is None:\n raise ValueError(\"Reaction not in model: \" + name)\n\n self.obj = list(filter(lambda x: x[0] != name, self.obj))\n self.design_obj = list(filter(lambda x: x[0] != name, self.obj))\n\n reac = self._sbml.getReaction(name)\n reac.removeFromParentAndDelete()\n\n self.calcs()", "def _kill_group(self, x, y):\n if self[x, y] not in self.TURNS:\n raise BoardError('Can only kill black or white group')\n\n group = self.get_group(x, y)\n score = len(group)\n\n for x1, y1 in group:\n self[x1, y1] = self.EMPTY\n\n return score", "def test_missing_electrode_group(self):\n groups = self.single_segment_we.recording.get_channel_groups()\n self.single_segment_we.recording.delete_property(\"group\")\n write_waveforms(\n waveform_extractor=self.single_segment_we,\n nwbfile=self.nwbfile,\n )\n self.single_segment_we.recording.set_channel_groups(groups)", "def onStopCollide(self, node):\n if node == self.modelGeom.node():\n self.hasContact = False\n self.ignore(\"sensorAction\" + self.sensorID)", "def DeMorgan_equivalence(self, position_list=[]):\n\t\treturn self.__class__(_replace_match_at(self, position_list, [\n\t\t\t[ ((neg, A), disj, (neg, B)), (neg, (A, conj, B)) ],\n\t\t\t[ (neg, (A, conj, B)), ((neg, A), disj, (neg, B)) ],\n\t\t\t[ ((neg, A), conj, (neg, B)), (neg, (A, disj, B)) ],\n\t\t\t[ (neg, (A, disj, B)), ((neg, A), conj, (neg, B)) ]\n\t\t]))", "def test_remove_overlapping():\n e = Emulsion([SphericalDroplet([0, 1], 2), SphericalDroplet([1, 1], 2)])\n assert len(e) == 2\n e.remove_overlapping()\n assert len(e) == 1", "def do_mc_mgrp_destroy(self, line):\n words = collections.deque(line.split())\n try:\n mgrp_hdl = self.get_handle(words, \"MGRP_HDL\")\n self._thrift_client.mc_mgrp_destroy(mgrp_hdl)\n print \"Multicast group has been destroyed.\"\n except NameError as ne:\n print >> sys.stderr, ne\n except thrift.protocol.TProtocol.TProtocolException as e:\n print >> sys.stderr, e\n except Exception as e:\n self.usage(e, \"mc_mgrp_destroy\")", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if 
node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def clear_surface_groups(self):\n self.surface_groups = {}", "def collide(self, world, fog_sprites, light_r):\n self.collision = False\n # for o in world:\n # if self.rect.colliderect(o) and o != self and o.name != 'DoorOpen':\n # self.collision = True\n\n if not self.collision:\n for o in fog_sprites:\n if self.rect.colliderect(o):\n fog_sprites.remove(o)\n o.kill()\n del o", "def remove_as_subgroup(self, other_groups):\r\n symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),\r\n self.cells, set())\r\n my_cells = set(self.cells)\r\n\r\n for group in other_groups:\r\n if my_cells.issubset(group.cells) and self is not group:\r\n # Remove my cells from the other group\r\n for cell in self.cells:\r\n cell.remove_group(group)\r\n group.cells.remove(cell)\r\n\r\n # Update the alphabets in the other group\r\n for cell in group.cells:\r\n cell.remove_possible_symbols(symbols_to_exclude)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
StereoGroup atoms are in the reaction, and the reaction creates the specified chirality at the stereo centers -> remove the stereo center from the StereoGroup -> invalidate stereo group
def test_reaction_defines_stereo(self): products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|') self.assertEqual(products, 'F[C@@H](Cl)Br') products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|') self.assertEqual(products, 'F[C@@H](Cl)Br') products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br') self.assertEqual(products, 'F[C@@H](Cl)Br') # Remove group with defined stereo products = _reactAndSummarize('[C:1]F>>[C@@:1]F', 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|') self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|') # Remove atoms with defined stereo from group products = _reactAndSummarize('[C:1]F>>[C@@:1]F', 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|') self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')
[ "def test_reaction_destroys_stereo(self):\n reaction = '[C@:1]>>[C:1]'\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'FC(Cl)Br')\n products = _reactAndSummarize(reaction, 'FC(Cl)Br')\n self.assertEqual(products, 'FC(Cl)Br')\n\n reaction = '[C@:1]F>>[C:1]F'\n # Reaction destroys stereo (but preserves unaffected group\n products = _reactAndSummarize(reaction,\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')\n # Reaction destroys stereo (but preserves the rest of the group\n products = _reactAndSummarize(reaction, 'F[C@H](Cl)[C@@H](Cl)Br |&1:1,3|')\n self.assertEqual(products, 'FC(Cl)[C@@H](Cl)Br |&1:3|')", "def test_reaction_copies_stereogroup(self):\n # Stereogroup atoms are in the reaction with multiple copies in the product\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )\n\n # Stereogroup atoms are not in the reaction, but have multiple copies in the\n # product.\n products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]',\n 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|',\n 'CC(=O)C')\n # stereogroup manually checked, product SMILES assumed correct.\n self.assertEqual(\n products,\n 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|'\n )", "def epimerize(self, center_atom, substituent1, substituent2):\n\n self._check_atom_number(center_atom)\n self._check_atom_number(substituent1)\n self._check_atom_number(substituent2)\n\n assert self.bonds.number_of_edges() > 0, \"need a bond graph to perform this operation -- try calling self.assign_connectivity()!\"\n\n adj = self.get_adjacent_atoms(center_atom)\n assert len(adj) == 4, \"center atom must be making 4 bonds!\"\n assert substituent1 in adj, \"1st substituent is not bonded to center atom!\"\n assert substituent2 in adj, \"2nd substituent is not bonded to center atom!\"\n\n #### remove both substituents\n mol, group1, mmap1, gmap1 = cctk.Group.remove_group_from_molecule(self, center_atom, substituent1, return_mapping=True)\n mol, group2, mmap2, gmap2 = cctk.Group.remove_group_from_molecule(mol, mmap1[center_atom], mmap1[substituent2], return_mapping=True)\n\n h1 = mol.num_atoms() - 1\n h2 = mol.num_atoms()\n\n #### add them back in the opposite fashion\n mol, mmap3, gmap3 = cctk.Group.add_group_to_molecule(mol, group2, h1, return_mapping=True)\n mol = cctk.Group.add_group_to_molecule(mol, group1, mmap3[h2])\n\n #### relabel new graph to match original molecule\n which = top.get_stereogenic_centers(self)\n which.remove(center_atom)\n return mol.renumber_to_match(self, check_chirality=which)", "def cleanUpRigPose(self):\n\n # show the proxy geo\n cmds.select(self.name + \"_mover_grp\", hi=True)\n allNodes = cmds.ls(sl=True)\n for node in allNodes:\n if node.find(\"_proxy_geo\") != -1:\n if cmds.nodeType(node) == \"mesh\":\n parent = cmds.listRelatives(node, parent=True)[0]\n cmds.lockNode(parent, lock=False)\n cmds.setAttr(parent + \".v\", lock=False)\n cmds.setAttr(parent + \".v\", 1)\n cmds.lockNode(parent, lock=True)\n\n # unlock mover group for this module and make invisible\n cmds.lockNode(self.name + \"_mover_grp\", lock=False)\n cmds.setAttr(self.name + 
\"_mover_grp.v\", lock=False)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", 0)\n\n cmds.setAttr(self.name + \"_mover_grp.v\", lock=True)\n cmds.lockNode(self.name + \"_mover_grp\", lock=True)\n\n # get the joints created by this module and remove the constraints\n joints = self.returnCreatedJoints\n\n # create mover name\n networkNode = self.returnNetworkNode\n baseName = cmds.getAttr(networkNode + \".baseName\")\n\n for joint in joints:\n if cmds.objExists(joint + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)\n\n else:\n jointBaseName = joint\n if self.name != baseName:\n nameData = self.name.split(baseName)\n\n if nameData[0] != \"\":\n jointBaseName = jointBaseName.partition(nameData[0])[2]\n if nameData[1] != \"\":\n jointBaseName = jointBaseName.partition(nameData[1])[0]\n\n if cmds.objExists(self.name + \"_\" + jointBaseName + \"_mover_offset\"):\n cmds.select(joint)\n cmds.delete(constraints=True)", "def cull_irrelevant_reactions(self):\n\n ## define a function f, that maps log10(rxn_cs) to chances of being removed\n ## it is a linear piecemeal function, below `a` it is guaranteed to be\n ## removed and above `b` is is guaranteed to remain. Inbetween values\n ## are interpreted linearly.\n \n # remove_below\n a = -12\n #keep_above\n b = -5\n\n m = -1.0 / (b-a)\n k = 1.0 - (m*a)\n\n def f(c):\n if c <= a:\n return 1.0\n if c >= b:\n return 0.0\n return m*c+k\n\n ## the concentration of all of the molecules\n ## relevant to each reaction in self.reactions\n for rxn in self.reactions:\n rxn_c = np.log10(sum([self.global_concentration(m)\n for m in rxn.all_molecules()]))\n p_remove = f(rxn_c)\n print(f'{rxn_c} removal chance: {p_remove}')\n if np.random.rand() < p_remove:\n print('REMOVED')\n self.reactions.remove(rxn)\n \n #print(rxn_c)\n # score = 1./(1E-15+c_of_all_mols)\n # score = score/sum(score)\n # print(score)\n #quit()\n # print(f'%{p} for total conc of: {total_c}')\n # TODO DECIDE ODDS OF REMOVING REACTION\n # if np.random.rand() < p :\n # print(f'Removing {rxn}')\n # self.reactions.remove(rxn)\n print(f'#m: {len(self.molecules)} \\t #r:{len(self.reactions)}')", "def doped_surface(slabGratoms,dopantString,dopantType='add',ini_surf_atoms=None):\n fin_surf_atoms_list = []\n dopant = molecule(dopantString)[0]\n\n if ini_surf_atoms is not None:\n slabGratoms.set_surface_atoms(ini_surf_atoms)\n\n surf_atoms = slabGratoms.get_surface_atoms().tolist()\n\n builder = Builder(slabGratoms)\n if dopantType=='add':\n overlappedList = []\n offsetList = []\n fin_slabs = builder.add_adsorbate(dopant, bonds=[0], index=-1)\n for i, fin_slab in enumerate(fin_slabs):\n overlapped = overlappedAdatoms(fin_slab) # Check for ovelapped adatoms\n offset = offsetAdatoms(fin_slab) # Check for off surface adatoms\n if overlapped:\n overlappedList.append(i)\n if offset:\n offsetList.append(i)\n new_surf_atoms = copy(surf_atoms)\n new_surf_atoms.append(new_surf_atoms[-1]+1) # Add dopant to list of surface atoms\n fin_slabs[i].set_surface_atoms(new_surf_atoms)\n\n tagList = fin_slab.get_tags()\n tagList[-1]=0 # Set tag of dopant to 0\n fin_slabs[i].set_tags(tagList)\n\n delList = overlappedList + offsetList\n print('Surfaces with overlapped dopants: ')\n print(overlappedList)\n print('Surface with offset atom(s):')\n print(offsetList)\n delList = list(set(delList))\n delList.sort()\n if delList:\n for i in reversed(delList):\n del(fin_slabs[i]) #Remove sturctures with overlapped dopants\n elif dopantType=='replace':\n fin_slabs=[]\n for i in surf_atoms:\n tempGratoms = 
deepcopy(slabGratoms)\n if slabGratoms[i].symbol != dopantString:\n tempGratoms[i].symbol = dopantString\n fin_slabs.append(tempGratoms)\n else:\n sys.exit('Dopant type has to be either \\'add\\' or \\'replace\\'. Please choose one!')\n return(fin_slabs)", "def removeFgrpcc(self, base):\n\n self.gripcontactpairs_precc = []\n self.gripcontactpairnormals_precc = []\n self.gripcontactpairfacets_precc = []\n\n plotoffsetfp = 6\n\n self.counter = 0\n\n while self.counter < self.facetpairs.shape[0]:\n # print str(self.counter) + \"/\" + str(self.facetpairs.shape[0]-1)\n # print self.gripcontactpairs\n self.gripcontactpairs_precc.append([])\n self.gripcontactpairnormals_precc.append([])\n self.gripcontactpairfacets_precc.append([])\n\n facetpair = self.facetpairs[self.counter]\n facetidx0 = facetpair[0]\n facetidx1 = facetpair[1]\n\n for j, contactpair in enumerate(self.gripcontactpairs[self.counter]):\n cctpnt0 = contactpair[0] + plotoffsetfp * self.facetnormals[facetidx0]\n cctpnt1 = contactpair[1] + plotoffsetfp * self.facetnormals[facetidx1]\n cctnormal0 = self.facetnormals[facetidx0]\n cctnormal1 = [-cctnormal0[0], -cctnormal0[1], -cctnormal0[2]]\n handfgrpcc0 = NodePath(\"handfgrpcc0\")\n self.handfgrpcc_uninstanced.instanceTo(handfgrpcc0)\n handfgrpcc0.setPos(cctpnt0[0], cctpnt0[1], cctpnt0[2])\n handfgrpcc0.lookAt(cctpnt0[0] + cctnormal0[0], cctpnt0[1] + cctnormal0[1],\n cctpnt0[2] + cctnormal0[2])\n handfgrpcc1 = NodePath(\"handfgrpcc1\")\n self.handfgrpcc_uninstanced.instanceTo(handfgrpcc1)\n handfgrpcc1.setPos(cctpnt1[0], cctpnt1[1], cctpnt1[2])\n handfgrpcc1.lookAt(cctpnt1[0] + cctnormal1[0], cctpnt1[1] + cctnormal1[1],\n cctpnt1[2] + cctnormal1[2])\n handfgrpcc = NodePath(\"handfgrpcc\")\n handfgrpcc0.reparentTo(handfgrpcc)\n handfgrpcc1.reparentTo(handfgrpcc)\n # prepare the model for collision detection\n facetmeshbullnode = cd.genCollisionMeshMultiNp(handfgrpcc)\n result = self.bulletworld.contactTest(facetmeshbullnode)\n\n if not result.getNumContacts():\n self.gripcontactpairs_precc[-1].append(contactpair)\n self.gripcontactpairnormals_precc[-1].append(self.gripcontactpairnormals[self.counter][j])\n self.gripcontactpairfacets_precc[-1].append(self.gripcontactpairfacets[self.counter])\n self.counter += 1\n self.counter=0", "def alphact_conformational_change(frame, alphact_stage_one_sliced, alphact_stage_two_sliced):\n INSULIN_RECEPTOR = pdb.PDBMolecule(PATH_PDB, center=False)\n INSULIN_RECEPTOR.move_to([0,0,0])\n frame_start = 270 #the first frame when this function is called upon\n \n for num in range(10014, 10115):\n if num < (frame - frame_start) * round(101/60) + 10014:\n if num not in alphact_stage_two_sliced:\n #remove a portion of the atoms that are not in second stage based on the current frame\n alphact_stage_one_sliced.remove(num)\n for num in range(10171, 10211):\n if num < (frame - frame_start) * round(40/60) + 10171:\n #add a portion of atoms that are not in the first stage based on the current frame\n alphact_stage_one_sliced.append(num)\n\n alphact_stage_one_sliced_mol = INSULIN_RECEPTOR.divide(alphact_stage_one_sliced, 'alphact_one')\n rotation = (frame - frame_start - 60) * -0.01 #set a number for the amount the molecule should rotate based on the frame\n alphact_stage_one_sliced_mol.rotate([0,0,1], rotation) #rotate the molecule on the z axis with the rotation variable\n \n insulin_alpha = INSULIN_RECEPTOR.divide(ATOM_POS[\"N\"], \"alphact_two\")\n insulin_alpha.move_offset([0,30,0])\n \n return alphact_stage_one_sliced_mol, insulin_alpha", "def 
do_mc_mgrp_destroy(self, line):\n words = collections.deque(line.split())\n try:\n mgrp_hdl = self.get_handle(words, \"MGRP_HDL\")\n self._thrift_client.mc_mgrp_destroy(mgrp_hdl)\n print \"Multicast group has been destroyed.\"\n except NameError as ne:\n print >> sys.stderr, ne\n except thrift.protocol.TProtocol.TProtocolException as e:\n print >> sys.stderr, e\n except Exception as e:\n self.usage(e, \"mc_mgrp_destroy\")", "def test_missing_electrode_group(self):\n groups = self.single_segment_we.recording.get_channel_groups()\n self.single_segment_we.recording.delete_property(\"group\")\n write_waveforms(\n waveform_extractor=self.single_segment_we,\n nwbfile=self.nwbfile,\n )\n self.single_segment_we.recording.set_channel_groups(groups)", "def test_remove_overlapping():\n e = Emulsion([SphericalDroplet([0, 1], 2), SphericalDroplet([1, 1], 2)])\n assert len(e) == 2\n e.remove_overlapping()\n assert len(e) == 1", "def flip_atoms_in_ncs_groups(hierarchy, ncs_restraints_group_list, mon_lib_srv=None):\n if mon_lib_srv is None:\n mon_lib_srv = mmtbx.monomer_library.server.server()\n for ncs_gr in ncs_restraints_group_list:\n master_isel = ncs_gr.master_iselection\n chains_master = hierarchy.select(master_isel).only_model().chains()\n for copy in ncs_gr.copies:\n copy_isel = copy.iselection\n chains_copy = hierarchy.select(copy_isel).only_model().chains()\n for ch_m, ch_c in zip(chains_master, chains_copy):\n for r_m, r_c in zip(ch_m.residues(), ch_c.residues()):\n # print \"working on \", r_m.id_str(), r_c.id_str()\n if should_be_flipped(r_m, r_c):\n flip_residue(r_c, mon_lib_srv)", "def clear_surface_groups(self):\n self.surface_groups = {}", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def test_check_for_existing_reaction_removes_duplicates_in_opposite_directions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n s1 = Species().from_smiles(\"[H]\")\n s2 = Species().from_smiles(\"CC\")\n s3 = Species().from_smiles(\"[H][H]\")\n s4 = Species().from_smiles(\"C[CH2]\")\n s1.label = 'H'\n s2.label = 'CC'\n s3.label = 'HH'\n s4.label = 'C[CH2]'\n\n rxn_f = TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction')\n )\n\n rxn_r = TemplateReaction(reactants=[s3, s4],\n products=[s1, s2],\n template=['H2', 'C_rad/H2/Cs/H3'],\n degeneracy=2,\n family='H_Abstraction',\n reverse=TemplateReaction(reactants=[s1, s2],\n products=[s3, s4],\n template=['C/H3/Cs/H3', 'H_rad'],\n degeneracy=6,\n family='H_Abstraction')\n )\n\n rxn_f.reactants.sort()\n rxn_f.products.sort()\n\n cerm.add_reaction_to_core(rxn_f)\n cerm.register_reaction(rxn_f)\n\n reactions = cerm.search_retrieve_reactions(rxn_r)\n self.assertEqual(1, len(reactions), 'cerm.search_retrieve_reactions could not identify reverse reaction')\n\n found, rxn = cerm.check_for_existing_reaction(rxn_r)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction in the reverse direction')\n self.assertEqual(rxn, rxn_f)", "def destroy_oxygen(self):\n\n self.oxygen.destroy_oxygen()\n\n # Shake camera if animations are enabled\n if self.anim:\n 
self.shake_amp = 50", "def test_parameterize_mol_missing_stereo_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n\n molecule = Molecule.from_smiles(\"CC1CCC(=O)O1\", allow_undefined_stereo=True)\n topology = Topology.from_molecules([molecule])\n\n force_field.create_openmm_system(\n topology,\n toolkit_registry=toolkit_registry,\n )", "def remove_reactions(reaction_id_list, model_file_in, model_file_out = None):", "def concerted_unimolecular_elimination(rct_zmas, prd_zmas):\n\n # Initialize info for the returns\n ret = None, None, None, None, None\n finish_build = True\n\n # Attempt to build appropriate z-matrix\n prd_zmas, prd_gras = shifted_standard_zmas_graphs(\n prd_zmas, remove_stereo=True)\n if len(rct_zmas) == 1:\n count = 1\n while True:\n rct_zmas, rct_gras = shifted_standard_zmas_graphs(\n rct_zmas, remove_stereo=True)\n init_zma, = rct_zmas\n\n tras, _, _ = automol.graph.reac.elimination(rct_gras, prd_gras)\n if tras is not None:\n if len(tras[0]) == 1:\n tras = [tras]\n min_dist = 100.\n frm_bnd_key = None\n for tra_i in tras:\n # Get the bond formation and breaking keys\n bnd_key, = automol.graph.trans.formed_bond_keys(tra_i)\n geo = automol.zmatrix.geometry(rct_zmas[0])\n dist = automol.geom.distance(geo, *list(bnd_key))\n if dist < min_dist:\n min_dist = dist\n frm_bnd_key = bnd_key\n tra = tra_i\n brk_keys = automol.graph.trans.broken_bond_keys(tra)\n brk_bnd_key1, brk_bnd_key2 = brk_keys\n init_zma, = rct_zmas\n\n\n # Get index for migrating atom (or bond-form atom in group)\n for bnd_key in (brk_bnd_key1, brk_bnd_key2):\n if bnd_key & frm_bnd_key:\n mig_key = next(iter(bnd_key & frm_bnd_key))\n for key in frm_bnd_key:\n if key != mig_key:\n a1_idx = key\n\n # Get chain for redefining the rc1_atm1_key z-matrix entries\n _, gras = shifted_standard_zmas_graphs(\n [init_zma], remove_stereo=True)\n gra = functools.reduce(automol.graph.union, gras)\n xgr1, = automol.graph.connected_components(gra)\n atm1_neighbors = _atom_neighbor_keys(xgr1)[a1_idx]\n for idx in atm1_neighbors:\n num_keys = len(_atom_neighbor_keys(xgr1)[idx])\n if idx != mig_key and num_keys > 1:\n a2_idx = idx\n atm2_neighbors = _atom_neighbor_keys(xgr1)[a2_idx]\n for idx in atm2_neighbors:\n if idx not in (mig_key, a1_idx):\n a3_idx = idx\n\n mig_redef_keys = (a1_idx, a2_idx, a3_idx)\n\n # determine if the zmatrix needs to be rebuilt by x2z\n # determines if the hydrogen atom is used to define other atoms\n rebuild = False\n if any(idx > mig_key for idx in mig_redef_keys):\n rebuild = True\n\n # rebuild zmat and go through while loop again if needed\n # shift order of cartesian coords & rerun x2z to get a new zmat\n # else go to next stage\n if rebuild:\n reord_zma = reorder_zmatrix_for_migration(\n init_zma, a1_idx, mig_key)\n rct_zmas = [reord_zma]\n count += 1\n if count == 3:\n finish_build = False\n break\n else:\n rct_zma = init_zma\n finish_build = True\n break\n else:\n finish_build = False\n\n # If z-mat with good order found, finish building it\n if finish_build:\n\n # determine the new coordinates\n rct_geo = automol.zmatrix.geometry(rct_zma)\n distance = automol.geom.distance(\n rct_geo, mig_key, a1_idx)\n angle = automol.geom.central_angle(\n rct_geo, mig_key, a1_idx, a2_idx)\n dihedral = automol.geom.dihedral_angle(\n rct_geo, mig_key, a1_idx, a2_idx, a3_idx)\n # Reset the keys for the migrating H atom\n new_idxs = (a1_idx, a2_idx, a3_idx)\n key_dct = {mig_key: new_idxs}\n ts_zma = 
automol.zmatrix.set_keys(rct_zma, key_dct)\n\n # Reset the values in the value dict\n mig_names = automol.zmatrix.name_matrix(ts_zma)[mig_key]\n ts_zma = automol.zmatrix.set_values(\n ts_zma, {mig_names[0]: distance,\n mig_names[1]: angle,\n mig_names[2]: dihedral}\n )\n\n # standardize the ts zmat and get tors and dist coords\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n dist_coo_key = tuple(reversed(sorted(frm_bnd_key)))\n dist_name = next(coo_name for coo_name, coo_keys in coo_dct.items()\n if dist_coo_key in coo_keys)\n ts_name_dct = automol.zmatrix.standard_names(ts_zma)\n dist_name = ts_name_dct[dist_name]\n ts_zma = automol.zmatrix.standard_form(ts_zma)\n\n # Get the name of the coordinate of the other bond that is breaking\n brk_dist_name = None\n for brk_key in (brk_bnd_key1, brk_bnd_key2):\n if not brk_key.intersection(frm_bnd_key):\n brk_dist_name = automol.zmatrix.bond_key_from_idxs(\n ts_zma, brk_key)\n\n # Add second attempt to get brk_dist_name\n if brk_dist_name is None:\n brk_dist_names = [\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key1),\n automol.zmatrix.bond_key_from_idxs(ts_zma, brk_bnd_key2)\n ]\n # Grab the name that is not None\n for name in brk_dist_names:\n if name is not None:\n brk_dist_name = name\n\n # get full set of potential torsional coordinates\n pot_tors_names = automol.zmatrix.torsion_coordinate_names(rct_zma)\n\n # remove the torsional coordinates that would break reaction coordinate\n gra = automol.zmatrix.graph(ts_zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(ts_zma)\n tors_names = []\n for tors_name in pot_tors_names:\n axis = coo_dct[tors_name][0][1:3]\n grp1 = [axis[1]] + (\n list(automol.graph.branch_atom_keys(gra, axis[0], axis) -\n set(axis)))\n grp2 = [axis[0]] + (\n list(automol.graph.branch_atom_keys(gra, axis[1], axis) -\n set(axis)))\n if not ((mig_key in grp1 and a1_idx in grp2) or\n (mig_key in grp2 and a1_idx in grp1)):\n tors_names.append(tors_name)\n\n # Get reactants graph\n _, rct_gras = shifted_standard_zmas_graphs(\n [rct_zma], remove_stereo=True)\n rcts_gra = automol.graph.union_from_sequence(rct_gras)\n\n brk_bnd_key1 = shift_vals_from_dummy(brk_bnd_key1, ts_zma)\n brk_bnd_key2 = shift_vals_from_dummy(brk_bnd_key2, ts_zma)\n brk_bnd_keys = frozenset({brk_bnd_key1, brk_bnd_key2})\n frm_bnd_key = shift_vals_from_dummy(frm_bnd_key, ts_zma)\n\n ret = ts_zma, dist_name, brk_dist_name, brk_bnd_keys, frm_bnd_key, tors_names, rcts_gra\n\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If multiple copies of an atom in StereoGroup show up in the product, they should all be part of the same product StereoGroup.
def test_reaction_copies_stereogroup(self): # Stereogroup atoms are in the reaction with multiple copies in the product products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]', 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|', 'CC(=O)C') # stereogroup manually checked, product SMILES assumed correct. self.assertEqual( products, 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|' ) # Stereogroup atoms are not in the reaction, but have multiple copies in the # product. products = _reactAndSummarize('[O:1].[C:2]=O>>[O:1][C:2][O:1]', 'Cl[C@@H](Br)C[C@H](Br)CCO |&1:1,4|', 'CC(=O)C') # stereogroup manually checked, product SMILES assumed correct. self.assertEqual( products, 'CC(C)(OCC[C@@H](Br)C[C@@H](Cl)Br)OCC[C@@H](Br)C[C@@H](Cl)Br |&1:6,9,15,18|' )
[ "def test_grouping_of_components_of_same_protein(self):\n same = [set(self.model.components[2:5])]\n self.assertEqual(self.model.group_accessions(), same)", "def test_grouping_of_components_of_same_protein(self):\n same = [set(self.model.components[2:5]), set(self.model.components[5:7])]\n self.assertEqual(self.model.group_accessions(), same)", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def test_reaction_defines_stereo(self):\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@H](Cl)Br |o1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'F[C@@H](Cl)Br |&1:1|')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n products = _reactAndSummarize('[C:1]>>[C@@:1]', 'FC(Cl)Br')\n self.assertEqual(products, 'F[C@@H](Cl)Br')\n\n # Remove group with defined stereo\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,&2:3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |&1:3|')\n\n # Remove atoms with defined stereo from group\n products = _reactAndSummarize('[C:1]F>>[C@@:1]F',\n 'F[C@H](Cl)[C@@H](Cl)Br |o1:1,3|')\n self.assertEqual(products, 'F[C@@H](Cl)[C@@H](Cl)Br |o1:3|')", "def _check_symmgroup(graph, symmgroup):\n from netket.utils.semigroup import Permutation\n\n autom = graph.automorphisms()\n for el in symmgroup.to_array():\n assert Permutation(el) in autom.elems\n\n assert symmgroup == symmgroup.remove_duplicates()", "def semidirect_product(self, other,hom):\r\n if not isinstance(other, Group):\r\n raise TypeError(\"other must be a group\")\r\n bin_op=Function(self.Set.cartesian(other.Set).cartesian(self.Set.cartesian(other.Set)),self.Set.cartesian(other.Set),\r\n lambda x:(self.bin_op((x[0][0],hom(other(x[0][1])).elem.function(self(x[1][0])).elem)),\\\r\n other.bin_op((x[0][1], x[1][1]))), check_well_defined=False)\r\n Gr=Group((self.Set).cartesian(other.Set), bin_op, check_ass=False, check_inv=False, identity=(self.e.elem, other.e.elem),\r\n abelian=False,group_order=self.group_order*other.group_order)\r\n #Gr.group_gens=[(self.e.elem,b) for b in other.group_gens]+[(a,other.e.elem) for a in self.group_gens]\r\n return Gr", "def test_consistent_ids(self) -> None:\n bnode = BNode()\n g0_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Golan Trevize\")),\n (bnode, RDF.type, FOAF.Person),\n }\n bnode = BNode()\n g1_ts: _TripleSet = {\n (bnode, FOAF.name, Literal(\"Janov Pelorat\")),\n (bnode, RDF.type, FOAF.Person),\n }\n\n g0 = Graph()\n g0 += g0_ts\n cg0 = to_canonical_graph(g0)\n cg0_ts = GraphHelper.triple_set(cg0)\n\n g1 = Graph()\n g1 += g1_ts\n cg1 = to_canonical_graph(g1)\n cg1_ts = GraphHelper.triple_set(cg1)\n\n assert cg0_ts.issubset(\n cg1_ts\n ), \"canonical triple set cg0_ts should be a subset of canonical triple set cg1_ts\"", "def is_normal_subgroup(self, other):\r\n if not(self<=other):\r\n return False\r\n if other.is_abelian():\r\n return True\r\n if other.index(self)==2:\r\n return True\r\n gens1 = self.group_gens\r\n gens2 = other.group_gens\r\n for g in gens2:\r\n for h in gens1:\r\n p = g * h * g**-1\r\n if not p in Set(self.group_elems):\r\n return False\r\n return True", "def test_make_shared_replacements(self):\n\n with pm.Model() as test_model:\n test1 = 
pm.Normal(\"test1\", mu=0.0, sigma=1.0, size=(1, 10))\n test2 = pm.Normal(\"test2\", mu=0.0, sigma=1.0, size=(10, 1))\n\n # Replace test1 with a shared variable, keep test 2 the same\n replacement = pm.make_shared_replacements(\n test_model.initial_point, [test_model.test2], test_model\n )\n assert (\n test_model.test1.broadcastable\n == replacement[test_model.test1.tag.value_var].broadcastable\n )", "def merge_single_process(self):\n sample_names = [sample.name for sample in self.samples]\n duplicates_names = set([x for x in sample_names if sample_names.count(x) > 1])\n for name in duplicates_names:\n duplicate_samples = [s for s in self.samples if s.name == name]\n for s in duplicate_samples[1:]:\n duplicate_samples[0] += s\n summed_sample = sum(duplicate_samples)\n for s in duplicate_samples:\n self.samples.remove(s)\n self.samples.append(summed_sample)", "def test_repeated_nodes(self):\n w = osm.OSMWriter()\n ids = [\n w.build_node(geos.Point(123, 456), [], map=False),\n w.build_node(geos.Point(123, 456), [], map=False),\n ]\n self.assertEqual(len(w.tree.findall('//node')), 2)\n\n w = osm.OSMWriter()\n ids = [\n w.build_geom(geos.Point(123, 456), []),\n w.build_geom(geos.Point(123, 456), []),\n ]\n self.assertEqual(len(w.tree.findall('//node')), 2)\n\n w = osm.OSMWriter()\n ids = w.build_geom(geos.MultiPoint(geos.Point(123, 456), geos.Point(123,456)), {})\n self.assertEqual(len(w.tree.findall('//node')), 1)\n\n w = osm.OSMWriter()\n ids = w.build_geom(geos.MultiPoint(geos.Point(123, 456), geos.Point(12,34)), {})\n ids = w.build_geom(geos.Point(123, 456), {})\n self.assertEqual(len(w.tree.findall('//node')), 3)\n\n w = osm.OSMWriter()\n ids = w.build_geom(geos.Point(123, 456), {})\n ids = w.build_geom(geos.MultiPoint(geos.Point(123, 456), geos.Point(12,34)), {})\n self.assertEqual(len(w.tree.findall('//node')), 3)\n\n w = osm.OSMWriter()\n ids = w.build_geom(geos.Point(123, 456), {'tag':'val'})\n ids = w.build_geom(geos.MultiPoint(geos.Point(123, 456), geos.Point(12,34)), {'tag':'val'})\n self.assertEqual(len(w.tree.findall('//node')), 3)\n\n w = osm.OSMWriter()\n ids = w.build_geom(geos.MultiPoint(geos.Point(123, 456), geos.Point(12,34)), {'tag':'val'})\n ids = w.build_geom(geos.Point(123, 456), {'tag':'val'})\n self.assertEqual(len(w.tree.findall('//node')), 3)", "def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')", 
"def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')", "def test_same_set(self):\n s = randset()\n cluster = Cluster(width=10, bandwidth=2)\n cluster.add_item(s)\n cluster.add_item(s)\n self.assertEqual(1, len(cluster.get_clusters()))", "def has_group():", "def test_install_set_multi(self):\n expected = copy.deepcopy(test_xdata)\n for thing in expected.xpath(\"Children[@identical='true']/Thing\"):\n thing.text = \"same\"\n self._install(\n [lxml.etree.Element(\n \"SetMulti\", value=\"same\",\n base='Test/Children[#attribute/identical = \"true\"]',\n sub=\"Thing/#text\")],\n expected)", "def testShareDuplicatePhotos(self):\r\n share_list = [{'existing_episode_id': self._episode_id2,\r\n 'new_episode_id': self._existing_ep_id,\r\n 'photo_ids': self._photo_ids2}]\r\n self._tester.ShareExisting(self._cookie, self._existing_vp_id, share_list)\r\n self._tester.ShareExisting(self._cookie, self._existing_vp_id, share_list)", "def check_sane(group):\n attrs = None\n\n for info in group:\n dup_info = dict(info)\n\n # Remove lat and lon\n for prohib in ('lat', 'lon'):\n if prohib in dup_info:\n del dup_info[prohib]\n\n if attrs is None:\n # Use the first file as a reference\n attrs = dup_info\n else:\n # Do the sanity check\n if dup_info.items() != attrs.items():\n msg = \"File '{}' doesn't match '{}' in same group\".format(\n attrs, dup_info\n )\n raise ValueError(msg)", "def is_transitive(self):\r\n f=GroupAction(self,Set(list(range(1,self.group_degree+1))),lambda x,y:x.elem(y))\r\n return f.is_transitive()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the versions from GitHub tags
def get_versions(self):
    # They randomly use and don't use 'r' prefix so we have to sort
    # versions manually
    versions = list(self._get_github_tags())
    versions.sort(
        key=operator.attrgetter('base_version'),
        reverse=True,
    )
    return versions
[ "def get_versions(self):\n return self._get_github_tags(normalize_func=self._normalize_tag_name)", "def remote_tags():\n url = \"%s/git/refs/tags\" % get_github_api_url()\n for result in requests.get(url).json():\n ref = result[\"ref\"]\n version = ref.split(\"/\")[-1]\n if version is not None:\n yield version", "def _select_version_tags(tags):\n return [t for t in tags if VERSION_REGEX.match(t)]", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def major_tags(self, owner, repo):\n cursor = \"null\"\n tags_list = []\n url = \"https://api.github.com/graphql\"\n\n while True:\n query = {\"query\" :\n \"\"\"\n query {\n repository(owner: \"%s\", name: \"%s\") {\n tags: refs(refPrefix: \"refs/tags/\", first: 100, after: \"%s\") {\n edges {\n cursor\n tag: node {\n name\n target {\n ... on Tag {\n tagger {\n date\n }\n }\n }\n }\n }\n }\n }\n }\n \"\"\" % (owner, repo, cursor)\n }\n r = requests.post(url, auth=requests.auth.HTTPBasicAuth('user', self.GITHUB_API_KEY), json=query)\n raw = r.text\n data = json.loads(json.loads(json.dumps(raw)))\n tags = data['data']['repository']['tags']['edges']\n for i in tags:\n try:\n tags_list.append({'date' : i['tag']['target']['tagger']['date'], 'release' : i['tag']['name']})\n except KeyError:\n pass\n if data['data']['repository']['tags']['edges'] == []:\n break\n else:\n cursor = data['data']['repository']['tags']['edges'][-1]['cursor']\n\n major_versions = []\n pattern = re.compile(\"[0-9]+\\.[0]+\\.[0]+$\")\n for i in tags_list:\n try:\n if re.search(pattern, i[\"release\"]) != None:\n major_versions.append(i)\n except AttributeError:\n pass\n\n return pd.DataFrame(major_versions)", "def versions(self):\n versions = (t.lstrip('v') for t in self.tags)\n return filter(version_is_valid, versions)", "def show_git_versions(ctx):\n\n ws = get_workspace(config)\n\n exp = Experiment(ws, config[\"experiment_name\"])\n\n versions = [\n (run.id, run.get_properties()[\"azureml.git.commit\"]) for run in exp.get_runs()\n ]\n\n print(tabulate(versions, headers=[\"Run ID\", \"Git Version\"]))", "def versions():\n result = timeline.versions()\n if result:\n click.echo('\\n'.join(result))", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def list_tags(self) -> List[str]:\n self._validate()\n\n cmd = \"git tag --list --sort v:refname\"\n cmd_params = cmd_exec.CommandParameters(cwd=self.get_source_directory())\n tag_output = cmd_exec.run_command(cmd, cmd_params)\n\n tag_list = [tag.strip() for tag in tag_output.split('\\n') if tag]\n log.debug(f\"Repository tags {tag_list}\")\n\n return tag_list", "def getVersions(self) -> List[ghidra.framework.store.Version]:\n ...", "def get_stack_versions(stack_root):\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n if not versions:\n versions = 
get_versions_from_stack_root(stack_root)\n return versions", "def versions(self, name):\n versions = [x[0] for x in self._versions_and_recipes(name)]\n return versions", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def getVersions(self):\n logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []", "def get_possible_tags(tags: list[str], versions: str, unsupervised: bool = True):\n prev_version, next_version = versions.replace(\"None\", \"\").split(\":\")\n prev_tag = handle_tag_or_substring(prev_version, tags)\n # print(f\"prev_tag: {prev_tag}, prev_version: {prev_version}\")\n if len(prev_tag) == 0 and len(prev_version) > 0:\n prev_tag = [\n tag\n for tag in tags\n if prev_version == clean_tag(tag) and not is_rc_or_date(tag)\n ]\n next_tag = handle_tag_or_substring(next_version, tags)\n # print(f\"next_tag: {next_tag}, next_version: {next_version}\")\n if len(next_tag) == 0 and len(next_version) > 0:\n next_tag = [\n tag\n for tag in tags\n if next_version == clean_tag(tag) and not is_rc_or_date(tag)\n ]\n\n if len(prev_tag) == 1 and len(next_tag) == 1:\n return prev_tag[0], next_tag[0]\n elif len(prev_tag) == 1 and len(next_tag) > 1:\n next_tag = [\n tag for tag in next_tag if tag != prev_tag[0] or tag not in prev_tag[0]\n ] # this may lead to empty list\n logger.info(f\"Possible tags are:{prev_tag}:{next_tag}\")\n return (\n prev_tag[0],\n next_tag[0], # difflib.get_close_matches(prev_tag[0], next_tag, n=1)[0],\n )\n elif len(prev_tag) > 1 and len(next_tag) == 1:\n prev_tag = [\n tag for tag in prev_tag if tag != next_tag[0] or next_tag[0] not in tag\n ]\n logger.info(f\"Possible tags are:{prev_tag}:{next_tag}\")\n return (\n prev_tag[-1], # difflib.get_close_matches(next_tag[0], prev_tag, n=1)[0],\n next_tag[0],\n )\n # If there is one exact match but no candidates for the other tag, exit and hint the user with possible candidates\n elif len(prev_tag) == 0 and len(next_tag) == 1:\n prev_candidates = get_possible_missing_tag(tags, next_tag=next_tag[0])\n if len(prev_version) == 0 and len(prev_candidates) == 0:\n return \"\", next_tag[0]\n logger.info(f\"Previous tag can be: {','.join(prev_candidates)}\")\n elif len(prev_tag) == 1 and len(next_tag) == 0:\n next_candidates = get_possible_missing_tag(tags, prev_tag=prev_tag[0])\n if len(next_version) == 0 and len(next_candidates) == 0:\n return prev_tag[0], \"\"\n logger.info(f\"Next tag can be: {','.join(next_candidates)}\")\n elif len(prev_tag) > 1 and len(next_tag) > 1:\n logger.info(\"Multiple tag candidates found.\")\n else:\n prev_tag = [tag for tag in tags if prev_version in clean_tag(tag, False)]\n next_tag = [tag for tag in tags if next_version in clean_tag(tag, False)]\n # print(f\"Possible tags are:\\n\\t{prev_tag}\\n\\t{next_tag}\")\n if len(prev_tag) == 1 and len(next_tag) == 1:\n return prev_tag[0], next_tag[0]\n elif len(prev_tag) == 1 and len(next_tag) == 0 and next_version == \"\":\n return prev_tag[0], None\n elif len(prev_tag) == 0 and len(next_tag) == 1 and prev_version == \"\":\n return None, next_tag[0]\n elif len(prev_tag) == 0 and len(next_tag) == 0:\n return None, None\n elif prev_version == \"\" and next_version == \"\":\n return None, None\n # return \"\",\"\" to trigger tag mismatch\n return None, None", "def 
list_versions():\n versions = get_versions()\n for idx, ver in enumerate(versions):\n print(\" {}. {}\".format(idx+1, ver['name']))", "def _fetch_latest_version(cls) -> str:\n response = requests.get(\n \"https://api.github.com/repos/datahub-project/datahub/releases/latest\"\n )\n response.raise_for_status()\n return json.loads(response.text)[\"tag_name\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill the packet's data properties.
def fill_data(self, data):
    self._data = data
    self._data_length = data[1:3]
    self._frame_id = data[4]
    self._address = XbeeAddress(data[5:9], data[9:13], data[13:15])
    self._at_command = data[15:17]
    self._command_status = data[17]
    try:
        self._command_data = data[18:21]
        self._checksum = data[22]
    except IndexError:
        self._command_data = None
        self._checksum = data[18]
[ "def set_data(self):\n self.data_init = copy.deepcopy(self.tuun.data)\n self.data = copy.deepcopy(self.data_init)", "def set_properties(self):\n\n # assign feed entries from the root of the parsed data\n if hasattr(self.parsed_data, \"entries\"):\n self.items = self.parsed_data.entries\n\n # check if it is a feed root or feed element\n if hasattr(self.parsed_data, \"feed\"):\n source_data = self.parsed_data.feed\n else:\n source_data = self.parsed_data\n\n # assign available properties not listed in keymap\n self.title = source_data.title\n self.link = source_data.link\n\n for key in self.parsed_data.keymap.keys():\n if hasattr(self, key) and not getattr(self, key):\n attr_value = source_data.get(key)\n if isinstance(attr_value, struct_time):\n attr_value = self.serialize_datetime(attr_value)\n\n setattr(self, key, attr_value)", "def _fill_gps_data(self, simulator_data):\n simulator_data.gps.latitude = self.aircraft.instruments.gps.latitude\n simulator_data.gps.longitude = self.aircraft.instruments.gps.longitude\n simulator_data.gps.altitude = self.aircraft.instruments.gps.altitude\n simulator_data.gps.airspeed = self.aircraft.instruments.gps.airspeed\n simulator_data.gps.heading = self.aircraft.instruments.gps.heading", "def prepare_data(self):", "def _initialize_data(self):\n self.OUT_values = None\n self.truncated_output = False\n self.auto_send = False\n self.input_mode = \"R0\"\n\n pass", "def __init__(self):\n self.data = {}\n self.rdata = []", "def ghostManagerData(self, _packet, _data=[]):\n \tpkt = _packet\n \treturn pkt", "def __init__(self):\n self.data = []\n self.headers = []\n self.lamps_data = []\n self.lamps_headers = []", "def load_data(self, data):\n import numpy as np\n for key in data.keys():\n self.data[key] = []\n for i in range(len(self.buffer_info[key])):\n self.data[key].append(data[key][i])\n self.data['output'] = []\n for i in range(len(self.buffer_info['output'])):\n self.data['output'].append(\n np.zeros(self.buffer_info['output'][i]['size'], dtype=ctype(self.buffer_info['output'][i]['type']),\n order='C'))", "def populate_data(self):\r\n # Importing StationData with the standard imports causes a redundancy\r\n # problem, so it is imported here only when it is needed.\r\n from stationData import StationData\r\n # Find data requirements from all plumes.\r\n requirements = describe.PLUMES\r\n # Loop over plumes and define parameters to be used for pulling data.\r\n grib_file = pygrib.open(self.grib_file_path)\r\n for req in requirements:\r\n (plume,data_types,grid_level_type,grid_level,unused) = req\r\n selected = grib_file.select(shortName=data_types,\r\n typeOfLevel=grid_level_type,\r\n level=grid_level)\r\n for i, message in enumerate(selected):\r\n if i % 20 == 0:\r\n print '%s %s/%s Grib messages processed for %s' %\\\r\n (PRETEXT, i + 1, len(selected), req[0])\r\n for sdo in StationData.instances:\r\n if sdo.grib_i is None:\r\n StationData.populate_grid_information(message,\r\n self.config)\r\n sdo.add_data(plume,self.member_name,message)\r\n grib_file.close()\r\n return", "def __init__(self, packet_id, src, dest, flow_id, timestamp=0):\n super(DataPacket, self).__init__(packet_id, src, dest, flow_id,\n timestamp)\n self._size = DATA_PACKET_SIZE", "def load_data(self, data):\n\n self.id = data[\"id\"]\n self.type = data[\"type\"]\n self.name = data[\"name\"]\n self.short = data[\"short\"]\n self.amount = data[\"amount\"]\n self.init_cost = data[\"init_cost\"]\n self.init_time = data[\"init_time\"]\n self.init_profit = data[\"init_profit\"]", "def 
test_data(self):\n # CALLS\n # Setter\n packet1 = packets.Packet(\"telemetry\")\n packet1.data = sample_telemetry_data\n\n # Getter\n packet2 = packets.Packet(\"telemetry\")\n packet2.data = packet1.data\n\n # Packet from Raw Data -> Data\n packet3 = packets.get_packet_from_raw_data(packet1.raw_data)\n\n # ASSERTS\n # test setters and getters end to end\n self.assertEqual(packet1.raw_data, packet2.raw_data, packet3.raw_data)\n self.assertEqual(packet1.data, packet2.data, packet3.data)\n\n # test whether the data matches what we inputted\n self.assertEqual(packet1.data, sample_telemetry_data)\n\n # PRINTS\n print(packet1.data)", "def _init_net_delay_data(self):\n if self._net_delay_raw_data is None:\n return\n\n json_data = json_util.load_content(self._net_delay_raw_data)\n for row in json_data:\n app_id = int(row['app'])\n src_node_id = int(row['src_node'])\n dst_node_id = int(row['dst_node'])\n net_delay = float(row['net_delay'])\n self._net_delay_data[app_id][src_node_id][dst_node_id].append(net_delay)", "def rebuildData(self):\n self.data, self.collision_data, self.visual_data = createDatas(self.model, self.collision_model, self.visual_model)", "def initializeData(self, problem, random, structure):\n raise NotImplementedError", "def __fill_data_variables(self):\n data_vars = []\n for data_var in self.ts.data.data_vars:\n data_vars.append(data_var)\n\n self.data_vars = Dropdown(\n options=data_vars,\n value=data_vars[0],\n description='Data variables:',\n disabled=False,\n style = {'description_width': 'initial'},\n layout={'width': '400px'},\n )\n\n self.data_vars.observe(self.on_data_vars_change)", "def __init__(self, packet):\r\n self.__packet = struct.unpack(\"!4H\", packet[:8])\r\n\r\n self.src_port = self.__packet[0]\r\n self.dest_port = self.__packet[1]\r\n self.length = self.__packet[2]\r\n self.checksum = self.__packet[3]\r\n self.data = packet[8:]", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test if the stations are sorted correctly by distance
def test_stations_by_distance():
    station_list = build_station_list()
    # test for stations closest to cambridge city coordinates
    station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))
    output = [(station.name, distance) for (station, distance) in station_list_sort]
    for n in range(1, len(station_list)):
        # make sure that the distance of the previous station to the point is less than the next one in the list
        assert output[n-1][1] <= output[n][1]
[ "def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)", "def testOrdered(self):\n oldDistance = 0\n for x,y in IntegerPointsByDistance():\n newDistance = x**2 + y**2\n self.assertTrue(newDistance >= oldDistance)\n oldDistance = newDistance\n if newDistance > IntegerPointsByDistanceTest.threshold:\n break", "def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)", "def test_sorting_ascending_by_district():", "def test_distance(self):\n t0 = ts(times=[0,1,2,4,5,6],values=[3,4,5,6,7,8])\n t0_stand = distances.stand(t0,t0.mean(),t0.std())\n t1 = ts(times=[0,1,2,4,5,6],values=[3,4,5,6,7,8])\n t1_stand = distances.stand(t1,t1.mean(), t1.std()) \n assert distances.distance(t0_stand, t1_stand) < 10**(-16)", "def sort_by_miss_distance_chosen(self, asteroids_list): # 1\n return sorted(asteroids_list, key=lambda a: a.asteroid_miss_distance_km)", "def test_different_routes_from_c_to_c_and_distance_less_than_30(self):\n railroad = trains.Railroad()\n routes = railroad.find_routes('C', 'C', 9)\n routes = railroad.filter_routes_by_distance(routes, 0, 30)\n self.assertEqual(len(routes), 7)", "def test_sort_feeds_data_valid(self):\n expected_result = [self.unsorted_data[2],\n self.unsorted_data[0],\n self.unsorted_data[1]]\n result = sort_feeds_data(self.unsorted_data, self.timestamp)\n self.assertEqual(result, expected_result)", "def order_by_distance(center, points):\n pass", "def calculate_distance_to_stations(self, stations: list):\n for station in stations:\n import pdb; pdb.set_trace()", "def hasSortCriterion():", "def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)", "def test_distance():\n r = 2\n lat = 90\n long1 = 90\n long2 = 270\n\n analdist = 4\n\n classy = SWARMprocess()\n compdist = classy.distance(lat, long1, r, lat, long2, r)\n\n epsilon = 1e-4\n testy = (np.abs(analdist - compdist) < epsilon)\n msg = \"analytical distance not equal to calculated distance.\\\n Calculated %g, expected %g\" % (compdist, analdist)\n assert testy, msg", "def _rank_stations_by_distance_and_quality(lat, lon):\n\n station_ranking = rank_stations(lat, lon)\n station_ranking['enumerated_quality'] = station_ranking['rough_quality'].map(QUALITY_SORT)\n station_ranking = 
station_ranking.sort_values(by=['distance_meters', 'enumerated_quality'])\n return station_ranking", "def test_sorting_descending_by_district():", "def sort(self) -> bool: \n try: return Stream.display_units.sort\n except: return False", "def test_sort_cost(self):\n l1 = self.create_landmark(name='Cheap Landmark', cost=1)\n l2 = self.create_landmark(name='Expensive Landmark', cost=60)\n self.assertEqual(sorted([l2, l1]), [l1, l2])", "def is_sorted(self):\n import abjad\n if len(self) < 2:\n return True\n pairs = abjad.sequence(self).nwise()\n for left_timespan, right_timespan in pairs:\n if right_timespan.start_offset < left_timespan.start_offset:\n return False\n if left_timespan.start_offset == right_timespan.start_offset:\n if right_timespan.stop_offset < left_timespan.stop_offset:\n return False\n return True", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tower of Hanoi with size levels: move the discs from left to right, using mid as the auxiliary peg
def move(self, size: int, left: str, right: str, mid: str):
    if size == 1:
        print(f'Move 1 from {left} to {right} ')
        return
    # Move the top n-1 discs from left onto mid
    self.move(size - 1, left, mid, right)
    # Move the last (largest) disc to right
    print(f'Move {size} from {left} to {right} ')
    # Move the n-1 discs from mid onto right
    self.move(size - 1, mid, right, left)
[ "def _int_sift_down(self, arr, pos, size):\n #if 2*pos + 1 > size:\n # return\n min_pos = pos\n l_child = 2*pos + 1\n r_child = 2*pos + 2\n if (l_child <= size) and (arr[min_pos] > arr[l_child]):\n min_pos = l_child\n if (r_child <= size) and (arr[min_pos] > arr[r_child]):\n min_pos = r_child\n if min_pos != pos:\n arr[pos], arr[min_pos] = arr[min_pos], arr[pos]\n self._int_sift_down(arr, min_pos, size)", "def fix_size(self):\n self.size = 1 + Node.get_size(self.left) + Node.get_size(self.right)", "def slice(self, size):\n current = self.next\n self.next += size\n return self.view[current:self.next]", "def path_length_tree(x, t,e):\r\n e = e\r\n if t.exnodes == 1:\r\n e = e+ c(t.size) # normlization\r\n return e\r\n else:\r\n a = t.split_by\r\n if x[a] < t.split_value :\r\n return path_length_tree(x, t.left, e+1)\r\n\r\n if x[a] >= t.split_value :\r\n return path_length_tree(x, t.right, e+1)", "def _resize_interval(start, end, size):\n center = int(0.5 * (start + end))\n half_size = int(0.5 * size)\n left = center - half_size\n right = left + size\n return left, right", "def sub_pieces(self):\n self._pieces_left -= 1", "def unstacked_index(size, index):\n return index % size, index // size", "def mid_down(self):\r\n self.writing_position()\r\n self.down()\r\n self.half_left()", "def partition(dat, size):\n\treturn (lambda dat, size: map(lambda i: dat[i:i+size], xrange(0, len(dat), size)))(dat,size)", "def size(s: Stack) -> int:\n side_stack = Stack()\n count = 0\n # Pop everything off <s> and onto <side_stack>, counting as we go.\n while not s.is_empty():\n side_stack.push(s.pop())\n count += 1\n # Now pop everything off <side_stack> and back onto <s>.\n while not side_stack.is_empty():\n s.push(side_stack.pop())\n # <s> is restored to its state at the start of the function call.\n # We consider that it was not mutated.\n return count", "def subimage_size_from_inner_size(self, inner_size:int) -> int:\n return (2 ** self.n_folds) * (inner_size + 2) - 2", "def _heapify(self):\n #1. 找到最后一个非叶子节点\n last_node = self.parent(len(self.l) -1)\n #2. 
从租后一个非叶子节点开始下沉操作\n\n for i in range(last_node + 1,-1,-1):\n self._sift_down(i)", "def mid_right(self):\r\n self.writing_position()\r\n self.half_down()", "def mid_top(self):\r\n self.writing_position()\r\n self.half_left()", "def perform_chunking(self, img_size, chunk_size):\n chunks, i = [], 0\n while True:\n chunks.append((i*(chunk_size - self.overlap/2), i*(chunk_size - self.overlap/2)+chunk_size))\n i+=1\n if chunks[-1][1] > img_size:\n break\n n_count = len(chunks) \n chunks[-1] = tuple(x - (n_count*chunk_size - img_size - (n_count-1)*self.overlap/2) for x in chunks[-1])\n chunks = [(int(x), int(y)) for x, y in chunks]\n return chunks", "def calculate_previous_size(required_hole_size):\n\treturn required_hole_size/8", "def siftDown(start, count):\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return", "def _create_empty_segment_tree(size):\n import math\n\n next_pow_of_two = math.ceil(math.log(size, 2))\n new_size = 2 * math.pow(2, next_pow_of_two) - 1\n\n return [0] * int(new_size)", "def inner_size_from_label_size(self, label_size: int) -> int:\n return 4 + math.ceil((label_size - 2) / (2 ** self.n_folds))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
arr contains only integers; count the number of ways to choose any number of elements from arr that sum to aim
def sum_aim(self, arr: List[int], aim: int):
    if not arr or aim < 0:
        return 0
    return self.sum_aim_process(arr, 0, aim)
[ "def sum_array(arr):\n sum = 0\n for num in arr:\n sum += num\n return sum", "def sum(numbers):", "def ArrayAdditionI(arr):\n\n nums = sorted(arr)\n\n #Get highest num\n highestNum = max(arr)\n currentSum = 0 - highestNum\n\n for num in nums:\n currentSum += num\n\n if currentSum < highestNum:\n return 'false'\n else:\n return 'true'", "def number_from_array(arr):\n val = 0\n\n for i in range(len(arr)):\n val += arr[i] << (7 - i)\n\n return val", "def sum_elements(arr):\n return sum(arr)", "def _addToArray(self, num, arr):\r\n return [i + num for i in arr]", "def sumArray(arr):\n if arr:\n return sum(sorted(arr)[1:-1])\n return 0", "def test_sum_array():\n assert recursion.sum_array([1,2,3,4]) == 10, 'incorrect'\n assert recursion.sum_array([4,-2,0,2.5]) == 4.5, 'incorrect'\n assert recursion.sum_array([0,0]) == 0, 'incorrect'", "def reduce_for_range(method, a, seed) -> int:\n for i in range(len(a)):\n seed = method(seed, a[i])\n return seed", "def sum_array(array):\n sum_all = 0 #our starting point\n for item in array:\n sum_all = sum_all + item #using the for loop we add each item to the starting point\n return sum_all #return the sum of all items in the given array", "def _addToZeros(self, num, arr):\r\n for index, val in enumerate(arr):\r\n if val == 0:\r\n arr[index] += num\r\n return arr", "def which_number_twice( arr ):", "def increment(self, a: int, b: int):", "def get_code_reward(index, arr, code_digits):\n \n result = 0\n if (index == 2) and (arr[2] == code_digits[0]):\n result=result + 1\n if arr[3] == code_digits[1]: result=result + 1\n elif (index == 3) and (arr[3] == code_digits[1]):\n result=result + 1\n if arr[2] == code_digits[0]: result=result + 1\n return result", "def sumTo(a):\n return a*(a+1)/2", "def sumRange(self, i, j):\n def numArray():\n\t \tindex = 1\n\t \tlength = len(nums)\n\t \tself.sum.append(self.nums[0])\n\t \twhile index < length:\n\t \t\tself.sum.append(self.sum[-1] + self.nums[index])\n\t \t\tindex = index + 1\n\n numArray()\n if i > j or i < 0 or j < 0 or i > len(nums) or j > len(nums):\n \treturn 0\n else:\n \tif i == j:\n \t\treturn self.nums[i]\n \telse:\n \t\treturn self.sum[j] - self.sum[i] + self.nums[i]", "def canMakeArithmeticProgression(arr): \n new_arr = sorted(arr)\n diff = new_arr[1] - new_arr[0]\n for idx, num in enumerate(new_arr):\n if idx == 0:\n pass\n elif num - new_arr[idx - 1] != diff:\n return False\n return True", "def mysum(items) :", "def normaliseInt(array,tot=1.0):\r\n tot1 = np.sum(array)\r\n arrayout = array * tot / tot1\r\n return arrayout" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
There is a grid with m rows and n columns. A robot starts moving from the cell at coordinate (0, 0); on each step it can move one cell left, right, up, or down, but it may not enter any cell where the sum of the digits of the row coordinate and the column coordinate is greater than k.
def robot_move(m: int, n: int, k: int):
    # x1 = min(n % 10 + (n // 10) % 10, k+1)
    # x2 = min(m % 10 + (m // 10) % 10, k+1)
    # print(f'x1={x1}, x2={x2}')
    # other cases
    return _robot_move(m, n, k)
[ "def matrix_left_move_column(k, matrix):\n pass", "def compute_new_pos(m,n,row,col,R,edge_row,edge_col,location):\n new_col = 0\n new_row = 0\n col1= col\n row1 = row\n while R > 0:\n if location == \"top\":\n #new_col = R - col\n if R > col - edge_col:\n R = R - (col - edge_col)\n col = edge_col\n row = row\n location = \"left\" #move left <=\n else:\n new_col = col - R\n new_row = row\n R = 0\n \n elif location == \"left\":\n if R > (edge_row + m) - row:\n R = R - ((edge_row + m) - row)\n row = edge_row + m\n col = col\n location = \"bottom\" #move down\n else:\n new_row = R + row\n new_col = col\n R = 0\n \n elif location == \"bottom\":\n if R > (edge_col + n) - col:\n R = R - ((edge_col + n) - col)\n col = (edge_col + n)\n row = row\n location = \"right\" #move right =>\n else:\n new_col = R + col\n new_row = row\n R = 0\n \n elif location == \"right\":\n if R > row - edge_row:\n R = R - (row - edge_row)\n row = edge_row\n col = col\n location = \"top\" #move up \n else:\n new_row = row - R\n new_col = col\n R = 0\n## print row,col,new_row,new_col,edge_row,edge_col,location,m,n\n return [new_row,new_col]", "def shiftGrid(self, grid: List[List[int]], k: int) -> List[List[int]]:\n return self.linear_solution(grid, k)", "def change_grid(self, i, j, k):\n \"\"\" k decides the change \"\"\"\n board.M[i][j] = k\n board.grid[2 * i][j] = self.helper(k)\n board.grid[2 * i + 1][j] = self.helper(k)", "def solution(n, m, r, c, k) -> int:\n xs = []\n # Add all the non-zero room widths to xs\n last_column_wall = None\n for col in c:\n if last_column_wall is not None and col - last_column_wall - 1 > 0:\n xs.append(col - last_column_wall - 1)\n last_column_wall = col\n ys = []\n # Add all the non-zero room heights to ys\n last_row_wall = None\n for row in r:\n if last_row_wall is not None and row - last_row_wall - 1 > 0:\n ys.append(row - last_row_wall - 1)\n last_row_wall = row\n return aux(xs, ys, k)", "def get_cross_size_grid(n, m, grid):\n grid = [[int(c == '#') for c in row] for row in grid]\n acc = [[[0] * 4 for _ in range(m)] for _ in range(n)]\n for i in range(n):\n acc[i][0][L] = grid[i][0]\n acc[i][-1][R] = grid[i][-1]\n for j in range(1, m):\n val = grid[i][j]\n acc[i][j][L] = acc[i][j-1][L] + val if val else 0\n val = grid[i][-j-1]\n acc[i][-j-1][R] = acc[i][-j][R] + val if val else 0\n for j in range(m):\n acc[0][j][T] = grid[0][j]\n acc[-1][j][B] = grid[-1][j]\n for i in range(1, n):\n val = grid[i][j]\n acc[i][j][T] = acc[i-1][j][T] + val if val else 0\n val = grid[-i-1][j]\n acc[-i-1][j][B] = acc[-i][j][B] + val if val else 0\n\n for i in range(n):\n for j in range(m):\n grid[i][j] = min(acc[i][j])\n return grid", "def __move_king_to_col(self, from_col, from_row, to_col):\n card = self.solitaire[from_col, from_row]\n # Chgeck if king can be moved to empty column\n if card.number == 13 and self.get_pile_size_in_col(to_col) == 0:\n self.__move_cards(from_col, from_row, to_col)\n return True\n else:\n return False", "def sponge(n, m):\n M = np.zeros((m, m))\n\n def trouer(n, xa, ya, xb, yb):\n \"\"\"Insert a hole between (xa, ya) and (xb, yb)\n Then iterate n-1 itself inside\n\n Args:\n n (int): number of iterations\n xa (float): x coordinate of A\n ya (float): y coordinate of A\n xb (float): x coordinate of B\n yb (float): y coordinate of B\n \"\"\"\n if n > 0:\n stepx = (xb - xa) // 3\n stepy = (yb - ya) // 3\n for i in range(xa + stepx, xa + 2 * stepx):\n for j in range(ya + stepy, ya + 2 * stepy):\n M[i][j] = 1\n trouer(n - 1, xa, ya, xa + stepx, ya + stepy)\n trouer(n - 1, xa + 
stepx, ya, xa + 2 * stepx, ya + stepy)\n trouer(n - 1, xa + 2 * stepx, ya, xb, ya + stepy)\n trouer(n - 1, xa, ya + stepy, xa + stepx, ya + 2 * stepy)\n trouer(n - 1, xa + 2 * stepx, ya + stepy, xb, ya + 2 * stepy)\n trouer(n - 1, xa, ya + 2 * stepy, xa + stepx, yb)\n trouer(n - 1, xa + stepx, ya + 2 * stepy, xa + 2 * stepx, yb)\n trouer(n - 1, xa + 2 * stepx, ya + 2 * stepy, xb, yb)\n trouer(n, 0, 0, m, m)\n plt.matshow(M)\n plt.axis('equal')\n plt.axis('off')\n plt.show()", "def _up_left(self, col, row):\n ones = 0\n twos = 0\n for step in range(4):\n\n current = self.layout[col + (step*-1)][row + (step)] #step up and left\n if current == 1: ones+=1\n if current == 2: twos+=1\n\n return self._score_a_quartet(ones, twos)", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def _left_down_span(self, (m, n), length, wrap):\n\n if length > self.num_cols or length > self.num_rows or \\\n (not wrap and (n - length + 1 < 0 or m + length > self.num_rows)):\n return None\n\n return [(mm % self.num_rows, nn % self.num_cols) \n for (mm, nn) in izip(xrange(m, m + length), \n xrange(n, n - length, -1))]", "def moveUpLeft(self):\n if self.curr_col > 0:\n self.curr_col -= 1\n if self.curr_row > 0:\n self.curr_row -= 1", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def make_grid(m, n, k):\n \n grid = np.zeros(m*n)\n \n mine_indecies = np.arange(m*n)\n np.random.shuffle(mine_indecies)\n mine_indecies = mine_indecies[:k]\n \n #print('mine_indecies:', mine_indecies)\n 
#print(np.arange(m*n).reshape((m,n)))\n \n for i in mine_indecies:\n mask = get_mask(m, n, i)\n #print(mask.reshape((m,n)))\n grid = grid + mask\n \n grid[mine_indecies] = -1\n grid.resize((m,n))\n return grid", "def mirrorHoriz():", "def judge_walkable(n, x, y):\r\n # 需要将坐标(x,y)等价转换到N=1的迷宫中的网格。\r\n # 此时给出N=1中的坐标的关系\r\n maze_list = [[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]]\r\n # N=1的迷宫中各个网格的情况\r\n maze_dict = {(i, j): maze_list[i][j] for i in range(len(maze_list)) for j in range(len(maze_list[i]))}\r\n\r\n while (x, y) not in maze_dict or n != 1:\r\n # 首先判断处在小迷宫相交地方的三个网格\r\n if x == 2 ** n:\r\n if y == 1: # 左上,左下相交\r\n return 1\r\n elif y == 2 ** (n + 1) - 1: # 右上,右下相交\r\n return 1\r\n else:\r\n return 0\r\n if y == 2 ** n:\r\n if x == 2 ** n - 1: # 左上,右上相交\r\n return 1\r\n else:\r\n return 0\r\n\r\n # 然后判断是否处于四周,\r\n if x == 0 or x == 2 ** (n+1):\r\n return 0\r\n if y == 0 or y == 2 ** (n+1):\r\n return 0\r\n\r\n # 根据所在不同区域进行不同的变化\r\n if 1 <= x < 2**n < y < 2**(n+1): # 右上\r\n y -= 2**n # 向左平移2**n\r\n\r\n elif 1 <= y < 2**n < x < 2**(n+1): # 左下\r\n # 首先逆时针旋转90度, 原始的a行b列变为2**n-b行a列\r\n a = x - 2 ** n\r\n b = y\r\n x, y = 2 ** n + 2 ** n-b, a\r\n # 然后向上平移2**n个单位\r\n x -= 2 ** n\r\n elif 2**n < x < 2**(n+1)and 2**n < y < 2**(n+1): # 右下\r\n # 首先顺时针旋转90度, 原始的a行b列变为b行(2**n-a)列\r\n a = x - 2 ** n\r\n b = y - 2 ** n\r\n x, y = 2 ** n + b, 2**n + 2**n - a\r\n # 然后向上平移2**n个单位\r\n x -= 2 ** n\r\n # 然后向左平移2**n个单位\r\n y -= 2 ** n\r\n n -= 1\r\n return maze_dict[(x, y)]", "def transform_to_circles(matrix, m, n):\n def of(i, j):\n \"\"\"Offset: n is length of the line while i and j are matrix/list position indices\"\"\"\n # print(f'{i} * {n} + {j} = {i * n + j}')\n return i * n + j\n\n ar = [x for line in matrix for x in line]\n circles = []\n for k in range(int(min(m, n) / 2)):\n \"\"\" Per circle, extract the four sides of frame and create deque.\n frame from matrix = [\n matrix[(k,k):(k,j-k)] + \n matrix[(k+1,j-k):(i-k-1,j-k)] + \n matrix[(i-k,j-k):(i-k,k)] + \n matrix[(i-k-1,k):(k+1,k)]\n ] \n frame from the matrix line per line concatenation = [\n ar[of(k,k):of(k,(n-1)-k)] + \n [ar[of(i,(n-1)-k)] for i in range(k+1, (m-1)-k-1)] +\n ar[of((m-1)-k, (n-1)-k):of((m-1)-k, k):-1] + \n [ar[of(i, k)] for i in range((m-1)-k-1), k+1)]\n ]\n \"\"\"\n # print(f'k:{k}')\n top = ar[of(k, k):of(k, n - k)]\n # print(f'{top}')\n right = [ar[of(i, n - 1 - k)] for i in range(k + 1, m - k - 1)]\n # print(f'{right}')\n bottom = ar[of(m - 1 - k, n - 1 - k):of(m - 1 - k, k) - 1: -1]\n # print(f'{bottom}')\n left = [ar[of(i, k)] for i in range(m - 1 - k - 1, k, -1)]\n # print(f'{left}')\n circles.append(deque(\n top +\n right +\n bottom +\n left\n ))\n return circles", "def insert_gdelta(self, variables, size, row, column, k):\r\n for i in range(size):\r\n self.matrix[row, column] = variables[i]\r\n column = column + 1\r\n k[row] = (k[row] - variables[-1]) % 2", "def step_forward(self):\r\n self.col += self.rotation[0]\r\n self.row -= self.rotation[1]\r\n self.rect_to_point()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum of digits: 123 => 1+2+3, 345 => 3+4+5
def bit_sum(n):
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
[ "def __digit(cls, s_code: str) -> str:\n i = 1\n tmp_sum = 0\n for num in s_code: # number 为0-9\n if 0 == i % 2:\n tmp = int(ord(num)) * 2 % 10 # tmp 这种\n else:\n tmp = int(ord(num)) * 3 % 10\n tmp_sum += tmp\n i += 2\n if 0 == tmp_sum % 10:\n bit = 0\n elif tmp_sum > 100:\n bit = 10 - tmp_sum % 100 % 10\n else:\n bit = 10 - tmp_sum % 10\n return str(bit)", "def problem016():\n\n n = 2 ** 1000\n ans = sum(int(c) for c in str(n))\n return ans", "def sumDigits(s):", "def digital_sum(n):\n r = 0\n while n:\n r, n = r + n % 10, n // 10\n return r", "def digsum(num, power):\r\n val = 0\r\n for el in str(num):\r\n val += int(el) ** power\r\n \r\n return val", "def add_digits(n):\n return sum([int(d) for d in str(n)])", "def sum(number):\n number += 1\n return number", "def sixteen():\r\n \r\n number = str(pow(2, 1000))\r\n sum = 0\r\n \r\n for i in number:\r\n sum += int(i)\r\n \r\n return sum", "def carry(num: int) -> int:\r\n base = 10\r\n while True:\r\n div, mod = divmod(num, base)\r\n if mod == 0:\r\n base *= 10\r\n else:\r\n return (div + 1) * base", "def sum_digits_power(n,p):\n return sum(map(lambda d : int(d)**p, str(n)))", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def calc_check_digit(number):\n weights = (2, 4, 8, 5, 10, 9, 7, 3, 6)\n return str(sum(w * int(n) for w, n in zip(weights, number)) % 11 % 10)", "def digitalSum(n):\n if n < 10:\n return n\n return n % 10 + digitalSum( n // 10)", "def plus_one(n):\n m = n + 1\n return m", "def sum32(x):\n return sum(x) & 0xffffffff", "def sum_numbers_one_to_ten():\n sum=0\n for num in range(1,11):\n sum=sum+num\n return sum\n pass", "def reverse_and_add(n):\n return n + int(str(n)[::-1])", "def powerDigitSum(base, exp):\n\treturn sum(power(base, exp))", "def method2(num): # 方法二,改进型,解决了负数无限循环问题,但时间复杂度为O(n),n是二进制的位数\n count = 0\n flag = 1\n while flag & 31: # flag & 31是模拟书中c的代码,32位的无符号的int类型,如果是flag等于2^32时,最高位超过32,自然丢失,变成0\n if num & flag:\n count += 1\n flag <<= 1\n print(count)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Arabic character set ranges from 0600–06FF in Unicode.
def see_arabic_chars_unicode():
    import unicodedata
    absent = 0
    present = 0
    for i in range(0x0600, 0x06FF + 1):
        try:
            print('{:04X} \t{} --> {}'.format(i, unicodedata.name(chr(i)), chr(i)))
            present += 1
        except ValueError:
            absent += 1
    else:
        print('\nTotal present: {}'.format(present))
        print('\nTotal absent: {}'.format(absent))
[ "def iupac_standard_characters(cls):\n return set(\"ACGUacgu\")", "def normalize_alef_maksura_ar(s):\n\n return s.replace(u'\\u0649', u'\\u064a')", "def clean_arabic(word: str):\n diacritics = [chr(1614),chr(1615),chr(1616),chr(1617),chr(1618),chr(1761),chr(1619),chr(1648),chr(1649),chr(1611),chr(1612),chr(1613)]\n for dia in diacritics:\n word = re.sub(dia,'',word)\n return word", "def make_unicode():\r\n for num in range(300, 320):\r\n yield unichr(num)", "def CharacterToScript(u):\n if u >= u'\\u0000' and u <= u'\\u007F': return 'Latin'\n elif u >= u'\\u0080' and u <= u'\\u00FF': return 'Latin'\n elif u >= u'\\u0100' and u <= u'\\u017F': return 'Latin'\n elif u >= u'\\u0180' and u <= u'\\u024F': return 'Latin'\n elif u >= u'\\u0370' and u <= u'\\u03FF': return 'Greek'\n elif u >= u'\\u0400' and u <= u'\\u04FF': return 'Cyrillic'\n elif u >= u'\\u0500' and u <= u'\\u052F': return 'Cyrillic'\n elif u >= u'\\u0530' and u <= u'\\u058F': return 'Armenian'\n elif u >= u'\\u0590' and u <= u'\\u05FF': return 'Hebrew'\n elif u >= u'\\u0600' and u <= u'\\u06FF': return 'Arabic'\n elif u >= u'\\u0700' and u <= u'\\u074F': return 'Syriac'\n elif u >= u'\\u0750' and u <= u'\\u077F': return 'Arabic'\n elif u >= u'\\u0780' and u <= u'\\u07BF': return 'Thaana'\n elif u >= u'\\u0900' and u <= u'\\u097F': return 'Devanagari'\n elif u >= u'\\u0980' and u <= u'\\u09FF': return 'Bengali'\n elif u >= u'\\u0A00' and u <= u'\\u0A7F': return 'Gurmukhi'\n elif u >= u'\\u0A80' and u <= u'\\u0AFF': return 'Gujarati'\n elif u >= u'\\u0B00' and u <= u'\\u0B7F': return 'Oriya'\n elif u >= u'\\u0B80' and u <= u'\\u0BFF': return 'Tamil'\n elif u >= u'\\u0C00' and u <= u'\\u0C7F': return 'Telugu'\n elif u >= u'\\u0C80' and u <= u'\\u0CFF': return 'Kannada'\n elif u >= u'\\u0D00' and u <= u'\\u0D7F': return 'Malayalam'\n elif u >= u'\\u0D80' and u <= u'\\u0DFF': return 'Sinhala'\n elif u >= u'\\u0E00' and u <= u'\\u0E7F': return 'Thai'\n elif u >= u'\\u0E80' and u <= u'\\u0EFF': return 'Lao'\n elif u >= u'\\u0F00' and u <= u'\\u0FFF': return 'Tibetan'\n elif u >= u'\\u1000' and u <= u'\\u109F': return 'Burmese'\n elif u >= u'\\u10A0' and u <= u'\\u10FF': return 'Georgian'\n elif u >= u'\\u1100' and u <= u'\\u11FF': return 'Hangul'\n elif u >= u'\\u1200' and u <= u'\\u137F': return 'Ethiopic'\n elif u >= u'\\u1380' and u <= u'\\u139F': return 'Ethiopic'\n elif u >= u'\\u13A0' and u <= u'\\u13FF': return 'Cherokee'\n elif u >= u'\\u1400' and u <= u'\\u167F': return 'UCS'\n elif u >= u'\\u1680' and u <= u'\\u169F': return 'Ogham'\n elif u >= u'\\u16A0' and u <= u'\\u16FF': return 'Runic'\n elif u >= u'\\u1700' and u <= u'\\u171F': return 'Tagalog'\n elif u >= u'\\u1720' and u <= u'\\u173F': return 'Hanunoo'\n elif u >= u'\\u1740' and u <= u'\\u175F': return 'Buhid'\n elif u >= u'\\u1760' and u <= u'\\u177F': return 'Tagbanwa'\n elif u >= u'\\u1780' and u <= u'\\u17FF': return 'Khmer'\n elif u >= u'\\u1800' and u <= u'\\u18AF': return 'Mongolian'\n elif u >= u'\\u1900' and u <= u'\\u194F': return 'Limbu'\n elif u >= u'\\u1950' and u <= u'\\u197F': return 'Tai Le'\n elif u >= u'\\u1980' and u <= u'\\u19DF': return 'New Tai Lue'\n elif u >= u'\\u19E0' and u <= u'\\u19FF': return 'Khmer'\n elif u >= u'\\u1A00' and u <= u'\\u1A1F': return 'Buginese'\n elif u >= u'\\u1E00' and u <= u'\\u1EFF': return 'Latin'\n elif u >= u'\\u1F00' and u <= u'\\u1FFF': return 'Greek'\n elif u >= u'\\u2C00' and u <= u'\\u2C5F': return 'Glagolitic'\n elif u >= u'\\u2C80' and u <= u'\\u2CFF': return 'Coptic'\n elif u >= u'\\u2D00' and u <= 
u'\\u2D2F': return 'Georgian'\n elif u >= u'\\u2D30' and u <= u'\\u2D7F': return 'Tifinagh'\n elif u >= u'\\u2D80' and u <= u'\\u2DDF': return 'Ethiopic'\n elif u >= u'\\u2E80' and u <= u'\\u2EFF': return 'CJK'\n elif u >= u'\\u2F00' and u <= u'\\u2FDF': return 'Kangxi Radicals'\n elif u >= u'\\u3040' and u <= u'\\u309F': return 'Hiragana'\n elif u >= u'\\u30A0' and u <= u'\\u30FF': return 'Katakana'\n elif u >= u'\\u3100' and u <= u'\\u312F': return 'Bopomofo'\n elif u >= u'\\u3130' and u <= u'\\u318F': return 'Hangul'\n elif u >= u'\\u3190' and u <= u'\\u319F': return 'Kanbun'\n elif u >= u'\\u31A0' and u <= u'\\u31BF': return 'Bopomofo'\n elif u >= u'\\u31F0' and u <= u'\\u31FF': return 'Katakana'\n elif u >= u'\\u3300' and u <= u'\\u33FF': return 'CJK'\n elif u >= u'\\u3400' and u <= u'\\u4DBF': return 'CJK'\n elif u >= u'\\u4E00' and u <= u'\\u9FFF': return 'CJK'\n elif u >= u'\\uA000' and u <= u'\\uA48F': return 'Yi'\n elif u >= u'\\uA490' and u <= u'\\uA4CF': return 'Yi'\n elif u >= u'\\uA800' and u <= u'\\uA82F': return 'Syloti Nagri'\n elif u >= u'\\uAC00' and u <= u'\\uD7AF': return 'Hangul'\n elif u >= u'\\uF900' and u <= u'\\uFAFF': return 'CJK'\n elif u >= u'\\uFE30' and u <= u'\\uFE4F': return 'CJK'\n elif u >= u'\\uFE70' and u <= u'\\uFEFF': return 'Arabic'\n elif u >= u'\\u10000' and u <= u'\\u1007F': return 'Linear B'\n elif u >= u'\\u10080' and u <= u'\\u100FF': return 'Linear B'\n elif u >= u'\\u10300' and u <= u'\\u1032F': return 'Old Italic'\n elif u >= u'\\u10330' and u <= u'\\u1034F': return 'Gothic'\n elif u >= u'\\u10380' and u <= u'\\u1039F': return 'Ugaritic'\n elif u >= u'\\u103A0' and u <= u'\\u103DF': return 'Old Persian'\n elif u >= u'\\u10400' and u <= u'\\u1044F': return 'Deseret'\n elif u >= u'\\u10450' and u <= u'\\u1047F': return 'Shavian'\n elif u >= u'\\u10480' and u <= u'\\u104AF': return 'Osmanya'\n elif u >= u'\\u10800' and u <= u'\\u1083F': return 'Cypriot Syllabary'\n elif u >= u'\\u10A00' and u <= u'\\u10A5F': return 'Kharoshthi'\n elif u >= u'\\u20000' and u <= u'\\u2A6DF': return 'CJK'\n elif u >= u'\\u2F800' and u <= u'\\u2FA1F': return 'CJK'\n else: return UNKNOWN_SCRIPT_", "def 取龜(我):\n return 我", "def glyph_unicodes(self, glyph):\n # Glyphs stores Unicode values as hex string\n return set([int(u, 16) for u in glyph.unicodes])", "def range_to_utf16(lines: List[str], range: Range) -> Range:\n return Range(\n start=position_to_utf16(lines, range.start),\n end=position_to_utf16(lines, range.end),\n )", "def normalize_teh_marbuta_ar(s):\n\n return s.replace(u'\\u0629', u'\\u0647')", "def transliterate_from_hira_to_kana(text: str) -> str:\n return JTran.transpose_codepoints_in_range(text, 96, 12353, 12438)", "def _exctract_uas(self, content):\n return re.findall(r'UA-\\d{8}', content)", "def unicode_letters_and_digits():\n return strategies.characters(\n whitelist_categories=[\n 'Lu', # Uppercase letters\n 'Ll', # Lowercase letters\n 'Lt', # Titlecase letters\n 'Lm', # Modifier letters\n 'Lo', # Other letters\n 'Nd', # Decimal digit numbers\n 'No', # Other number\n ]\n )", "def encodings(self) -> list[int]:\n pass", "def test_latin_parse_diacritics(self):\n inputs = [\"a\", \"ū\", \"ï\"]\n outputs = [self.latin_transcriber._parse_diacritics(char) for char in inputs]\n target = [\n unicodedata.normalize(\"NFC\", c)\n for c in [\n \"a///\",\n \"u/\" + lat.chars.LONG + \"//\",\n \"i//\" + lat.chars.DIAERESIS + \"/\",\n ]\n ]\n self.assertEqual(outputs, target)", "def _check_unicode_message(text):\n\t\tfor char in text:\n\t\t\tcode = 
ord(char)\n\t\t\tif (0xd800 <= code <= 0xdfff) or (code > 0xffff):\n\t\t\t\traise SMSTradeError(u\"the message can not be represented in UCS2\")\n\t\tif len(text) > 70:\n\t\t\traise SMSTradeError(u\"too many characters in message, unicode SMS may contain up to 70 characters\")", "def unicode_flag(self):\n if not self.code:\n return \"\"\n\n # Don't really like magic numbers, but this is the code point for [A]\n # (Regional Indicator A), minus the code point for ASCII A. By adding\n # this to the uppercase characters making up the ISO 3166-1 alpha-2\n # codes we can get the flag.\n OFFSET = 127397\n points = [ord(x) + OFFSET for x in self.code.upper()]\n return chr(points[0]) + chr(points[1])", "def test_parse_ical_accented_chars(self):\n pass", "def all_characters(self)->list:\n return [chr(i) for i in range(ord(self._first), ord(self._last) + 1)]", "def Basic_0032(Place):\n CountryCode, PlaceId = CountryCode_PlaceID(Place)\n return_emits = []\n try:\n BaseTextList = Place.findall(ns+\"BaseText\")\n for BaseText in BaseTextList:\n btext = BaseText.text\n btext_attrib = BaseText.attrib\n try: # Get the languageCode\n btext_lc = btext_attrib['languageCode']\n except:\n btext_lc = 'None'\n if btext_lc != \"ar\" and btext_lc != \"ur\":\n for char in btext:\n if re.match(ur'[\\u0621-\\u063A]+|[\\u0640-\\u064A]+|[\\u066E-\\u066F]+|[\\u0671-\\u06D3]+|[\\u06EE-\\u06EF]+|[\\u06FA-\\u06FF]+',char):\n btext = btext.replace('|', '#') # remove pipe symbols so that they don't mess up the output\n btext = btext.encode('UTF-8') # convert the unicode to bytestrings for the return\n emit_string = 'Basic_0032|'+CountryCode+'|'+PlaceId+'|Invalid: Arabic characters found in BaseText|'+btext+'|languageCode=\"'+btext_lc+'\"'\n return_emits.append(emit_string)\n break\n except:\n pass\n return return_emits" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to reset instrument commands.
def reset_instrument(self):
    return self.inst.write('*RST')
[ "def __reset():\n\n global _COMMANDS\n\n _COMMANDS = {}\n\n arguments.reset_parser()", "def reset(self):\n self.query_lines(\"ri\", 3) # Resets the device\n self.query_lines(\"rfsm 1\", 2) # Turns comms on", "def reset(self):\r\n try:\r\n self.write(\"*RST\") # Calling \"write\" instance to write SCPI commands\r\n self.default_setup() # Calling \"default setup\" instance\r\n except Exception as ex:\r\n self.exceptionhandler(ex)", "def ObsReset(self):\n handler = self.get_command_object(\"ObsReset\")\n handler()", "def resetTool(*args, **kwargs):\n\n pass", "def reset(self):\r\r\n self.read(\"*cls\")\r\r\n self.waitForCompletion()\r\r\n self.read(\"*RST\") # Reset and query\r\r\n self.dev.write(\"*cls\")\r\r\n while self.read(\"*OPC?\") != \"1\": time.sleep(1) # Wait until completion\r\r", "def resetDeviceStates(self):", "def reset(self):\n self.enable_undo(enable=False)\n self.enable_redo(enable=False)\n self.undo_stack = []\n self.redo_stack = []", "def reset(self):\n self._cmd_line = 0\n self._file_line = 0", "async def reset(ctx):\n racers.clear()\n result.clear()\n await ctx.send('I have cleared the racers and the results')", "def resetTool(*args, **kwargs)->None:\n pass", "def resetSim(self):\n self.powers = []", "def reset(self):\n self._i2c.send(6, 0x00)", "def reset_interrupts(self):\n\n self.read_interrupt_capture(0)\n self.read_interrupt_capture(1)\n return", "def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060", "def reset(self):\n self._internal_pong.machine_reset()", "def system_reset(self):\n self.send([SYSTEM_RESET])", "def reset(self):\n\n # Issue the reset command\n try:\n self.crate_resetting = True\n # Reset the FRU init status to stop attempts to read the sensors\n self.frus_inited = False\n # Wait a few seconds to allow any existing ipmitool requests to complete\n print(\"reset: Short wait before resetting (2 s)\")\n time.sleep(2.0)\n # Force the records to invalid\n print(\"reset: Force sensor read to set invalid\")\n self.read_sensors()\n print(\"reset: Triggering records to scan\")\n self.scan_list.interrupt()\n self.mch_comms.connected = False\n # Stop the ipmitool session. System will reconnect on restart\n self.mch_comms.ipmitool_shell.terminate()\n time.sleep(2.0)\n #print(\"reset: Killing ipmitool shell process\")\n self.mch_comms.ipmitool_shell.kill()\n self.mch_comms.ipmitool_shell = None\n # Stop the reader thread\n #print(\"reset: Stopping thread\")\n self.mch_comms.stop = True\n # Wait for the thread to stop\n self.mch_comms.t.join()\n #print(\"reset: Thread stopped\")\n self.mch_comms.t = None\n # Allow the thread to restart\n self.mch_comms.stop = False\n #print(\"reset: Exiting \")\n # Reset the crate\n print(\"reset: Resetting crate now\")\n self.mch_comms.call_ipmitool_direct_command([\"raw\", \"0x06\", \"0x03\"])\n\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n # Be silent. 
We expect this command to timeout.\n print('reset: reset command sent')\n pass\n\n # Reconnect to the crate\n print('reset: reconnecting')\n self.mch_comms.ipmitool_shell_reconnect()", "def _clear(self):\n self._commands = []\n self._activeMacros = []\n self._index = 0\n self._emitSignals()\n self._inUndoRedo = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
queries the database for a specific character; takes a name and returns a json with the lines
def lines_from_char(character):
    query = f"""
SELECT script_l FROM script
JOIN characters 
ON characters.char_id = script.characters_char_id
WHERE name = '{character}'
"""
    data = pd.read_sql_query(query,engine)
    return data.to_json(orient="records")
[ "def lines_from_char_ep(character,ep):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\nWHERE name = '{character}' and episode = '{ep}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def check_character(char_name):\r\n db.connect(reuse_if_open=True)\r\n query = Character.select().where(Character.name ** char_name).exists()\r\n db.close()\r\n return query", "def search_from_sqlite(self, key):\n key = ('.*' +key+ '.*',)\n conn = get_sqlite()\n c = conn.cursor()\n conn.create_function(\"REGEXP\", 2, regexp)\n c.execute('SELECT * FROM vertices WHERE name REGEXP ? ', key)\n results = c.fetchall()\n\n return json.dumps([{\n 'name': r[1],\n 'size': r[3],\n 'parent': r[2],\n 'last_accessed': r[4],\n 'last_modified': r[5]} for r in results])", "def get_character_detail(chara_name: str) -> dict:\n\n chara_misc_json = load_characters_config()\n chara_details = list(filter(lambda x: (x['name'] == chara_name), chara_misc_json))\n\n if chara_details:\n return chara_details[0]\n else:\n return None", "def test_get_character_by_name(name: str = 'Ajak'):\n response = session.get(base_url.format(character_by_name.format(name)))\n assert response.ok", "def fetchPilotData(charName):\n\tprint('DB polled for {}'.format(charName))\n\n\tconn = lite.connect(DB_NAME)\n\tcurs = conn.cursor()\n\tparam = (charName,)\n\tquery = \"SELECT characterID, name, homeStationID, corpID, corpName, allianceID, allianceName, intelligence, \"\n\tquery += \"memory, charisma, perception, willpower, skillInTrainingID, trainingEndTime, trainingToLevel, walletBalance \"\n\tquery += \"from pilot where name = ?\"\n\n\tcurs.execute(query, param)\n\tresult = curs.fetchone()\n\n\tpilot = {\n\t\t'characterID': ''\n\t\t,'name': ''\n\t\t,'homeStationID': ''\n\t\t,'corpID': ''\n\t\t,'corpName': ''\n\t\t,'allianceID': ''\n\t\t,'allianceName': ''\n\t\t,'intelligence': ''\n\t\t,'memory': ''\n\t\t,'charisma': ''\n\t\t,'perception': ''\n\t\t,'willpower': ''\n\t\t,'skillInTrainingID': ''\n\t\t,'trainingEndTime': ''\n\t\t,'trainingToLevel': ''\n\t\t,'walletBalance': 0 \n\t}\n\n\n\tpilot['characterID'] = result[0]\n\tpilot['name'] = result[1]\n\tpilot['homeStationID'] = result[2]\n\tpilot['corpID'] = result[3]\n\tpilot['corpName'] = result[4]\n\tpilot['allianceID'] = result[5]\n\tpilot['allianceName'] = result[6]\n\tpilot['intelligence'] = result[7]\n\tpilot['memory'] = result[8]\n\tpilot['charisma'] = result[9]\n\tpilot['perception'] = result[10]\n\tpilot['willpower'] = result[11]\n\tpilot['skillInTrainingID'] = result[12]\n\tpilot['trainingEndTime'] = result[13]\n\tpilot['trainingToLevel'] = result[14]\n\tpilot['walletBalance'] = float(result[15])\n\n\tconn.close()\n\treturn pilot", "def sql_fetch_json(cursor: pymysql.cursors.Cursor):\n keys = []\n for column in cursor.description:\n keys.append(column[0])\n key_number = len(keys)\n\n json_data = []\n for row in cursor.fetchall():\n item = dict()\n for q in range(key_number):\n item[keys[q]] = row[q]\n json_data.append(item)\n\n return json_data", "def save_char(char):\r\n save_file = char['name'] + '.json'\r\n with open(save_file, 'w') as fp:\r\n json.dump(char, fp)\r\n return", "def getJson(self, row):\n postid = row[0]\n\n # feeds conll output and persists JSON in a local file. 
Must be streamed to a file, \n # as the JSON returned by semafor is framed parse JSON per line which must be converted\n # into array of JSON objects\n conllfilename = '/tmp/conll/conll_' + repr(postid)\n jsonfile = open('/tmp/conll/' + repr(postid) + '.json', 'w')\n if os.path.exists(conllfilename):\n process = subprocess.Popen('cat ' + conllfilename + ' | nc localhost 5000', shell=True, stdout=jsonfile)\n process.wait()\n process.kill() \n\n # read persisted JSON from local file system and append it to parsed array as a JSON\n # object.\n parsed = []\n with open('/tmp/conll/' + repr(postid) + '.json', 'r') as f:\n for line in f:\n parsed.append(json.loads(line.strip())) \n f.close()\n\n # update curosr to persist JSON returned by semafor server. parsed array must be cast\n # as a JSONB during the update\n updcursor = self.conn.cursor()\n updcursor.execute('UPDATE public.post SET parsed_json = CAST(%s as JSONB) WHERE postid=%s', (json.dumps(parsed), postid))\n updated_rows = updcursor.rowcount \n updcursor.close()", "def cursor_data(c):\r\n\r\n # pull column description\r\n d = []\r\n for i in range(len(c.description)):\r\n d.append(c.description[i][0])\r\n\r\n # fetch column entries\r\n c = c.fetchall()\r\n\r\n # compile list\r\n info = []\r\n for i in range(len(c)):\r\n # compile dictionary entry\r\n entry = {}\r\n for j in range(len(d)):\r\n entry[d[j]] = c[i][j]\r\n info.append(entry)\t\r\n\r\n # success\r\n return info", "def get_by_character(self, character_id):\n sql = \"SELECT {0} FROM people_{0} WHERE people=?\".format(self.conveyance_type)\n try:\n query_result = self.cursor.execute(sql, (str(character_id),))\n except Exception as e:\n raise Exception(\n \"An error occurred while getting a character %s in the database: query: %s - message: %s\"\n % (self.conveyance_type, sql, e)\n )\n\n rows = query_result.fetchall()\n starships = [s_id for _, s_id in rows]\n\n return starships", "def get_id(char_name):\r\n db.connect(reuse_if_open=True)\r\n query = Character.select(Character.uuid).where(Character.name ** char_name).get()\r\n return query.uuid", "def lookup(self, name):\n with closing(self._connection.cursor()) as cursor:\n cursor.execute('SELECT * FROM ' + TABLE_NAME + ' where name= ?', (name,))\n row = cursor.fetchone()\n #if you try to use the cursor not in the block above it won't let you b/c it closes it ex:\n #row = cursor.fetchone()\n\n\n if row:\n print row\n # because I have that\n return Polygon.from_row( row ) #(row[1], row[2], row[3]))", "def characters(term : str):\n r = requests.get(\n settings['apiurl'] + \"/characters\",\n params = {\n 'filter[name]': term\n },\n headers = settings['header']\n )\n\n if r.status_code != 200:\n raise serverError(r.text, r.status_code)\n \n try:\n jsd = ujson.loads(r.text)\n except ValueError:\n raise serializationFailed(r.text, r.status_code)\n else:\n if jsd['meta']['count']:\n return kitsuWrapper(\n jsd['data'],\n jsd['links']['next'] if 'next' in jsd['links'] else None,\n settings['header']\n )\n else:\n return jsd", "def load_chars(self):\n\t\tconn = lite.connect(DB_NAME)\n\t\tcurs = conn.cursor()\n\n\t\t# create a list of all keyfiles available\n\t\tcharKeys = get_all_keyfiles(\"chars\")\n\n\t\t# access each API, get data, put in DB\n\t\tfor charKey in charKeys:\n\t\t\tprint('charKey = {}'.format(charKey))\n\n\t\t\tchar = {\n\t\t\t\t'characterID': ''\n\t\t\t\t,'keyID': ''\n\t\t\t\t,'vCode': ''\n\t\t\t}\n\t\t\t\n\t\t\tfilename = '.\\\\chars\\\\' + charKey\n\n\t\t\tf = open(filename, 'r')\n\t\t\tchar['characterID'] = 
f.readline().rstrip('\\n')\n\t\t\tchar['keyID'] = f.readline().rstrip('\\n')\n\t\t\tchar['vCode'] = f.readline().rstrip('\\n')\n\t\t\tf.close()\n\n\t\t\tpilot = {\n\t\t\t\t'characterID': ''\n\t\t\t\t,'name': ''\n\t\t\t\t,'homeStationID': ''\n\t\t\t\t,'corpID': ''\n\t\t\t\t,'corpName': ''\n\t\t\t\t,'allianceID': ''\n\t\t\t\t,'allianceName': ''\n\t\t\t\t,'intelligence': ''\n\t\t\t\t,'memory': ''\n\t\t\t\t,'charisma': ''\n\t\t\t\t,'perception': ''\n\t\t\t\t,'willpower': ''\n\t\t\t\t,'skillInTrainingID': ''\n\t\t\t\t,'trainingEndTime': ''\n\t\t\t\t,'trainingToLevel': ''\n\t\t\t\t,'walletBalance': 0 \n\t\t\t}\n\n\t\t\tpilot_skills = []\n\n\t\t\t# check if character already in DB file and clean old data\n\t\t\texists = checkDB('pilot', 'characterID', char['characterID'])\n\t\t\tprint('Check on pilot returned {}'.format(exists))\n\t\t\tif exists:\n\t\t\t\tprint('Purging old data for {}'.format(char['characterID']))\n\t\t\t\tcurs.execute(\"delete from pilot where characterID = ?\", (char['characterID'],))\n\t\t\t\tcurs.execute(\"delete from pilot_skill where characterID = ?\", (pilot['characterID'],))\n\t\t\t\tconn.commit()\n\n\t\t\tr = requests.get(\"https://api.eveonline.com/char/CharacterSheet.xml.aspx\", params=char)\n\t\t\troot = ET.fromstring(r.text)\n\n\t\t\tpilot['characterID'] = root[1][0].text\n\t\t\tpilot['name'] = root[1][1].text\n\t\t\tpilot['homeStationID'] = root[1][2].text\n\t\t\tpilot['corpID'] = root[1][9].text\n\t\t\tpilot['corpName'] = root[1][8].text\n\t\t\tpilot['allianceID'] = root[1][11].text\n\t\t\tpilot['allianceName'] = root[1][10].text\n\t\t\tpilot['intelligence'] = root[1][30][0].text\n\t\t\tpilot['memory'] = root[1][30][1].text\n\t\t\tpilot['charisma'] = root[1][30][2].text\n\t\t\tpilot['perception'] = root[1][30][3].text\n\t\t\tpilot['willpower'] = root[1][30][4].text\n\t\t\tpilot['walletBalance'] = float(root[1][28].text)\n\n\t\t\tfor row in root.find('.//rowset[@name=\"skills\"]'):\n\t\t\t\tskill_entry = (\n\t\t\t\t\tpilot['characterID']\n\t\t\t\t\t,row.get('typeID')\n\t\t\t\t\t,row.get('level')\n\t\t\t\t\t,row.get('skillpoints')\n\t\t\t\t)\n\n\t\t\t\tpilot_skills.append(skill_entry)\n\n\t\t\tr = requests.get(\"https://api.eveonline.com/char/SkillInTraining.xml.aspx\", params=char)\n\t\t\troot = ET.fromstring(r.text)\n\n\t\t\tif len(list(root[1])) == 8:\n\t\t\t\tpilot['skillInTrainingID'] = root[1][3].text\n\t\t\t\tpilot['trainingEndTime'] = root[1][1].text\n\t\t\t\tpilot['trainingToLevel'] = root[1][6].text\n\t\t\telse:\n\t\t\t\tpilot['skillInTrainingID'] = 'Not training'\n\t\t\t\tpilot['trainingEndTime'] = 'n/a'\n\t\t\t\tpilot['trainingToLevel'] = 'n/a'\n\n\n\n\t\t\tprint('API polled for {}'.format(pilot['name']))\n\t\t\tparam = (\n\t\t\t\tpilot['characterID']\n\t\t\t\t,pilot['name'] \n\t\t\t\t,pilot['homeStationID']\n\t\t\t\t,pilot['corpID']\n\t\t\t\t,pilot['corpName']\n\t\t\t\t,pilot['allianceID']\n\t\t\t\t,pilot['allianceName']\n\t\t\t\t,pilot['intelligence']\n\t\t\t\t,pilot['memory']\n\t\t\t\t,pilot['charisma']\n\t\t\t\t,pilot['perception']\n\t\t\t\t,pilot['willpower']\n\t\t\t\t,pilot['skillInTrainingID']\n\t\t\t\t,pilot['trainingEndTime']\n\t\t\t\t,pilot['trainingToLevel']\n\t\t\t\t,pilot['walletBalance']\n\t\t\t)\n\t\t\t# insert pilot data into DB\n\t\t\tcommand = \"insert into pilot (characterID, name, homeStationID, corpID, corpName, allianceID, allianceName, intelligence, \"\n\t\t\tcommand += \"memory, charisma, perception, willpower, skillInTrainingID, trainingEndTime, trainingToLevel, walletBalance) \"\n\t\t\tcommand += \"VALUES (?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, ?, ?)\"\n\t\t\tcurs.execute(command, param)\n\t\t\tconn.commit()\n\n\t\t\t#insert pilot_skill data into DB\n\t\t\tcurs.executemany(\"insert into pilot_skill (characterID, skillID, skillLevel, skillPoints) VALUES (?, ?, ?, ?)\", pilot_skills)\n\t\t\tconn.commit()\n\t\t\tprint('Database updated correctly for {}\\n'.format(pilot['name']))\n\n\t\tprint('All character keyfiles accounted for.\\n')\n\t\tconn.close()", "async def fetch_character(self, name: str, *, test: bool = False) -> TibiaResponse[Optional[Character]]:\n response = await self._request(\"GET\", get_character_url(name.strip()), test=test)\n return response.parse(CharacterParser.from_content)", "async def fetch_character(self, name, *, test=False):\n response = await self._request(\"GET\", Character.get_url(name.strip()), test=test)\n start_time = time.perf_counter()\n char = Character.from_content(response.content)\n parsing_time = time.perf_counter() - start_time\n return TibiaResponse(response, char, parsing_time)", "def load_character():\n global character\n filename = 'character.json'\n with open(filename) as file_object:\n character = json.load(file_object)", "def fetch_all_characters(cls) -> Dict[str, Any]:\n res = cls._send_request(\"character\")\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
queries the database for a specific character and episode; takes a name and episode and returns a json with the filtered lines
def lines_from_char_ep(character,ep):
    query = f"""
SELECT script_l FROM script
JOIN characters 
ON characters.char_id = script.characters_char_id
INNER JOIN episodes
ON episodes.ep_id = script.episodes_ep_id
WHERE name = '{character}' and episode = '{ep}'
"""
    data = pd.read_sql_query(query,engine)
    return data.to_json(orient="records")
[ "def lines_from_char(character):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nWHERE name = '{character}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def stream_char(episode, user_path):\n \n # path to shows directories\n DATA_PLUMCOT = user_path\n \n show = episode.split('.')[0]\n season = episode.split('.')[1]\n ep = episode.split('.')[2]\n \n # load episodes list\n episodes_list = [episode]\n\n for episode in episodes_list:\n print(\"\\nCurrent episode\", episode)\n \n # process serie or film\n if len(episode.split('.')) == 3:\n series, _, _ = episode.split('.')\n elif len(episode.split('.')) == 2:\n series, _ = episode.split('.') \n \n # load mkv & aligned sentences\n mkv, aligned, sentences = load_files(series, episode, DATA_PLUMCOT)\n \n \n if mkv == \"\" and aligned == \"\":\n continue\n \n else: \n\n # credits for the current episode\n episode_characters = load_credits(episode, series, DATA_PLUMCOT)\n \n print(\"\\nCHARACTERS\\n\")\n for idx, char in enumerate(episode_characters):\n print(idx+1, char) \n\n # load pictures for the characters of the current episode\n pictures = load_photo(episode_characters, series, DATA_PLUMCOT)\n \n # options to load in the choice box\n options = []\n for name, val in pictures.items():\n # display photo in options\n if \"centroid\" in val:\n options.append({\"id\":name, \"image\":file_to_b64(val)})\n else : \n # display character's name when no picture\n options.append({\"id\":name, \"text\": name})\n # selection for all@ and #unknown#\n options.append({\"id\":\"all@\",\"text\": \"all@\"})\n options.append({\"id\":f\"#unknown#{episode}\",\"text\":f\"#unknown#{episode}\"})\n\n # find all sentences with non available character\n sentences_choice_not_available = [(sentence, idx) for idx, sentence in enumerate(sentences) if sentence._.speaker == 'not_available' if str(sentence) != '']\n\n print(\"Sentences to annotate :\", len(sentences_choice_not_available))\n \n for el in sentences_choice_not_available: \n \n sentence = el[0]\n sentence_id = el[1]\n \n try :\n if sentences.index(sentence) != 0:\n left = sentences[sentences.index(sentence)-1]\n right = sentences[sentences.index(sentence)+1]\n # beug : left index = last sentence index in the list when current sentence is 0\n else:\n left = \" \"\n right = sentences[sentences.index(sentence)+1]\n\n except IndexError:\n left = \" \"\n right = \" \" \n\n # video\n if str(left) != \" \" and str(right) != \" \":\n start_time = left._.start_time\n end_time= right._.end_time + 0.1\n else:\n start_time = sentence._.start_time\n end_time = sentence._.end_time +0.1 \n \n speaker = sentence._.speaker\n\n # extract corresponding video excerpt\n video_excerpt = mkv_to_base64(mkv, start_time, end_time)\n\n yield {\n \"video\": video_excerpt,\n \"speaker\": f\"{speaker}\",\n \"text\": f\"{sentence}\",\n \"pictures\" : pictures,\n \"options\" : options,\n \"start_time\": f\"{sentence._.start_time}\",\n \"end_time\": f\"{sentence._.end_time}\",\n \"sentence_id\" : sentence_id,\n \"meta\": {\"start_extract\": start_time, \"end_extract\": end_time, \n \"episode\": episode, \"mkv_path\": mkv},\n }", "def fetch_available_episodes():\n db = opendb()\n animes = db.all()\n db.close()\n if len(animes) == 0:\n print('There is no animes in database')\n return []\n avail_episodes = []\n for anime in animes:\n print('anime: {0}'.format(anime['name']))\n start_date = anime['start_date']\n start_date_datetime = 
datetime(\n start_date.year, start_date.month, start_date.day)\n time_interval = datetime.now() - start_date_datetime\n time_interval_days = time_interval.total_seconds() \\\n / timedelta(days=1).total_seconds()\n episode_now = int(ceil(time_interval_days / 7)) - anime['offset']\n episode_now = anime['total_ep'] if episode_now > anime['total_ep'] \\\n else episode_now\n print('days between now and start day:{0}'.format(time_interval_days))\n print('episode available now:{}'.format(episode_now))\n print('downloaded:{}\\n\\n'.format(anime['dled_ep']))\n if episode_now > anime['dled_ep']:\n for i in range(anime['dled_ep'] + 1, episode_now + 1):\n avail_episodes.append({\n 'name': anime['name'],\n 'keyword': anime['keyword'],\n 'translation_team': anime['translation_team'],\n 'ep': i,\n 'folder': anime['folder']\n })\n return avail_episodes", "def api_show_episodes():\n return db.printTableEpisodes()", "def get_episode_details(token, url, season):\n u = url + str(season)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(u, headers=headers)\n json_data = json.loads(r.text).get('data')\n season_details = {}\n season_details['current_season'] = season\n if len(json_data) > 1:\n for episode in json_data:\n d = episode.get('firstAired')\n date = datetime.datetime.strptime(d, \"%Y-%m-%d\")\n today = datetime.datetime.today()\n if date.date() >= today.date():\n season_details['next_ep_no'] = episode.get('airedEpisodeNumber')\n season_details['next_air_date'] = episode.get('firstAired')\n season_details['ep_title'] = episode.get('episodeName')\n season_details['ep_overview'] = episode.get('overview')\n break\n else:\n season_details['next_ep_no'] = (json_data[len(json_data) - 1].get('airedEpisodeNumber'))\n season_details['next_air_date'] = (json_data[len(json_data) - 1].get('firstAired'))\n season_details['ep_title'] = (json_data[len(json_data) - 1].get('episodeName'))\n season_details['ep_overview'] = (json_data[len(json_data) - 1].get('overview'))\n else:\n season_details['next_ep_no'] = 1\n season_details['next_air_date'] = (json_data[0].get('firstAired'))\n season_details['ep_title'] = (json_data[0].get('episodeName'))\n season_details['ep_overview'] = (json_data[0].get('overview'))\n if season_details['next_air_date'] == \"\":\n season_details['next_air_date'] = 'TBD'\n if season_details['ep_title'] == \"\" or season_details['ep_title'] is None:\n season_details['ep_title'] = 'TBD'\n if season_details['ep_overview'] == \"\" or season_details['ep_overview'] is None:\n season_details['ep_overview'] = 'TBD'\n return season_details", "def get_episode_queries(self, episode):\n key = self.title_key(episode.title)\n path = Path(re.sub(\"[.\\-_]+\", \" \", str(episode.path)))\n queries = [\" \".join(part.split()[:i]) for part in path.parts for i in range(1, len(part.split()) + 1)]\n queries += [\" \".join(part.split()[i:]) for part in path.parts for i in range(len(part.split()))]\n queries = [q for q in queries if self.title_key(q) in key or key in self.title_key(q)]\n return [episode.title] + list(set(queries))", "def airports():\n\n queryType = \"SQL++ query - scoped to inventory: \"\n partialAirportName = request.args['search']\n\n queryPrep = \"SELECT airportname FROM `travel-sample`.inventory.airport WHERE \"\n sameCase = partialAirportName == partialAirportName.lower() or partialAirportName == partialAirportName.upper() #bool\n\n if sameCase and len(partialAirportName) == 3:\n queryPrep += \"faa=$1\"\n queryArgs = [partialAirportName.upper()]\n elif sameCase and 
len(partialAirportName) == 4:\n queryPrep += \"icao=$1\"\n queryArgs = [partialAirportName.upper()]\n else:\n queryPrep += \"POSITION(LOWER(airportname), $1) = 0\"\n queryArgs = [partialAirportName.lower()]\n\n results = cluster.query(queryPrep, *queryArgs)\n airports = [x for x in results]\n\n context = [queryType + queryPrep]\n\n response = make_response(jsonify({\"data\": airports, \"context\": context}))\n return response", "def chunk_to_dict(self, episode_chunk):\n names = {}\n for line in episode:\n if ':' not in line or line == ['']:\n continue\n line = line.split(':', 1)\n name, script = line[0].strip().translate(None, \".'\"), line[1].strip(' ')\n if '&' in name or 'and' in name:\n continue\n if '/' in name:\n name = name[0:name.find('/')]\n if name == \"Pen\":\n name = \"Finn\"\n if name == \"Lich\":\n name = \"The Lich\"\n if re.search('[a-zA-Z]', script) != None:\n names.setdefault(name, []).append(script)\n\n return names", "def episode(request, ep_id):\n new_episode = get_object_or_404(Episode, id=ep_id)\n crisis_updates = new_episode.get_viewable_crisis_updates_for_player(request.user)\n emits = new_episode.get_viewable_emits_for_player(request.user)\n return render(\n request,\n \"character/episode.html\",\n {\n \"episode\": new_episode,\n \"updates\": crisis_updates,\n \"emits\": emits,\n \"page_title\": str(new_episode),\n },\n )", "def get_episodes_data(session: Session, show_id: str, conn_id: str, season_id: str) -> dict:\n response = session.get(f\"https://www.vvvvid.it/vvvvid/ondemand/{show_id}/season/{season_id}?conn_id={conn_id}\", headers=HEADERS)\n response.raise_for_status()\n episodes = response.json()['data']\n #check if none of the episodes have url or are playable\n are_not_downloadable = all(not episode['embed_info'] or not episode ['playable'] for episode in episodes)\n if are_not_downloadable:\n raise Exception(\"Non e' possibile scaricare questo show.\")\n \n return episodes", "def search_anime_episode_list(episode_endpoint: str) -> list:\n\n request_url = f\"{BASE_URL}{episode_endpoint}\"\n\n response = requests.get(url=request_url, headers={\"UserAgent\": UserAgent().chrome})\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # With this id. 
get the episode list.\n episode_page_ul = soup.find(\"ul\", {\"id\": \"episode_related\"})\n if episode_page_ul is None or isinstance(episode_page_ul, NavigableString):\n msg = f\"Could not find any anime eposiodes with name {anime_name}\"\n raise ValueError(msg)\n episode_page_li = episode_page_ul.children\n\n episode_list = []\n for episode in episode_page_li:\n if isinstance(episode, Tag):\n url = episode.find(\"a\")\n if url is None or isinstance(url, NavigableString):\n continue\n title = episode.find(\"div\", {\"class\": \"name\"})\n if title is None or isinstance(title, NavigableString):\n continue\n\n episode_list.append(\n {\"title\": title.text.replace(\" \", \"\"), \"url\": url[\"href\"]}\n )\n\n return episode_list", "def expand_episodes( server, db, hash, dry_run = False ):\n \n torrent_files = None\n episodes = []\n \n for episode in db[ \"torrents\" ][ hash ][ \"episodes\" ]:\n \n if \"pattern\" in episode:\n pattern = episode[ \"pattern\" ]\n match_fields = ( \"episode\", \"season\", \"alt\", )\n if torrent_files is None:\n try:\n torrent_files = server.torrent_files( ( hash, ) )[ hash ]\n except anime_manager.torrents.RPCError:\n if dry_run:\n continue\n else:\n raise\n \n for torrent_file in torrent_files:\n # File without top-level name, as expected in the database\n file = pathlib.Path( *torrent_file.parts[ 1 : ] )\n \n generated = {\n \"show\" : episode[ \"show\" ],\n \"file\" : file,\n }\n for field in match_fields:\n if field in pattern:\n generated[ field ] = pattern[ field ]\n \n match = pattern[ \"regex\" ].search( file.as_posix() )\n if not match:\n continue\n \n for field in match_fields:\n if field not in pattern[ \"matches\" ]:\n continue\n \n try:\n value = match.group(\n pattern[ \"matches\" ][ field ][ \"group\" ]\n )\n if field in ( \"episode\", \"season\", ):\n try:\n generated[ field ] = int( value )\n if \"offset\" in pattern[ \"matches\" ][ field ]:\n generated[ field ] -= (\n pattern[ \"matches\" ][ field ][ \"offset\" ]\n )\n except ValueError:\n generated[ field ] = value\n else:\n generated[ field ] = value\n except IndexError:\n pass\n \n # Skip any files that matched but are missing fields\n if sum( f not in generated for f in ( \"episode\", \"season\", ) ):\n log.warning( (\n \"torrent {!r} file {!r} matched regex {!r} but \"\n \"required fields are missing, skipping\"\n ).format(\n hash,\n file.as_posix(),\n pattern[ \"regex\" ].pattern\n ) )\n else:\n episodes.append( generated )\n \n else:\n episodes.append( episode )\n \n return episodes", "def show_link_for_episode( db, episode ):\n \n extension_placeholder = \"$EXTENSION$\"\n \n show = episode[ \"show\" ]\n multiseason = len( show[ \"seasons\" ] ) > 1\n # multiseason = sum( \"title\" not in s for s in show[ \"seasons\" ][ 1 : ] )\n season = show[ \"seasons\" ][ episode[ \"season\" ] - 1 ]\n has_season_title = \"title\" in season\n season_title = season[ \"title\" ] if has_season_title else show[ \"title\" ]\n \n link = pathlib.Path( show[ \"title\" ] )\n \n if \"alt\" in episode:\n link = link / episode[ \"alt\" ]\n \n if multiseason:\n padding = int( math.log10( len( show[ \"seasons\" ] ) ) ) + 1\n if \"title\" in season:\n link = link / \"{:0{}} - {}\".format(\n episode[ \"season\" ],\n padding,\n season [ \"title\" ]\n )\n else:\n link = link / \"{:0{}} - Season {}\".format(\n episode[ \"season\" ],\n padding,\n episode[ \"season\" ]\n )\n \n if \"episode\" in episode:\n episode_number = episode[ \"episode\" ]\n if issubclass( type( episode_number ), ( str, bytes ) ):\n try:\n 
episode_number = int( episode[ \"episode\" ] )\n except ( ValueError, TypeError ):\n pass\n else:\n episode_number = None\n \n if (\n \"episodes\" in season\n and season[ \"episodes\" ] == 1\n and episode_number == 1\n ):\n if multiseason and not has_season_title:\n link = link / \"{} - s{}.{}\".format(\n show[ \"title\" ],\n episode[ \"season\" ],\n extension_placeholder\n )\n else:\n link = link / \"{}.{}\".format(\n season_title,\n extension_placeholder\n )\n else:\n use_e = multiseason and not has_season_title\n try:\n padding = (\n int( math.log10( season[ \"episodes\" ] ) )\n if \"episodes\" in season else 1\n ) + 1\n try:\n episode_string = \"{:0{}d}\".format( episode_number, padding )\n except ValueError:\n episode_string = \"{:f}\".format( episode_number )\n whole, decimal = episode_string.strip( \"0\" ).split(\n \".\",\n maxsplit = 1\n )\n episode_string = \"{:0{}}.{}\".format(\n int( whole ),\n padding,\n decimal if decimal else 0\n )\n if use_e:\n episode_string = \"e\" + episode_string\n except ValueError:\n episode_string = str( episode_number )\n episode_string = \"{}{}\".format(\n \"e\" if use_e and episode_string[ 0 ] in \"0123456789\" else \" \",\n episode_string\n )\n \n if multiseason and not has_season_title:\n link = link / \"{} - s{}{}.{}\".format(\n show[ \"title\" ],\n episode[ \"season\" ],\n episode_string,\n extension_placeholder\n )\n else:\n link = link / \"{} - {}.{}\".format(\n season_title,\n episode_string.strip(),\n extension_placeholder\n )\n \n return link", "def getEpCast(imdbLink, dicChars):\n\n dicEpCast = dicChars.copy()\n\n urlIDMB = requests.get(imdbLink + \"fullcredits\").text\n soup = BeautifulSoup(urlIDMB, 'lxml')\n seriesTable = soup.find('table', {'class': 'cast_list'}).find_all('tr')\n\n for char in seriesTable:\n charInfo = char.find_all('td')\n if len(charInfo) == 4:\n actorName = charInfo[1].text.strip()\n\n key = normalizeName(actorName)\n\n if key in dicEpCast:\n dicEpCast[key] = '1'\n\n return \",\".join(x for x in dicEpCast.values())", "def funshion_get_title_by_id(single_episode_id, drama_id):\n html = get_content('http://pm.funshion.com/v5/media/episode?id={id}&cl=aphone&uc=5'.format(id = drama_id))\n c = json.loads(html)\n \n for i in c['episodes']:\n if i['id'] == str(single_episode_id):\n return c['name'] + ' - ' + i['name']", "def get_episodes(token, show_id):\n page = 1\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(page)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('links')\n first = json_data.get('first')\n last = json_data.get('last')\n no_of_seasons = 1\n if last > first:\n for p in range(1, last + 1):\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(p)\n s = get_season_no(token, url)\n if s > no_of_seasons:\n no_of_seasons = s\n else:\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes?page=' + str(1)\n s = get_season_no(token, url)\n if s > no_of_seasons:\n no_of_seasons = s\n url = 'https://api.thetvdb.com/series/' + str(show_id) + '/episodes/query?airedSeason='\n update_details = get_episode_details(token, url, no_of_seasons)\n return update_details", "def episode_list(request):\n if request.method == 'GET':\n user = request.GET.get('user')\n episodes = Episodes()\n episodes_list = episodes.get_user_episodes(user)\n return JSONResponse(episodes_list)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = 
DBSerializer(data=data)\n if serializer.is_valid():\n logging.debug('Creating an episode' + data)\n # serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)", "def watch(config,name,season,episode):\n # check to see if name, season, and episode have been defined\n if name and season and episode:\n # establish a connection to the local sqlite database\n conn = sqlite3.connect(os.path.dirname(os.path.realpath('__file__')) + '\\\\television.db')\n cursor = conn.cursor()\n # generate sql query statements\n se = \"%%Season %s Episode %s%%\" % (season,episode)\n sn = name\n # execute sql query statement against databse\n cursor.execute(\"SELECT episode_link_direct,name,episode_name FROM show WHERE episode_name LIKE ? AND name LIKE ?\", (se,sn))\n # grab first result\n r = cursor.fetchone()\n click.echo(r[0])\n \n # display what the user is watching\n click.secho(\"Now watching: %s %s\" % (r[1],r[2]), bg='green',fg='white')\n # launch in web browser\n webbrowser.open_new_tab(r[0].strip('[]').replace(\"'\",\"\"))\n\n else:\n # let the user know that they have not defined parameters correctly\n click.secho(\"Invalid Input. Try using tv watch --help\",bg='red',fg='white')", "def import_data(filename):\r\n regex = re.compile(\"\"\"\"(?P<show_name>.*?)\"\\s+\\((?P<year>\\d+)(?:|/.*?)\\)\\s+\\{(?P<episode_name>.*?)\\s?\\(\\#(?P<season_no>\\d+)\\.(?P<episode_no>\\d+)\\)\\}\"\"\")\r\n\r\n with codecs.open(filename, \"r\", \"latin-1\") as ratings:\r\n # Generate all the lines that matched.\r\n matches = (match for match in (regex.search(line.strip()) for line in ratings) if match)\r\n counter = 0\r\n for match in matches:\r\n counter += 1\r\n if not counter % 100:\r\n print counter\r\n episode = {}\r\n for field in [\"show_name\", \"year\", \"episode_name\", \"episode_no\", \"season_no\"]:\r\n episode[field] = match.group(field)\r\n\r\n # If the episode has no name it is given the same name as on imdb.com for consistency.\r\n if not episode[\"episode_name\"]:\r\n episode[\"episode_name\"] = \"Episode #%s.%s\" % (episode[\"season_no\"], episode[\"episode_no\"])\r\n\r\n try:\r\n show = session.query(Show).filter_by(name=episode[\"show_name\"], year=episode[\"year\"]).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n show = Show(episode[\"show_name\"], episode[\"year\"])\r\n session.add(show)\r\n\r\n try:\r\n episode = session.query(Episode).filter_by(name=episode[\"episode_name\"], show=show).one()\r\n except sqlalchemy.orm.exc.NoResultFound:\r\n episode = Episode(show, episode[\"episode_name\"], episode[\"season_no\"], episode[\"episode_no\"])\r\n session.add(episode)\r\n\r\n #session.commit()\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
queries the database to insert a line from a character; takes a name, character and episode and returns a confirmation message
def new_line(script_l, character, episode):
    if up.check("characters", character):
        char_id = up.giveId("characters", character)
    else:
        up.insertCharacter(character)
        char_id = up.giveId("characters", character)
    if up.check("episodes", episode):
        ep_id = up.giveId("episodes", episode)
    else:
        up.insertEpisode(episode)
        ep_id = up.giveId("episodes", episode)
    if up.check("script", script_l) and up.check("characters", character) and up.check("episodes", episode):
        return "line exists"
    else:
        engine.execute(f"""
        INSERT INTO script (script_l, characters_char_id, episodes_ep_id) VALUES
        ("{script_l}", "{char_id}", "{ep_id}");
        """)
        return f"successfully loaded: {character},{script_l},{episode}"
[ "def insertCharacter(string):\n if check(\"character\", string):\n return \"character exists\"\n else:\n engine.execute(f\"INSERT INTO characters (name) VALUES ('{string}');\")", "def insertEpisode(ep):\n if check(\"episodes\", ep):\n return \"episode exists\"\n else:\n engine.execute(f\"INSERT INTO episodes (episode) VALUES ('{ep}');\")", "def insert_row(conn, episode_info):\n\tp_key = get_p_key(episode_info)\n\t\n\tinsert_statement = f'INSERT INTO shows (p_key, show_stub, show_name, season, episode_number, episode_title watched_status, hidden_status) VALUES (\\\"{p_key}\\\", \\\"{episode_info[\"show_stub\"]}\\\", \\\"{episode_info[\"show_name\"]}\\\", {episode_info[\"season\"]}, {episode_info[\"episode_number\"]}, {episode_info[\"episode_title\"]}, {episode_info[\"watched_status\"]}, {episode_info[\"hidden_status\"]});'\n\t\n\texecute_sql(conn, insert_statement)", "def write_character(data):\r\n db.connect(reuse_if_open=True)\r\n Character.create(**data)\r\n db.commit()\r\n db.close()", "def add_quote(db, chan, nick, add_nick, msg):\n try:\n db.execute('''INSERT OR FAIL INTO quote \n (chan, nick, add_nick, msg, time) \n VALUES(?,?,?,?,?)''',\n (chan, nick, add_nick, msg, time.time()))\n db.commit()\n except db.IntegrityError:\n return \"message already stored, doing nothing.\"\n return \"quote added.\"", "def create_episode(conn, episode):\n sql = '''INSERT INTO episode(date, id_show, id_corpus, partition, path)\n VALUES(?,?,?,?,?)'''\n cur = conn.cursor()\n cur.execute(sql, episode)\n return cur.lastrowid", "def insertClip(dbConnection, audiourl, podcastName, description, parsedDate, title):\n try:\n cursor = dbConnection.cursor()\n title = title.replace(\"'\", \"''\")\n cursor.execute(\"INSERT INTO transcriptions(audiourl, realtimefactor, podcastname, transcription, description, date, title, pending, datetranscribed) VALUES('\" + audiourl + \"', NULL, '\" + podcastName + \"', NULL, '\" + description + \"', '\" + parsedDate + \"', '\" + title + \"', FALSE, NULL);\")\n dbConnection.commit()\n cursor.close()\n return True\n except:\n return False\n return False", "def add_word(word, word_en):\n\n global conn, word_id\n categories = get_categories()\n for k,v in categories.items():\n print(k, \" - \", v)\n\n category = input(\"Category Number: \")\n cur = conn.cursor()\n query = ''' INSERT INTO word (word, word_en, category) values (%s, %s, %s) RETURNING id '''\n values = (word.lower().title(), word_en.lower().title(), int(category))\n\n try:\n cur.execute(query, values)\n word_id = cur.fetchone()[0]\n if int(category) == 1:\n print(\"it's a verb\")\n add_verb()\n else:\n conn.commit()\n click.echo(\"The word %s is successfully inserted: word_id: %s\" % (word, word_id), nl=True)\n\n except Exception as e:\n print(\"couldn't insert data for word: \", e)\n conn.rollback()\n conn.close()\n print(\"database connection closed\")", "def insert_entry(entry):\n\n\tcfg.C.execute('INSERT INTO Contacts VALUES (?,?,?,?,?,?,?,?,?,?,?,?)',\n\t\tentry)\n\n\tcfg.DB.commit()", "def insert(sql, clue):\n\t# clue is [game, airdate, round, category, value, clue, answer]\n\t# note that at this point, clue[4] is False if round is 3\n\tif \"\\\\\\'\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\'\", \"'\")\n\tif \"\\\\\\\"\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\\"\", \"\\\"\")\n\tif not sql:\n\t\tprint clue\n\t\treturn\n\tsql.execute(\"INSERT OR IGNORE INTO airdates VALUES(?, ?);\", (clue[0], clue[1], ))\n\tsql.execute(\"INSERT OR IGNORE INTO categories(category) VALUES(?);\", (clue[3], 
))\n\tcategory_id = sql.execute(\"SELECT id FROM categories WHERE category = ?;\", (clue[3], )).fetchone()[0]\n\tclue_id = sql.execute(\"INSERT INTO documents(clue, answer) VALUES(?, ?);\", (clue[5], clue[6], )).lastrowid\n\tsql.execute(\"INSERT INTO clues(game, round, value) VALUES(?, ?, ?);\", (clue[0], clue[2], clue[4], ))\n\tsql.execute(\"INSERT INTO classifications VALUES(?, ?)\", (clue_id, category_id, ))", "def add_text_command(self, command_name: str, response: str, admin: bool):\n insert_text_command_sql = \"\"\" INSERT INTO text_commands (command_name, response, admin) VALUES (?,?,?)\"\"\"\n with db_conn(self.db_file) as c:\n c.execute(insert_text_command_sql, (command_name, response, admin))", "def insertion_artiste(nom, date_naissance, date_mort, activite):\n # création d'une liste vide pour y stocker les éventuelles erreurs\n erreurs = []\n if not nom:\n erreurs.append(\"Le nom fourni est vide\")\n if not date_naissance:\n erreurs.append(\"La date de naissance fournie est vide\")\n if not date_mort:\n erreurs.append(\"La date de décès fournie est vide\")\n if not activite:\n erreurs.append(\"L'activité fournie est vide\")\n\n # On vérifie que personne n'a entré cet artiste\n uniques = Artiste.query.filter(\n Artiste.artiste_nom == nom\n ).count()\n if uniques > 0:\n erreurs.append(\"Le nom est déjà inscrit dans notre base de données\")\n\n # Si on a au moins une erreur\n if len(erreurs) > 0:\n return False, erreurs\n\n # On crée une variable stockant les informations des champs\n artiste = Artiste(\n artiste_nom=nom,\n date_naissance=date_naissance,\n date_mort=date_mort,\n artiste_activite=activite\n )\n\n try:\n # On l'ajoute au transport vers la base de données\n db.session.add(artiste)\n # On envoie le paquet\n db.session.commit()\n\n # On renvoie l'artiste\n return True, artiste\n except Exception as erreur:\n # si le résultat du try retourne une erreur, la session de la base de données reste ouverte\n # et bloque la session d'insertion: si l'insertion ou le commit a une erreur, la session\n # reste ouverte et empêche de faire des insertions voire la lecture\n db.session.rollback()\n return False, [str(erreur)]", "def insertPlateForme():", "def dbsave(self):\n\t\tval,ok=QtGui.QInputDialog.getText(self,'Save %d bacteria to coolseqDB' % len(self.selection),'Enter description')\n\t\tprint(ok)\n\t\tif ok:\n\t\t\tseqs=[]\n\t\t\tfor cid in self.selection:\n\t\t\t\tseqs.append(self.cexp.seqs[cid])\n\t\t\ths.cooldb.savecoolseqs(self.cexp,self.cexp.cdb,seqs,val)", "def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()", "def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()", "def insert_data(entries):\n # really neat it 'understands' namedtuples!\n insert_sql = \"INSERT INTO episodes VALUES (?, ?, ?, ?)\"\n cur.executemany(insert_sql, entries)\n conn.commit()", "def create_speaker(conn, 
speaker):\n\n sql = ''' INSERT INTO speaker(name,gender,native)\n VALUES(?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, speaker)\n return cur.lastrowid", "def test_adds_seeming_notes(campaign):\n\n npc.commands.create_character.changeling('changeling mann', 'Beast', 'Hunterheart')\n character = campaign.get_character('changeling mann.nwod')\n assert ' Seeming Beast (8-again animal ken and free specialty; glamour adds to presence and composure; -4 untrained mental; no 10-again on Int)' in character.read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints a string representation of SnakemakeRule instance
def __repr__(self):
        template = """
        SnakemakeRule ({})
        
        - parent_id : {}
        - input : {}
        - output : {}
        - local : {}
        - template : {}
        - params : {}
        """
        return template.format(
            self.rule_id,
            self.parent_id,
            self.input,
            self.output,
            self.local,
            self.template,
            self.params,
        )
[ "def __str__(self):\n return \"[ %s ]\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def format_rule(stats: Stats, rule: Rule) -> str:\n text = __print_body(rule.body)\n text += ' -> '\n text += __print_head(stats, rule.head)\n return text", "def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())", "def print_rules(rules):\n for rule in rules:\n rule.print_rule()", "def print_rule(rule_type, rule):\n\n # a rule can have a comment that is shown along with the yaml output\n rule_comment = \"\"\n\n # The structure of a SG Rule\n #\n # FromPort\n # ToPort\n # IpProtocol\n # IpRanges\n # [ CidrIp, Description ]\n # Ipv6Ranges\n # [ CidrIpv6, Description ]\n # UserIdGroupPairs\n # [ GroupId, UserId, Description ]\n # [ GroupId, UserId, Description, PeeringStatus, VpcPeeringConnectionId, VpcId ]\n # PrefixListIds\n #\n # TODO: I'm Not sure what PrefixListIds are or what they're used for, so will research\n # that and add support for it/them later.\n\n if key_defined_and_not_none('ToPort', rule):\n to_port = rule['ToPort']\n else:\n to_port = \"0\"\n\n if key_defined_and_not_none('FromPort', rule):\n from_port = rule['FromPort']\n else:\n from_port = \"0\"\n\n source_ip_ranges = []\n if key_defined_and_not_none('IpRanges', rule):\n source_ip_ranges += rule['IpRanges']\n\n if key_defined_and_not_none('Ipv6Ranges', rule):\n source_ip_ranges += rule['Ipv6Ranges']\n\n source_sgs = []\n if key_defined_and_not_none('UserIdGroupPairs', rule):\n source_sgs = rule['UserIdGroupPairs']\n\n #print(f\"\\nBEGIN -- {rule_type} rule --\")\n #for k in list(rule.keys()):\n # print(f\"{k} = {rule[k]}\" )\n #print(f\"END -- {rule_type} rule --\")\n\n for s in source_sgs:\n #print(f\"DEBUG: {s}\")\n if not key_defined_and_not_none('Description', s):\n s['Description'] = \"\"\n\n if key_defined_and_not_none('GroupId', s):\n source_sg = s['GroupId']\n\n if source_sg == sg.id:\n source_sg = \"self\"\n elif key_defined_and_not_none(source_sg, sg_id_to_name):\n # the sg_id exists in the same VPC so we refer to it by name rather than id\n source_sg = sg_id_to_name[source_sg]\n elif key_defined_and_not_none('PeeringStatus', s):\n peer_account = s['UserId']\n peer_vpc = s['VpcId']\n peering_id = s['VpcPeeringConnectionId']\n rule_comment = source_sg + \"-in-\" + peer_account + \"-\" + peer_vpc + \"-via-\" + peering_id\n\n print(f\" - {{ type: \\\"{rule_type}\\\", proto: \\\"{rule['IpProtocol']}\\\", from: \\\"{from_port}\\\", to: \\\"{to_port}\\\", source: \\\"{source_sg}\\\", desc: \\\"{s['Description']}\\\" }}\", end=\"\")\n\n if rule_comment:\n print(f\" # {rule_comment}\")\n else:\n print(\"\")\n\n for s in source_ip_ranges:\n if not key_defined_and_not_none('Description', s):\n s['Description'] = \"\"\n source_cidr = \"\"\n if key_defined_and_not_none('CidrIp', s):\n source_cidr = s['CidrIp']\n if key_defined_and_not_none('CidrIpv6', s):\n source_cidr = s['CidrIpv6']\n print(f\" - {{ type: \\\"{rule_type}\\\", proto: \\\"{rule['IpProtocol']}\\\", from: \\\"{from_port}\\\", to: \\\"{to_port}\\\", source: [ \\\"{source_cidr}\\\" ], desc: \\\"{s['Description']}\\\" }}\")", "def __str__(self):\n return 'Rule' + str((self.lhs, ' '.join(self.rhs), self.sem))", "def print_rules(self):\n\n for i, rule in enumerate(self.rules):\n literals, score = rule\n print(\"Rule %d: \" % i, ' | '.join(\n literals) + ' => split_hat %.4f' % score)", "def makerule_to_string(makerule: MakefileRule) -> str:\n dependencies_str = \" \".join(makerule.dependencies)\n 
commands_str = \"\".join([f\"\\t{c}\\n\" for c in makerule.commands])\n as_str = f\"{makerule.target}: {dependencies_str}\\n{commands_str}\"\n if makerule.precious:\n as_str = f\"{as_str}\\n.PRECIOUS: {makerule.target}\"\n return f\"{as_str}\\n\"", "def __str__(self):\n msg = \"Machine '%s' (%d rules)\" % (self.uuid, len(self.rules))\n for rule_id in self.rules:\n msg += \"\\n - %s\" % self.get_condition(rule_id)\n return msg", "def test_print_rules(self,mock_stdout):\n\n self.view.print_rules()\n self.assertEqual(\"Connect Four is a two-player connection game in which \"\n \"the players take turns dropping pieces from the top \"\n \"into a seven-column, six-row vertically suspended \"\n \"grid. The pieces fall straight down, occupying the \"\n \"next available space within the column. The objective \"\n \"of the game is to connect four of one's own pieces of \"\n \"the same color next to each other vertically, \"\n \"horizontally, or diagonally before your opponent.\\n\"\n \"\\n\", mock_stdout.getvalue())", "def __str__(self):\n output = []\n for parent in sorted(self._byparent):\n for rule in sorted(self._byparent[parent]):\n output.append(str(rule))\n return '\\n'.join(output)", "def printRules(rules_inferred):\n for r in sorted(rules_inferred, key=order2):\n print(r[0], end='')\n print(' -> ', end='')\n print(r[1])", "def generate(self):\n items = [rule.generate() for rule in self.rules]\n return \"\\n\".join(items)", "def test_rule_representation():\n rule = MethodRule(method=\"POST\")\n assert repr(rule) == \"MethodRule(method='POST')\", \"Wrong representation\"", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __str__(self):\n\n # Start by building the canonical strings for the rules\n out_rules = {}\n for key, value in self.items():\n # Use empty string for singleton TrueCheck instances\n if isinstance(value, TrueCheck):\n out_rules[key] = ''\n else:\n out_rules[key] = str(value)\n\n # Dump a pretty-printed JSON representation\n return jsonutils.dumps(out_rules, indent=4)", "def describe_rule(Name=None):\n pass", "def __repr__(self):\n template = \"\"\"\n DataIntegrationRule ({})\n \n - inputs : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n\n return template.format(\n self.rule_id,\n self.inputs,\n self.output,\n self.local,\n self.template,\n self.params\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints a string representation of DataIntegrationRule instance
def __repr__(self):
        template = """
        DataIntegrationRule ({})
        
        - inputs : {}
        - output : {}
        - local : {}
        - template : {}
        - params : {}
        """

        return template.format(
            self.rule_id,
            self.inputs,
            self.output,
            self.local,
            self.template,
            self.params
        )
[ "def __str__(self):\n return \"[ %s ]\" % str(self.__rule)", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def format_rule(stats: Stats, rule: Rule) -> str:\n text = __print_body(rule.body)\n text += ' -> '\n text += __print_head(stats, rule.head)\n return text", "def __repr__(self):\n template = \"\"\"\n SnakemakeRule ({})\n \n - parent_id : {}\n - input : {}\n - output : {}\n - local : {}\n - template : {}\n - params : {}\n \"\"\"\n return template.format(\n self.rule_id,\n self.parent_id,\n self.input,\n self.output,\n self.local,\n self.template,\n self.params,\n )", "def __str__(self):\n return 'Rule' + str((self.lhs, ' '.join(self.rhs), self.sem))", "def print_rules(self):\n for idx, r in enumerate(self.rules):\n print(idx, \"=>\", r.__repr__())", "def __repr__(self):\n\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str", "def print_rules(self):\n\n for i, rule in enumerate(self.rules):\n literals, score = rule\n print(\"Rule %d: \" % i, ' | '.join(\n literals) + ' => split_hat %.4f' % score)", "def __str__(self):\n output = \"Solution for \" + self.vrpdata.InstanceName + \":\\n\"\n output += \"Total distance: \" + str(round(self.objective, 2)) + \"\\n\"\n output += \"Solution valid: \" + str(self.solutionValid) + \"\\n\\n\"\n count = 1 # count routes\n for r in self.routes:\n output += \"Route #\" + str(count) + \"\\n\" + str(r) + \"\\n\" + str(round(r.distance, 2)) + \"\\n\" + str(r.quantity) + \"\\n\"\n count += 1\n return output", "def print_rules(rules):\n for rule in rules:\n rule.print_rule()", "def __str__(self):\n return f\"{self.input_as_str()}\\n{self.output_as_str()}\"", "def __str__(self):\n if len(self.label) > 0:\n descr = [\"'%s', target='%s' [%s]\" % (self.label, self.target.name, self.target.body_type)]\n else:\n descr = [\"target='%s' [%s]\" % (self.target.name, self.target.body_type)]\n if self.baseline:\n descr[0] += ', initial baseline offset=%f' % (self.baseline.poly[-1],)\n if self.beam:\n descr[0] += ', beam height=%f' % (self.beam.height,)\n for scan_ind, scan in enumerate(self.scans):\n descr.append('%4d: %s' % (scan_ind, str(scan)))\n return '\\n'.join(descr)", "def __str__(self, printODData = False):\n networkStr = \"Link\\tFlow\\tCost\\n\"\n for ij in sorted(self.link, key=lambda ij : self.link[ij].sortKey):\n networkStr += \"%s\\t%f\\t%f\\n\" % (ij, self.link[ij].flow, self.link[ij].cost)\n if printODData == True:\n networkStr += \"\\n\"\n networkStr += \"OD pair\\tDemand\\tLeastCost\\n\"\n for ODpair in self.ODpair:\n networkStr += \"%s\\t%f\\t%f\\n\" % (ODpair, self.ODpair[ODpair].demand, self.ODpair[ODpair].leastCost)\n return networkStr", "def __str__(self):\n # self._examples.values\n string = \"\"\n for e in self._examples:\n for i, v in enumerate(e.values):\n if self._attributes[i].type == 'Nominal':\n string = string + self._attributes[i].domain[v]\n else:\n string = string + v\n if i == len(e.values) - 1:\n string = string + \"\\n\"\n else:\n string = string + \" \"\n return string", "def __str__(self):\n\n rep = 'Generalized Syllogism:\\n'\n rep += '\\ttask: {}\\n'.format(self.task)\n rep += '\\tencoded_task: {}\\n'.format(self.encoded_task)\n rep += '\\tp1: {}\\n'.format(self.p1)\n rep += '\\tp2: {}\\n'.format(self.p2)\n rep += '\\tquantifier_p1: 
{}\\n'.format(self.quantifier_p1)\n rep += '\\tquantifier_p2: {}\\n'.format(self.quantifier_p2)\n rep += '\\tfigure: {}\\n'.format(self.figure)\n rep += '\\tTerms:\\n'\n rep += '\\t\\tA: {}\\n'.format(self.A)\n rep += '\\t\\tB: {}\\n'.format(self.B)\n rep += '\\t\\tC: {}\\n'.format(self.C)\n return rep", "def __str__(self):\n msg = \"Machine '%s' (%d rules)\" % (self.uuid, len(self.rules))\n for rule_id in self.rules:\n msg += \"\\n - %s\" % self.get_condition(rule_id)\n return msg", "def test_rule_representation():\n rule = MethodRule(method=\"POST\")\n assert repr(rule) == \"MethodRule(method='POST')\", \"Wrong representation\"", "def __repr__( self ) :\n\n s = \"\"\n for v in self.data : s = s + endl1dmathmisc.endl1d_repr_xFormat % v + \"\\n\"\n return s", "def __repr__(self):\n repr_parts = ['<', self.__class__.__name__]\n \n repr_parts.append(' title = ')\n repr_parts.append(repr(self.title))\n \n step_type = self.type\n repr_parts.append(', type = ')\n repr_parts.append(step_type.name)\n \n required = self.required\n if required:\n repr_parts.append(', required = ')\n repr_parts.append(repr(required))\n \n values = self.values\n if (values is not None):\n repr_parts.append(', values = [')\n \n index = 0\n limit = len(values)\n \n while True:\n value = values[index]\n \n repr_parts.append(repr(value))\n \n index += 1\n if index == limit:\n break\n \n repr_parts.append(', ')\n continue\n \n repr_parts.append(']')\n \n repr_parts.append('>')\n return ''.join(repr_parts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the jacobian of a finger at configuration q0.
def compute_jacobian(self, finger_id, q0):
    frame_id = self.tip_link_ids[finger_id]
    return pinocchio.computeFrameJacobian(
        self.robot_model,
        self.data,
        q0,
        frame_id,
        pinocchio.ReferenceFrame.LOCAL_WORLD_ALIGNED,
    )
[ "def jacobianF(self):\n\n jacob = np.eye(len(self.x))\n jacob[0][1] = 1\n jacob[2][0] = self.x[4] + self.x[6] * self.x[0] + self.x[8] * (self.x[0] ** 2) / 2\n jacob[2][4] = self.x[0]\n jacob[2][6] = (self.x[0] ** 2) / 2\n jacob[2][8] = (self.x[0] ** 3) / 6\n jacob[3][0] = self.x[5] + self.x[7] * self.x[0] + self.x[9] * (self.x[0] ** 2) / 2\n jacob[3][5] = self.x[0]\n jacob[3][7] = (self.x[0] ** 2) / 2\n jacob[3][9] = (self.x[0] ** 3) / 6\n jacob[4][0] = self.x[6] + self.x[8] * self.x[0]\n jacob[4][6] = self.x[0]\n jacob[4][8] = (self.x[0] ** 2) / 2\n jacob[5][0] = self.x[7] + self.x[9] * self.x[0]\n jacob[5][7] = self.x[0]\n jacob[5][9] = (self.x[0] ** 2) / 2\n jacob[6][0] = self.x[8]\n jacob[6][8] = self.x[0]\n jacob[7][0] = self.x[9]\n jacob[7][9] = self.x[0]\n return jacob", "def jacobian_himmelblau(q):\n # Initialize\n J = np.zeros((2, 2), dtype='float64')\n x = q[0]\n y = q[1]\n\n # Fill the Jacobian\n J[0, 0] = 12 * x * x + 4 * y - 42 \n J[0, 1] = 4 * x + 4 * y\n J[1, 0] = 4 * x + 4 * y\n J[1, 1] = 4 * x + 12 * y *y - 26\n return J", "def jacob0(self, q=None):\n\n if q is None:\n q = np.copy(self.q)\n else:\n q = getvector(q, self.n)\n\n T = (self.base.inv() * self.fkine(q)).A\n U = np.eye(4)\n j = 0\n J = np.zeros((6, self.n))\n\n for link in self._fkpath:\n\n for k in range(link.M):\n\n if k != link.q_idx:\n U = U @ link.ets[k].T().A\n else:\n if link.ets[k]._axis == 'Rz':\n U = U @ link.ets[k].T(q[j]).A\n Tu = np.linalg.inv(U) @ T\n\n n = U[:3, 0]\n o = U[:3, 1]\n a = U[:3, 2]\n y = Tu[1, 3]\n x = Tu[0, 3]\n\n J[:3, j] = (o * x) - (n * y)\n J[3:, j] = a\n\n j += 1\n if link.ets[k]._axis == 'Ry':\n U = U @ link.ets[k].T(q[j]).A\n Tu = np.linalg.inv(U) @ T\n\n n = U[:3, 0]\n o = U[:3, 1]\n a = U[:3, 2]\n z = Tu[2, 3]\n x = Tu[0, 3]\n\n J[:3, j] = (n * z) - (a * x)\n J[3:, j] = o\n\n j += 1\n if link.ets[k]._axis == 'Rx':\n U = U @ link.ets[k].T(q[j]).A\n Tu = np.linalg.inv(U) @ T\n\n n = U[:3, 0]\n o = U[:3, 1]\n a = U[:3, 2]\n y = Tu[1, 3]\n z = Tu[2, 3]\n\n J[:3, j] = (a * y) - (o * z)\n J[3:, j] = n\n\n j += 1\n elif link.ets[k]._axis == 'tx':\n U = U @ link.ets[k].T(q[j]).A\n n = U[:3, 0]\n\n J[:3, j] = n\n J[3:, j] = np.array([0, 0, 0])\n\n j += 1\n elif link.ets[k]._axis == 'ty':\n U = U @ link.ets[k].T(q[j]).A\n o = U[:3, 1]\n\n J[:3, j] = o\n J[3:, j] = np.array([0, 0, 0])\n\n j += 1\n elif link.ets[k]._axis == 'tz':\n U = U @ link.ets[k].T(q[j]).A\n a = U[:3, 2]\n\n J[:3, j] = a\n J[3:, j] = np.array([0, 0, 0])\n\n j += 1\n\n return J", "def jacobian_sys(q):\n # Initialize\n J = np.zeros((2, 2), dtype='float64')\n x = q[0]\n y = q[1]\n A = 10000.0\n\n # Fill the Jacobian\n J[0, 0] = A * y\n J[0, 1] = A * x\n J[1, 0] = -np.exp(-x)\n J[1, 1] = -np.exp(-y)\n return J", "def jacobian(self, dt):\n raise NotImplementedError", "def jacobian_ur5(q, delta=0.0001):\n # Alocacion de memoria\n J = np.zeros((3,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n # Iteracion para la derivada de cada columna\n for i in xrange(6):\n # Copiar la configuracion articular inicial\n dq = copy(q);\n # Incrementar la articulacion i-esima usando un delta\n dq[i] = dq[i] + delta \n dT = fkine_ur5(dq)\n \n J[:,i] = (dT[0:3, 3] - T[0:3, 3])/delta\n\n return J", "def jacobian(self, dt):\n return self._F_cache", "def jacob0v(self, q=None):\n\n r = (self.base.inv() * self.fkine(q)).R\n\n Jv = np.zeros((6, 6))\n Jv[:3, :3] = r\n Jv[3:, 3:] = r\n\n return Jv", "def jacobian(x, u):\n yaw = x[2, 0]\n v = u[0, 0]\n jac = np.array([\n [1.0, 0.0, -dt * v * math.sin(yaw), dt * 
math.cos(yaw)],\n [0.0, 1.0, dt * v * math.cos(yaw), dt * math.sin(yaw)],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n\n return jac", "def frameJacobian(self, q, frame_id):\n return pin.computeFrameJacobian(self.model, self.data, q, frame_id)", "def jacobian(self, v):\n from scipy.special import erf, erfcx\n def integrand(u_arr):\n \"\"\"Integrand of self-consistency equation\"\"\"\n integrand_all = erfcx(-u_arr)\n #integrand_all = np.zeros(u_arr.shape)\n #u_mask = u_arr < -4.0\n #u = u_arr[u_mask]\n #integrand_all[u_mask] = -1. / np.sqrt(np.pi) * (1.0 / u - 1.0 / (2.0 * u**3) + \n #3.0 / (4.0 * u**5) - \n #15.0 / (8.0 * u**7))\n #integrand_all[~u_mask] = np.exp(u_arr[~u_mask]**2) * (1. + erf(u_arr[~u_mask]))\n return integrand_all\n\n\n mu_v = self.mu(v)\n sd_v = self.sd(v)\n low = (self.V_r - mu_v) / sd_v # reduced resting potential\n up = (self.theta - mu_v) / sd_v # reduced threshold\n f_low = integrand(low)\n f_up = integrand(up)\n jac_mat_1 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_mu\n jac_mat_2 = self.tau_m * 1e-3 * np.sqrt(np.pi) * self.mat_var / (2. * sd_v**2)\n\n jac_T = np.diag(1. / v**2) - \\\n jac_mat_1.T * (f_up - f_low) + \\\n jac_mat_2.T * (f_up * up - f_low * low)\n return jac_T.T", "def jacobian(self, x1, x2, out=None):\n raise NotImplementedError", "def jacobian_pose_ur5(q, delta=0.0001):\n J = np.zeros((7,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n Q = rot2quat(T[0:3,0:3])\n\n for i in xrange(6):\n dq = copy(q)\n dq[i] = dq[i] + delta\n dT = fkine_ur5(dq)\n dQ = rot2quat(dT[0:3,0:3])\n Jpos = (dT[0:3,3] - T[0:3,3])/delta\n Jrot = (dQ - Q)/delta\n #Jrot \t= np.squeeze(np.asarray(Jrot))\n J[:,i] = np.concatenate((Jpos, Jrot), axis=0)\n \n return J", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]", "def Jacobian(self,li=-1):\n #try to find already calculated Jacobian \n try:\n return self.J_cache[join]\n except Exception as e:\n pass\n\n zeroed_lengths = np.copy(self.lengths)\n if li != -1:\n zeroed_lengths[li:] = 0\n # Caluclate Jacobian rows\n Jp1 = [np.sum(np.multiply(-zeroed_lengths[row:], np.sin(np.cumsum(self.state)[row:]))) for row in range(self.n)]\n Jp2 = [np.sum(np.multiply(zeroed_lengths[row:], np.cos(np.cumsum(self.state)[row:]))) for row in range(self.n)]\n Jp3 = [0 for _ in range(self.n)]\n Jo1 = [0 for _ in range(self.n)]\n Jo2 = [0 for _ in range(self.n)]\n Jo3 = [1 for _ in range(self.n)]\n\n ret = np.array([Jp1, Jp2, Jp3, Jo1, Jo2, Jo3])\n self.J_cache[li] = ret\n return ret", "def jacobe(self, q=None):\n\n if q is None:\n q = np.copy(self.q)\n else:\n q = getvector(q, self.n)\n\n J0 = self.jacob0(q)\n Je = self.jacobev(q) @ J0\n return Je", "def jacobev(self, q=None):\n\n r = (self.base.inv() * self.fkine(q)).R\n r = np.linalg.inv(r)\n\n Jv = np.zeros((6, 6))\n Jv[:3, :3] = r\n Jv[3:, 3:] = r\n\n return Jv", "def jacobian(f, x, epsilon = 1e-10):\n f_ = f(x)\n value = np.zeros((len(f_), len(x)))\n \n for i in range(len(x)):\n f_ = partial_derivative(f, x, i, epsilon)\n value[:,i] = f_\n\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the initial search space using latin hypercube sampling.
def lhs_start(hyperbounds, n_samples, rng=None):
    low_bounds = []
    high_bounds = []
    for bound in hyperbounds:
        low_bounds.append(bound[0])
        high_bounds.append(bound[1])
    low_bounds = np.array(low_bounds, dtype=object)
    high_bounds = np.array(high_bounds, dtype=object)
    samples = sample_latin_hypercube(low_bounds, high_bounds, n_samples, rng=rng)
    samples = samples.tolist()
    return samples
[ "def generate_latin_hypercube(samples, param_dict, class_root, seed=10):\n # Set random seed\n random.seed(seed)\n\n # Create dictionary to hold sampled parameter values\n sample_points = {}\n for key in param_dict.keys():\n sample_points[key] = np.zeros(samples)\n Ndim = len(param_dict.keys())\n pnames = [key for key in param_dict.keys()]\n\n # List of indices for each dimension\n l = [range(samples) for j in range(Ndim)]\n\n # Generate samples until there are no indices left to choose\n for i in range(samples):\n\n # Randomly choose index and then remove the number that was chosen\n # (Latin hypercubes require at most one item per row and column)\n for j, p in enumerate(pnames):\n pmin, pmax = param_dict[p]\n idx = random.choice(l[j])\n\n # Get value at this sample point (add 0.5 to idx get bin centroid)\n sample_points[p][i] = pmin + (pmax - pmin) \\\n * (idx + 0.5) / float(samples)\n l[j].remove(idx) # Remove choice from list (sampling w/o replacement)\n\n return sample_points", "def constraint_latin_hypercube(self, max_iter = 5):\n n_cycle = 1\n # Parameters for latin hypercube sampling\n lhs_crit = 'c' # 'maximin' ('m'), 'center' ('c'),\n # 'centermaximin' ('cm'), and 'correlation' ('corr')\n sample_size = self.sample_size\n\n while n_cycle <= max_iter:\n #\n # Generate new samples from scratch if last step is failed.\n # Reusing old X values not feasible because LHS points\n # depends on number of samples. The output Design scales all\n # the variable ranges from 0.0 to 1.0 which can then be transformed\n # as the user wishes (like to a specific statistical distribution\n # using the scipy.stats.distributions ppf/inverse cumulative\n # distribution function).\n #\n print \" \"\n print \" Generating the latin hypercube samples at Trial # %d\" %(n_cycle)\n print \" \"\n lhd = lhs(self.n_dim, samples = sample_size, criterion = lhs_crit)\n # The following is slower and less efficient in satisfying the constraints\n # lhd = norm(loc=0, scale=1).ppf(lhd) # this applies to all the dimensions\n\n true_sample_ind = np.zeros((sample_size), dtype = bool)\n ind = np.arange(sample_size)\n count_pass = 0\n\n for sample in ind:\n true_sample_ind[sample] = self.constraint.apply(lhd[sample,:])\n\n # Check number of samples satisfying all the constraints\n is_satisfied = ind[true_sample_ind == True]\n count_pass = len(is_satisfied)\n\n if count_pass >= self.sample_size:\n # Met the requirement. Remove rejected points\n lhd_final = lhd[is_satisfied,:]\n self.X = lhd_final[0:self.sample_size,:]\n\n self.flag = True\n print \" \"\n print \" The requested %d samples are generated in %d tries!\" \\\n %(self.sample_size, n_cycle)\n print \" \"\n break\n else:\n n_cycle += 1\n # Did not meet the requirement. Increase sample size and try again\n if count_pass == 0:\n print \" \"\n print \" Warning!\"\n print \" In Trial # %d NONE of generated latin hypercube samples satisfied all the constraints\" %(n_cycle-1)\n print \" \"\n sample_size = 20*sample_size # sampleSize = 10*sampleSize\n else:\n # new sample size = 1..20 x old sample size\n # oversample by 10% to increase chance of sufficient samples\n print \" \"\n print \" In Trial # %d: %d of the generated latin hypercube samples did not satisfy the constraints \" %((n_cycle - 1), (self.sample_size - count_pass))\n print \" So, moving on to the next trial with bigger sample size. 
\"\n print \" Number of trials left: %d\" %(max_iter - n_cycle + 1)\n print \" \"\n sample_size = int(math.ceil(min(20, 1.1*self.sample_size \\\n /count_pass)*sample_size))\n # END WHILE\n # Remove excess points\n if self.flag:\n print \" \"\n print \" All the samples generated successfully! \"\n print \" \"\n else:\n percnt = int((self.sample_size - count_pass)/self.sample_size)*100\n print \" \"\n print \" Maximum number of iterations (%d) reached\" %(max_iter)\n print \" Sorry! The program could not generate all the requested samples. \"\n print \" Only %d%% samples are generated (%d out of %d)\" \\\n %(percnt, count_pass, self.sample_size)\n print \" \"", "def latin_hypercube(xmins, xmaxs, n_dim, num_evaluations, seed=None):\n if HAS_SCIPY_QMC:\n return latin_hypercube_scipy(xmins, xmaxs, n_dim, num_evaluations, seed=seed)\n elif HAS_PYDOE2:\n return latin_hypercube_pydoe(xmins, xmaxs, n_dim, num_evaluations, seed=seed)\n else:\n msg = (\n \"scipy.stats.qmc and pydoe2 not unavailable.\"\n \"Cannot generate latin hypercube. Falling back on uniform random sampler.\"\n )\n warnings.warn(msg)\n return uniform_random_hypercube(xmins, xmaxs, n_dim, num_evaluations, seed=seed)", "def latin_hypercube(n_pts, dim):\n X = np.zeros((n_pts, dim))\n centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts)\n for i in range(dim): # Shuffle the center locataions for each dimension.\n X[:, i] = centers[np.random.permutation(n_pts)]\n\n # Add some perturbations within each box\n pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts)\n X += pert\n return X", "def __init__(self, search_space=None):\n super(SegmentationNas, self).__init__(search_space)\n self.search_space = search_space\n self.max_sample_random = self.config.max_sample_random\n self.max_sample_mutate = self.config.max_sample_mutate\n self.sample_count = 0\n self.random = SegmentationRandom()\n self.mutate = SegmentationMutate()", "def __init__(self, search_space=None):\n super(AdelaideRandom, self).__init__(search_space)\n self.search_space = search_space\n self.codec = Codec(self.cfg.codec, search_space)\n self.max_sample = self.cfg.max_sample\n self.sample_count = 0", "def initialise(self):\n sampling_size = 0\n word_counter = {}\n\n for sentence in self.sentences:\n for word in sentence:\n sampling_size += len(sentence)\n word_counter[word] = word_counter.get(word, 0) + 1\n\n true_word_counter = {}\n for word, counter in word_counter.items():\n if counter >= self.minCount:\n self.w2id.setdefault(word, len(self.w2id))\n true_word_counter[self.w2id[word]] = counter\n self.train_set.append(word)\n\n print(f'Number words removed : {len(word_counter) - len(true_word_counter)} on '\n f'{len(word_counter)} words')\n\n nb_unique_words = len(true_word_counter)\n unique_words_idx = list(range(nb_unique_words))\n\n occ = np.array(list(map(lambda w: true_word_counter[w]**self.alpha, unique_words_idx)))\n prob = occ / np.sum(occ)\n self.sampling_matrix = np.random.choice(unique_words_idx,\n size=(sampling_size, self.negativeRate),\n p=prob)\n self.sampling_idx = 0\n\n self.W = np.random.uniform(-1, 1, (nb_unique_words, self.nEmbed))\n self.C = np.random.uniform(-1, 1, (nb_unique_words, self.nEmbed))\n\n if self.mode_OOV:\n full_vocab = self.train_set.copy() # will store words + subwords\n\n # get subwords\n for word in self.train_set:\n word_and_subs = list() # store list of subwords for each word\n word_and_subs.append(word)\n for idx in range(len(word)):\n if (idx + self.n_gram) <= len(word):\n subword = word[idx:(idx + 
self.n_gram)]\n word_and_subs.append(subword)\n if subword not in full_vocab:\n full_vocab.append(subword) # append to full vocab\n\n # index here are the same as in trainset\n self.words_and_their_ngrams.append(word_and_subs)\n\n N = len(self.train_set)\n N_full = len(full_vocab)\n\n # indexing for W matrice\n self.n_gram_w2id = {w: idx for (idx, w) in enumerate(full_vocab)}\n\n # W includes words and subwords\n self.W = np.random.uniform(-1, 1, (N_full, self.nEmbed))\n # C only includes words\n self.C = np.random.uniform(-1, 1, (N, self.nEmbed))", "def __init__(self, query_set, search_space, buffer_radius):\n\n def validate_input(points):\n \"\"\"\n check the shape of the point clouds\n \"\"\"\n if points.ndim != 2:\n raise ValueError(\"wrong point cloud array shape\")\n elif points.shape[1] != 3:\n raise ValueError(\"only 3D spaces are supported\")\n elif points.shape[0] < 2:\n raise ValueError(\"need at least 2 points to partition\")\n\n validate_input(query_set)\n validate_input(search_space)\n if buffer_radius <= 0:\n raise ValueError(\"buffer radius cannot be negative\")\n\n self.query_set = query_set\n self.search_space = search_space\n self.buffer_radius = buffer_radius\n\n # the bounds we are interested in are the extents of the query set\n self.maximum_corner = query_set.max(0)\n self.minimum_corner = query_set.min(0)\n\n # we will be filling this later\n self.cubes = []\n\n # these are the algorithms we can use to get our 8 cubes\n self.cube_generators = {\n \"naive\": self._naive_cube_generator,\n \"take_one\": self._take_one_cube_generator,\n \"take_three\": self._take_three_cube_generator\n }", "def gen_hypercube(samples, N):\n\n np.random.seed(4654562)\n hypercube = lhs(N, samples=samples)\n\n return hypercube", "def __init__(self):\n self.ALPHABET = string.ascii_uppercase\n self.keyword = self.input_keyword()\n self.keyword_alphabet = self.create_encrypted_alphabet()", "def create_grids_structure(self):\n for indices, hypercube in np.ndenumerate(self.hypercubes):\n self.hypercubes[indices] = Hypercube(coords=indices)", "def build_search_space(search_parameters: List[str], train_epochs: int = None) -> dict:\n available_spaces = {\n \"activation\": hp.choice(\n \"activation\", options=[\"ReLU\", \"LeakyReLU\", \"PReLU\", \"tanh\", \"SELU\", \"ELU\"]\n ),\n \"aggregation\": hp.choice(\"aggregation\", options=[\"mean\", \"sum\", \"norm\"]),\n \"aggregation_norm\": hp.quniform(\"aggregation_norm\", low=1, high=200, q=1),\n \"batch_size\": hp.quniform(\"batch_size\", low=5, high=200, q=5),\n \"depth\": hp.quniform(\"depth\", low=2, high=6, q=1),\n \"dropout\": hp.quniform(\"dropout\", low=0.0, high=0.4, q=0.05),\n \"ffn_hidden_size\": hp.quniform(\"ffn_hidden_size\", low=300, high=2400, q=100),\n \"ffn_num_layers\": hp.quniform(\"ffn_num_layers\", low=1, high=3, q=1),\n \"final_lr_ratio\": hp.loguniform(\"final_lr_ratio\", low=np.log(1e-4), high=0.),\n \"hidden_size\": hp.quniform(\"hidden_size\", low=300, high=2400, q=100),\n \"init_lr_ratio\": hp.loguniform(\"init_lr_ratio\", low=np.log(1e-4), high=0.),\n \"linked_hidden_size\": hp.quniform(\"linked_hidden_size\", low=300, high=2400, q=100),\n \"max_lr\": hp.loguniform(\"max_lr\", low=np.log(1e-6), high=np.log(1e-2)),\n \"warmup_epochs\": hp.quniform(\"warmup_epochs\", low=1, high=train_epochs // 2, q=1)\n }\n space = {}\n for key in search_parameters:\n space[key] = available_spaces[key]\n\n return space", "def main():\n\n parser = argparse.ArgumentParser(description='Create a new Wordsearch')\n parser.add_argument('size', 
type=grid_size_type,\n help=\"height and width of our wordsearch grid (min: 3)\")\n parser.add_argument('wordfile', type=argparse.FileType('r'),\n help=\"file including words to search for\")\n parser_args = parser.parse_args()\n\n new_matrix = Matrix(parser_args.size)\n\n words_to_find = create_word_list_from_file(parser_args.wordfile, parser_args.size)\n\n words_found = []\n for word in words_to_find:\n if word not in words_found and word in new_matrix:\n words_found.append(word)\n\n print(\"\\n{}\\n\\n{}\\n\".format(new_matrix, \" \".join(sorted(words_found))))", "def recreate_wordmesh(self):\n \n #raise all the clustering flag, so as to run the MDS algorithm again\n self._flag_clustering_criteria = True\n self._generate_embeddings()", "def construct_hyper_grids(X, n_grid=20):\n raise NotImplementedError", "def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]", "def create_indices(self):\n\t\tself.pg_eng.build_idx_ddl()\n\t\tself.pg_eng.create_indices()", "def initialize( self, layout, numGhostAgents=1000 ):\n self.data.initialize(layout, numGhostAgents) ##self.data is defined in the Grid() class of game.py REF112.It creates an initial game state from a layout array (see layout.py).", "def set_up_test_cube():\n data = np.linspace(-45.0, 45.0, 9, dtype=np.float32).reshape((1, 3, 3)) + 273.15\n\n attributes = {\n \"um_version\": \"10.4\",\n \"source\": \"Met Office Unified Model\",\n \"Conventions\": \"CF-1.5\",\n \"institution\": \"Met Office\",\n \"history\": \"\",\n }\n\n cube = set_up_variable_cube(\n data, attributes=attributes, standard_grid_metadata=\"uk_ens\"\n )\n\n return cube" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
converts time to gmt, appends to list
def gmt(time):
    gmt = [0]*time.size
    for i in range(time.size):
        gmt[i] = datetime.utcfromtimestamp(time[i]).strftime('%Y-%m-%d %H:%M:%S')
    return gmt
[ "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def create_growing_time_list(time_list):\n # create growing time list\n reversed_time_list = time_list[::-1]\n time_to_append = 0\n previous_time = 0\n growing_time_list = []\n growing_time_list.append(previous_time)\n\n for time_el in reversed_time_list:\n # time in hours\n time_to_append = (float(time_el) / (60 * 60)) + previous_time\n growing_time_list.append(time_to_append)\n previous_time = time_to_append\n\n return growing_time_list", "def time_format(l_time, is_tz=False):\n if l_time:\n difference = l_time[-1] - l_time[0]\n count = len(l_time)\n if count > 1:\n frequency = difference / (count - 1)\n if difference < DAY and frequency < DAY:\n start = 11\n end = None if is_tz else FMT_LENGTH\n elif frequency < DAY <= difference:\n start = 5\n end = None if is_tz else FMT_LENGTH\n elif difference >= DAY and frequency >= DAY:\n start = 5\n end = 10\n else:\n start = None\n end = None if is_tz else FMT_LENGTH\n formated_time = [timestamp_to_timeformat(t)[start:end] for t in l_time]\n else:\n formated_time = [timestamp_to_timeformat(l_time[0])]\n else:\n formated_time = []\n return formated_time", "def get_times():\n global times\n global times_list\n base_url = \"http://www.crawleymosque.com/\"\n r = requests.get(base_url)\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n times_list = []\n for salah_time in soup.find_all(class_=\"prayer-start\"):\n times_list.append(salah_time.contents[0].strip())\n\n print(times_list)\n times = []\n for i in times_list:\n datetime_object = datetime.strptime(i, \"%I:%M %p\")\n just_time = datetime.time(datetime_object)\n times.append(just_time)\n\n print(times)\n\n # spam = Label(root, text=\"checking for spam\")\n # spam.place(x=460, y=110)", "def lst_time(self):\n return local_sidereal_time(self.time)", "def rttm2simple(rttm:list) -> list:\n output = list()\n for line in rttm:\n _, _, _, start, duration, _, _, label, _, _ = line.split()\n end = float(start)+float(duration)\n output.append((f\"{label}\", float(start), end))\n return output", "def createTimeList(Ntmp,res):\r\n\r\n from datetime import time # import module to deal with dates, time, etc.\r\n\r\n liste = [] # empty final list\r\n hour = 0 # hour counter\r\n minute = 0 # minute counter\r\n\r\n for i in range(0,Ntmp): # loop for each timepoint\r\n minute += res # next resolution minute of the timepoint\r\n if minute == 60: # we have reached one hour\r\n hour += 1 # next hour\r\n minute = 0 # reset minute\r\n now = str(time(0,hour,minute)) # call the datetime module to get the timepoint in string format\r\n split = now.split(':')[1:3] # we do not take the seconds\r\n join = ':'.join(split) # rejoin only hours and minutes\r\n liste.append(join) # add current timepoint to the list\r\n \r\n return liste", "def add_time(self, index, time):\n self.times[index].append(time)", "def time_to_hour_and_minute(time):\n return [time // 60, time % 60]", "def convert_time(time):\n\n s = time.split()[0]\n s_h = int(s.split(':')[0])\n\n am_pm = s.split(':')[1][-2:]\n if s_h == 12:\n s_h = s_h - 12\n if am_pm == 'PM':\n s_h = s_h + 12\n s_h = s_h + 1\n\n e = time.split()[2]\n e_h = int(e.split(':')[0])\n\n am_pm = e.split(':')[1][-2:]\n if e_h == 12:\n e_h = e_h - 12\n if am_pm == 
'PM':\n e_h = e_h + 12\n e_h = e_h + 1\n\n hour_list = range(s_h, e_h + 1)\n return hour_list", "def get_time(self):\n return [self.hours, self.mins, self.secs]", "def sorted_by_time(data):\n\n tlist = []\n tdict = {}\n for ent in data:\n atemp = re.split('\\s+', ent)\n tm = atemp[2]\n tm = tm.replace('-', '')\n tm = tm.replace('T', '')\n tm = tm.replace(':', '')\n tm = int(float(tm))\n tlist.append(tm)\n tdict[tm] = ent\n\n tlist.sort()\n\n out = []\n for ent in tlist:\n out.append(tdict[ent])\n\n return out", "def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]", "def addtimestamp(self, items) :\n secitems = [] # items from this second\n for item in items :\n time = item[\"time\"] # get timestamp\n if secitems == [] or secitems[0][\"time\"] == time : # if same second\n secitems.append(item) # save for this second\n else : # done with this second\n self.fixtimestamp(secitems) # space out in time\n secitems = [item] # done with this second\n self.fixtimestamp(secitems) # do final items\n return", "def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list", "async def _timein_list(self):\n\t\t\n\t\tmessage = 'Favourites\\n```Name: Timezones\\n'\n\t\t\n\t\tfor fav in self.favourites:\n\t\t\tmessage += fav + ': '\n\t\t\tmessage += self.favourites[fav].replace(',', ', ').replace('_', ' ') + '\\n'\n\t\t\n\t\tmessage += '```'\n\t\tawait self.bot.say(message)", "def get_timestamp_list(client, video_id):\n vtime_regex = re.compile(u'[\\d\\s\\w]{0,1}\\d:\\d\\d')\n comments = get_yt_comments(client=client, video_id=video_id)\n times = []\n for comment in comments:\n cur_times = vtime_regex.findall(comment)\n clean_times = [trim_str_num(t) for t in cur_times]\n times += clean_times # More pythonic than .extends(..)\n return times", "def get_time_types_for_formatting(self):\n return # osid.type.TypeList", "def get_date_time_list(self, dates: List, year: int, month: int):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
finds stations that don't have predictand data and appends them to a list
def miss_station(all_stations, stations):
    diff = len(all_stations) - len(stations)
    k = 0
    i = 0
    miss_stations = [''] * diff
    a = all_stations[:]
    a.sort()
    s = stations[:]
    s.sort()
    while i < len(stations):
        while a[i] != s[i]:
            miss_stations[k] = a[i]
            del a[i]
            k += 1
        i += 1
    return miss_stations
[ "def stations():\n\n return station_list", "def _get_stations_data(self):\n return [station for network in self.__networks_objects_list for station in network[\"Stations\"]]", "def list_stations(self):\n for i in range(len(self.data)):\n self.stnlist.append(Station(self.data[i][0]))\n return self", "def _get_ogd_stations():\n return {r[\"Station\"] for r in ZamgData.current_observations()}", "def get_stations(form):\n stations = form.getall(\"sts\")\n if not stations:\n stations.append(\"XXXXX\")\n if len(stations) == 1:\n stations.append(\"XXXXX\")\n return stations", "def get_processed_stations(out_dir):\n lista = [ f.split('_')[0] for f in os.listdir(out_dir) if '.nc' in f ]\n #print('Skipping these stations: ' , lista )\n return lista", "def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])", "def _empty_train_data(self):\n dest = self.destination if self.destination else \"\"\n direction = self.direction if self.direction else \"\"\n stops_at = self.stops_at if self.stops_at else \"\"\n return [\n {\n ATTR_STATION: self.station,\n ATTR_ORIGIN: \"\",\n ATTR_DESTINATION: dest,\n ATTR_DUE_IN: \"n/a\",\n ATTR_DUE_AT: \"n/a\",\n ATTR_EXPECT_AT: \"n/a\",\n ATTR_DIRECTION: direction,\n ATTR_STOPS_AT: stops_at,\n ATTR_TRAIN_TYPE: \"\",\n }\n ]", "def collect_stations(self):\n # First, iterate provinces and build url's\n site = urllib.request.urlopen(self.base_url)\n\n # Check that the site is still valid or operating by collecting a list of provinces\n print(\"Collecting provinces\")\n provinces = [s[9:11] for s in re.findall('<a href=\"../\">../</a>', site.read())]\n\n # Iterate provinces and collect list of available times\n print(\"Collecting time periods and station ID's\")\n self.stations = defaultdict(dict)\n for prov in provinces:\n site = urllib.request.urlopen(self.build_url(prov))\n expression = '<a href=\"[hd][a-zA-Z]*/\">[hd][a-zA-Z]*/</a>'\n times = [s.split('>')[1].split('<')[0].replace('/', '') for s in re.findall(expression, site.read())]\n\n # Iterate times and collect the station ID's\n for time in times:\n site = urllib.request.urlopen(self.build_url(prov, time))\n expression = '<a href=\"{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv\">{0}_[a-zA-Z0-9]*_{1}_hydrometric.csv</a>'\n expression = expression.format(prov.upper(), time.lower())\n stations = [s.split('_')[1] for s in re.findall(expression, site.read())]\n self.stations[prov][time] = stations", "def analyze_station(station):\n rez = []\n \n total_trains = len(station.vs.select(lambda vertex: vertex[\"node_type\"] == \"Train\"))\n \n # test what changes to accessbility when you remove a single elevator from it\n for v in station.vs:\n if v[\"node_type\"] != \"Elevator\":\n # only test elevator importance\n continue\n \n t = copy.deepcopy(station)\n t.delete_vertices([v[\"id\"]])\n\n # determine number of trains that lose access to the street as a result\n # of the removal\n station_split = t.clusters(mode=\"WEAK\")\n severity = 0\n for split in station_split:\n severity += calc_elevator_importance(t, split)\n perc = severity / total_trains * 100\n rez.append([v[\"station\"], v[\"id\"], severity, perc])\n return rez", "def filter_show_stations(data: List[Dict[str, List[int]]], config_data):\n AirData = namedtuple(\"AirData\", [\"station\", \"aqi\"])\n data_store = []\n\n station_uids = list(config_data.values())\n flatten_uids = [item for sublist in station_uids for item in sublist]\n\n # Load = data returned from concurrent request.\n for load in data:\n 
for item in load:\n if item[\"uid\"] in flatten_uids:\n aqi = item[\"aqi\"]\n station = item[\"station\"][\"name\"]\n data_store.append(AirData(station, aqi))\n\n return data_store", "def guess_stations(flats_list, constraint, config):\n distance_threshold = config[\"max_distance_housing_station\"]\n opendata = {\n \"postal_codes\": data.load_data(PostalCode, constraint, config),\n \"stations\": data.load_data(PublicTransport, constraint, config),\n }\n\n for flat in flats_list:\n flat_station = flat.get(\"station\", None)\n\n if not flat_station:\n # Skip everything if empty station\n LOGGER.info(\"No stations field for flat %s, skipping stations lookup.\", flat[\"id\"])\n continue\n\n # Woob modules can return several stations in a comma-separated list.\n flat_stations = flat_station.split(\",\")\n # But some stations containing a comma exist, so let's add the initial\n # value to the list of stations to check if there was one.\n if len(flat_stations) > 1:\n flat_stations.append(flat_station)\n\n matched_stations = []\n for tentative_station in flat_stations:\n matched_stations += fuzzy_match(\n tentative_station,\n [x.name for x in opendata[\"stations\"]],\n limit=10,\n threshold=50,\n )\n\n # Keep only one occurrence of each station\n matched_stations = list(set(matched_stations))\n\n # Filter out the stations that are obviously too far and not well\n # guessed\n good_matched_stations = []\n postal_code = flat[\"flatisfy\"].get(\"postal_code\", None)\n if postal_code:\n # If there is a postal code, check that the matched station is\n # closed to it\n postal_code_gps = next((x.lat, x.lng) for x in opendata[\"postal_codes\"] if x.postal_code == postal_code)\n for station in matched_stations:\n # Note that multiple stations with the same name exist in a\n # city, hence the list of stations objects for a given matching\n # station name.\n stations_objects = [x for x in opendata[\"stations\"] if x.name == station[0]]\n for station_data in stations_objects:\n distance = tools.distance((station_data.lat, station_data.lng), postal_code_gps)\n if distance < distance_threshold:\n # If at least one of the coordinates for a given\n # station is close enough, that's ok and we can add\n # the station\n good_matched_stations.append(\n {\n \"key\": station[0],\n \"name\": station_data.name,\n \"confidence\": station[1],\n \"gps\": (station_data.lat, station_data.lng),\n }\n )\n break\n LOGGER.info(\n (\"Station %s is too far from flat %s (%dm > %dm), discarding this station.\"),\n station[0],\n flat[\"id\"],\n int(distance),\n int(distance_threshold),\n )\n else:\n LOGGER.info(\"No postal code for flat %s, skipping stations detection.\", flat[\"id\"])\n\n if not good_matched_stations:\n # No stations found, log it and cotninue with next housing\n LOGGER.info(\n \"No stations found for flat %s, matching %s.\",\n flat[\"id\"],\n flat[\"station\"],\n )\n continue\n\n LOGGER.info(\n \"Found stations for flat %s: %s (matching %s).\",\n flat[\"id\"],\n \", \".join(x[\"name\"] for x in good_matched_stations),\n flat[\"station\"],\n )\n\n # If some stations were already filled in and the result is different,\n # display some warning to the user\n if \"matched_stations\" in flat[\"flatisfy\"] and (\n # Do a set comparison, as ordering is not important\n set([station[\"name\"] for station in flat[\"flatisfy\"][\"matched_stations\"]])\n != set([station[\"name\"] for station in good_matched_stations])\n ):\n LOGGER.warning(\n \"Replacing previously fetched stations for flat %s. 
Found \"\n \"stations differ from the previously found ones.\",\n flat[\"id\"],\n )\n\n flat[\"flatisfy\"][\"matched_stations\"] = good_matched_stations\n\n return flats_list", "def stations(self):\n for stat in sorted(self.station_records):\n yield self.station_records[stat]", "def stations_objects(self):\n\n return self.__stations_objects_list", "def read_noaa_stations(self):\n # wget -c http://weather.noaa.gov/data/nsd_bbsss.txt\n #72;656;KSFD;Winner, Bob Wiley Field Airport;SD;United States;4;43-23-26N;099-50-33W;;;619;;\n #93;246;NZRO;Rotorua Aerodrome;;New Zealand;5;38-07S;176-19E;38-07S;176-19E;285;294;\n #block;synop;icao;name;?;country;??;lat;lon;lat2;lon2;height;?;\n #0 1 2 3 4 5 6 7 8 9 10 11 12\n if not os.path.exists(self.noaa_filename):\n LOGGER.warning('could not find noaa file \"%s\"', self.noaa_filename)\n return self.known_stations\n count = 0\n with open(self.noaa_filename, 'r') as csvfile:\n stationreader = csv.reader(csvfile, delimiter=';')\n for row in stationreader:\n station_id = '{}{}'.format(row[0], row[1])\n station_id_icao = row[2].strip().upper()\n data = noaa_station_data_from_row(row)\n if data is not None:\n count += 1\n self.known_stations[station_id] = data\n if len(station_id_icao) == 4 and station_id_icao.isalpha():\n self.known_stations[station_id_icao] = data\n self.noaa_file_age = os.path.getmtime(self.noaa_filename)\n LOGGER.info(' Loaded %i noaa station records from \"%s\"', count, self.noaa_filename)\n return self.known_stations", "def _add_data(self, model_stations: Iterable[model.Station],\n validate_prefix: str = \"\") -> int:\n valid_station_count = 0\n jreast_merged_codes: dict[model.StationID, str] = load_csv_as_mapping(\n DIR_CURATED / \"jreast_merged_codes.csv\",\n itemgetter(\"sta_id\"),\n itemgetter(\"code\")\n )\n\n # Add data from model stations\n for model_sta in model_stations:\n is_invalid = False\n should_validate = model_sta.id.startswith(validate_prefix)\n\n # Find a matching geo_sta\n geo_sta = self.by_id.get(model_sta.id)\n if not geo_sta:\n if should_validate:\n self.logger.critical(f\"{Color.RED}geo.osm is missing station \"\n f\"{Color.MAGENTA}{model_sta.id}{Color.RESET}\")\n self.valid = False\n continue\n\n # Find a name\n name_id = last_part(geo_sta.id)\n geo_sta.name = self.names.get(name_id)\n if geo_sta.name is None and should_validate:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Copy stop_code\n geo_sta.code = model_sta.code\n\n # Check if station was valid\n if is_invalid:\n self.valid = False\n elif should_validate:\n valid_station_count += 1\n\n # Generate codes and names for mother stations\n for sta in self.by_id.values():\n if not sta.children:\n continue\n\n name_id = last_part(sta.id)\n sta.name = self.names.get(name_id)\n if not sta.name:\n self.logger.critical(f\"{Color.RED}sta_names.csv is missing name for \"\n f\"{Color.MAGENTA}{name_id}{Color.RESET}\")\n is_invalid = True\n\n # Get children codes\n children_codes = []\n jreast_merged_code = jreast_merged_codes.get(sta.id)\n if jreast_merged_code:\n children_codes.append(jreast_merged_code)\n\n for child in sta.children:\n # Ignore JR-East child codes if there's a JR-East merged code\n if child.id.startswith(\"JR-East\") and jreast_merged_code:\n continue\n elif child.code:\n children_codes.append(child.code)\n\n sta.code = \"/\".join(children_codes)\n\n return valid_station_count", "async def get_stations() -> List[WeatherStation]:\n # Check if we're 
really using the api, or loading from pre-generated files.\n use_wfwx = config.get('USE_WFWX') == 'True'\n if use_wfwx:\n return await _get_stations_remote()\n return _get_stations_local()", "def stations( s, n ):\n candidate = set()\n while( len(candidate) < s ):\n candidate.add( random.randint(0,max_station-1) );\n candidate = list(candidate);\n ls = [];\n for i in xrange(n):\n ls.append( candidate[random.randint(0,s-1)] );\n return ls;", "def get_cc_hrs_station_list(update = False):\n if update:\n stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')\n stations.to_csv('available_watercoursedischarge_stations.csv')\n else:\n stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)\n\n hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)\n\n station_subset = stations.ix[stations.station_no.isin(hrs_stations.station_id)]\n\n if update:\n station_attrs = []\n for i, station in station_subset.iterrows():\n attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','station_no','custom_attributes'])\n station_attrs.append(attrs.set_index('station_id'))\n\n station_attributes = pd.concat(station_attrs).drop_duplicates()\n station_attributes.to_csv('station_attributes.csv')\n else:\n station_attributes = pd.read_csv('station_attributes.csv', index_col=0)\n\n cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)\n\n station_list = station_attributes.ix[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].station_no\n\n return station_list.drop_duplicates()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Hexlify raw text, return hexlified text.
def hexlify(text):
    if six.PY3:
        text = text.encode('utf-8')

    hexlified = binascii.hexlify(text)

    if six.PY3:
        hexlified = hexlified.decode('utf-8')

    return hexlified
[ "def unhexlify(text):\n unhexlified = binascii.unhexlify(text)\n\n if six.PY3:\n unhexlified = unhexlified.decode('utf-8')\n\n return unhexlified", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def preprocess_hex_chars(self, text) :\n preprocessed_text = ''\n\n i = 0\n while i < len(text) :\n if '\\\\x' == text[i:i+2] :\n c = int(text[i+2:i+4], base=16)\n preprocessed_text += chr(c)\n i += 4\n else :\n preprocessed_text += text[i]\n i += 1\n\n return preprocessed_text", "def _get_hexplain(data: str) -> str:\n temp = []\n for char in data:\n temp.append(hex(ord(char)).replace(\"0x\", \"\"))\n return \"\".join(temp)", "def unhexlify(data) -> bytes:", "def _get_hexesc(data: str) -> str:\n temp = []\n for char in data:\n temp.append(hex(ord(char)).replace(\"0x\", \"\\\\\\\\x\"))\n return \"\".join(temp)", "def encrypt(text):\r\n\r\n cipher = fuzz(text)\r\n return hexify(cipher)", "def xx(data):\n if sys.version_info < (3, 5):\n return binascii.hexlify(data).decode('ascii')\n return data.hex()", "def hexlify(data: bytes, sep: str = '') -> bytes:", "def hex(self):\n return binascii.hexlify(self.data)", "def test_unhexlify():\n hexlified = uflash.hexlify(TEST_SCRIPT)\n unhexlified = uflash.unhexlify(hexlified)\n assert unhexlified == TEST_SCRIPT.decode('utf-8')", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def encode_hex(data: typing.AnyStr, with_prefix=False) -> str:\n\n if isinstance(data, str):\n return data\n return hexlify(data, with_prefix=with_prefix)", "def refine_text_string(text_string):\n text_list = text_string.split(\",\")\n hex_list = [hex(int(a)) for a in text_list]\n for i in range(len(hex_list)):\n if len(hex_list[i]) == 3:\n hex_list[i] = hex_list[i].replace('x', \"\")\n elif len(hex_list[i]) == 4:\n hex_list[i] = hex_list[i].replace('0x', \"\")\n return \"\".join(hex_list)", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def hex(cls, x):\n return c_hex(x)", "def get_hex_tx_data(self):\n return hexlify(self.tx_data).decode(\"utf8\")", "def hexaescrypt(data, key):\n c = AES.new(key, AES.MODE_ECB)\n return b2a_hex(c.encrypt(data))", "def hexstring(self):\n if self.current != b\"<\":\n self.on_parser_error(\"Hexadecimal string expected\")\n self.next()\n token = b''\n self.maybe_spaces_or_comments()\n while self.is_hex_digit:\n token += self.next()\n self.maybe_spaces_or_comments()\n\n ch = self.next()\n if ch != b'>':\n self.on_parser_error(\"Wrong hexadecimal string\")\n if len(token) % 2:\n # if there is an odd number of digits - the last one should be assumed 0\n token += b'0'\n return HexString(token.decode(DEFAULT_ENCODING).upper())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }