Dataset columns:
  query:            string (lengths 9 to 3.4k)
  document:         string (lengths 9 to 87.4k)
  metadata:         dict
  negatives:        sequence (4 to 101 items)
  negative_scores:  sequence (4 to 101 items)
  document_score:   string (lengths 3 to 10)
  document_rank:    string (102 distinct values)
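The schema above maps directly onto row access. A minimal sketch, assuming the rows are served through the Hugging Face datasets library; the dataset ID below is a placeholder, not the real repository name:

    from datasets import load_dataset

    # Hypothetical dataset ID: substitute the actual repository name.
    ds = load_dataset("user/code-retrieval-triplets", split="train")

    row = ds[0]
    query = row["query"]                            # natural-language description of the code
    document = row["document"]                      # the positive code snippet
    negatives = row["negatives"]                    # 4 to 101 non-matching candidate snippets
    negative_scores = row["negative_scores"]        # parallel scores (likely retrieval similarities)
    document_score = float(row["document_score"])   # score of the positive document (stored as a string)
    document_rank = int(row["document_rank"])       # rank of the positive among candidates (stored as a string)

    assert len(negatives) == len(negative_scores)

The example rows below follow this layout.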
Return list of values for each div.test element.
def div_value_list(self): return self.q(css='div.test').attrs('value')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def div_text_list(self):\n return self.q(css='div.test').text", "def div_html_list(self):\n return self.q(css='div.test').html", "def get_individual_performance(self):\n\n divs = self.page.find_all(\"span\", {\"class\":\"value\"})\n values = [div.text for div in divs]\n return values", "def get_tests(self):\n return self.tests[:]", "def get_elements(self):\n\t\treturn self._testing_cache", "def get_elements(self):\n\t\treturn self._testing_cache", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test\n # set.\n return list(map(self.classify, test))", "def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test set.\n return list(map(self.classify, test))", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def get_test_cases(self):\n\n return self._test_cases", "def get_results(self):\n result = [round(self.mr / self.test_size, 1), round(self.mrr / self.test_size, 3),\n round(self.hits1 / self.test_size, 3), round(self.hits3 / self.test_size, 3),\n round(self.hits5 / self.test_size, 3), round(self.hits10 / self.test_size, 3)]\n return result", "def getTestSet(self):\r\n return self.fTestData", "def getValues(self):\n return [ float(val.text()) for val in self.values ]", "def Get_Test_Containers():\n\tlis = []\n\t\n\tlis.append(Container(0, 0.01, 0.01, 0.0025, 100, 293, 0))#Normal\n\tlis.append(Container(1, 0.01, 0.02, 0.0025, 75, 293*1.25, 0))#Nearly full and quite hot\n\tlis.append(Container(2, 0.03, 0.01, 0.0025, 10, 293, 0))#Nearly empty\n\tlis.append(Container(3, 0.02, 0.02, 0.0025, 1000, 293, 0))#Overfull\n\tlis.append(Container(0, 0.5*(2**0.5), 1, 0.0025, 10, 293, 3*(10**-9)))#Huge container with pump\n\t\n\treturn lis", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def getTestResults():", "def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in 
('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults", "def list_feature_tests(self):\n\t\treturn self.test_names", "def getTestInstance(self):\n self.test_inst_condition = self.format_data.formatted_test_data[self.data_ref][0]\n self.test_inst_phenotype = self.format_data.formatted_test_data[self.data_ref][1]\n if self.data_ref < (self.format_data.numTestphenotypes - 1):\n self.data_ref += 1\n else:\n self.data_ref = 0\n return [self.test_inst_condition, self.test_inst_phenotype]", "def test_data(self):\n return self._test_data", "def getTestInstance(self):\r\n return [self.currentTestState, self.currentTestPhenotype]", "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def parse_test_context(self, test_list_output):\n # Sample command line output:\n #\n # MyHobbesTest\n # Arrays\n # Compiler\n # Definitions\n #\n #\n # Sample Result:\n #\n # [\n # ['Arrays', []],\n # ['Compiler', []]\n # ['Definitions', []]\n # ]\n result = [[line.strip(), []] for line in test_list_output.splitlines()]\n return result", "def get_sub_values(self):\n return list()", "def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False", "def get_results(self) -> List[str]:\n output = []\n for row in self.row_layout.children():\n if self.possible_values is None:\n text = row.itemAt(0).widget().text()\n else:\n text = row.itemAt(0).widget().currentText()\n\n if text != \"\":\n output.append(text)\n return output", "def test_cases(self) -> list[str]:\n cases = []\n for t in self._test_cases:\n if t not in cases:\n cases.append(t)\n return cases", "def list_test_cases(program):\n\n return list(INFO[program].test_cases)", "def getTestData(self):\n raise NotImplementedError", "def get_values_for_class(self, class_name) -> list:\n class_items = self.soup.find_all(attrs={'class': class_name})\n class_items_values = []\n for class_item in class_items:\n class_item_value = class_item.text.strip()\n class_items_values.append(class_item_value)\n return class_items_values", "def get_collection_elements(self):\n wrapper = self.data.find('div', id='main-content')\n return wrapper.find_all('section', class_='sbkBrv_SingleResult')", "def get_children(self, test, expression):\n\n for child in self.children:\n if TextMatch.dict_call(test, child.text, expression):\n yield child", "def run_tests(tests):\n return [test(t) for t in tests]", "def _all_tests(by_dir):\n return set(x[0] for item in by_dir.values()\n for x in item.tests)", "def num_divs(self):\n return len(self.q(css='div.test').results)", "def _get_test_generator(self):\n for data_element in self.test:\n image, heatmap = self._generate_input_tuple(data_element)\n \n yield (image, heatmap)", "def sequence(self):\n for tn in self._testnodes:\n yield tn", "def get_sliders_values(self):\n\n slider_values = []\n\n for slider in self.sliders:\n found_slider = self.get_element(slider)\n 
slider_values.append(found_slider.get_attribute('aria-valuenow'))\n\n return slider_values", "def get_test_context(self):\n return [\n (testcase, [testcase])\n for testcase in self._pyunit_testcases.keys()\n ]", "def get_units(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[3])\n return result", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def Test(self, test_set):\n test_set_retagged = []\n # This loop will call Viterbi method and pass each sentence (word, POS) in \"test_set\" one by one,\n # and save the returned retagged pos in \"test_set_retagged\"\n a = 0\n for sent in test_set:\n test_set_retagged.append(self.Viterbi(sent))\n return test_set_retagged", "def get_tests(self, cluster_id):\n return self._client.get(\n url=\"/tests/{}\".format(cluster_id),\n ).json()", "def get_values_for_tag(self, tag_name: str):\n tag_items = self.soup.find_all(tag_name)\n tag_item_values = []\n for tag_item in tag_items:\n tag_item_value = tag_item.text.strip()\n tag_item_values.append(tag_item_value)\n return tag_item_values", "def get_element_list(self):\n pass", "def values(self):\n return [p.value for p in self]", "def test_data() -> Iterator[Tuple[Label, ChanneledImage]]:\n return zip(*get_data(TEST_FILES, 10000))", "def GetTopLevelTests(self):\n return [node for node in self.Walk() if node.IsTopLevelTest()]", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' 
+ test_file[:-3])\t\t\t\n\treturn ret", "def tests(self):\n if self._tests is None:\n raise ValueError(\"Individual tests were not kept!\")\n\n return self._tests", "def test_listfield(self):\n self.assertEqual(self.scraped.urls, ['http://google.com', 'http://apple.com'])\n self.assertEqual(self.scraped.in_divs, ['Nested'])", "def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics", "def List(ctx):\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListTestsResult = ctx.element.list_tests()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListTestsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def test(self, test_data):\n with open(test_data, 'r') as test_data:\n results = {}\n for type in self.label_type_map:\n results[self.label_type_map[type]] = []\n while True:\n tokens = test_data.readline().split()\n pos = test_data.readline().split()\n indices = test_data.readline().split()\n if not tokens or not pos or not indices:\n break\n curr_results = self.viterbi(tokens)\n intervals = self.extract_intervals(curr_results, indices)\n for type in intervals:\n for interval in intervals[type]:\n results[type].append(interval)\n self.write_results(results)", "def items(self):\r\n return self.elements.values()", "def get_measures(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[2])\n return result", "def extract_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"], \"score_threshold\": 0.95},\n \"output\": {\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"file_name\": \"BSA1.mzML\",\n \"scaling_factor\": 100,\n \"spec_id\": 1337,\n },\n }\n ]\n for test_dict in TESTS:\n for key, n, entry in self.results.extract_results(**test_dict[\"input\"]):\n print(key, entry)\n assert key.formula == test_dict[\"output\"][\"formula\"]\n assert key.file_name == test_dict[\"output\"][\"file_name\"]\n assert entry.scaling_factor == test_dict[\"output\"][\"scaling_factor\"]\n assert entry.spec_id == test_dict[\"output\"][\"spec_id\"]\n # print(self.results)\n # print(self.results.lookup)\n assert n == 0", "def get_unit_details(driver):\n unit_details = [\n a\n for a in driver.find_elements_by_xpath(\"//span[contains(@class, 'tooltip')]\")\n if \"$\" in a.get_attribute(\"title\")\n ]\n data = []\n for unit in unit_details:\n unit_dict = {}\n unit_dict[\"Unit\"] = unit.get_attribute(\"data-selector\")\n title = unit.get_attribute(\"title\").replace(\"\\xa0\", \" \")\n unit_dict[\"Price\"] = re.compile(\"(\\\\$.+?)<\").findall(title)[0]\n unit_dict[\"Size\"] = re.compile(\">(\\\\d.+Sqm)\").findall(title)[0]\n data.append(unit_dict)\n return data", "def evaluate(self, test):\n self.logger.info(\"Testing model over test set\")\n metrics = self.run_evaluate(test)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n return metrics", "def test(self):\n y_list = []\n y_hat_list = []\n 
for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def texts(self):\n return [elem.text for elem in self.web_elements]", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def get_value_texts(self):\n return self.value_texts", "def _get_elements(self):\n return self._elements", "def get_test_series(page: MarinDashboardPage, chart_id: str) -> List:\n csv_reader = page.get_chart_data(chart_id)\n keys = csv_reader.fieldnames\n\n if keys != ['Test Date', 'Positive Tests']:\n raise FormatError(f'Data headers for chart \"{chart_id}\" have changed')\n\n test_series = []\n\n cumul_pos = 0\n for row in csv_reader:\n daily: dict = dict()\n date_time_obj = datetime.strptime(row['Test Date'], '%m/%d/%Y')\n daily[\"date\"] = date_time_obj.strftime('%Y-%m-%d')\n daily[\"positive\"] = int(row[\"Positive Tests\"])\n cumul_pos += daily[\"positive\"]\n daily[\"cumul_positive\"] = cumul_pos\n test_series.append(daily)\n\n return test_series", "def getTestSets():\n return list(_testsetdict.keys())", "def get_elements(self):\n return self.elements", "def process_test_data(self, test_data):\n\n result = []\n for suite in test_data:\n suite_report = TestGroupReport(\n name=suite[\"name\"],\n category=ReportCategories.TESTSUITE,\n uid=suite[\"name\"],\n )\n suite_has_run = False\n\n for testcase in suite[\"data\"]:\n if testcase[\"status\"] != \"skipped\":\n suite_has_run = True\n\n testcase_report = TestCaseReport(\n name=testcase[\"name\"],\n uid=testcase[\"name\"],\n suite_related=True,\n )\n assertion_obj = RawAssertion(\n passed=testcase[\"status\"] == \"pass\",\n content=testcase[\"error\"] or testcase[\"duration\"],\n description=testcase[\"name\"],\n )\n testcase_report.append(registry.serialize(assertion_obj))\n testcase_report.runtime_status = RuntimeStatus.FINISHED\n suite_report.append(testcase_report)\n\n if suite_has_run:\n result.append(suite_report)\n\n return result", "def getValues(self):\n result = []\n for cbox in self.checkboxes:\n if cbox.isChecked():\n result.append(cbox.text())\n\n return result", "def get_values(self):\n \n return []", "def test_get_value_list_result(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n test_data.append(json.loads('{\"name\": \"Gwen\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(len(result_list) == 2)", "def _chunked_tests(chunks):\n return set(x[0] for chunk in chunks.values()\n for path in chunk.paths\n for x in path.tests)", "def fetch_test_data(self):\r\n self.fetch_attribute_list()\r\n self.type_conversion()\r\n # Connect to sensor database.\r\n connection = db.Connection(host=\"localhost\", user=\"root\", db=\"sensor\")\r\n dbhandler = connection.cursor()\r\n test_data = []\r\n for i in range(0, len(self.attribute_list)):\r\n # If the value is in user_info.json file.\r\n if self.attribute_list[i] in self.form_attribute_keys == True:\r\n test_data.append(self.form_attribute[self.attribute_list[i]])\r\n else:\r\n # Value is in database.\r\n # Reconstruct attribute name to match with table column name.\r\n self.attribute_list[i] = self.attribute_list[i].replace(' ', '_')\r\n self.attribute_list[i] = self.attribute_list[i].lower()\r\n # Fetch last inserted value of that attribute.\r\n query = \"select `\"\r\n query += (self.attribute_list[i] + \"`FROM `\" + 
self.attribute_list[i]\r\n + \"` ORDER BY `\" + self.attribute_list[i] + \"` DESC LIMIT 1\")\r\n dbhandler.execute(query=query)\r\n value = dbhandler.fetchall()\r\n print value\r\n # Append to test_data\r\n test_data.append(value[0][0])\r\n print test_data\r\n return np.array(test_data)", "def extract_list(self, property, data):\n\n values = self.get_property(property, data)\n if len(values) == 1:\n return [ self.concat_text(child) for child in values[0].getchildren() ]\n else:\n return [ self.concat_text(val) for val in values ]", "def tests(self):\n return [t for t in self._collection if t.parent is self]", "def _get_values(self) -> ty.List[float]:\r\n ...", "def test_list_int(self):\n result = div(2, 4)\n self.assertEqual(result, 0.5)", "def test_fields(self):\n tags = (\n ('<form', 1),\n ('<input', 6),\n ('type=\"text\"', 3),\n ('type=\"email\"', 1),\n ('type=\"submit\"', 1),\n )\n for text, number in tags:\n with self.subTest():\n self.assertContains(self.reps, text, number)", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__", "def values(self):\n\t\treturn iter(self.data)", "def list_tests(self, executable):\n # This will return an exit code with the number of tests available\n try:\n output = subprocess.check_output(\n [executable, \"--list-test-names-only\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n except subprocess.CalledProcessError as e:\n output = e.output\n\n result = output.strip().split(\"\\n\")\n\n return result", "def test_scrape_multiple(self):\n self.assertEqual(self.blogs[0].title, 'First article')\n self.assertEqual(self.blogs[0].content, ['First para', 'Second para'])\n self.assertEqual(self.blogs[1].title, 'Second article')\n self.assertEqual(self.blogs[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.blogs[2].title, 'Third article')\n self.assertEqual(self.blogs[2].content, ['Thing one', 'Thing two'])", "def get_attributes_from_amount_of_elements(self, key):\n l_of_attr_val = []\n els = self.driver.find_elements(self.by, self.value)\n for i in range(len(els)):\n el = els[0].find_elements(self.by, self.value)[i].get_attribute(key)\n l_of_attr_val.append(el)\n logging.getLogger(__name__).info(\n \"Attributes from amount of elements: {}\\nby = {}\\nvalue = {}\".format(l_of_attr_val, self.by, self.value))\n return l_of_attr_val", "def _GetAllTestRuns(self, ispy):\n template = JINJA.get_template('list_view.html')\n data = {}\n max_keys = 1000\n marker = 'failures/%s' % self.request.get('marker')\n test_runs = list([path.split('/')[1] for path in\n ispy.GetAllPaths('failures/', max_keys=max_keys,\n marker=marker, delimiter='/')])\n base_url = '/?test_run=%s'\n next_url = '/?marker=%s' % test_runs[-1]\n data['next_url'] = next_url\n data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]\n self.response.write(template.render(data))", "def test_init(self):\n for tag in self.tags:\n for value in self.values:\n this_tag = tag(value)\n self.assertEqual(value, this_tag.value)\n self.assertEqual([], this_tag.body)", "def 
get_values(self):\n return map(lambda x: x.value(),self)", "def values(self):\n return [i.value for i in self.value]", "def test_search_subnode_attribute(self):\n\n lista = []\n for (_, value) in self.parser.search(self.xml, 'node@id'):\n lista.append(value)\n self.assertEqual(lista, ['1', '2', '3'])", "def parse_and_filter_test(self):\n\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"]},\n \"output\": {\"formula\": \"C(37)H(59)N(9)O(16)\"},\n },\n {\n \"input\": {\"charges\": [3]},\n \"output\": {\"formula\": \"C(43)H(75)N(15)O(17)S(2)\"},\n },\n {\n \"input\": {\"file_names\": [\"BSA1.mzML\"]},\n \"output\": {\"formula\": \"C(37)H(59)N(9)O(16)\"},\n },\n {\n \"input\": {\"formulas\": [\"C(37)H(59)N(9)O(16)\"]},\n \"output\": {\"formula\": \"C(37)H(59)N(9)O(16)\"},\n },\n {\n \"input\": {\"label_percentiles\": [((\"N\", \"0.010\"),)]},\n \"output\": {\"formula\": \"C(43)H(75)N(15)O(17)S(2)\"},\n },\n ]\n for test_dict in TESTS:\n for key in self.results._parse_and_filter(**test_dict[\"input\"]):\n assert key.formula == test_dict[\"output\"][\"formula\"]\n\n return", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files", "def __getattr__(self, item):\n\n if item == 'value':\n return [s.value for s in self.elements]\n else:\n raise AttributeError", "def calculate_all_test_stats(test=\"Wilcoxon Ranksum\"):\n\tlabels = get_tpx_labels()\n\t# frame to hold statistics\n\tstats_fr = pd.DataFrame(columns=[\"test_statistic\", \"p_value\"], index=labels)\n\t\n\t# do significance test for all features\n\tfor feature in labels:\n\t\tz_stat, p_val = do_significance_test(feature, test)\n\t\tstats_fr.loc[feature, \"test_statistic\"] = z_stat\n\t\tstats_fr.loc[feature, \"p_value\"] = p_val\n\t\n\tstats_fr = stats_fr.sort_values(\"p_value\", axis=0)\n\t\t\n\t# save results to csv file\n\ttest_name = re.sub(r\"\\s\", \"-\", test.lower())\n\tstats_fr.to_csv(wdir + \"tpx-test-statistics-\" + test_name + \".csv\", sep=\",\", header=True)\n\t\n\tprint(\"Done: All features tested.\")", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels", "def getNodeTestsForPolicy(policyTestId):\n\n nodeTestsQuery = NodeTest.query.filter_by(_policy_test_id=policyTestId)\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def get_list_task(self):\n task_items = self.driver.find_elements(*task_page_locators.TASK)\n return task_items" ]
[ "0.74490196", "0.68997157", "0.64932024", "0.61540717", "0.6152504", "0.6152504", "0.5885815", "0.58685976", "0.5835421", "0.58142436", "0.57759404", "0.5746933", "0.56337065", "0.5621383", "0.5567706", "0.5559967", "0.55586827", "0.55545664", "0.5553262", "0.5503301", "0.54828674", "0.5462416", "0.5438672", "0.5422852", "0.5417449", "0.537168", "0.5332339", "0.5319634", "0.53085124", "0.53067195", "0.52987134", "0.5294849", "0.52541006", "0.5247908", "0.52449256", "0.5240872", "0.5221252", "0.5213826", "0.5209433", "0.5195292", "0.51936114", "0.5187134", "0.5184395", "0.51454556", "0.51454556", "0.51319766", "0.51155025", "0.51084465", "0.510347", "0.5089242", "0.5087283", "0.5076804", "0.5075118", "0.5074097", "0.50731295", "0.5073104", "0.5062838", "0.5059687", "0.5055917", "0.5054945", "0.50457793", "0.50427073", "0.50426084", "0.50370914", "0.50206447", "0.50190926", "0.5007196", "0.5003551", "0.50014746", "0.5001079", "0.49903023", "0.498989", "0.49843735", "0.49752823", "0.49732983", "0.4964144", "0.49631128", "0.49520054", "0.4947908", "0.49392822", "0.4939245", "0.49384984", "0.4931282", "0.4929054", "0.49204367", "0.49162447", "0.49120557", "0.48955593", "0.48950315", "0.48855376", "0.48814508", "0.48798546", "0.48712054", "0.48688883", "0.48688817", "0.4868459", "0.4856779", "0.48555654", "0.4855044", "0.48483747" ]
0.7983814
0
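The metadata field on each row declares a single triplet objective over (query, document, negatives). The sketch below shows one common way such a row is consumed; it assumes a generic InfoNCE-style contrastive setup and an embedding function encode(texts) returning a tensor, neither of which is prescribed by the dataset itself.

    import torch
    import torch.nn.functional as F

    def triplet_loss_for_row(row, encode, temperature=0.05):
        # The query is the anchor, the document the positive, the negatives hard negatives.
        anchor = encode([row["query"]])                               # shape (1, d)
        candidates = encode([row["document"]] + row["negatives"])     # shape (1 + N, d)
        sims = F.cosine_similarity(anchor, candidates) / temperature  # shape (1 + N,)
        target = torch.tensor([0])  # the positive document sits at index 0
        return F.cross_entropy(sims.unsqueeze(0), target)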
Return list of html for each div.test element.
def div_html_list(self): return self.q(css='div.test').html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def div_text_list(self):\n return self.q(css='div.test').text", "def test_html(self):\n tags = (('<input', 3),\n ('<span', 1),\n ('<button', 1))\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "def div_value_list(self):\n return self.q(css='div.test').attrs('value')", "def get_tests(self):\n return self.tests[:]", "def test_html(self):\n \n tags = (('<form',1),\n ('<input',6),\n ('type=\"text\"',3),\n ('type=\"email\"',1),\n ('type=\"submit\"',1))\n \n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "def test_gettesttools_html(self):\n pass", "def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]", "def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label", "def test_html(self):\n tags = (\n ('<form', 1),\n # Csrf, first_name, last_name, email, superuser, username and password\n ('<input', 7),\n ('type=\"text\"', 3),\n ('type=\"password\"', 1),\n ('type=\"checkbox\"', 1),\n ('type=\"email\"', 1),\n ('type=\"submit\"', 1),\n )\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.response, text, count)", "def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests", "def get_tests(self):\n subtests = itertools.chain(*(s.get_tests() for s in self.suites.values()))\n tt = [t for t in itertools.chain(self.tests,subtests)]\n return tt", "def tests():\n\n\treturn render_template(\"testing.html\")", "def test_listfield(self):\n self.assertEqual(self.scraped.urls, ['http://google.com', 'http://apple.com'])\n self.assertEqual(self.scraped.in_divs, ['Nested'])", "def get_collection_elements(self):\n wrapper = self.data.find('div', id='main-content')\n return wrapper.find_all('section', class_='sbkBrv_SingleResult')", "def num_divs(self):\n return len(self.q(css='div.test').results)", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' + test_file[:-3])\t\t\t\n\treturn ret", "def generate_tests(cls):\n cases_pat = join(dirname(__file__), cls.cases_dir, \"*.html\")\n for html_path in glob(cases_pat):\n # Load an options (`*.opts` file, if any).\n # It must be a Python dictionary. 
It will be passed as\n # kwargs to the markdown function.\n opts = {}\n opts_path = splitext(html_path)[0] + \".opts\"\n if exists(opts_path):\n try:\n opts = eval(open(opts_path, 'r').read())\n except Exception:\n _, ex, _ = sys.exc_info()\n print(\"WARNING: couldn't load `%s' opts file: %s\" \\\n % (opts_path, ex))\n\n test_func = lambda self, t=html_path, o=opts: \\\n self._assertSimpleHtmlPath(t, opts=o)\n\n tags_path = splitext(html_path)[0] + \".tags\"\n if exists(tags_path):\n tags = []\n for line in open(tags_path):\n if '#' in line: # allow comments in .tags files\n line = line[:line.index('#')]\n tags += line.split()\n test_func.tags = tags\n\n name = splitext(basename(html_path))[0]\n name = name.replace(' - ', '_')\n name = name.replace(' ', '_')\n name = re.sub(\"[(),]\", \"\", name)\n test_name = \"test_%s\" % name\n setattr(cls, test_name, test_func)", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))", "def get_elements(self):\n\t\treturn self._testing_cache", "def get_elements(self):\n\t\treturn self._testing_cache", "def tests():\n dates, times = report_date_time()\n return render_template('tests.html',\n unit_date=dates[0], unit_time=times[0],\n integ_date=dates[1], integ_time=times[1])", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def get_inner_html(self):\n\n pass", "def run_tests(tests):\n return [test(t) for t in tests]", "def _GetAllTestRuns(self, ispy):\n template = JINJA.get_template('list_view.html')\n data = {}\n max_keys = 1000\n marker = 'failures/%s' % self.request.get('marker')\n test_runs = list([path.split('/')[1] for path in\n ispy.GetAllPaths('failures/', max_keys=max_keys,\n marker=marker, delimiter='/')])\n base_url = '/?test_run=%s'\n next_url = '/?marker=%s' % test_runs[-1]\n data['next_url'] = next_url\n data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]\n self.response.write(template.render(data))", "def tests(self):\n return [self]", "def List(ctx):\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListTestsResult = ctx.element.list_tests()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListTestsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def 
test_html_output(self):\n pass", "def test_scrape_multiple(self):\n self.assertEqual(self.blogs[0].title, 'First article')\n self.assertEqual(self.blogs[0].content, ['First para', 'Second para'])\n self.assertEqual(self.blogs[1].title, 'Second article')\n self.assertEqual(self.blogs[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.blogs[2].title, 'Third article')\n self.assertEqual(self.blogs[2].content, ['Thing one', 'Thing two'])", "def testHTML(self):\n\n html = self.E.html()", "def test_get_root_html(self):\n pass", "def visitTests(tests, grepStr=''):\n\n # First flatten the list of tests.\n testsFlat = []\n toCheck = [t for t in tests]\n while toCheck:\n test = toCheck.pop()\n if isinstance(test, unittest.TestSuite):\n toCheck += [t for t in test]\n else:\n if grepStr in str(type(test)):\n testsFlat.append(test)\n testsFlat.sort()\n\n # Follow the flattened list of tests and show the module, class\n # and name, in a nice way.\n lastClass = None\n lastModule = None\n \n grepPrint = '' if grepStr is '' else red(' (grep: %s)'%grepStr)\n\n for t in testsFlat:\n moduleName, className, testName = t.id().rsplit('.', 2)\n \n # If there is a failure loading the test, show it\n if moduleName.startswith('unittest.loader.ModuleImportFailure'):\n print red(moduleName), \" test:\", t.id()\n continue\n\n if moduleName != lastModule:\n lastModule = moduleName\n print(\" - From %s.py (to run all use --allPrograms)\"\n % '/'.join(moduleName.split('.')) + grepPrint)\n\n\n if className != lastClass:\n lastClass = className\n print(\" ./xmipp test %s\" % className)", "def test_get_services_html(self):\n pass", "def GetTopLevelTests(self):\n return [node for node in self.Walk() if node.IsTopLevelTest()]", "def texts(self):\n return [elem.text for elem in self.web_elements]", "def test_all_scenarios(self):\n client = Client()\n response = client.get(\"/\")\n assert response.status_code == 200\n html = lxml.html.fromstring(response.content)\n a_tags = list(html.xpath('//a'))\n\n # Load the loaded_scenarios from the classes.\n loaded_scenarios = list(scenarios.get_scenarios().values())\n\n # We should have an <a> tag for each scenario.\n assert_equals(len(a_tags), len(loaded_scenarios))\n\n # We should have at least one scenario with a vertical tag, since we use\n # empty verticals as our canary in the coal mine that something has gone\n # horribly wrong with loading the loaded_scenarios.\n assert any(\"<vertical_demo>\" in scen.xml for scen in loaded_scenarios)\n\n # Since we are claiming in try_scenario that no vertical is empty, let's\n # eliminate the possibility that a scenario has an actual empty vertical.\n assert all(\"<vertical_demo></vertical_demo>\" not in scen.xml for scen in loaded_scenarios)\n assert all(\"<vertical_demo/>\" not in scen.xml for scen in loaded_scenarios)", "def Get_Test_Containers():\n\tlis = []\n\t\n\tlis.append(Container(0, 0.01, 0.01, 0.0025, 100, 293, 0))#Normal\n\tlis.append(Container(1, 0.01, 0.02, 0.0025, 75, 293*1.25, 0))#Nearly full and quite hot\n\tlis.append(Container(2, 0.03, 0.01, 0.0025, 10, 293, 0))#Nearly empty\n\tlis.append(Container(3, 0.02, 0.02, 0.0025, 1000, 293, 0))#Overfull\n\tlis.append(Container(0, 0.5*(2**0.5), 1, 0.0025, 10, 293, 3*(10**-9)))#Huge container with pump\n\t\n\treturn lis", "def generate_test_list(tdir):\n\n # Skip this if it already exists\n if os.path.exists(os.path.join(tdir.name, \"kstest-list\")):\n return\n\n kstest_log = os.path.join(tdir.name, \"kstest.log\")\n with open(kstest_log) as f:\n for line in f.readlines():\n if 
not line.startswith(\"Running tests: \"):\n continue\n\n tests = [os.path.basename(os.path.splitext(s)[0]) for s in line[15:].split()]\n with open(os.path.join(tdir.name, \"kstest-list\"), \"wt\") as klf:\n for t in tests:\n print(t, file=klf)\n break", "def test_iter(cls, suite):\n for test in suite:\n if isinstance(test, unittest.TestSuite):\n for t in cls.test_iter(test):\n yield t\n else:\n yield test", "def test_iter(cls, suite):\n for test in suite:\n if isinstance(test, unittest.TestSuite):\n for t in cls.test_iter(test):\n yield t\n else:\n yield test", "def parse_test_context(self, test_list_output):\n # Sample command line output:\n #\n # MyHobbesTest\n # Arrays\n # Compiler\n # Definitions\n #\n #\n # Sample Result:\n #\n # [\n # ['Arrays', []],\n # ['Compiler', []]\n # ['Definitions', []]\n # ]\n result = [[line.strip(), []] for line in test_list_output.splitlines()]\n return result", "def get_html(self):\r\n pass", "def parse_view_page(self):\n for row in self.driver.find_elements_by_css_selector(\"table\"):\n cells = row.find_elements_by_tag_name(\"td\")\n for cell in cells:\n yield cell.text", "def output_test():\n\toutput_comparison_page(TEST_EVENT_LIST, TEST_COMPARISON_PAGE_FILEPATH)", "def list_feature_tests(self):\n\t\treturn self.test_names", "def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...", "def tests(c):\n results = [test(c, i) for i, test_path in enumerate(TEST_PATHS)]\n print('\\n\\n\\n############## SUMMARY ##############')\n for i, test_path in enumerate(TEST_PATHS):\n print(i, test_path, 'PASSED' if result[i] == 0 else 'FAILED')", "def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'annotation_storage', 'token', 'tag', 'openseadragonjson']:\r\n self.assertIn(key, context)", "def get_inner_html(self, selector):\n return \"\".join([self.driver.execute_script(\"return arguments[0].innerHTML;\", e)\n for e in self.get_list(selector)])", "def test_fields(self):\n tags = (\n ('<form', 1),\n ('<input', 6),\n ('type=\"text\"', 3),\n ('type=\"email\"', 1),\n ('type=\"submit\"', 1),\n )\n for text, number in tags:\n with self.subTest():\n self.assertContains(self.reps, text, number)", "def getTestResults():", "def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def tests(self):\n return [t for 
t in self._collection if t.parent is self]", "def process_test(self, data):\n new_utts = []\n for l in data:\n tem = []\n for sent in l:\n tem.append([\"<s>\"] + sent + [\"</s>\"])\n new_utts.append(tem)\n return new_utts # 以输入的测试标题为topic,四句空诗", "def return_textview_elements(self):\n return self.driver.find_elements_by_class_name('android.widget.TextView')", "def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__", "def get_tests(self, cluster_id):\n return self._client.get(\n url=\"/tests/{}\".format(cluster_id),\n ).json()", "def list_tests(tests_module,\n test_module_names=None, test_class_map=None, skip_class_map=None):\n tests = load_tests(tests_module, test_module_names, test_class_map, skip_class_map)\n for test_class in tests:\n print(cmd.COLORS['title'](test_class.__name__) + ':')\n test_cases = unittest.loader.getTestCaseNames(test_class, 'test')\n for test_case in test_cases:\n print(textwrap.indent(test_case, cmd.INDENT))", "def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'instructions_html', 'sourceUrl', 'typeSource', 'poster', 'annotation_storage']:\r\n self.assertIn(key, context)", "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def test_get_root_html1(self):\n pass", "def tests():", "def test_list(self):\n pass", "def test_list(self):\n pass", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def extract_data_listing(html):\n id_finder = re.compile(r'PR[\\d]+~')\n return html.find_all('div', id=id_finder)", "def test_container_on_container_html(self):\n draft_container = self._create_item(self.child_container.location, \"wrapper\", \"Wrapper\")\n self._create_item(draft_container.location, \"html\", \"Child HTML\")\n\n def test_container_html(xblock):\n self._test_html_content(\n xblock,\n expected_section_tag=(\n '<section class=\"wrapper-xblock level-page is-hidden studio-xblock-wrapper\" '\n 'data-locator=\"{0}\" data-course-key=\"{0.course_key}\">'.format(draft_container.location)\n ),\n expected_breadcrumbs=(\n '<a href=\"/course/{course}{subsection_parameters}\">Lesson 1</a>.*'\n '<a href=\"/container/{unit_parameters}\">Unit</a>.*'\n ).format(\n course=re.escape(str(self.course.id)),\n unit_parameters=re.escape(str(self.vertical.location)),\n subsection_parameters=re.escape('?show={}'.format(http.urlquote(\n str(self.sequential.location).encode()\n ))),\n ),\n )\n\n # Test the draft version of the container\n test_container_html(draft_container)\n\n # Now publish the unit and validate again\n self.store.publish(self.vertical.location, self.user.id)\n draft_container = self.store.get_item(draft_container.location)\n test_container_html(draft_container)", "def mine(self):\n collections = []\n # Getting HTML snapshot with selenium, storing a soup object in .data\n self.scrape()\n # Returns only the parts of the soup that surround each collection\n collection_elements = self.get_collection_elements()\n # 
Turns each soup element into a CollectionElement object\n collections = self.get_info_from_collections(collection_elements)\n # NOTE THE RETURN VALUE IS MERELY TO PASS TESTING< MUST BE CHANGED\n return self.data", "def test_get_html(self):\r\n context = self.mod.get_html()\r\n for key in ['display_name', 'tag', 'source', 'instructions_html', 'content_html', 'annotation_storage', 'token']:\r\n self.assertIn(key, context)", "def list_tests(arn=None, nextToken=None):\n pass", "def test_cases(self) -> list[str]:\n cases = []\n for t in self._test_cases:\n if t not in cases:\n cases.append(t)\n return cases", "def get_children(self, test, expression):\n\n for child in self.children:\n if TextMatch.dict_call(test, child.text, expression):\n yield child", "def test_get_publish_content_html(self):\n response = self.setup_get_html_test('/api/publish')\n count_elements = self.count_markup_elements(response.data, 'input')\n self.assertEqual(count_elements, 4)", "def get_html(self) -> List[ComponentMeta]:\n return [Div(id=\"additions\")]", "def get_paragraphs():\n soup = get_html()\n paragraphs = []\n for i in soup.findAll('div', {'class': 'faq-list1__hide'}):\n p = str(i.get_text().strip())\n paragraphs.append(p)\n return paragraphs", "def lab_test_list(\n self, params: Optional[Dict] = None, headers: Optional[Dict] = None\n ) -> List[LabTestDetails]:\n method = self._get_method(\"lab_tests\")\n\n return self.call_api_get(method=method, params=params, headers=headers)", "def _get_test_generator(self):\n for data_element in self.test:\n image, heatmap = self._generate_input_tuple(data_element)\n \n yield (image, heatmap)", "def test_container_on_container_html(self):\r\n published_container = ItemFactory.create(\r\n parent_location=self.child_container.location,\r\n category=\"wrapper\", display_name=\"Wrapper\"\r\n )\r\n ItemFactory.create(\r\n parent_location=published_container.location,\r\n category=\"html\", display_name=\"Child HTML\"\r\n )\r\n\r\n def test_container_html(xblock):\r\n self._test_html_content(\r\n xblock,\r\n expected_section_tag=(\r\n '<section class=\"wrapper-xblock level-page is-hidden studio-xblock-wrapper\" '\r\n 'data-locator=\"{0}\" data-course-key=\"{0.course_key}\">'.format(published_container.location)\r\n ),\r\n expected_breadcrumbs=(\r\n r'<a href=\"/unit/{unit}\"\\s*'\r\n r'class=\"navigation-link navigation-parent\">Unit</a>\\s*'\r\n r'<a href=\"/container/{split_test}\"\\s*'\r\n r'class=\"navigation-link navigation-parent\">Split Test</a>\\s*'\r\n r'<a href=\"#\" class=\"navigation-link navigation-current\">Wrapper</a>'\r\n ).format(\r\n unit=re.escape(unicode(self.vertical.location)),\r\n split_test=re.escape(unicode(self.child_container.location))\r\n )\r\n )\r\n\r\n # Test the published version of the container\r\n test_container_html(published_container)\r\n\r\n # Now make the unit and its children into a draft and validate the container again\r\n modulestore('draft').convert_to_draft(self.vertical.location)\r\n modulestore('draft').convert_to_draft(self.child_vertical.location)\r\n draft_container = modulestore('draft').convert_to_draft(published_container.location)\r\n test_container_html(draft_container)", "def test_html_structure(self):\n self.assertContains(self.response, '<form', 1)\n self.assertContains(self.response, '<input', 3)\n #3 pois são 2 filefield mais o csrf\n self.assertContains(self.response, 'type=\"file\"', 1)\n self.assertContains(self.response, 'type=\"submit\"', 1)", "def sequence(self):\n for tn in self._testnodes:\n yield tn", "def 
test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])", "def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])", "def get_html(self):\r\n return u'This is supposed to be test html.'", "def getTestData(self):\n raise NotImplementedError", "def get_component_html(self):\n return '\\n'.join([hunit.get_component_html() for hunit in self.harmonizationunit_set.all()])", "def extract_all_tags(final_link, driver):\n\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(str(final_link))\n classes = []\n tags = ['div', 'td', 'li', 'a']\n for tag in tags:\n a = driver.find_elements_by_tag_name(str(tag))\n b = len(a)\n for i in range(b):\n try:\n if a[i].get_attribute(\"class\") == None or a[i].get_attribute(\"class\") == '' or a[i].get_attribute(\"class\") == ' ' or a[i].get_attribute(\"class\") == ' ':\n continue\n else:\n className = a[i].get_attribute(\"class\").strip().split(\" \")\n for classN in className:\n classes.append(str(tag) + '.' + str(classN))\n\n except:\n continue\n\n #driver.quit()\n classes = list(dict.fromkeys(classes))\n return(classes)", "def get_testing_data(self):\n\n print 'Loading testing data ', self.test_folder , '...'\n test_text = []\n cnt = 0\n\n for f in listdir(self.test_folder):\n file_path = join(self.test_folder, f)\n if isfile(file_path):\n cnt += 1\n if cnt % 10000 == 0:\n print 'finished:', cnt # line counter\n self.test_index.append(f[:-4])\n with open(file_path, 'rb') as f:\n test_text.append( f.read() )\n\n return test_text", "def test_get_root_html2(self):\n pass", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def get_test_examples(self, data_path):\r\n return self.create_examples(self.read_data(data_path), 'test')", "def get_all_platform_tests(self):\n for testitem in self.get_tests(self.discover_tests()):\n if not testitem:\n continue\n prefix = \"tests.\" + self.platform + \".\"\n self.formatted_tests_set.append(\n prefix + self.format_into_test_path(testitem)\n )\n\n if self.denylist:\n try:\n with open(self.denylist, \"r\") as f:\n denylist = f.read().splitlines()\n except FileNotFoundError:\n denylist = []\n\n self.formatted_tests_set = [\n t for t in self.formatted_tests_set if t not in denylist\n ]\n\n return self.formatted_tests_set", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test\n # set.\n return list(map(self.classify, test))", "def process_test_data(self, test_data):\n\n result = []\n for suite in test_data:\n suite_report = TestGroupReport(\n name=suite[\"name\"],\n category=ReportCategories.TESTSUITE,\n uid=suite[\"name\"],\n )\n suite_has_run = False\n\n for testcase in suite[\"data\"]:\n if testcase[\"status\"] != \"skipped\":\n suite_has_run = True\n\n testcase_report = TestCaseReport(\n name=testcase[\"name\"],\n uid=testcase[\"name\"],\n suite_related=True,\n )\n assertion_obj = RawAssertion(\n passed=testcase[\"status\"] == \"pass\",\n content=testcase[\"error\"] or testcase[\"duration\"],\n description=testcase[\"name\"],\n )\n testcase_report.append(registry.serialize(assertion_obj))\n testcase_report.runtime_status = RuntimeStatus.FINISHED\n 
suite_report.append(testcase_report)\n\n if suite_has_run:\n result.append(suite_report)\n\n return result", "def test_list(request, target, format=None):\n if request.method == 'GET':\n tests = Test.objects.filter(target=target)\n serializer = TestSerializer(tests, many=True)\n return Response(serializer.data)", "def data():\r\n\r\n data = []\r\n m = soup.find('div', {'id': 'main-content'})\r\n\r\n for tag in m.children:\r\n\r\n \"\"\"Creates dict element with tag name and text for h-tags.\"\"\"\r\n if tag.name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:\r\n data.append({'tag': tag.name,\r\n 'text': tag.text.strip(),\r\n 'ul': [],\r\n 'table': [],\r\n 'subheads': []})\r\n\r\n \"\"\"Adds unordered list data.\"\"\"\r\n if tag.name == 'ul':\r\n temp = _parse_list(tag)\r\n data[-1]['ul'].append(temp)\r\n\r\n \"\"\"Adds tabular data.\"\"\"\r\n try:\r\n lst = tag.get_attribute_list('class') # Finds table\r\n if 'table-wrap' in lst:\r\n tbl = []\r\n row = []\r\n\r\n for j in tag.find_all('tr'): # Adds table rows\r\n row = []\r\n\r\n for k in j.children:\r\n if k.name == \"th\" or k.name==\"td\":\r\n '''txt = _parse_list(l.ul)\r\n if txt == None:'''\r\n txt = k.get_text('\\n ').strip()\r\n row.append(txt) \r\n\r\n if row != []:\r\n tbl.append(row)\r\n\r\n if(tbl != []):\r\n data[-1]['table'].append(tbl)\r\n except:\r\n pass\r\n\r\n return data", "def testList(self):\n def _check(results):\n self.assertEqual(results[0], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n self.assertEqual(results[1], [b'testDirectory', b'testRemoveFile',\n b'testRenameFile', b'testfile1'])\n self.assertEqual(results[2], [b'testRemoveFile', b'testRenameFile'])\n self.assertEqual(results[3], [b'.testHiddenFile', b'testRemoveFile',\n b'testRenameFile'])\n self.assertEqual(results[4], [b''])\n d = self.runScript('ls', 'ls ../' + self.testDir.basename(),\n 'ls *File', 'ls -a *File', 'ls -l testDirectory')\n d.addCallback(lambda xs: [x.split(b'\\n') for x in xs])\n return d.addCallback(_check)" ]
[ "0.7729957", "0.62460953", "0.62029403", "0.601013", "0.597623", "0.5949348", "0.58157754", "0.5773843", "0.56723", "0.5651943", "0.5582805", "0.5582524", "0.5579697", "0.55494183", "0.5474166", "0.54684716", "0.5461556", "0.5445097", "0.5445097", "0.54369843", "0.54336387", "0.54336387", "0.5420988", "0.5380152", "0.5323457", "0.53081125", "0.5306144", "0.5303314", "0.53003", "0.5293838", "0.52936673", "0.5273362", "0.5272619", "0.5257217", "0.5233425", "0.5210406", "0.5208358", "0.52068615", "0.519873", "0.51848656", "0.5181792", "0.5181792", "0.51564497", "0.51527166", "0.51479053", "0.5144532", "0.5134435", "0.51288545", "0.5118529", "0.5116701", "0.5108009", "0.51048744", "0.50807995", "0.5077929", "0.5075748", "0.50662", "0.50523525", "0.503887", "0.50369316", "0.5031421", "0.5028812", "0.5026282", "0.5021361", "0.50198865", "0.50195175", "0.5016415", "0.5016415", "0.5014273", "0.50126183", "0.501184", "0.500318", "0.50014484", "0.49868137", "0.49862334", "0.49843922", "0.49533567", "0.49263492", "0.4917948", "0.491246", "0.49105418", "0.49005872", "0.4899238", "0.4897452", "0.4897165", "0.4891106", "0.4891106", "0.488754", "0.4886811", "0.48827103", "0.48824295", "0.48790768", "0.48774508", "0.48762926", "0.48697296", "0.48687902", "0.48651665", "0.486424", "0.48581716", "0.48546407", "0.4849848" ]
0.8394743
0
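The score columns give a rough handle on negative difficulty: in the first row the positive's document_score (0.7983814) sits only slightly above the best-scoring negative (0.74490196), and the negatives appear to be sorted by score in descending order. The sketch below illustrates one assumed use, filtering out likely false negatives that score too close to the positive and dropping trivially easy ones; the margin, floor, and k values are arbitrary illustrative choices, not part of the dataset.

    def select_hard_negatives(row, margin=0.05, floor=0.3, k=8):
        # Keep negatives that are hard (close to, but clearly below, the positive's score)
        # and drop ones so distant that they add little training signal.
        doc_score = float(row["document_score"])
        pairs = zip(row["negatives"], row["negative_scores"])
        kept = [neg for neg, s in pairs if floor <= float(s) <= doc_score - margin]
        return kept[:k]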
Return a list of the ids of outer divs with the specified text in a child element.
def ids_of_outer_divs_with_inner_text(self, child_text): return self.q(css='div.outer').filter( lambda el: child_text in [inner.text for inner in el.find_elements_by_css_selector('div.inner')] ).attrs('id')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)", "def findChildren(widget=None, name=\"\", text=\"\"):\n\t\t# TODO: figure out why the native QWidget.findChildren method\n\t\t# does not seem to work from PythonQt\n\t\tif not widget:\n\t\t\twidget = mainWindow()\n\t\tchildren = []\n\t\tparents = [widget]\n\t\twhile parents != []:\n\t\t\tp = parents.pop()\n\t\t\tparents += p.children()\n\t\t\tif name and p.name.find(name) >= 0:\n\t\t\t\tchildren.append(p)\n\t\t\telif text:\n\t\t\t\ttry:\n\t\t\t\t\tp.text\n\t\t\t\t\tif p.text.find(text) >= 0:\n\t\t\t\t\t\tchildren.append(p)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\treturn children", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def subtype_ids(elements, subtype):\n return [i for (i, element) in enumerate(elements)\n if isinstance(element, subtype)]", "def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]", "def scan_individual_identifiers(text: str, cpf: bool = True) -> List[str]:\n if cpf:\n regex = re.compile(r\"\\w{3}\\.\\w{3}\\.\\w{3}\\-\\w{2}\")\n else:\n regex = re.compile(r\"\\w{2}\\.\\w{3}\\.\\w{3}/\\w{4}\\-\\w{2}\")\n\n identifiers = re.findall(regex, text)\n return identifiers", "def find_by_xpath(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_xpath(element)\n return final_elements", "def children(word, word_dict):\n res = []\n for i in range(len(word)):\n child = word[:i]+word[i+1:]\n if child in word_dict:\n res.append(child)\n return res", "def findIds(self, query):\t\t\t\t\t\t\t## Multiple Elements\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_id(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"Could not find ID: {}\\n\\n{}\".format(query, e))\n\t\t\treturn -1", "def _find_with_text(self, selector, text):\n stripped = text.strip()\n elements = self.selenium.find_elements_by_css_selector(selector)\n return [e for e in elements if e.text.strip() == stripped]", "def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item", "def get_child_ids(cur, node):\n sql = \"\"\"\n SELECT\n id\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['id'])", "def div_text_list(self):\n return self.q(css='div.test').text", "def activeChildWellIds(self):\n lst=[]\n if self.isReplicateGroup():\n for tc in 
self.activeChildWells():\n lst.extend(tc.activeChildWellIds())\n else:\n if self.wellids is not None and self.wellids[0] is not None:\n wellid=self.wellids[0]\n else:\n wellid=str(self.childWellIndices()[0])\n lst.append(wellid)\n return lst", "def _findTextWithinElement(self, selector):\n parent_text = self._getStrippedText(selector) # everybody has got text I think. so this shouldn't raise IndexError\n if parent_text: return parent_text\n subelements = selector.css('*')\n texts_found = []\n for element in subelements:\n elem_text = self._getStrippedText(element)\n if \"CDATA\" in elem_text: continue # that's a part of the document not intended to be visible\n texts_found.append(elem_text)\n return ' '.join(texts_found)", "def GetExpandableIds(children, length_name):\n # I could write a list comprehension here. Would it make the code clearer?\n result = []\n for child_id, child in enumerate(children):\n if child.canExpand(length_name):\n result.append(child_id)\n return result", "def get_descendant_ids(cur, node):\n sql = \"\"\"\n SELECT\n node\n FROM\n ancestors\n WHERE\n ancestor=%s;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['node'])", "def getChildElementsByTagName(element: org.w3c.dom.Element, string: str) -> java.util.List:\n ...", "def find_by_css(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_css(element)\n return final_elements", "def extract_data_listing(html):\n id_finder = re.compile(r'PR[\\d]+~')\n return html.find_all('div', id=id_finder)", "def children(parent, data):\n\n kids = []\n for pid in data:\n if data[pid][\"parentId1\"] == parent or data[pid][\"parentId2\"] == parent:\n kids.append(pid)\n\n return kids", "def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids", "def get_ids(self) -> List[str]:", "def find_elements_inside_element(self, parent_element: Union[WebElement, Tuple[By, str]],\n children_element_locator: Tuple[By, str], wait_time=10,\n skip_exception=False) -> List[WebElement]:\n parent_element = self.find_element(parent_element)\n for i in range(wait_time):\n by_type, value = children_element_locator\n if by_type == By.CSS_SELECTOR:\n children = parent_element.find_elements_by_css_selector(value)\n elif by_type == By.XPATH:\n children = parent_element.find_elements_by_xpath(value)\n else:\n children = parent_element.find_elements(children_element_locator)\n if len(children):\n return children\n time.sleep(1)\n else:\n if not skip_exception:\n raise TimeoutException(f'Elements was not found in {wait_time} seconds')\n return []", "def get_children(search_tag, tag_list):\n list_return = []\n\n for tag in tag_list:\n if str(tag.parent) == str(search_tag):\n list_return.append(tag)\n list_return.extend(get_children(tag, tag_list))\n return list(set(list_return)) # This will return a list of unique elements", "def getIDs():", "def occurence(main_seq,sub_seq):\n start= 0\n indices =[]\n while True:\n start = main_seq.find(sub_seq,start)\n if start > 0:\n indices.append(start)\n else:\n break\n start +=1\n return indices", "def get_child_ids(self, job_specifier, project=None, status=None):\n if project is None:\n project = self._project\n id_master = self.get_job_id(project=project, job_specifier=job_specifier)\n if id_master is None:\n return []\n else:\n if status is not None:\n 
id_lst = self._job_table[\n (self._job_table.masterid == id_master) & (self._job_table.status == status)].id.values\n else:\n id_lst = self._job_table[(self._job_table.masterid == id_master)].id.values\n return sorted(id_lst)", "def vectorize(self,text):\r\n \r\n lv_active = set()\r\n words = word_tokenize(text)\r\n for word in words:\r\n if word in self.tree:\r\n ancestors = self.tree.word_ancestors(word)\r\n lv_active.update(ancestors)\r\n \r\n return self.nl.isin(lv_active).values", "def leafs_ids(cls, tree_depth):\n return cls.nodes_at_depth(tree_depth)", "def get_order_from_tree(ids, tree_text):\r\n tree = parse_newick(tree_text, PhyloNode)\r\n ordered_ids = []\r\n for tip in tree.iterTips():\r\n if tip.Name in ids:\r\n ordered_ids.append(tip.Name)\r\n return names_to_indices(ids, ordered_ids)", "def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)", "def find(self, text, term):\n\t\tlistOfResults = list()\n\n\t\tcurrentIndex = 0\n\t\ttermLength\t = len(term)\n\t\tappend\t\t = listOfResults.append\n\n\t\twhile currentIndex >= 0:\n\t\t\tcurrentIndex = text.find(term, currentIndex+1)\n\t\t\tappend((currentIndex, currentIndex+termLength))\n\n\t\t# Return listOfResults[:-1] because the last tuple contains -1 (negative one)\n\t\treturn listOfResults[:-1]", "def get_translated_ids(id):", "def getTargets(idlist):\n targets = []\n for xmlid in idlist:\n #the <text> element of the mnemonic or accelerator\n elm = findElementWithId(\"text\", xmlid)\n #the parent element of the mnemonic or accelerator\n parentTag = elm.parentNode.parentNode.tagName\n if parentTag == \"action\" or parentTag == \"container\" or parentTag == \"control\" or parentTag == \"dialog\":\n caption = getFirstChildWithTagName(elm.parentNode.parentNode, \"caption\")\n textid = getTextIdForCaption(caption)\n targets.append(textid)\n return targets", "def test_search_subnode_attribute(self):\n\n lista = []\n for (_, value) in self.parser.search(self.xml, 'node@id'):\n lista.append(value)\n self.assertEqual(lista, ['1', '2', '3'])", "def findAll(self, text):\n\n\t\tfindAllResults = list()\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tfindInstance = self.findInstance\n\t\tappend \t\t = findAllResults.append\t\t \n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor i in xrange(len(self.toWORD)):\n\n\t\t\tword = self.toWORD[i]\n\n\t\t\tif i == 0:\n\t\t\t\t#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*#\n\t\t\t\t# Skip the zeroeth index to avoid including punctuation in the findAllResults list\t\t #\n\t\t\t\t#=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*#\n\t\t\t\tpass\n\n\t\t\telse:\n\t\t\t\tfor w in word:\n\n\t\t\t\t\tif len(w) > 0:\n\t\t\t\t\t\tresults = findInstance(text = text, term = w)\n\n\t\t\t\t\t\tif len(results) > 0:\n\t\t\t\t\t\t\tappend((i, results))\n\n\t\treturn findAllResults", "def find_elements_by_text(self,param={},ignore_error_handle = False):\n message = {};\n step = 'find all elements by text ' + param.get('text',None) + ' on current page';\n text = param.get('text',None);\n try:\n elements = self.driver.find_elements(by=By.NAME,value=text);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def _get_parents_from_structure(self, block_id, structure):\r\n items = 
[]\r\n for parent_id, value in structure['blocks'].iteritems():\r\n for child_id in value['fields'].get('children', []):\r\n if block_id == child_id:\r\n items.append(parent_id)\r\n\r\n return items", "def test_custom_ids(self):\n it = [\n \"[[Chapter]]{'id': '/base/chapter/1'} Chapter I\",\n \"This is chapter I text\",\n \"[[Article]]{'id': '/base/article/1'} Article I\",\n \"This is article I text\",\n ]\n\n descriptor = {\n 'components': ['Chapter', 'Section', 'Sub-section', 'Article'],\n 'patterns': ['Chapter', 'Section', 'Sub-section', 'Article']\n }\n\n doc = parse_iterable(it, descriptor)\n\n result = [n for n in doc.graph.nodes(data=True)]\n\n expected = [\n ('ROOT [0]', {'meta': 'root', 'level': 0, 'text': [], 'pad': False, 'id': '/root'}),\n ('Chapter [1]', {'meta': 'Chapter', 'level': 1, 'pad': False, 'text': [\"Chapter I\", 'This is chapter I text'], 'id': '/base/chapter/1'}),\n ('Article [2]', {'meta': 'Article', 'level': 4, 'pad': False, 'text': [\"Article I\", 'This is article I text'], 'id': '/base/article/1'})\n ]\n\n self.assertListEqual(result, expected)", "def get_elements(self, css=None, text=None):\n if css is None and text is None:\n raise ValueError()\n\n # Use ordered sets so we don't muck up the ordering if the caller specifies\n # two or more arguments. This is a bit over-convoluted for having only two\n # ways to query (css and text) but the pattern makes it easy to plug in\n # more ways.\n items = None\n def update(new_items):\n nonlocal items\n if items == None:\n items = OrderedSet(new_items)\n else:\n items = items & OrderedSet(new_items)\n\n if text is not None:\n update([e for e in get_elements(self, css=\"*\") if e.text == text])\n if css is not None:\n update(self.find_elements_by_css_selector(css))\n\n return items", "def _get_matching_node_ids(self, node_name):\n try:\n with closing(self.connection) as con:\n with con:\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT id\n FROM nodes\n WHERE name LIKE (?)\n \"\"\", (node_name,))\n res = cursor.fetchall()\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: An error occurred when retrieving node ids: {}\".format(e))\n\n if len(res) == 0:\n print(\"ERROR: Could not find node ID for name '{0}'.\".format(node_name))\n return []\n\n elif len(res) > 1:\n print(\"Found multiple node IDs for name '{0}', returning first result.\".format(node_name))\n\n # e.g. 
[(10,), (11,)] => [10, 11]\n return [x[0] for x in res]", "def find_all_sub(self, sub):\n try:\n return [i for i in range(len(self.__dna)) if self.__dna.startswith(sub, i)]\n except ValueError:\n raise ValueError", "def get_child_ids(forum):\n forum_ids = [forum.id]\n if forum.children:\n for child in forum.children:\n forum_ids.extend(\n get_child_ids(child) # Get the children from the children\n )\n return forum_ids", "def getItemsInContainer(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"action\"))\n items.extend(getAllChildrenWithTagName(elm, \"container\"))\n switches = getAllChildrenWithTagName(elm, \"switch\")\n for sw in switches:\n items.extend(getAllChildrenWithTagName(sw, \"action\"))\n items.extend(getAllChildrenWithTagName(sw, \"container\"))\n return items", "def _subnode_ids(self):\n for ticket in self:\n ticket.subnode_ids = self.search([\n ('parent_id', '=', ticket.id),\n ('type.has_children', '=', True)])", "def get_dependent_control_ids(self, control_id: str) -> List[str]:\n children: List[str] = []\n control = self.get_control(control_id)\n if control.controls:\n new_controls = self._get_all_controls_in_list(control.controls, True)\n children.extend([con.id for con in new_controls])\n return children", "def find_words(root: TrieNode):\n found = []\n\n if root.word:\n found.append(root.data)\n\n for child in root.children:\n for el in find_words(child): \n found.append(el)\n\n return found", "def query_parent(selectors, tree_item):\n return [subitem for subitem in iterate_parent(tree_item)\n if all(selectors, subitem)]", "def positions(self, searchstr: str):\n out = []\n for x in range(0, len(self.sa)):\n sub = self.sa[x]\n if searchstr == sub[0:len(searchstr)]:\n out.append(x)\n return out\n \n pass", "def lookup(root: dict, query: str, exact: bool = False) -> List[Set[int]]:\n if not query:\n return set()\n\n word_ids: List[Set[int]] = [] # ids of items that correspond to query\n for word in preprocess_words(query):\n node = root\n for c in word:\n node: Optional[dict] = node.get(c) # type: ignore\n if not node:\n # dead-end for this word\n word_ids.append(set())\n break\n else:\n word_ids.append(collect(node, exact))\n\n return word_ids", "def get_ids_from_container_name(base_container_name):\n container_prefix = base_container_name + \".\"\n lines = dockercall(\"ps\", \"-a\").splitlines()\n ids = []\n for line in lines[1:]:\n parts = line.strip().split()\n id = parts[0]\n name = parts[-1]\n if name == base_container_name or name.startswith(container_prefix):\n ids.append((name, id)) # name first, for sorting\n ids.sort()\n return {id: name for name, id in ids}", "def extract_events(text: str) -> (List[str], List[str]):\n alert_ids = re.findall(r'\"anomalous\"(?:.+\\n.+)+\"tsId\"\\s:\\s\"(.+)\"', text)\n clear_ids = re.findall(r'\"ok\"(?:.+\\n.+)+\"tsId\"\\s:\\s\"(.+)\"', text)\n return alert_ids, clear_ids", "def get_element_indices_within_rectangle(self, xmin, xmax, zmin, zmax):\n centroids = self.get_element_centroids()\n indices_list = []\n for nr, (x, z) in enumerate(centroids):\n if x >= xmin and x <= xmax and z >= zmin and z <= zmax:\n indices_list.append(nr)\n return np.array(indices_list)", "async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n 
pids=process_ps_stdout(stdout)\n\n return pids", "def getTestsIds():\n with driver.session() as s:\n ids = s.write_transaction(getTestsId)\n\n tIds = []\n for idEl in ids:\n tIds.append(idEl[\"ID(t)\"])\n\n return tIds", "def subwords(txt, sub):\n txt = txt.lower()\n txt = txt.replace('’', '\\'')\n sub = sub.lower().replace(' ', '')\n it = 0\n indices = []\n for c in sub:\n try:\n while txt[it] != c:\n it += 1\n indices.append(it)\n except (IndexError):\n print('Cannot find secret in text.')\n return []\n return indices", "def get_element_commit_ids(sysmlId):\n elements = get_elements_from_elasticsearch(sysmlId=sysmlId)\n commits = []\n for element in elements:\n commits.append(element[\"_source\"][\"_commitId\"])\n return commits", "def Find(self, children, sink):\n\n tkns = [];\n for child in children:\n key = child.word;\n if not child.word: key = child.tag;\n tkns.append(key);\n self.FindFromTokens(tkns, sink);", "def getContentTransito(self, chave):\n content = self.tree.xpath(\"string(//div[@id='%s'])\" % chave).split(' ')[0]\n return content", "def get_text_reply_ids(self):\n if not self._linked_comment:\n return []\n replies = Comment.objects.filter(replied_comment=self._linked_comment)\n return [ids[0] for ids in replies.exclude(reply_text='').order_by('id').values_list('id')]", "def find_label_element(self, label_text, container):\n element = next(iter(list(map(lambda x: self.find_first_div_parent(x), container.find_all(text=re.compile(f\"^{re.escape(label_text)}\" + r\"(\\*?)(\\s*?)$\"))))), None)\n if element is None:\n return []\n\n return element\n\n '''\n next_sibling = element.find_next_sibling(\"input\")\n # next_input = element.contents.find(\"input\")\n if next_sibling:\n # if next_input:\n return [next_sibling]\n # return [next_input]\n else:\n return []\n '''", "def getElementIds(self):\n\t\ttry:\n\t\t\treturn self.collector.ToElementIds()\n\t\texcept:\n\t\t\tself.all()\n\t\t\treturn self.collector.ToElementIds()", "def doFindAll(self, str):\n matches = []\n for value in self.doId2do.values():\n if repr(value).find(str) >= 0:\n matches.append(value)\n return matches", "def get_searchable_content(self, value):\n content = []\n\n if value:\n for child in value:\n content.extend(child.block.get_searchable_content(child.value))\n\n return content", "def members(self, x):\n root = self.find(x)\n return [i for i in range(self.n) if self.find(i) == root]", "def get_camp_ids_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [x.get(\"id\") for x in all_campaigns if string in x[\"name\"]]", "def containing(letter, text):\n return([word for word in text if word.count(letter) >= 1])", "def load_xml_text_data(parent_element, text_list_name):\n data_from_text = []\n list_item_name = get_singular_from_plural(text_list_name)\n\n for text_element in parent_element.findall(list_item_name):\n new_data = convert_to_int_if_numeric(text_element.text)\n data_from_text.append(new_data)\n\n return data_from_text", "def find_parents(self, tagname):\n res = []\n if self._tagname == tagname:\n res = [self]\n if self._parent is not None:\n res += self._parent.find_parents(tagname)\n return res", "def findAllInstances(text, term):\n index = 0 - len(term)\n text = text.lower()\n term = term.lower()\n try:\n while True:\n index = text.index(term, index + len(term))\n yield index\n except ValueError:\n pass", "def get_electrode_indeces(electrical_series, electrode_ids):\n electrode_table_region = 
list(electrical_series.electrodes.to_dataframe().index)\n return [elect_idx for elect_idx, elect_id in enumerate(electrode_table_region) if elect_id in electrode_ids]", "def children_ids(self):\n return self._children_ids", "def get_ordered_ids(tree):\n ordered_ids = []\n ordered_ids.extend(id(node) for node in tree.gen_tips())\n ordered_ids.extend(id(node) for node in tree.gen_internal_nodes())\n return ordered_ids", "def parentsUntil(self,selector):\n nList = self.htmlDom.find(selector)\n parentsList = []\n tmpList = []\n selectedNodesList = []\n for node in self.nodeList:\n if not node.ancestorList:\n node.generateAncestorList()\n tmpList = node.ancestorList\n for selectedNode in nList.nodeList:\n try:\n index = tmpList.index( selectedNode )\n selectedNodeList = tmpList[:index]\n parentsList += self.getUniqueNodes( parentsList, selectedNodeList )\n break\n except ValueError:\n pass\n else:\n parentsList += self.getUniqueNodes( parentsList, tmpList )\n parentsList = sorted( parentsList, key = lambda x: x.pos )\n return HtmlNodeList( parentsList, self.htmlDom, self.nodeList, self )", "def position(element1, root=None):\n \n position = [] \n current = element1\n while (current.getparent() is not None) and (current is not root):\n parent = current.getparent()\n #find the index of current under parent\n index = 0\n for i in parent:\n if i is current: break\n index += 1\n position.insert(0, index + 1)\n current = parent\n \n position.insert(0, 1) # for the root element\n return position", "def search(self, term):\n results = set() # Set of elements matching search term.\n element = [] # Current element reached in search.\n def _search(m, node, i):\n # Having just matched m, search for term[i:] starting at node.\n element.append(m)\n if i == len(term):\n if node._end:\n results.add(''.join(element))\n elif term[i] == '*':\n _search('', node, i + 1)\n for k, child in node._children.items():\n _search(k, child, i)\n elif term[i] in node._children:\n _search(term[i], node._children[term[i]], i + 1)\n element.pop()\n _search('', self, 0)\n return results", "def find_all(st, sub):\n\n if not sub: return None\n if sub[0] not in st.root.trans: return None\n \n found, i, s = False, 0, st.root\n scaned = 0 # length of the scaned\n while True:\n k, p, s = s.trans[sub[i]]\n len1, len2 = p-k+1, len(sub)-i\n if len1 >= len2:\n if st.text[k:k+len2] == sub[i:]:\n found, scaned = True, scaned+len1\n break\n else:\n if st.text[k:k+len1] == sub[i:i+len1]:\n i, scaned = i+len1, scaned+len1\n else: break\n if found:\n # shift_of_suffix = len(st.text) - len(suffix)\n leaf_depthes = get_leaf_depthes(s)\n return [len(st.text)-x-scaned for x in leaf_depthes]\n\n return None", "def get_layer_ids(element):\n \n st='./gbxml:LayerId/@layerIdRef'\n return element.xpath(st,namespaces=ns)", "def subnotebook_get_titles_ids(self):\n tabs = dict()\n for tab_id in range(0, self.subnotebook.index(\"end\")):\n tabs[self.subnotebook.tab(tab_id, \"text\")] = tab_id\n logger.debug(tabs)\n return tabs", "def get_parent(self, element):\n return element.find_elements_by_class_name(\"wrap-text\")[2].get_attribute(\"innerHTML\").strip()", "def get_descendant_elements(self, xpath) -> list:\n tmp_xpath = self._chain_xpath(xpath)\n tmp_loc = (By.XPATH, tmp_xpath)\n return self._wait.until(EC.visibility_of_all_elements_located(tmp_loc))", "def getItemsInDialog(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"control\"))\n return items", "def get_cpd_ids(string):\n return [x for x in string.split(\" \") if 
x.startswith(\"C\")]", "def find(self, word):\n currnode = self.root\n\n for letter in word:\n if letter not in currnode.children:\n return Set()\n currnode = currnode.children[letter]\n\n return currnode.pages", "def text_to_id(text, word_to_id_dict):\n return [word_to_id_dict[word] for word in text.split(\" \") if word in word_to_id_dict]", "def find_elements_by_partial_text(self,param,ignore_error_handle = False):\n message = {};\n step = 'find elements by partial text:' + str(param.get('partial_text',None));\n partial_text = str(param.get('partial_text',None));\n try:\n elements = self.driver.find_elements_by_partial_text(partial_text);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle)\n finally:\n return message;", "def doFindAllMatching(self, str):\n matches = []\n for value in self.doId2do.values():\n if re.search(str,repr(value)):\n matches.append(value)\n return matches", "def getSiblings():", "def find_all( source, substring, start=None, end=None, overlap=False ):\n return [x for x in find_all_iter( source, substring, start, end, overlap )]", "def subtree_matching(self, subtree):\n\t\t#TODO implement this in a faster way\n\t\ttext = self.preorder_traverse_to_list()\n\t\tpattern = subtree.preorder_traverse_to_list()\n\n\t\tprint text\n\t\tprint pattern\n\n\t\tmatches = []\n\t\tfor i in range(len(text)):\n\t\t\tif text[i:i+len(pattern)] == pattern:\n\t\t\t\tmatches.append(i)\n\t\treturn matches", "def get_child_indices(idx: int):\n return 2 * idx + 1, 2 * idx + 2", "def parents( self, selector = None ): \n tmpList = []\n for node in self.nodeList:\n if not node.ancestorList:\n node.generateAncestorList()\n tmpList += self.getUniqueNodes( tmpList, node.ancestorList )\n if selector:\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self ).filter( selector )\n else:\n tmpList = sorted( tmpList, key = lambda x: x.pos ) \n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self)", "def search_id(root, pid):\n for page in root.iter('page'):\n if pid == int(page.find('id').text):\n return page.find('revision').find('text').text", "def findall(l, o):\n return [i for i, u in enumerate(l) if u==o]", "def getIndexes(string1,string2):\n ret = []\n ind = string1.find(string2)\n \n while (ind > -1 and ind < len(string1)):\n ret.append(ind)\n ind = string1.find(string2,ind + 1)\n \n return ret", "def findInstance(self, text, term):\n\t\tindexList = set()\n\t\tindex = 0\n\t\ttext = text.upper()\n\t\tterm = \" {0} \".format(term.upper())\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tadd = indexList.add\n\t\tfind = text.find\t \n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\twhile True:\n\t\t\tindex = find(term, index)\n\t\t\tif index == -1: \n\t\t\t\treturn sorted(indexList)\n\t\t\telse:\n\t\t\t\tadd(index + len(term[1:-1]) + 1)\n\t\t\t\tadd(index + 1)\n\t\t\t\tindex += len(term)" ]
[ "0.55548036", "0.5479126", "0.54318756", "0.54250884", "0.53954", "0.5378344", "0.5358235", "0.532862", "0.53090286", "0.52930194", "0.5247154", "0.5223819", "0.5181133", "0.51650614", "0.5156875", "0.51566947", "0.512827", "0.5109152", "0.5081774", "0.5059653", "0.5021192", "0.5018383", "0.49994254", "0.49627846", "0.49329385", "0.49291104", "0.49055174", "0.48959976", "0.48937145", "0.48680916", "0.48606366", "0.48477486", "0.48307118", "0.48272884", "0.4819024", "0.48097506", "0.4803559", "0.47849387", "0.4773739", "0.4770679", "0.4759471", "0.47543085", "0.47460446", "0.4744075", "0.47312298", "0.47254068", "0.4703863", "0.46998802", "0.46952975", "0.46943995", "0.4664719", "0.46624112", "0.46577168", "0.46492475", "0.46402675", "0.4635736", "0.46207464", "0.46125162", "0.4612055", "0.46108004", "0.46103936", "0.46087077", "0.4606447", "0.4606436", "0.46055916", "0.46036246", "0.45992762", "0.45916694", "0.4588196", "0.45869404", "0.45831412", "0.45823866", "0.4575483", "0.45727614", "0.45690903", "0.4565167", "0.45602903", "0.45587212", "0.45563397", "0.4555641", "0.45471898", "0.45455977", "0.45391563", "0.45365098", "0.45244417", "0.4522172", "0.45189717", "0.45116797", "0.45094287", "0.45069525", "0.4504568", "0.44896686", "0.4486609", "0.44787568", "0.44622216", "0.44571167", "0.4456627", "0.4456235", "0.44476113", "0.4439035" ]
0.877173
0
Wait for click handlers to be installed, then click a button and retrieve the output that appears after a delay.
def trigger_output(self): EmptyPromise(self.q(css='div#ready').is_present, "Click ready").fulfill() self.q(css='div#fixture button').first.click() EmptyPromise(self.q(css='div#output').is_present, "Output available").fulfill()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def click_button(self):\n self.widgets.get('button').click()", "def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()", "def poll(self):\n\tself.met = self.button.poll()", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def is_button_output_present(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_presence('div#output', 'Button Output is Available')", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def wait():\n time.sleep(1)", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click(self):\r\n pass", "def wait(self):\n self.mainloop().wait()", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create 
Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))", "def WaitForTest(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('waitForTest', payload=payload, response_object=None)", "def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return", "def wait_until_transfers_displayed(self):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).wait_until_displayed()", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def wait():\n pass", "async def async_press(self) -> None:\n await self.entity_description.press_action(self.wrapper)", "def is_button_output_visible(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_visibility('div#output', 'Button Output is Visible')", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click(self, browser, locator, sleep_time=3, expl_time=20):\n\n time.sleep(sleep_time)\n try:\n browser.implicitly_wait(5)\n WebDriverWait(browser, expl_time, ignored_exceptions=StaleElementReferenceException).until(\n ec.presence_of_element_located(locator))\n except (NoSuchElementException, TimeoutException, ElementNotInteractableException, StaleElementReferenceException):\n # additional check were deleted, cause of some unexpected timeout exceptions on it\n browser.implicitly_wait(5)\n WebDriverWait(browser, 10).until(ec.element_to_be_clickable(locator))\n self.waiting_loading_element(browser)\n browser.find_element(*locator).click()\n self.waiting_loading_element(browser)", "def wait(self):\n self.event.wait()", "def tap():\n return \"I have clicked on the elements\"", "def click_the_save_button_which_should_be_returned_to_the_storage_page(driver):\n assert wait_on_element(driver, 5, 
'//button[contains(.,\"Save Access Control List\")]', 'clickable')\n driver.find_element_by_xpath('//button[contains(.,\"Save Access Control List\")]').click()\n time.sleep(1)\n assert wait_on_element_disappear(driver, 30, '//h6[contains(.,\"Please wait\")]')", "def clickonbutton(titleobj, buttontoclick):\n try:\n ldtp.click(titleobj,buttontoclick)\n logging.info(\"Clicked on : %s\" % buttontoclick)\n except Exception as er:\n print (\"Not able to click on button\")", "def test_ProstateReporting1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n self.delayDisplay('Test passed!')", "def _perform_click_input(\n button=\"left\",\n coords=(None, None),\n double=False,\n button_down=True,\n button_up=True,\n wheel_dist=0,\n pressed=\"\",\n key_down=True,\n key_up=True,\n fast_move=False\n):\n\n # Handle if the mouse buttons are swapped\n if win32functions.GetSystemMetrics(win32defines.SM_SWAPBUTTON):\n if button.lower() == 'left':\n button = 'right'\n elif button.lower() == 'right':\n button = 'left'\n\n events = []\n if button.lower() == 'left':\n events.append(win32defines.MOUSEEVENTF_MOVE)\n if button_down:\n events.append(win32defines.MOUSEEVENTF_LEFTDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_LEFTUP)\n elif button.lower() == 'right':\n if button_down:\n events.append(win32defines.MOUSEEVENTF_RIGHTDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_RIGHTUP)\n elif button.lower() == 'middle':\n if button_down:\n events.append(win32defines.MOUSEEVENTF_MIDDLEDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_MIDDLEUP)\n elif button.lower() == 'move':\n events.append(win32defines.MOUSEEVENTF_MOVE)\n events.append(win32defines.MOUSEEVENTF_ABSOLUTE)\n elif button.lower() == 'x':\n if button_down:\n events.append(win32defines.MOUSEEVENTF_XDOWN)\n if button_up:\n events.append(win32defines.MOUSEEVENTF_XUP)\n\n if button.lower() == 'wheel':\n events.append(win32defines.MOUSEEVENTF_WHEEL)\n\n # if we were asked to double click (and we are doing a full click\n # not just up or down.\n if double and button_down and button_up:\n events *= 2\n\n if button_down and (button.lower() not in ['move', 'wheel']):\n # wait while previous click is not affecting our current click\n while 0 < win32api.GetTickCount() - win32api.GetLastInputInfo() < win32gui.GetDoubleClickTime():\n time.sleep(Timings.after_clickinput_wait)\n\n # set the cursor position\n _set_cursor_pos((coords[0], coords[1]))\n if not fast_move:\n time.sleep(Timings.after_setcursorpos_wait)\n if win32api.GetCursorPos() != (coords[0], coords[1]):\n _set_cursor_pos((coords[0], coords[1]))\n time.sleep(Timings.after_setcursorpos_wait)\n\n keyboard_keys = pressed.lower().split()\n if ('control' in keyboard_keys) and key_down:\n keyboard.VirtualKeyAction(keyboard.VK_CONTROL, up=False).run()\n if ('shift' in keyboard_keys) and key_down:\n keyboard.VirtualKeyAction(keyboard.VK_SHIFT, up=False).run()\n if ('alt' in keyboard_keys) and key_down:\n keyboard.VirtualKeyAction(keyboard.VK_MENU, up=False).run()\n\n dw_flags = 0\n for event in events:\n dw_flags |= event\n\n dw_data = 0\n if button.lower() == 'wheel':\n wheel_dist = wheel_dist * 120\n dw_data = wheel_dist\n\n if button.lower() == 'move':\n x_res = win32functions.GetSystemMetrics(win32defines.SM_CXSCREEN)\n y_res = win32functions.GetSystemMetrics(win32defines.SM_CYSCREEN)\n x_coord = int(ceil(coords[0] * 65535 / (x_res - 1.))) # in Python 2.7 return float val\n y_coord = int(ceil(coords[1] * 65535 / (y_res - 1.))) # in Python 2.7 return float val\n 
win32api.mouse_event(dw_flags, x_coord, y_coord, dw_data)\n else:\n for event in events:\n if event == win32defines.MOUSEEVENTF_MOVE:\n x_res = win32functions.GetSystemMetrics(win32defines.SM_CXSCREEN)\n y_res = win32functions.GetSystemMetrics(win32defines.SM_CYSCREEN)\n x_coord = int(ceil(coords[0] * 65535 / (x_res - 1.))) # in Python 2.7 return float val\n y_coord = int(ceil(coords[1] * 65535 / (y_res - 1.))) # in Python 2.7 return float val\n win32api.mouse_event(\n win32defines.MOUSEEVENTF_MOVE | win32defines.MOUSEEVENTF_ABSOLUTE,\n x_coord, y_coord, dw_data)\n else:\n win32api.mouse_event(\n event | win32defines.MOUSEEVENTF_ABSOLUTE,\n coords[0], coords[1], dw_data)\n\n if not fast_move:\n time.sleep(Timings.after_clickinput_wait)\n\n if ('control' in keyboard_keys) and key_up:\n keyboard.VirtualKeyAction(keyboard.VK_CONTROL, down=False).run()\n if ('shift' in keyboard_keys) and key_up:\n keyboard.VirtualKeyAction(keyboard.VK_SHIFT, down=False).run()\n if ('alt' in keyboard_keys) and key_up:\n keyboard.VirtualKeyAction(keyboard.VK_MENU, down=False).run()", "def wait(self):\n time.sleep(0.010)", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def click(key, timeout=default_timeout):\n WILD = \"**\" # Indicates places where keys can have variable text.\n WILDNUM = \"##\" # Variable number\n\n # Map of functions and the keys they may be used to click on.\n map = { \"click_function_key\": ['KEY IN CODE', 'HELP', 'PAY'],\n \"click_welcome_key\": ['START'],\n \"click_help_key\": ['REQUEST ASSISTANCE', 'BACK', 'WEIGHTS AND MEASURES'],\n \"click_weights_measures_key\": ['BACK'],\n \"click_payment_key\": ['BACK', 'HELP'],\n \"click_loyalty_key\": ['ENTER ID'],\n \"click_message_box_key\": ['OK', 'YES', 'NO'],\n \"click_prompt_key\": ['OK', 'YES', 'NO'],\n \"click_speed_key\": ['GENERIC ITEM', WILD],\n \"click_keypad\": ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '00', 'BACK', 'CLEAR', 'CANCEL', 'ENTER']\n }\n\n # Figure out which functions might work for the desired key\n def build_func_list(key):\n import sys\n funcs_to_try = []\n for func in map:\n if key.upper() in map[func]:\n funcs_to_try.append(getattr(sys.modules[__name__], func))\n return funcs_to_try\n\n # Create list of functions to search with\n funcs_to_try = build_func_list(key)\n if len(funcs_to_try) == 0:\n # Requested key doesn't match any known menus, try menus that can have custom keys\n if key.isdigit():\n funcs_to_try = build_func_list(WILDNUM) # Menus that are likely to have buttons with varied numbers\n else:\n funcs_to_try = build_func_list(WILD) # Menus that can contain buttons with any text\n \n # Invoke the functions repeatedly until success or timeout\n start_time = time.time()\n while time.time() - start_time <= timeout:\n for func in funcs_to_try:\n if func(key, timeout=0, verify=False):\n return True\n else:\n logger.warning(\"Couldn't find %s within %d seconds.\" % (key, 
timeout))\n return False", "def wait_for_input(self):\n pass", "def click_download_button(self):\n self._basket.click_download_button()", "def wait_and_click(self, locator_type, locator):\n self.wait.until(EC.element_to_be_clickable((locator_type, locator)))\n return self.driver.find_element(by=locator_type, value=locator).click()", "def wait(delay=2):\n time.sleep(delay)", "def wait(self):\n pass", "def wait(self):\n pass", "def run_handler(self, handler):\n self.last_activity = time.time()\n const_name = handler.upper()\n try:\n const_value = getattr(cwiid, const_name)\n if self.wm.state['buttons'] == const_value:\n self.exec_script(handler)\n except AttributeError:\n return 0", "def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()", "def step_async(self, actions):", "def on_click(self) -> None:\n pass", "def do_wait(self):\n pass", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def wait_for_start(self):\n while True:\n ev = self.scene.waitfor('click')\n game_type = self.on_click(ev)\n if game_type:\n return game_type", "def wait(self):\n try:\n confirm_modal_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'btn-default'))\n WebDriverWait(self.web_element, 2).until(confirm_modal_dialog)\n except TimeoutException:\n confirm_ajs_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'ajs-cancel'))\n WebDriverWait(self.web_element, 2).until(confirm_ajs_dialog)", "def contextualhelpverificationhome(window,contextualhelpbutton):\n try:\n testcaseDescription = \"contextual help\"\n filename = testcaseDescription + \"fail\" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n atomacclick(contextualhelpbutton)\n appbuttons = getAppButtons(window)\n for i in range(1,5):\n time.sleep(3)\n screenshot(filename)\n atomacclick(appbuttons[26])\n time.sleep(3)\n atomacclick(appbuttons[26])\n except Exception as er:\n return False\n print \"Not able to click on contextualhelpverification\"", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def test_Analytics1(self):\n\n self.delayDisplay(\"We don't have a test\")", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. 
If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def wait(self):\n time.sleep(self.next())", "def test1(stopEvent: Event):\n auto.InitializeUIAutomationInCurrentThread()\n n = 0\n child = None\n auto.Logger.WriteLine('Use UIAutomation in another thread:', auto.ConsoleColor.Yellow)\n while True:\n if stopEvent.is_set():\n break\n if not child:\n n = 1\n child = auto.GetRootControl().GetFirstChildControl()\n auto.Logger.WriteLine(n, auto.ConsoleColor.Cyan)\n auto.LogControl(child)\n child = child.GetNextSiblingControl()\n n += 1\n stopEvent.wait(1)\n auto.UninitializeUIAutomationInCurrentThread()\n print('test1 exits')", "def testButtonCB(self, testId):\n button = self.test_buttons[testId]\n if self.result:\n self.showTestOutput(testId)\n return", "def wait_for_tag():\n time.sleep(1.1)", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def main_demo() -> None:\n with ui.row():\n ui.button('A', on_click=lambda: ui.notify('You clicked the button A.'))\n ui.button('B').on('click', lambda: ui.notify('You clicked the button B.'))\n with ui.row():\n ui.button('C').on('mousemove', lambda: ui.notify('You moved on button C.'))\n ui.button('D').on('mousemove', lambda: ui.notify('You moved on button D.'), throttle=0.5)\n with ui.row():\n ui.button('E').on('mousedown', lambda e: ui.notify(e))\n ui.button('F').on('mousedown', lambda e: ui.notify(e), ['ctrlKey', 'shiftKey'])", "def wait_and_click(self, locator_type, locator):\n self.wait.until(EC.element_to_be_clickable((locator_type, locator)))\n self.driver.find_element(by=locator_type, value=locator).click()", "def button_multiclick_fired(self, payload: str):\n json_object = util.deserialize_json(payload)\n logger.debug(f\"Process Multiclick action\")\n clicks = int(json_object['clicks'])\n arduino_name = json_object['name']\n arduino = self.arduinos.get(arduino_name, None)\n if arduino is None:\n # Unknown arduino\n logger.info(f\"Could not find arduino with name '{arduino_name}'.\")\n return\n button = arduino.button_pins[json_object['button_pin']]\n logger.debug(f\"Compare button click {button.clicks} with payload {clicks}\")\n if button.clicks == clicks:\n self.__action_executor.execute_actions(button.get_button_after_release_actions(button.clicks), button,\n arduino.name)\n button.clicks = 0\n button.stop_multi_click_timer()\n else:\n logger.debug(f\"New click has been received\")\n return", "def _jsclick(self, locator):\n\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n for should_retry in (True, False):\n try:\n # Setting the focus first seems to be required as of Spring'20\n # (read: without it, tests started failing in that release). 
I\n # suspect it's because there is a focusOut handler on form\n # fields which need to be triggered for data to be accepted.\n element = self.selenium.get_webelement(locator)\n self.selenium.driver.execute_script(\n \"arguments[0].focus(); arguments[0].click()\", element\n )\n return\n except StaleElementReferenceException:\n if should_retry:\n time.sleep(1)\n else:\n raise", "def wait_one_click(self, max_wait, min_wait, live_buttons,\n timestamp, relative_to, visible):\n relative_to, start_time, was_visible = self._init_wait_click(\n max_wait, min_wait, live_buttons, timestamp, relative_to, visible)\n\n clicked = []\n while (not len(clicked) and\n self.master_clock() - start_time < max_wait):\n clicked = self._retrieve_events(live_buttons)\n\n # handle non-clicks\n if len(clicked):\n clicked = self._correct_clicks(clicked, timestamp, relative_to)[0]\n elif timestamp:\n clicked = (None, None)\n else:\n clicked = None\n return clicked", "def batch_test_open():\n try:\n WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.CLASS_NAME, \"cdk-overlay-pane\")))\n ActionChains(browser).send_keys(Keys.ESCAPE).perform()\n except:\n print(\"No migration pop-up\")\n\n WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.LINK_TEXT, config.app_name)))\n browser.find_element_by_link_text(config.app_name).click()\n WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.CLASS_NAME, 'nav-section')))\n buttons = browser.find_elements_by_class_name('nav-section')\n buttons[1].click()\n WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//button[contains(text(), '\n '\"Batch testing\")]')))\n browser.find_element_by_xpath('//button[contains(text(), \"Batch testing\")]').click()", "def mainWebActions(self, **kwargs):\n # If the dictionary item value is the required opens the webpage\n if kwargs['button']=='docs':\n # Only 1 click at every 5 seconds\n self.docs_Button.setDown(True)\n QTimer.singleShot(5000, lambda: self.docs_Button.setDown(False))\n webbrowser.open('https://italorenan.gitbook.io/roc/')", "def state_wait_do(cfg, app, win, events):", "def after_all_sweeps(self):\r\n _debug('GUISignalGenerator: after_all_sweeps()')\r\n self.window.sleep(0.05)", "def Click(self):\n if self.function == None:\n return\n \n self.function()", "def card_success(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n self.driver.switch_to_window(handles[2])\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success'))) \n self.driver.find_element_by_class_name(\"success\").click()\n self.driver.switch_to_window(handles[0])", "def answer_waiting_call(self) -> None:", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def smart_wait(self,locator = None, wait_seconds=10, locator_type = None):\n try:\n loc = locator\n if locator_type == 'button':\n WebDriverWait(self.driver, wait_seconds).until(EC.element_to_be_clickable((By.XPATH, loc)))\n else:\n WebDriverWait(self.driver, wait_seconds).until(EC.presence_of_element_located((By.XPATH,loc)))\n except Exception as e:\n print(e + 'Exception')\n return False\n return True", "def 
doWaitVisibleClickElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n # locate the element\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector,\n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False\n return ret\n \n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n \n if elementId is None:\n self.error(\"element id is missing in response\")\n \n # checking if visible\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout, \"wait-until-value\": True}\n cmdId = self.displayedElement(elementId=elementId, more= more)\n rsp = self.isElementDisplayed(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret\n \n # finally click on it\n cmdId = self.clickElement(elementId=elementId)\n if self.isElementClicked(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret", "def click(self, element_tuple):\n current_state = self.change_monitor()\n self.log_info(f\"Browser.click: Clicking {element_tuple}\")\n self.CORE.find_element(*self.format_element(element_tuple)).click()\n self.change_monitor(previous_data=current_state)\n return", "def keil_download(button_download=None,md5='',build=False,nircmd=None):\t\r\n\tU,T,N,F=py.importUTNF() \r\n\tfrom qgb import Win\r\n\tbutton_download=button_download or U.get(keil_download.__name__+'Download')\r\n\tbutton_build=\t\t\t\t\t U.get(keil_download.__name__+'Build')\r\n\tif not button_download or not button_build:\r\n\t\tes=get_keil_es()\r\n\t\tbs=[e for e in es if py.getattr(e,'friendlyclassname',0)=='Button']\r\n\t\t# if build:return es,bs\r\n\t\t# bs=[]\r\n\t\t# for i in range(9):\r\n\t\t\t# print(U.stime(),'wait bs',len(bs))\r\n\t\t\t# U.sleep(0.5)\r\n\t\t# if not bs:return es\r\n\t\tbutton_download=[e for e in bs if e.texts()==['Download']][0]\r\n\t\tbutton_build\t=[e for e in bs if e.texts()==['Build']][0]\r\n\t\t\r\n\tU.set(keil_download.__name__+'Download',button_download)\r\n\tU.set(keil_download.__name__+'Build',button_build)\r\n\t\r\n\tif md5:\r\n\t\t# md5=md5.replace(py.chr(0x0a),T.eol)\r\n\t\tms=[i for i in md5.splitlines() if '.elf' in i]\r\n\t\tmd5=ms[0][:32]\r\n\t\t\r\n\t\tt=button_download.parent().parent().parent().texts()[0]\r\n\t\tsp=T.subLast(t,'','\\\\')\r\n\t\tname=T.subLast(t,'\\\\','.uvprojx')\r\n\t\tif sp and name:\r\n\t\t\tsp=f'{sp}/Objects/{name}.axf'\r\n\t\t\tif md5==U.md5(file=sp):\r\n\t\t\t\timport win32gui\r\n\t\t\t\th=win32gui.GetForegroundWindow()\r\n\t\t\t\tbutton_download.click()\r\n\t\t\t\tU.nircmd('win activate stitle tmux')\r\n\t\t\t\tU.nircmd('win max stitle tmux')\t\r\n\t\t\t\t# for i in range(3):\r\n\t\t\t\t\t# print(Win.GetForegroundWindow())\r\n\t\t\t\t\t#win32gui.SetForegroundWindow(h)\r\n\t\t\t\t\t# U.sleep(0.5)\r\n\t\t\t\t\r\n\t\t\t\treturn [U.StrRepr('='*122+T.eol*3),'Success keil_download 
!',md5,sp,\r\n\t\t\t\th,get_title(h),\r\n\t\t\t\tU.stime(),U.StrRepr(T.eol*2+'='*122)]\r\n\t\t\t\r\n\t\treturn U.StrRepr('#'*122+T.eol*3),'check failed !!!',md5,sp,U.md5(file=sp),U.StrRepr(T.eol*2+'#'*122)\r\n\t\t\r\n\tif build:\r\n\t\t# print(U.stime(),button_build)\r\n\t\tbutton_build.click()\r\n\t\tprint(U.stime(),button_build)\r\n\t\tU.set('keil.log',U.stime())\r\n\t\tlog=''\r\n\t\twhile ' Error(s)' not in log:\r\n\t\t\tlog=get_keil_log(-10)\r\n\t\t\tU.sleep(0.6)\r\n\t\tif '- 0 Error(s)' not in log:\r\n\t\t\tprint(U.stime(),log)\r\n\t\t\tlog=get_keil_log()\r\n\t\t\tU.set('keil.log',log)\r\n\t\t\treturn py.No(log)\r\n\t\t\t\r\n\tbutton_download.click()\r\n\t\r\n\tif nircmd:\r\n\t\tU.nircmd('win','activate',*nircmd)\r\n\t\tU.nircmd('win','activate',*nircmd)\r\n\t\t\r\n\treturn button_download", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")" ]
[ "0.6465646", "0.6465646", "0.6274144", "0.62248296", "0.6096584", "0.6029719", "0.5998305", "0.5964109", "0.5912692", "0.58695084", "0.5805828", "0.5800075", "0.5798295", "0.5771682", "0.5740042", "0.5727599", "0.5700866", "0.56949717", "0.56949717", "0.56891507", "0.5687063", "0.5686228", "0.56642467", "0.5661366", "0.5657441", "0.56561476", "0.56530184", "0.5621231", "0.56172866", "0.56149995", "0.56149995", "0.5592343", "0.55802435", "0.55767584", "0.5567309", "0.5548005", "0.55377096", "0.553505", "0.5514439", "0.5510296", "0.55088216", "0.54914623", "0.54714805", "0.545634", "0.5449632", "0.5442796", "0.5439934", "0.54339415", "0.54243493", "0.5412429", "0.5412429", "0.5412429", "0.5412429", "0.5397815", "0.5381982", "0.5377019", "0.5374801", "0.5373844", "0.5373297", "0.53680897", "0.5363679", "0.5363679", "0.53457284", "0.53392786", "0.532812", "0.5308266", "0.53076017", "0.53017753", "0.5298124", "0.52962315", "0.52945316", "0.5283283", "0.52785957", "0.5274592", "0.52716124", "0.5270657", "0.52605903", "0.5254671", "0.5238372", "0.5237838", "0.5228229", "0.5227859", "0.5211415", "0.5211138", "0.52047545", "0.51954526", "0.5194263", "0.519206", "0.5190639", "0.51903725", "0.5189108", "0.5187895", "0.51853967", "0.5184245", "0.51605225", "0.51466703", "0.5141084", "0.5138384", "0.51311713", "0.5123974" ]
0.66481423
0
Make a promise that will not be fulfilled. Should raise a `BrokenPromise` exception.
def make_broken_promise(self): return EmptyPromise( self.q(css='div#not_present').is_present, "Invalid div appeared", try_limit=3, try_interval=0.01 ).fulfill()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def promise_forced(promise):\n require_type(isa(promise,Promise),\n 'the parameter of promise_forced must be a Promise')\n return promise.exprs.env.find(Symbol('already-run?'))['already-run?']", "async def test_task_not_awaitable(arg):\n with pytest.raises(OSError):\n async with Scope() as n:\n n.spawn(arg)", "def _wait_for(self, check_func, desc, result=False, timeout=200):\r\n if result:\r\n return Promise(check_func, desc, timeout=timeout).fulfill()\r\n else:\r\n return EmptyPromise(check_func, desc, timeout=timeout).fulfill()", "def prove_NO() -> Proof:\n # Optional Task 6.9c", "async def rejected(error: Exception) -> Any:\n raise error", "def reject_waiting_call(self) -> None:", "def maybe_future(x):\n if is_future(x):\n return x\n else:\n fut = Future()\n fut.set_result(x)\n return fut", "def promise(self):\n return Promise(self)", "def is_promise_type(self):\n raise exceptions.NotImplementedError()", "def test_deferred_failure_result(self):\n passthrough = self.make_wrapped_function()\n result = passthrough(fail(ZeroDivisionError()))\n self.assertIsInstance(result, EventualResult)\n self.assertRaises(ZeroDivisionError, result.wait, 0.1)", "def never() -> ObservableBase:\n from ..operators.observable.never import never\n return never()", "def _on_future_cancelled(self, promise):\n promise.setCanceled()", "def test_dies_if_no_job(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n work = Async(\"dir\", kwargs={'something': None})\n work._options.pop('job')\n assert 'job' not in work._options\n\n with _ExecutionContext(work):\n self.assertRaises(Exception, run_job)", "def test_reactor_thread_disallowed(self):\n self.patch(threadable, \"isInIOThread\", lambda: True)\n d = Deferred()\n dr = EventualResult(d, None)\n self.assertRaises(RuntimeError, dr.wait, 0)", "def test_original_failure_no_result(self):\n dr = EventualResult(Deferred(), None)\n self.assertIdentical(dr.original_failure(), None)", "def test_success_result_of_no_result(self):\n d = Deferred()\n err = self.assertRaises(FailTest, success_result_of, d)\n self.assertEqual(\n err.args[0], \"No result available for deferred: %r\" % (d,))", "def wait_until_not_raised(condition, delay, max_attempts):\n def wrapped_condition():\n try:\n result = condition()\n except:\n return False, None\n\n return True, result\n\n attempt = 0\n while attempt < (max_attempts-1):\n attempt += 1\n success, result = wrapped_condition()\n if success:\n return result\n\n time.sleep(delay)\n\n # last attempt, let the exception raise\n return condition()", "async def test_nursery_cant_be_reused():\n nursery = Nursery()\n async with nursery:\n pass\n\n with pytest.raises(NurseryClosed):\n async with nursery:\n pass\n\n with pytest.raises(NurseryClosed):\n nursery.start_soon(asyncio.sleep(0))", "def as_deferred(f: Awaitable[Any]) -> Deferred:\n return Deferred.fromFuture(asyncio.ensure_future(f))", "def make_future(result=None):\n future = Future()\n future.set_result(result)\n return future", "def test_whenProposedFailure(self):\n cph = ConnectionPoolHelper()\n cph.setUp(self)\n cph.pauseHolders()\n firstConnection = cph.factory.willConnectTo()\n enqTxn = cph.createTransaction()\n # Execute some SQL on the connection before enqueueing the work-item so\n # that we don't get the initial-statement.\n enqTxn.execSQL(\"some sql\")\n lq = LocalQueuer(cph.createTransaction)\n cph.flushHolders()\n cph.pauseHolders()\n wp = lq.enqueueWork(enqTxn, DummyWorkItem, a=3, 
b=4)\n firstConnection.executeWillFail(lambda: RuntimeError(\"foo\"))\n d = wp.whenProposed()\n r = cph.resultOf(d)\n self.assertEquals(r, [])\n cph.flushHolders()\n self.assertEquals(len(r), 1)\n self.assertIsInstance(r[0], Failure)", "async def no_sleep_coro():\n pass", "def instantiateShootErrback():\n d = defer.Deferred()\n try:\n 1/0\n except:\n d.errback()\n d.addErrback(lambda x: None)", "async def test_wait_for(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death)\n\n def do_nothing(_: auraxium.event.Event) -> None:\n pass\n\n trigger.action = do_nothing\n\n await self.client.wait_for(trigger, timeout=-1.0)\n\n with self.assertRaises(TimeoutError):\n await self.client.wait_for(trigger, timeout=0.00001)", "def maybe_future(obj):\n if inspect.isawaitable(obj):\n # already awaitable, use ensure_future\n return asyncio.ensure_future(obj)\n elif isinstance(obj, concurrent.futures.Future):\n return asyncio.wrap_future(obj)\n else:\n # could also check for tornado.concurrent.Future\n # but with tornado >= 5.1 tornado.Future is asyncio.Future\n f = asyncio.Future()\n f.set_result(obj)\n return f", "def NeverNeededExpectation(self, expectation: BaseExpectation) -> bool:\n return self.did_fully_pass", "def test_exception_raised_no_dlq():\n\n proc: missive.Processor[missive.RawMessage] = missive.Processor()\n\n @proc.handle_for(always)\n def crash(message, ctx):\n raise RuntimeError(\"bad bytes!\")\n\n with proc.test_client() as test_client:\n blank_message = missive.RawMessage(b\"\")\n with pytest.raises(RuntimeError):\n test_client.send(blank_message)", "def cannot_resolve ( self, *deps, **kw ):\n return self._do_resolve_weak_greedy ( deps, kw, greedy=True ) is None", "def expect_non_trial(self, expected_result):\n return self.expect(False, expected_result)", "def test_raises_on_missing_job(self):\n from furious.async import Async\n from furious.errors import NotInContextError\n from furious.processors import run_job\n\n work = Async(\"nothere\")\n work._options.pop('job')\n assert 'job' not in work._options\n\n self.assertRaises(NotInContextError, run_job)", "async def test_missing(cli):\n response = await cli.get(f'/result/nope')\n assert response.status == 404", "def do_nothing(sig):\n return sig", "def unsatisfy(self):\n self.stub.Unsatisfy(\n depend_pb2.DependUnsatisfyRequest(depend=self.data), timeout=Cuebot.Timeout)", "def _fail_future_requests(filename):\n def _helper(ps, _):\n ps.set_response(\n filename,\n \"HTTP/1.1 410 Gone\\r\\n\"\n \"\\r\\n\"\n \"This webserver has been instructed to fail further requests for this \"\n \"resource\\n\",\n _nohook)\n return _helper", "def download_flaky(probability=1.0):\n def download(*args, **kwargs):\n \"\"\" Raise RuntimeError with probability `probability`. 
\"\"\"\n if random.random() < probability:\n raise RuntimeError()\n else:\n download_first_request(*args, **kwargs)\n return download", "def test_deferred_failure_result(self):\n passthrough = self.make_wrapped_function()\n self.assertRaises(\n ZeroDivisionError, passthrough, fail(ZeroDivisionError()))", "def return_fake_future(f):\n def wrap(*args, **kwargs):\n future = Future()\n future.set_result(f(*args, **kwargs))\n return future\n return wrap", "def reject(self):\n pass", "def testSubsequentCallsDontRun(self):\n\n runs = []\n\n @utils.make_async()\n def async_fn():\n runs.append(None)\n raise ValueError()\n\n # First call will trigger an error in the background thread.\n async_fn()\n\n for _ in range(2):\n with self.assertRaises(ValueError):\n # Background thread error will be raised in the main thread on\n # subsequent calls and _bad_function will not be run.\n async_fn()\n\n self.assertListEqual(runs, [None])", "def reject(self, error: Any) -> None:\n self.expr.call_hash = self.call_hash\n self.result_promise.do_reject(error)\n self.clear()", "def function_with_wrong_return() -> None:\n return 42", "def mock_software_secure_post_unavailable(url, headers=None, data=None, **kwargs):\r\n raise requests.exceptions.ConnectionError", "def test_failure_result(self):\n dr = EventualResult(fail(RuntimeError()), None)\n self.assertRaises(RuntimeError, dr.wait, 0.1)", "def test_noWaitingDuringImport(self):\n if sys.version_info[0] > 2:\n from unittest import SkipTest\n raise SkipTest(\n \"This test is too fragile (and insufficient) on \"\n \"Python 3 - see \"\n \"https://github.com/itamarst/crochet/issues/43\")\n directory = tempfile.mktemp()\n os.mkdir(directory)\n sys.path.append(directory)\n self.addCleanup(sys.path.remove, directory)\n with open(os.path.join(directory, \"shouldbeunimportable.py\"),\n \"w\") as f:\n f.write(\n \"\"\"\\\nfrom crochet import EventualResult\nfrom twisted.internet.defer import Deferred\n\nEventualResult(Deferred(), None).wait(1.0)\n\"\"\")\n self.assertRaises(RuntimeError, __import__, \"shouldbeunimportable\")", "def raise_error_on_submission_fetch(mocker):\n return mocker.patch(\n \"praw.models.reddit.submission.Submission._fetch\",\n side_effect=Exception(\"_fetch() should not be called\"),\n )", "def test_create_ticket_invalid_pgturl(self):\n with patch('requests.get') as mock:\n mock.side_effect = requests.exceptions.ConnectionError\n pgt = ProxyGrantingTicket.objects.create_ticket('https://www.example.com', 'https://www.example.com/',\n user=self.user, granted_by_pt=self.pt)\n self.assertIsNone(pgt)", "async def test_child_crash_basic():\n error = ValueError('whoops')\n\n async def child():\n raise error\n\n try:\n async with Nursery() as nursery:\n nursery.start_soon(child())\n except ValueError as exc:\n assert exc is error", "def make_waitable(self):\n if not self.is_waitable():\n self._condition = threading.Condition()", "async def test_child_crash_wakes_parent():\n async def crasher():\n raise ValueError\n\n with pytest.raises(ValueError):\n async with Nursery() as nursery:\n nursery.start_soon(crasher())\n await asyncio.sleep(1000 * 1000)", "def promise_value(promise):\n require_type(isa(promise,Promise),\n 'the parameter of promise_forced must be a Promise')\n if promise_forced(promise):\n return promise.exprs.env.find(Symbol('result'))['result']\n raise RuntimeError('the promise has not been forced')", "def test_original_failure_not_error(self):\n dr = EventualResult(succeed(3), None)\n self.assertIdentical(dr.original_failure(), None)", "def 
prove_NA1() -> Proof:\n # Optional Task 6.9a", "def testSendRequestWithoutSignatureFails(pool):\n\n async def go(ctx):\n client1, wallet = genTestClient(ctx.nodeset, tmpdir=ctx.tmpdir)\n\n # remove the client's ability to sign\n assert wallet.defaultId\n\n ctx.looper.add(client1)\n await client1.ensureConnectedToNodes()\n\n request = wallet.signOp(op=randomOperation())\n request.signature = None\n request = client1.submitReqs(request)[0]\n with pytest.raises(AssertionError):\n for node in ctx.nodeset:\n await eventually(\n checkLastClientReqForNode, node, request,\n retryWait=1, timeout=10)\n\n for n in ctx.nodeset:\n params = n.spylog.getLastParams(Node.handleInvalidClientMsg)\n ex = params['ex']\n _, frm = params['wrappedMsg']\n assert isinstance(ex, EmptySignature)\n assert frm == client1.stackName\n\n params = n.spylog.getLastParams(Node.discard)\n reason = params[\"reason\"]\n (msg, frm) = params[\"msg\"]\n assert msg == request.__dict__\n assert frm == client1.stackName\n assert \"EmptySignature\" in reason\n\n pool.run(go)", "def test_reject_proposal_demand(self):\n pass", "def from_awaitable(a: Awaitable[_A]) -> Deferred[_A]:\n\n async def adapt() -> _A:\n return await a\n\n return Deferred.fromCoroutine(adapt())", "def assert_no_api_call():\n with mock.patch.object(\n github_api._PathMetadata,\n '_query_github',\n side_effect=AssertionError('Forbidden API call'),\n ):\n yield", "def cancelok(foo):\n @functools.wraps(foo)\n async def wrapper(*args, **kwargs):\n try:\n return await foo(*args, **kwargs)\n except asyncio.CancelledError:\n return\n return wrapper", "async def boom_async(value) -> None:\n raise ValueError(\"Boom\")", "def test_crashes(self, exit_on_deadlock, func, args, expected_err):\n executor = get_reusable_executor(max_workers=2)\n res = executor.submit(func, *args)\n with pytest.raises(expected_err):\n res.result()", "def __call__(self):\r\n if self.__failure is not None:\r\n return fail(self.__failure)\r\n\r\n if self.__pending is not None:\r\n d = Deferred()\r\n self.__pending.append(d)\r\n return d\r\n\r\n return succeed(self.__obj)", "def run_blocking(promise: Coroutine[Any, Any, _T]) -> _T:\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(promise)", "def auto_reject(self, reason):\n self.auto_rejection_reason = reason\n self.reject(responder=None)", "async def raise_flag(\n flag: Optional[Flag],\n) -> None:\n if flag is None:\n return None\n elif isinstance(flag, asyncio.Future):\n flag.set_result(None)\n elif isinstance(flag, asyncio.Event):\n flag.set()\n elif isinstance(flag, concurrent.futures.Future):\n flag.set_result(None)\n elif isinstance(flag, threading.Event):\n flag.set()\n else:\n raise TypeError(f\"Unsupported type of a flag: {flag!r}\")", "def raiseNonRecoverableError(msg):\n error(msg)\n raise NonRecoverableError(msg)", "def _fail(raise_):\n if raise_:\n raise _UnexpectedForm()\n return None", "def functionThatShouldNotTimeout():\n return None", "def create_dispersy_missing_proof(self, member, global_time):\n assert isinstance(member, Member)\n assert isinstance(global_time, (int, long))\n assert global_time > 0\n meta = self._community.get_meta_message(u\"dispersy-missing-proof\")\n return meta.impl(distribution=(global_time,), payload=(member, global_time))", "async def test_shielded_child_continues_running():\n work_done = False\n\n async def worker():\n nonlocal work_done\n await asyncio.sleep(0)\n work_done = True\n\n try:\n async with Nursery() as nursery:\n nursery.start_soon(asyncio.shield(worker()))\n raise 
RuntimeError\n except RuntimeError:\n pass\n\n assert work_done", "def test_solution_of_inner_deadlock_of_component_with_must_be_complete_false(self):\n a = DummyProducingInputIncompleteBuild(scope=\"A\")\n deadlock_component = a.sub_components[\"dummy-calling-one-api-from-within-other\"]\n # Manually set the must_be_complete flag to false.\n deadlock_component.api_methods[\"run_inner\"].must_be_complete = False\n test = ComponentTest(component=a, input_spaces=dict(input_=float))\n print(\"Not seeing RLGraphBuildError. Test ok.\")", "def test_reject_foo(self):\n self.spawn(\"./binary\").stdin(\"foo\").reject()", "def test_dcgm_policy_negative_register_standalone(handle):\n policy = pydcgm.DcgmGroupPolicy(pydcgm.DcgmHandle(handle), 9999, None)\n empty_c_callback = create_c_callback() # must hold ref so func is not GC'ed before c api uses it\n with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):\n policy.Register(dcgm_structs.DCGM_POLICY_COND_DBE, empty_c_callback)", "async def test_subprocess_forbid(event_loop):\n proc = await asyncio.subprocess.create_subprocess_exec(\n sys.executable, '--version', stdout=asyncio.subprocess.PIPE,\n loop=event_loop)\n await proc.communicate()", "def test_reactor_thread_disallowed(self):\n self.patch(threadable, \"isInIOThread\", lambda: True)\n f = self.make_wrapped_function()\n self.assertRaises(RuntimeError, f, None)", "def create_promise_list(self, *promises):\n return PromiseList(promises, self).promise", "def test_reject_negative(self):\n self.spawn(\"./binary\").stdin(\"-1\").reject()", "def test_resolve_no_token_passed(client):\n g.test_authorized_for = []\n res = client.get(\"/v0/resolve?fp=splunk_kjsdkjfskdfhskjdf\")\n assert res.status == \"500 INTERNAL SERVER ERROR\"", "def maybe_awaitable(value: Union[Awaitable[R], R]) -> Awaitable[R]:\n if inspect.isawaitable(value):\n assert isinstance(value, Awaitable)\n return value\n\n return DoneAwaitable(value)", "def test_fail(self) -> defer.Deferred[None]:\n return deferLater(reactor, 0, self.fail, \"I fail later\") # type: ignore[arg-type]", "def with_manual_check_never(self):\n self.__manual_check = constants.NEVER\n return self", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "def require(assertion):\n if not assertion:\n raise PermissionDenied", "async def test_child_crash_propagation():\n looper_cancelled = False\n\n async def looper():\n nonlocal looper_cancelled\n try:\n while True:\n await asyncio.sleep(0)\n except asyncio.CancelledError:\n looper_cancelled = True\n\n error = ValueError('crashed')\n\n async def crasher():\n raise error\n\n with pytest.raises(ValueError) as excinfo:\n async with Nursery() as nursery:\n nursery.start_soon(looper())\n nursery.start_soon(crasher())\n\n assert looper_cancelled\n assert excinfo.value is error", "def AlwaysNeededExpectation(self, expectation: BaseExpectation) -> bool:\n return self.did_never_pass", "def test_wait_for_predicate_instant_false(self):\n predicate_mock = mock.MagicMock(side_effect=[False])\n # 10 retry limit to avoid a near infinite loop on an error.\n train_utils.wait_for_predicate(predicate_mock, num_retries=10)\n self.assertEqual(predicate_mock.call_count, 1)", "def AddNoAsyncFlag(parser):\n help_text = ('Waits for the operation in progress to complete before '\n 'returning.')\n parser.add_argument('--no-async', action='store_true', help=help_text)", "def resolve(self, msg):\n if msg['id'] in self._promises:\n try:\n if msg['error'] is not None:\n 
self._promises[msg['id']].reject(Exception(msg['error']))\n else:\n self._promises[msg['id']].fulfill(msg['result'])\n\n except Exception as e:\n raise e\n else:\n self.log.warn(\"Could not find promise with id %s\" % msg['id'])", "def check_no_alert(step):\r\n\r\n try:\r\n alert = Alert(world.browser)\r\n raise AssertionError(\"Should not see an alert. Alert '%s' shown.\" %\r\n alert.text)\r\n except NoAlertPresentException:\r\n pass", "def return_none() -> None:\n pass", "async def _do_if_possible(self, coroutine: Awaitable[None]) -> None:\n try:\n await coroutine\n except IncorrectStateException:\n pass", "async def wait_for(self, active: \"Invocation\") -> Optional[BaseException]:\n self._become_current()\n\n Logger.debug(f\"Paused by waiting for: {active.log}\")\n\n if active.condition is None:\n active.condition = asyncio.Condition()\n\n await self.done(active.condition.acquire())\n await self.done(active.condition.wait())\n active.condition.release()\n\n Logger.debug(f\"Resumed by completion of: {active.log}\")\n\n return active.exception", "def testErrorInBackgroundThread(self):\n\n @utils.make_async()\n def async_fn():\n raise ValueError()\n\n future = async_fn() # pylint: disable=assignment-from-no-return\n self.assertIsNotNone(future.exception())", "def test_unavailable(self):\n feature_guard = _make_requires(False, \"Error text\")\n\n @feature_guard\n def inner(): # pragma: nocover\n pytest.fail(\"Should not be called\")\n\n with pytest.raises(NotImplementedError) as e:\n inner()\n\n assert \"Error text\" in str(e.value)", "def test_request_invalid(self, nosleep, method):\n # Dummies for K8s API URL and `requests` session.\n url = 'http://localhost:12345/'\n client = k8s.requests.Session()\n\n urls = [\n \"localhost\", # missing schema like \"http://\"\n \"httpinvalid://localhost\",\n \"http://localhost-does-not-exist\",\n ]\n\n # Test function must not return a response but indicate an error.\n for url in urls:\n ret = k8s.request(client, method, url, None, None)\n assert ret == ({}, True)", "def test_request_fetch_bogus_url():\n with pytest.raises(SystemExit):\n request.fetch(\"lsdfjlsdjf\")", "def no_atom():\n\n class NoOpValAtom(Atom):\n v = Value()\n v.set_validate_mode(Validate.NoOp, None)\n\n return NoOpValAtom()", "async def sync() -> Optional[BaseException]:\n current = Invocation.current\n return await current.done(current.sync())", "def remote_assert_empty(path):\n path = normpath(path)\n try:\n remote = get_remote(path)\n except ValueError: # Nothing exists at path, nothing to worry about.\n return\n raise ValueError(\"Something exists at %s\" % remote.uri)", "def dead_lock(self):\n return None", "def test_reject_empty(self):\n self.spawn(\"./binary\").stdin(\"\").reject()", "def no_payment_required(self):" ]
[ "0.5204269", "0.5157995", "0.5118575", "0.50748104", "0.5045879", "0.50228643", "0.48635247", "0.48367554", "0.48231182", "0.48055142", "0.48020154", "0.4710334", "0.4705783", "0.47007585", "0.4683378", "0.4586624", "0.45848158", "0.45210996", "0.45157987", "0.449673", "0.4495356", "0.4489491", "0.44793853", "0.44359413", "0.44212636", "0.44171405", "0.44124514", "0.4410409", "0.44030467", "0.43910775", "0.43839952", "0.43758303", "0.43704858", "0.43617865", "0.43598762", "0.43484575", "0.43397084", "0.43325377", "0.43282965", "0.4323803", "0.42969766", "0.4295844", "0.42941856", "0.42931053", "0.42905587", "0.4285773", "0.42712125", "0.4260095", "0.42484072", "0.42479223", "0.42476568", "0.4247299", "0.42369124", "0.41940907", "0.41777027", "0.41752875", "0.41744566", "0.41741037", "0.41738945", "0.41702285", "0.41653025", "0.41641656", "0.4162172", "0.41619408", "0.41562048", "0.41536018", "0.41523165", "0.4146979", "0.4144831", "0.413221", "0.41297713", "0.41292423", "0.41286817", "0.41276744", "0.41249597", "0.41193095", "0.41157213", "0.41111225", "0.41055128", "0.41039604", "0.41039604", "0.41031268", "0.41022986", "0.41008753", "0.40958783", "0.40955192", "0.4090414", "0.40873674", "0.40873057", "0.40860593", "0.40854245", "0.4081441", "0.40799278", "0.4077647", "0.40720564", "0.4071054", "0.40645915", "0.40614918", "0.40490866", "0.4047584" ]
0.7172702
0
Load the page named `page_name` after waiting for `delay_sec`.
def load_next(self, page, delay_sec):
        time.sleep(delay_sec)
        page.visit()
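A minimal standalone sketch of the wait-then-visit pattern in the document above; the `DummyPage` class, its `visit` method, and the 2-second delay are illustrative assumptions, not part of the dataset row.

import time

class DummyPage:
    # Stand-in for a bok_choy-style page object exposing a `visit` method.
    def visit(self):
        print("page visited")

def load_next(page, delay_sec):
    # Wait for `delay_sec` seconds, then load the given page object.
    time.sleep(delay_sec)
    page.visit()

load_next(DummyPage(), delay_sec=2)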
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_page_content(self, url, delay):\r\n\r\n # if browser cannot connect to the server, repeat it infinitely.\r\n while True:\r\n try:\r\n # load the page\r\n self.sel_driver.get(url)\r\n\r\n # if the page is loaded, wait for delay seconds until loading would finish.\r\n # this delay is also to avoid being blocked by upwork due to so frequent access\r\n time.sleep(delay)\r\n\r\n # read and parse the page contents\r\n soup = BeautifulSoup(self.sel_driver.page_source, 'html.parser')\r\n\r\n # page loading succeeded. escape from the endless iteration\r\n break\r\n except (WebDriverException, TimeoutException):\r\n # error occurred, do it again\r\n print(\"(ERROR) Driver could't be load: \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n self.relaunch(60)\r\n\r\n # check if the page is ACCESS DENIED\r\n # get the title of the page\r\n elements = soup.find_all(\"title\")\r\n if len(elements) == 0:\r\n return soup # if it has no title, it's may be a normal page\r\n\r\n # if the title is UPWORK ACCESS DENIED, I deal with it\r\n title = elements[0].text\r\n if 'access denied' in title.lower():\r\n print(\"(ERROR) UPWORK DENIED at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n\r\n self.relaunch(200) # relaunch after about 3 minutes\r\n\r\n return self.get_page_content(url, delay)\r\n\r\n # if the title is Upwork - Maintenance, let it wait\r\n if title == 'Upwork - Maintenance':\r\n print(\"(ERROR) UPWORK is under the Maintenance - \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n time.sleep(random.randint(200, 400)) # We don't need relaunch browser.\r\n return self.get_page_content(url, delay)\r\n\r\n return soup", "def wait(delay=2):\n time.sleep(delay)", "def _httpGetDelay(self, url, waitSecs, mustGet=False, noFlash=False, useSelCookies=False, referer=None):\n page = None\n if self.useSelenium:\n page = httpGetSelenium(url, waitSecs, mustGet=mustGet)\n time.sleep(5)\n else:\n cookies = None\n if useSelCookies:\n logging.debug('Importing cookies from selenium')\n all_cookies = browser.get_cookies()\n cookies = {}\n for s_cookie in all_cookies:\n cookies[s_cookie['name']] = s_cookie['value']\n\n page = httpGetDelay(url, waitSecs, mustGet=mustGet, blockFlash=noFlash, cookies=cookies, referer=referer)\n return page", "def get(self, url:str, time=1):\n page = self.driver.get(url)\n sleep(time)\n return page", "def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)", "def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))", "def wait_page_loaded(self, timeout=10):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as ec\n\n old_page = self.selenium.find_element(By.TAG_NAME, \"html\")\n yield\n # Wait for the next page to be loaded\n self.wait_until(ec.staleness_of(old_page), timeout=timeout)\n self.wait_page_ready(timeout=timeout)", "def fetch(url,delay=(1,3)):\n time.sleep(random.randint(delay[0],delay[1])) # wait random seconds\n try:\n response = requests.get(url)\n except ValueError as e:\n print(str(e))\n return '', BeautifulSoup('', \"html.parser\")\n html = response.text\n soup = BeautifulSoup(html, \"html.parser\")\n return (html,soup)", "def delay():\r\n time.sleep(2)", "def 
wait_for_page_load(self):\n pass", "def wait_for(old_html, timeout=60):\n\tstart_time = time.time() \n\twhile time.time() < start_time + timeout: \n\t\tif check_new_page_loaded(old_html): \n\t\t\treturn time.time() - start_time \n\t\telse: \n\t\t\ttime.sleep(0.1) \n\traise Exception('WebPage Load Timeout')", "def nav(self, url):\r\n\r\n self.driver.get(url)\r\n time.sleep(3) # wait for page load\r", "def delay_response(delay):\n delay = min(float(delay), 10)\n\n time.sleep(delay)\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\")\n )", "def wait_for_page_loaded(self, time_for_stop=None):\n return self.wait_for(lambda: self._loaded,\n 'Unable to load requested page', time_for_stop=time_for_stop)", "def wait(delaySec, host='default'):\n global lastCallSec\n delaySec = float(delaySec)\n nowSec = time.time()\n sinceLastCallSec = nowSec - lastCallSec.get(host, nowSec)\n if sinceLastCallSec > 0.1 and sinceLastCallSec < delaySec:\n waitSec = max(0.0, delaySec - sinceLastCallSec)\n logging.info('Waiting for %f seconds before downloading from host %s' % (waitSec, host))\n time.sleep(waitSec)\n lastCallSec[host] = time.time()", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def change_to_with_delay(_path: str):\n time.sleep(1)", "def wait_for_page_load(self, timeout=30):\n old_page = self.driver.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.driver, timeout).until(\n staleness_of(old_page)\n )", "def wait(self, sleep_time):\n time.sleep(sleep_time)", "def loading(delay):\r\n\r\n for i in range(3):\r\n\r\n print \".\",\r\n sys.stdout.flush()\r\n time.sleep(delay)\r\n\r\n print(\"\")", "def load_until(self, url, until):\n self.browser.get(url)\n WebDriverWait(self.browser, NEXT_WAIT_TIMEOUT) \\\n .until(EC.element_to_be_clickable((By.XPATH, until)))\n return scrapy.Selector(text=self.browser.page_source)", "def wait_for_load(driver):\n html = driver.page_source\n time.sleep(0.5)\n while html != driver.page_source:\n html = driver.page_source\n time.sleep(0.5)", "def load(self, filepath=''):\n sleep(20)\n pass", "def sleep(sleep_time=0.250):\n time.sleep(sleep_time)", "def loader(driver, stringId, page, directory):\n\n _tld = '.com/'\n if page == 'Bild':\n _tld = '.de/'\n if \".\" in page:\n _tld = \"\"\n page = page.title()\n #driver.maximize_window()\n try:\n driver.get('https://' + page + _tld)\n print (\"Page is ready!\")\n _cookies = pickle.load(open((directory +stringId+ page+ \"Cookies.pkl\"),\"rb\"))\n for _cookie in _cookies:\n driver.add_cookie(_cookie)\n except TimeoutException:\n print (\"Loading took too much time!\")\n\n try:\n driver.get('https://' + page + _tld)\n print (\"Page is ready!\")\n except TimeoutException:\n print (\"Loading took too much time!\")", "def sleep_based_on_name_length(self, name):\n length = int(self.length_of_name(name))\n time.sleep(length + 0.001)\n return True", "def wait_and_refresh_static_page_until_text(self, search_text, wait_time, loc_frame, loc_text):\n self.selenium.driver.refresh()\n self.selenium.select_frame(loc_frame)\n text_portion = self.selenium.get_text(loc_text)\n while text_portion != search_text:\n self.selenium.driver.refresh()\n self.selenium.select_frame(loc_frame)\n text_portion = self.selenium.get_text(loc_text)", "async def sleep(cls, delay: float) -> None:", "def download_page(name=None, url=None):\n if name and url:\n timestamp = construct_date()\n filename = name + '_' + timestamp + '.html'\n os.system('wget ' + url + ' -O ' + 
os.path.join('..', 'html', filename))\n with open(os.path.join('..', 'html', filename), 'rb') as f:\n page = f.read()\n print('done with page {}'.format(url))\n return page", "def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass", "def request_page(self, url, action=None):\n if url.startswith(self.url):\n self.driver.get(url)\n else:\n self.driver.get(self.url + url)\n self.default_wait.until(EC.invisibility_of_element_located((By.XPATH, \"//div[@class='loading-bar']\")))\n if action:\n action(self.driver)\n return self.driver.page_source", "def wait_for_problem(display_name):\r\n # Wait for the problem to reload\r\n world.wait_for_ajax_complete()\r\n\r\n wait_func = lambda _: world.css_has_text(\r\n 'h2.problem-header', display_name, strip=True\r\n )\r\n world.wait_for(wait_func)", "def wati_until_page_change(driver, url):\n while driver.current_url == url:\n time.sleep(10)", "def delay(self, length):\n self.log_info(f\"Browser.delay: Sleeping for {length} seconds\")\n return sleep(length)", "def __call__(self):\n self.page1() # GET supercars.do (requests 101-111)\n\n grinder.sleep(2117)\n self.page2() # GET cars.do (requests 201-202)\n\n grinder.sleep(1867)\n self.page3() # GET car.do (request 301)\n\n grinder.sleep(4351)\n self.page4() # GET enquire.do (requests 401-402)\n\n grinder.sleep(16341)\n self.page5() # POST enquire.do (request 501)\n\n grinder.sleep(1309)\n self.page6() # GET supercars.do (request 601)\n\n grinder.sleep(669)\n self.page7() # GET cars.do (requests 701-702)\n\n grinder.sleep(1260)\n self.page8() # GET car.do (request 801)\n\n grinder.sleep(837)\n self.page9() # GET car.do (request 901)\n\n grinder.sleep(1108)\n self.page10() # GET search.do (request 1001)\n\n grinder.sleep(3146)\n self.page11() # POST search.do (requests 1101-1102)\n\n grinder.sleep(2822)\n self.page12() # POST search.do (request 1201)\n\n grinder.sleep(1333)\n self.page13() # GET sell.do (request 1301)\n\n grinder.sleep(17417)\n self.page14() # POST sell.do (request 1401)\n\n grinder.sleep(6680)\n self.page15() # GET insurance.do (request 1501)\n\n grinder.sleep(600)\n self.page16() # GET about.do (requests 1601-1602)\n\n grinder.sleep(584)\n self.page17() # GET supercars.do (request 1701)\n\n grinder.sleep(1049)\n self.page18() # GET cars.do (requests 1801-1802)\n\n grinder.sleep(2901)\n self.page19() # GET car.do (request 1901)\n\n grinder.sleep(1441)\n self.page20() # GET car.do (request 2001)\n\n grinder.sleep(791)\n self.page21() # GET supercars.do (request 2101)\n\n grinder.sleep(1365)\n self.page22() # GET cars.do (request 2201)\n\n grinder.sleep(1067)\n self.page23() # GET supercars.do (request 2301)\n\n grinder.sleep(1284)\n self.page24() # GET cars.do (request 2401)\n\n grinder.sleep(879)\n self.page25() # GET supercars.do (request 2501)\n\n grinder.sleep(1066)\n self.page26() # GET cars.do (request 2601)\n\n grinder.sleep(974)\n self.page27() # GET supercars.do (request 2701)", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def _delay(self, delay=None):\n return self.screen.delay(delay)", "def get_person_delayed(self, url: str,) -> dict:\n time.sleep(random.randint(self._min_range, self._max_range))\n return self.get_person(url)", "def wait(self, seconds):\n self.driver.implicitly_wait(seconds)", "def load(self, url):\n # Download the raw dom to our local build folder so we can load it quickly.\n self._save_raw_dom_to_local(url)\n # Randomized content check:\n # Load it multiple times and see if any content has changed.\n # This is not foolproof. 
Sometimes we load a page three times and won't find\n # all the randomized content, or any at all.\n if self._driver is None:\n self._create_driver(self._config)\n load_count = 0\n template = HtmlTemplate()\n longest_load_time = 0\n while load_count < self._config.PAGE_CHANGE_NUM_LOADS:\n load_count += 1\n logger.info(f\"Page load #{load_count}\")\n # Load it back into the browser so the javascript will run.\n if not self._load_raw_dom_from_local():\n return False # We can't proceed if we can't load the page.\n self.reset_state()\n load_time, template2 = self.wait_for_stable_template(seconds_threshold=self._config.PAGE_CHANGE_THRESHOLD,\n seconds_timeout=self._config.PAGE_CHANGE_TIMEOUT)\n longest_load_time = max(longest_load_time, load_time)\n if template2.is_stable():\n logger.info(f\"{url} took {load_time} seconds to stabilize.\")\n else:\n logger.warning(f\"{url} loaded in {load_time} seconds except for unstable xpaths: {template2.get_unstable_xpaths()}\")\n template.add_template(template2)\n for el_xpath in sorted(template.get_unstable_xpaths()):\n # When we do comparisons, we should remove or ignore these elements.\n # Maybe do: If there is a lot of changing content under a particular ancestor, ignore the whole ancestor?\n logger.info(f\"Found unstable content {el_xpath}\")\n\n self.wait_for_animation() # Have to do this to freeze the styles and positions.\n self._current_state_data = self._create_state_data()\n # Explore all reachable elements.\n self._current_state_data.elements_to_explore = self.query_xpath('html/body//*[@demod_reachable=\"true\"]')\n self._current_state_data.load_time = longest_load_time\n self._current_state_data.template = template\n return True", "def __delay(msecs):\n time.sleep(msecs / 1000)", "def load_page(page: int):\n response = httpx.get(BASE_URL, params={'page': page}).json()\n vacancies_ids = PageParser(response).collect_vacancies_ids()\n tasks = [collect_vacancy.s(vacancy_id) for vacancy_id in vacancies_ids]\n chord(tasks, save_vacancies.s())()", "def __call__(self):\n self.page1() # GET web (request 101)\n\n grinder.sleep(1000)\n self.page2() # GET web (request 201)\n\n grinder.sleep(1000)\n self.page3() # GET web (request 301)\n\n grinder.sleep(1000)\n self.page4() # GET web (request 401)\n\n grinder.sleep(1000)\n self.page5() # GET web (request 501)\n\n grinder.sleep(1000)\n self.page6() # GET web (request 601)\n\n grinder.sleep(1000)\n self.page7() # GET web (request 701)\n\n grinder.sleep(1000)\n self.page8() # GET web (request 801)\n\n grinder.sleep(1000)\n self.page9() # GET web (request 901)\n\n grinder.sleep(1000)\n self.page10() # GET web (request 1001)\n\n grinder.sleep(1000)\n self.page11() # GET web (request 1101)\n\n grinder.sleep(1000)\n self.page12() # GET web (request 1201)\n\n grinder.sleep(1000)\n self.page13() # GET web (request 1301)\n\n grinder.sleep(1000)\n self.page14() # GET web (request 1401)\n\n grinder.sleep(1000)\n self.page15() # GET web (request 1501)\n\n grinder.sleep(1000)\n self.page16() # GET web (request 1601)\n\n grinder.sleep(1000)\n self.page17() # GET web (request 1701)\n\n grinder.sleep(1000)\n self.page18() # GET web (request 1801)\n\n grinder.sleep(1000)\n self.page19() # GET web (request 1901)\n\n grinder.sleep(1000)\n self.page20() # GET web (request 2001)\n\n grinder.sleep(1000)\n self.page21() # GET web (request 2101)\n\n grinder.sleep(1000)\n self.page22() # GET web (request 2201)\n\n grinder.sleep(1000)\n self.page23() # GET web (request 2301)\n\n grinder.sleep(1000)\n self.page24() # GET web (request 
2401)\n\n grinder.sleep(1000)\n self.page25() # GET web (request 2501)\n\n grinder.sleep(1000)\n self.page26() # GET web (request 2601)\n\n grinder.sleep(1000)\n self.page27() # GET web (request 2701)\n\n grinder.sleep(1000)\n self.page28() # GET web (request 2801)\n\n grinder.sleep(1000)\n self.page29() # GET web (request 2901)\n\n grinder.sleep(1000)\n self.page30() # GET web (request 3001)\n\n grinder.sleep(1000)\n self.page31() # GET web (request 3101)\n\n# grinder.sleep(1000)\n# self.page32() # POST downloads (request 3201)\n\n# grinder.sleep(1000)\n# self.page33() # GET goog-malware-shavar_s_10501-10520.10501.10502-10520: (request 3301)\n\n grinder.sleep(1000)\n self.page34() # GET web (request 3401)\n\n grinder.sleep(1000)\n self.page35() # GET web (request 3501)\n# self.page36() # GET goog-malware-shavar_a_9606-9610.9606-9609.9610: (request 3601)\n\n# grinder.sleep(1000)\n# self.page37() # GET goog-phish-shavar_s_36981-36985.36981-36985.: (request 3701)\n\n# grinder.sleep(1000)\n# self.page38() # GET goog-phish-shavar_s_36986-36990.36986-36987.36988-36990: (request 3801)\n\n# grinder.sleep(1000)\n# self.page39() # GET goog-phish-shavar_a_46491-46500.46491-46499.46500: (request 3901)\n\n grinder.sleep(1000)\n self.page40() # GET web (request 4001)\n\n grinder.sleep(1000)\n self.page41() # GET web (request 4101)\n\n grinder.sleep(1000)\n self.page42() # GET web (request 4201)\n\n grinder.sleep(1000)\n self.page43() # GET web (request 4301)\n\n grinder.sleep(1000)\n self.page44() # GET web (request 4401)\n\n grinder.sleep(1000)\n self.page45() # GET web (request 4501)\n\n grinder.sleep(1000)\n self.page46() # GET web (request 4601)\n\n grinder.sleep(1000)\n self.page47() # GET web (request 4701)\n\n grinder.sleep(1000)\n self.page48() # GET web (request 4801)\n\n grinder.sleep(1000)\n self.page49() # GET web (request 4901)\n\n grinder.sleep(1000)\n self.page50() # GET web (request 5001)\n\n grinder.sleep(1000)\n self.page51() # GET web (request 5101)\n\n grinder.sleep(1000)\n self.page52() # GET web (request 5201)\n\n grinder.sleep(1000)\n self.page53() # GET web (request 5301)", "def wait(self, seconds):\n time.sleep(seconds)", "async def parse_url(self, url: str, delay: int = 0) -> BeautifulSoup:\n if url != self.driver.current_url:\n self.driver.get(url)\n return BeautifulSoup(self.driver.page_source, 'lxml')", "def wait(wait_time):\n\n time.sleep(wait_time)", "def localLoad(url, delaySecs=0):\n try:\n contents = open(url[len(\"file://\"):]).read()\n except IOError, e:\n d = Deferred()\n d.errback(e)\n return d\n if not delaySecs:\n return succeed(contents)\n d = Deferred()\n reactor.callLater(delaySecs, lambda: d.callback(contents))\n return d", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def wait_until_loading_is_complete(self, locator=None):\n locator = lex_locators[\"body\"] if locator is None else locator\n try:\n self.selenium.wait_until_page_contains_element(locator)\n self.wait_for_aura()\n # this knowledge article recommends waiting a second. I don't\n # like it, but it seems to help. 
We should do a wait instead,\n # but I can't figure out what to wait on.\n # https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1\n time.sleep(1)\n\n except Exception:\n try:\n self.selenium.capture_page_screenshot()\n except Exception as e:\n self.builtin.warn(\"unable to capture screenshot: {}\".format(str(e)))\n raise", "def next_results_page(driver, delay):\n try:\n # wait for the next page button to load\n print(\" Moving to the next page of search results... \\n\" \\\n \" If search results are exhausted, will wait {} seconds \" \\\n \"then either execute new search or quit\".format(delay))\n wait_for_clickable_element_css(driver, delay, \"a.next-btn\")\n # navigate to next page\n driver.find_element_by_css_selector(\"a.next-btn\").click()\n except Exception as e:\n print (\"\\nFailed to click next page link; Search results \" \\\n \"may have been exhausted\\n{}\".format(e))\n raise ValueError(\"Next page link not detected; search results exhausted\")\n else:\n # wait until the first job post button has loaded\n first_job_button = \"a.job-title-link\"\n # wait for the first job post button to load\n wait_for_clickable_element_css(driver, delay, first_job_button)", "def schedule(self, sleep_time, delay):\n self.sleep_time = sleep_time\n self.delay = delay\n self.thread = Thread(target=self.run)\n self.thread.start()", "def slow(request):\n time.sleep(.1)\n return TemplateResponse(request, 'slow.html', {})", "def wait_for_load(browser):\n loader = browser.find_element_by_class_name('ui-loader')\n while loader.is_displayed():\n time.sleep(0.1)", "def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requests 301-365)\r\n\r\n grinder.sleep(20)\r\n self.page4() # POST view (requests 401-452)\r", "def wait():\n time.sleep(1)", "def get_page(self,\n url,\n retries=3,\n timeout=20,\n dynamic_class_name=\"asrank-row-org\"):\n\n for retry in range(retries):\n try:\n self._driver.get(url)\n # Wait for the page to load dynamically\n wait = WebDriverWait(self._driver, timeout)\n wait.until(EC.visibility_of_element_located(\n (By.CLASS_NAME, dynamic_class_name)))\n break\n except TimeoutException:\n timeout *= 2\n\n # Extract the html and then use it to create a beautiful soup object\n return BeautifulSoup(self._driver.page_source, \"html.parser\")", "def wait(n=3):\n sleep(n)", "def wait(period=5):\n import time\n print ('Wait for {val} seconds'.format(val=period))\n time.sleep(float(period))", "def wait_for_response(self, message: str = None, delay_time: int = 0):\n self._num_rounds += 1\n logging.info(\n f'{self._world_name} waiting for response at round {self._num_rounds}'\n )\n if delay_time > 0:\n time.sleep(delay_time)\n self.agent.observe(\n {'id': constants.ONBOARDING_AGENT, 'text': message, 'episode_done': False}\n )\n self.messages.append(self.agent.act(timeout=self.turn_timeout))", "def sleep(secs=1.0):\n time.sleep(secs)", "def fetch_page(page_url):\n response = requests.get(page_url, headers=headers) # define proxies if necessary\n\n if response.status_code == 429:\n raise TooManyRequests('IP temporarily blocked')\n time.sleep(randint(10, 18))\n return response.text", "def do_load(self, name):\n try:\n self.runner.run()\n\n except():\n print('Loading failed')", "def get(self, url):\n\t\ttry:\n\t\t\tassert(type(url)) == str\n\t\t\tself.driver.get(url)\n\t\t\t# 
sleep(1) # Even tho driver.get is blocking, it returns as soon as DOM loads, without waiting for JS to run and update the DOM with the new elements\n\t\t\t# wait(self.driver, 10).until( EC.visibility_of_element_located() ) # Not sure how to wait here efficiently\n\t\t\tsleep(5) # A little long, but without a conditional variable to tell us when the page is ready us when to go our only choice is to nap\n\t\t\tself.bsource = bs( self.viewSource(), \"lxml\" ) # Update internal BeautifulSoup source with new javascript-encriched code (\"lxml\" is faster that \"html.parser\")\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to GET page {}\\n{}\".format(url, e))\n\t\t\treturn -1", "def load_page(url):\n with requests.Session() as session:\n return session.get(url).content.decode('utf-8')", "def wait_for(self, condition, timeout_message='', time_for_stop=None):\n\n if self._loaded:\n time_for_stop = time_for_stop or self.operate_timeout\n else:\n time_for_stop = time_for_stop or self.loading_timeout\n\n started_at = time.time()\n while not condition():\n if time_for_stop != -1 and time.time() > (started_at + time_for_stop):\n if self._loaded:\n raise OperateTimeout, timeout_message\n else:\n # raise LoadingTimeout, timeout_message\n self.trigger_action('Stop') #QWebPage::Stop\n self._loaded = True\n logger.warning(\"Page loading timeout.Force to stop the page\")\n break\n\n gevent.sleep(2)", "def id_sleep(x, delay=0):\n sleep(delay)\n return x", "def sleep(self):\n time.sleep(0.2)", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def load_page(url):\n parameters = {'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/69.0.3497.100 Safari/537.36\"}\n response = requests.get(url, params=parameters)\n\n # Abort if server is responding with error\n if not response.status_code == 200:\n print(\"Server stopped responding. Execution aborted.\")\n sys.exit(1)\n\n content = response.content.decode(response.encoding)\n\n # Save page to a file for debugging\n # with open(self.lastpage_path, 'w') as output_file:\n # output_file.write(content)\n\n return content", "def init_job_page(self, base_url):\n self.driver.get(base_url)\n self.driver.implicitly_wait(100)", "def read_soup_from_url(url, wait_time=10, debug=False):\n\n soup = None\n try:\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n print('Reading %s , Status %d , waiting %d seconds ...' 
%\n (url, r.status_code, wait_time))\n time.sleep(wait_time)\n except:\n print(traceback.format_exc())\n return soup", "def wait_for_text(self, text, time_for_stop=None):\n\n logger.debug(\"Wait for text %s\" % text)\n\n self.wait_for(lambda: text in self.content(),\n \"Can\\'t find '%s' in current frame\" % text, time_for_stop=time_for_stop)\n\n return self.wait_for_page_loaded()", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'", "def go_to_specific_results_page(driver, delay, results_page):\n if results_page < 2:\n return\n current_page = 1\n for i in range(results_page):\n current_page += 1\n time.sleep(5)\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(current_page))\n except ValueError:\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")", "async def async_get(self, url):\n self.reset()\n self.next_link = url\n return await self.async_advance_page()", "def pageText(self, url):\n try:\n request = urllib2.Request(url)\n request.add_header(\"User-Agent\", \"siruta_postcodes.py 1.0\")\n response = urllib2.urlopen(request)\n text = response.read()\n response.close()\n # When you load to many users, urllib2 can give this error.\n except urllib2.HTTPError, urllib2.URLError:\n self.loge(u\"Server or connection error. Pausing for 10 seconds... \" + time.strftime(\"%d %b %Y %H:%M:%S (UTC)\", time.gmtime()) )\n response.close()\n time.sleep(10)\n return pageText(url)\n return text", "def new_page(page_link):\n\told_param = old_param = driver.find_element_by_tag_name('html').text\n\tdriver.get(page_link)\n\treturn wait_for(old_param)", "def delay(ms: int, /) -> None:", "def _httpGetKarger(self, url, delaySecs):\n wait(delaySecs, 'karger.com')\n if self.useSelenium:\n page = httpGetSelenium(url, delaySecs)\n if 'Incapsula incident' in page['data']:\n raise pubGetError('Got blocked by Incapsula', 'incapsulaBlock')\n return page\n else:\n count = 0\n if self.session is None:\n if not requestsLoaded:\n raise pubGetError(\"Karger.com requires the requests python module. 
Install it with 'pip install requests' and retry\", 'noRequestsKarger')\n self.session = requests.Session()\n while count < 5:\n try:\n response = self.session.get(url)\n response = crack(self.session, response)\n break\n except requests.exceptions.ConnectionError:\n count += 1\n logging.warn('Got connection error when trying to get Karger page, retrying...')\n\n else:\n while count < 5:\n try:\n response = self.session.get(url)\n break\n except requests.exceptions.ConnectionError:\n count += 1\n logging.warn('Got connection error when trying to get Karger page, retrying...')\n\n if count >= 5:\n raise pubGetError('Too many Karger connection errors', 'KargerConnErrors')\n page = {}\n page['data'] = response.content\n page['url'] = response.url\n page['mimeType'] = response.headers['content-type'].split(';')[0]\n if 'Incapsula incident' in page['data']:\n self.useSelenium = True\n page = httpGetSelenium(url, delaySecs)\n if 'Incapsula incident' in page['data']:\n raise pubGetError('Got blocked by Incapsula even with selenium', 'incapsulaBlockFirefox')\n return page", "def wait_for(self, query_delay=0):\n raise NotImplementedError(\"Implement in subclass!\")", "def RandomDelay():\r\n sleep(random())", "def go_then_wait(self, position, seconds):\n self.go(position)\n self.wait(seconds)", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def wait_for_page_change(self, current_page):\n WebDriverWait(self.driver, 5).until(EC.url_changes(current_page))", "async def sleep(self, name: str, delay_ms: int) -> None:\n\n # Create a deferred that gets called in N seconds\n sleep_deferred: \"defer.Deferred[None]\" = defer.Deferred()\n call = self._reactor.callLater(delay_ms / 1000, sleep_deferred.callback, None)\n\n # Create a deferred that will get called if `wake` is called with\n # the same `name`.\n stream_set = self._streams.setdefault(name, set())\n notify_deferred: \"defer.Deferred[None]\" = defer.Deferred()\n stream_set.add(notify_deferred)\n\n try:\n # Wait for either the delay or for `wake` to be called.\n await make_deferred_yieldable(\n defer.DeferredList(\n [sleep_deferred, notify_deferred],\n fireOnOneCallback=True,\n fireOnOneErrback=True,\n consumeErrors=True,\n )\n )\n finally:\n # Clean up the state\n curr_stream_set = self._streams.get(name)\n if curr_stream_set is not None:\n curr_stream_set.discard(notify_deferred)\n if len(curr_stream_set) == 0:\n self._streams.pop(name)\n\n # Cancel the sleep if we were woken up\n if call.active():\n call.cancel()", "def pageText(url):\n try:\n request = urllib2.Request(url)\n request.add_header(\"User-Agent\", pywikibot.useragent)\n response = urllib2.urlopen(request)\n text = response.read()\n response.close()\n # When you load to many users, urllib2 can give this error.\n except urllib2.HTTPError:\n pywikibot.output(u\"Server error. Pausing for 10 seconds... 
\" + time.strftime(\"%d %b %Y %H:%M:%S (UTC)\", time.gmtime()) )\n response.close()\n time.sleep(10)\n return pageText(url)\n return text", "def waitFor(self,duration=2):\n time.sleep(duration)\n print('Done waiting for ',duration)\n return", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "def sleep(self, amount: float):\n time.sleep(amount)", "def element_wait(self, selector, secs=1):\n if \"=>\" not in selector:\n raise NameError(\"Positioning syntax errors, lack of '=>'.\")\n\n by = selector.split(\"=>\")[0].strip()\n value = selector.split(\"=>\")[1].strip()\n messages = 'Element: {0} not found in {1} seconds.'.format(selector, secs)\n\n if by == \"id\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.ID, value)), messages)\n elif by == \"name\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.NAME, value)), messages)\n elif by == \"class\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.CLASS_NAME, value)),messages)\n elif by == \"link_text\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.LINK_TEXT, value)),messages)\n elif by == \"xpath\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.XPATH, value)), messages)\n elif by == \"css\":\n WebDriverWait(self.driver, secs, 1).until(EC.presence_of_element_located((By.CSS_SELECTOR, value)),messages)\n else:\n raise NameError(\"Please enter the correct targeting elements,'id','name','class','link_text','xpaht','css'.\")", "def httpGetSelenium(url, delaySecs, mustGet=False):\n global display\n global browser\n logging.info('Downloading %s using Selenium/Firefox' % url)\n host = urlparse.urlsplit(url)[1]\n delaySecs = getDelaySecs(host, delaySecs)\n wait(delaySecs, host)\n if not seleniumLoaded:\n raise pubGetError('Cannot get page, selenium is not installed on this machine', 'noSelenium')\n page = {}\n if not display:\n logging.info('Starting pseudo-display')\n display = Display(visible=0, size=(1024, 768))\n display.start()\n proxy = None\n if pubConf.httpProxy:\n proxy = Proxy({'proxyType': ProxyType.MANUAL,\n 'httpProxy': pubConf.httpProxy,\n 'ftpProxy': pubConf.httpProxy,\n 'sslProxy': pubConf.httpProxy})\n if not browser:\n logging.info('Starting firefox on pseudo-display')\n browser = webdriver.Firefox(proxy=proxy)\n count = 0\n while count < 5:\n try:\n browser.get(url)\n break\n except (requests.Timeout, timeout):\n logging.warn('timeout from selenium')\n count += 1\n except httplib.CannotSendRequest:\n logging.warn(\"selenium's firefox died, restarting\")\n count += 1\n browser = webdriver.Firefox(proxy=proxy)\n\n if count >= 5:\n logging.warn('too many timeouts/errors from selenium')\n if mustGet:\n raise pubGetError('too many timeouts', 'httpTimeout')\n return\n else:\n page['seleniumDriver'] = browser\n page['url'] = browser.current_url\n page['data'] = browser.page_source\n page['mimeType'] = 'unknown'\n if type(page['data']) == str:\n page['data'] = page['data'].encode('utf8')\n return page", "def sleep(self, seconds):\n time.sleep(seconds)", "def __wait(min_sec, max_sec):\n time.sleep(randint(min_sec, max_sec))", "def waitReady(self, spin_delay=0.01):\n while not self.isReady():\n time.sleep(spin_delay)", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # 
stops working (returns too early). The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def wait_job_loading(self, elt, timeout=10):\n time_slept = 0\n wait = True\n while wait:\n time.sleep(1)\n time_slept += 1\n company_name = elt.text.splitlines()[0]\n try:\n employer_info_txt = self.driver.find_element_by_class_name(\n \"empInfo\").text.splitlines()[0]\n if (employer_info_txt.splitlines()[0]\n == company_name):\n wait = False\n except NoSuchElementException as err:\n logger.error(\"Element not found \", err)\n if time_slept > timeout:\n raise TimeoutError(\n \"job offer of {} was not loaded in 10 seconds\".format(\n company_name))", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def _delay(self, n=None):" ]
[ "0.6076569", "0.60745835", "0.5974487", "0.5962621", "0.589742", "0.58021855", "0.5790462", "0.5778986", "0.5760904", "0.5742551", "0.55892485", "0.555522", "0.55303234", "0.550315", "0.5468579", "0.54579824", "0.54159063", "0.5409639", "0.53982323", "0.53721374", "0.5353541", "0.5353355", "0.5324486", "0.5315846", "0.53092784", "0.5276241", "0.5270041", "0.5268084", "0.52659744", "0.52364", "0.52133554", "0.5198038", "0.519137", "0.5189836", "0.515436", "0.5142527", "0.51316446", "0.5128058", "0.5112448", "0.5109928", "0.508924", "0.50772387", "0.5058052", "0.5054222", "0.50361055", "0.5033015", "0.5021248", "0.5010209", "0.49961323", "0.49961323", "0.49841866", "0.49691057", "0.49648938", "0.49377918", "0.49296656", "0.49195325", "0.49159226", "0.4871912", "0.4864611", "0.48586115", "0.4834642", "0.48341048", "0.4833508", "0.48246792", "0.4814269", "0.4797784", "0.47903296", "0.47888073", "0.4782793", "0.4773555", "0.47645748", "0.4760502", "0.4758379", "0.4741733", "0.4736002", "0.473308", "0.47298366", "0.47288635", "0.47278443", "0.47165248", "0.47140226", "0.4709591", "0.47079983", "0.47023457", "0.47014433", "0.46953943", "0.46951473", "0.46801677", "0.46744645", "0.46735352", "0.46734217", "0.4670847", "0.46628022", "0.46572897", "0.46536535", "0.46535963", "0.46525586", "0.4651035", "0.4649526", "0.46475455" ]
0.7595584
0
Give focus to the element with the ``maincontent`` ID.
def focus_on_main_content(self):
        self.browser.execute_script("$('#main-content').focus()")
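A minimal sketch of the same focus-by-ID pattern driven through Selenium; the Firefox driver, the example URL, and the plain-DOM `focus()` call (used here instead of the jQuery call in the row above, so the page does not need jQuery) are illustrative assumptions, not part of the dataset row.

from selenium import webdriver

driver = webdriver.Firefox()
driver.get("https://example.com")
# Move keyboard focus to the element with the `main-content` ID, if it exists.
driver.execute_script(
    "var el = document.getElementById('main-content'); if (el) { el.focus(); }"
)
driver.quit()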
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetFocus(self):\r\n \r\n self._main_win.SetFocus()", "def onMnemoToMain(self):\n self.second_main_text.SetFocus()", "def _focus(self, element):\n actions = ActionChains(self.selenium.driver)\n actions.move_to_element(element).click().perform()\n self.selenium.set_focus_to_element(element)", "def setFocus(self):\n self._urlEdit.setFocus()", "def onMainToMnemo(self):\n self.second_mnemo_text.SetFocus()", "def setFocusId(*args):", "def setFocusId(*args):", "def setFocusId(*args):", "def setFocusId(*args):", "def focus_window(i3, container_id):\n i3.command(f'[con_id=\"{container_id}\"] floating enable')\n i3.command(f'[con_id=\"{container_id}\"] focus')", "def focus_master(qtile):\n grp = qtile.current_group\n if grp.layout.clients.current_index > 0:\n c = grp.layout.clients.focus_first()\n grp.focus(c, True)\n elif grp.layout.clients.current_index == 0 and len(grp.layout.clients.clients) > 0:\n grp.layout.cmd_down()", "def setFocus(*args):", "def setFocus(*args):", "def setFocus(*args):", "def setFocus(*args):", "def focus(self):\n self.image_window.focus_set()", "def XPSetKeyboardFocus(inWidget):\n pass", "def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())", "def _focus(self, event) -> None:\n self.focus = True", "def focus(self):\n raise NotImplementedError", "def get_focus(self):\n\n self.activateWindow()\n self.setFocus()", "def OnSetFocus(self, event):\r\n\r\n self._owner.SetFocus()", "def set_focus(self, locator: Locator) -> None:\n element = self.ctx.get_element(locator)\n if not hasattr(element.item, \"SetFocus\"):\n raise ActionNotPossible(\n f\"Element found with {locator!r} does not have 'SetFocus' attribute\"\n )\n element.item.SetFocus()", "def setFocus(*args, **kwargs)->None:\n pass", "def focus(self, focus_library=True):\n if focus_library:\n self.treeview.grab_focus()\n if not self.grid.is_visible():\n self.toggle()\n else:\n self.vimiv.image.vimiv.image.scrolled_win.grab_focus()\n # Update info for the current mode\n self.vimiv.statusbar.update_info()", "def __switchFocus(self):\n if self.__focus == 0:\n self.__isoWindow.unfocus()\n self.__logWindow.focus()\n self.__focus = 1\n self.__focusedWindow = self.__logWindow\n else:\n self.__isoWindow.focus()\n self.__logWindow.unfocus()\n self.__focus = 0\n self.__focusedWindow = self.__isoWindow", "def add_to(self, main_lay):\n main_lay.addWidget(self._tab)\n self.setParent(main_lay.parentWidget())", "def focus_next(self):\n self.focus_item()", "def defaultFrame(self):\n\t\tself.driver.switch_to.default_content()", "def element_focused(step, id):\r\n\r\n elem = world.browser.find_element_by_xpath(str('id(\"{id}\")'.format(id=id)))\r\n focused = world.browser.switch_to_active_element()\r\n\r\n assert_true(step, elem == focused)", "def set_focus_mode(self, focus_mode):\n gevent.spawn(self.focus_mode_task,\n focus_mode)\n self.emit('focusingModeRequested', focus_mode)", "def switch_to_default_content(self):\n self.driver.switch_to.default_content()", "def OnSetFocus(self, event):\r\n\r\n self.Refresh()", "def run_autofocus(self):\n raise NotImplementedError", "def force_focus_set(self, event):\n self.focus_set()", "def __window_focus(self):\n pass", "def focus_and_click(self, element_id: str, wait_time: Optional[float] = None):\n sleeptime = wait_time or self.explicit_wait\n self.set_focus(element_id)\n self.click_element(element_id)\n if sleeptime and sleeptime > 0:\n time.sleep(sleeptime)", "def onClick(self):\n self.app.setActiveMode(\"start\")", "def 
focus_password(self, **kws):\r\n self.password_box.focus()", "def OnChildFocus(self, event):\r\n\r\n # when a child pane has it's focus set, we should change the \r\n # pane's active state to reflect this. (this is only true if \r\n # active panes are allowed by the owner)\r\n\r\n window = event.GetWindow()\r\n if isinstance(window, wx.Dialog):\r\n # Ignore EVT_CHILD_FOCUS events originating from dialogs not\r\n # managed by AUI\r\n rootManager = None\r\n elif isinstance(window.GetParent(), AuiFloatingFrame):\r\n rootManager = GetManager(window)\r\n else:\r\n rootManager = self\r\n \r\n if rootManager:\r\n rootManager.ActivatePane(window)\r\n \r\n event.Skip()", "def focus_force(self):\n self._canvas.focus_force()", "def cb_main_window(self, event):\n self.main_frame.Show()", "def select_entry(self):\n logging.debug(\"element selected\")\n if len(self.contents) > 0:\n self.to_background()\n self.contents[self.pointer][1]()\n self.to_foreground()\n if self.path_chosen:\n self.deactivate()\n else:\n self.to_foreground()", "def XPGetWidgetWithFocus():\n pass", "def focus_on(self, card_idx: int) -> None:", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def set_focus(self, c: Cmdr, w: Wrapper) -> None:\n if not w:\n return\n if getattr(w, 'widget', None):\n if not isinstance(w, QtWidgets.QWidget):\n # w should be a wrapper.\n w = w.widget\n if 'focus' in g.app.debug:\n name = w.objectName() if hasattr(w, 'objectName') else w.__class__.__name__\n g.trace('(LeoQtGui)', name)\n w.setFocus()", "def __quickSearchFocusIn(self):\n self.quickFindtextCombo.lastActive = self.activeWindow()", "def change_focus(window):\n set_active_window_checked(window).check()\n sleep(0.01)", "def push_focus(self, identity):\n self._focus.append(identity)", "def run_autofocus_stig(self):\n raise NotImplementedError", "def start_edit(self):\n txt = self.model.get_current_line()\n self._line.original_widget = self._line_edit\n self._line_edit.set_edit_text(txt)\n self._line_edit.set_edit_pos(len(txt))\n self._top.set_focus(2)", "def navigate_to_col_content_tab(self):\n content = \"//div[@id='subNavigationBar']/ul[2]//a[.='Content']\"\n content_sitem = self.locator_finder_by_xpath(content)\n content_sitem.click()\n time.sleep(1)", "def SetBitmapFocus(self, bitmap):\n\n self.bmpFocus = bitmap\n self.SetUseFocusIndicator(False)", "def main_entity_of_page(self, main_entity_of_page: str):\n\n self._main_entity_of_page = main_entity_of_page", "def main_menu ( self ):\n\t\tif self.style == 'qt':\n\t\t\tp = Process( target=self.qt_main_menu )\n\t\t\tp.start()\n\t\t\tself.menus.append( p )", "def set_main(self, main_loop):\n self.main_loop = main_loop", "def switch_to_default(self):\n self.base_driver.switch_to.default_content()", "def open_keyboard(self, instance):\n self.popup.open()", "def set_focus(self, pos):\n urwid.emit_signal(self, 'focus_change', pos)\n return super(OptionListWalker, self).set_focus(pos)", "def _link_clicked(self, href):\n\n self.main_frame.load(href)", "def exec_script_and_interact(self):\r\n self.exec_script(set_focus=True)", "def select_app_launcher_tab(self, tab_name):\n locator = lex_locators[\"app_launcher\"][\"tab_link\"].format(tab_name)\n self.open_app_launcher()\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.set_focus_to_element(locator)\n self._jsclick(locator)\n self.wait_until_modal_is_closed()", "def focusInEvent(self, evt):\n self.gotFocus.emit()\n super(QuickSearchLineEdit, 
self).focusInEvent(evt) # pass it on", "def perform_as(self, the_actor: Actor) -> None:\n browser = the_actor.ability_to(BrowseTheWeb).browser\n if self.target is None:\n browser.switch_to.default_content()\n else:\n browser.switch_to.frame(self.target.found_by(the_actor))", "def focus_slider(self, name):\n # If manipulate is not toggled, this makes no sense\n if not self.is_visible():\n self._app[\"statusbar\"].message(\n \"Focusing a slider only makes sense in manipulate\", \"error\")\n elif name not in self.sliders:\n self._app[\"statusbar\"].message(\n \"No slider called \" + name, \"error\")\n else:\n self.sliders[name].grab_focus()", "def open_email(self):\n self.driver.execute_script(\"window.scrollTo(0, 700)\")\n self.click_on_element_by_css(tep.OPEN_EMAIL_BUTTON)", "def focus_and_input_text(\n self, element_id: str, text: str, wait_time: Optional[float] = None\n ):\n sleeptime = wait_time or self.explicit_wait\n self.set_focus(element_id)\n self.input_text(element_id, text)\n if sleeptime and sleeptime > 0:\n time.sleep(sleeptime)", "def getFocus(*args):", "def getFocus(*args):", "def getFocus(*args):", "def getFocus(*args):", "def goto_menu(self, *args):\n self.manager.current = 'Main Menu'\n self.reset()\n self.manager.reset()", "def OnSetFocus(self, event):\r\n\r\n treectrl = self._owner\r\n select = treectrl.GetSelection()\r\n\r\n # If the window is associated to an item that currently is selected\r\n # (has focus) we don't kill the focus. Otherwise we do it.\r\n if select != self:\r\n treectrl._hasFocus = False\r\n else:\r\n treectrl._hasFocus = True\r\n \r\n event.Skip()", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def OnSetFocus(self, event):\r\n\r\n treectrl = self._wnd.GetParent()\r\n select = treectrl.GetSelection()\r\n\r\n # If the window is associated to an item that currently is selected\r\n # (has focus) we don't kill the focus. 
Otherwise we do it.\r\n if select != self:\r\n treectrl._hasFocus = False\r\n else:\r\n treectrl._hasFocus = True\r\n \r\n event.Skip()", "def action_goto(self):\n dialog = GoToDialog(self)\n dialog.exec()\n\n # Re-focus the main window\n self.activateWindow()", "def select_app_launcher_app(self, app_name):\n locator = lex_locators[\"app_launcher\"][\"app_link\"].format(app_name)\n self.open_app_launcher()\n self.selenium.wait_until_page_contains_element(locator, timeout=30)\n self.selenium.set_focus_to_element(locator)\n elem = self.selenium.get_webelement(locator)\n link = elem.find_element_by_xpath(\"../../..\")\n self.selenium.set_focus_to_element(link)\n link.click()\n self.wait_until_modal_is_closed()", "def scroll_to(self):\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", self._element)", "def id_click(elem_id):\r\n css_click('#{}'.format(elem_id))", "def _enter_edit_mode(self):\n edit_mode = self.UTILS.element.getElement(DOM.DownloadManager.download_edit_button,\n \"Download edit button\", True, 10)\n edit_mode.tap()\n self.UTILS.element.waitForElements(DOM.DownloadManager.downloads_edit_header_title,\n \"Edit downloads header\")", "def i_navigate_to_contact_link():\n driver.find_element_by_id(\"contact_link\").click()", "def set_foreground(self):\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n shell.SendKeys('%')\n win32gui.SetForegroundWindow(self._handle)", "def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()", "def focusChanged (self):\n weditor = QApplication.focusWidget()\n if isinstance(weditor, PyEditor):\n if weditor.editorId == self.TEST_DATA_EDITOR:\n self.viewer().findWidget.setEditor( editor = self.srcEditor)\n\n self.viewer().FocusChanged.emit(self)", "def _focus_exit(self):\n self._switch(exiting=True)", "def create_focus_fenode(t_id):\n # Create focus <fenode>\n focus_fenode = chapter_input.new_tag('fenode')\n focus_fenode['idref'] = t_id\n focus.insert(0, focus_fenode)", "def onFocus(*args):", "def onFocus(*args):", "def onFocus(*args):", "def onFocus(*args):", "def switchToDefaultContent(self):\n self.log_info(f\"Browser.switchToDefaultContent: Switching to default content\")\n self.CORE.switch_to.default_content()\n return", "def siguiente(self, widget):\n window = widget.get_toplevel()\n window.do_move_focus(window, gtk.DIR_TAB_FORWARD)", "def setfocus(self, focus):\n self.focus = self.data[focus]\n self.focus_stage = focus\n for k in self.focus.keys():\n setattr(self, k, self.focus[k])", "def HandleFocusIn(self, event: tkEvent):\n pass", "def main_frame(self):\n debug.virtual('wxMediator.main_frame')", "def goto_faq(self):\n\n self.faq.click()", "def goto_faq(self):\n\n self.faq.click()", "def login(self):\n self.driver.find_element(*BaseLocators.PRIMARY_BUTTON).click()", "def main(self):\n self.root.mainloop()" ]
[ "0.68469656", "0.64330465", "0.62243736", "0.5957323", "0.5938014", "0.59248924", "0.59248924", "0.59248924", "0.59248924", "0.58128226", "0.56373054", "0.5598049", "0.5598049", "0.5598049", "0.5598049", "0.55663764", "0.555354", "0.55340254", "0.5431309", "0.5403516", "0.53790075", "0.53459644", "0.53059095", "0.5301749", "0.52448714", "0.5229822", "0.5139656", "0.5104946", "0.51001406", "0.5073151", "0.50729203", "0.50669837", "0.5062927", "0.50506955", "0.50186014", "0.50177324", "0.50146866", "0.49881062", "0.49800968", "0.49684885", "0.49639893", "0.49538255", "0.49404338", "0.49098223", "0.4891004", "0.48813", "0.48690104", "0.48680127", "0.4863883", "0.48556423", "0.48438227", "0.48119247", "0.4810237", "0.47872335", "0.47498447", "0.47447035", "0.47273946", "0.47204977", "0.47173533", "0.46907237", "0.46826363", "0.4649226", "0.464359", "0.46432796", "0.46393758", "0.4632784", "0.46313486", "0.46263227", "0.46256733", "0.46256733", "0.46256733", "0.46256733", "0.46073315", "0.45998043", "0.45970643", "0.45963672", "0.45871067", "0.4573813", "0.45698667", "0.455875", "0.45555535", "0.45500946", "0.4544785", "0.45346707", "0.45336896", "0.45221862", "0.4522124", "0.452094", "0.452094", "0.452094", "0.452094", "0.45125964", "0.45104763", "0.4509245", "0.45044008", "0.44999707", "0.4473988", "0.4473988", "0.44667843", "0.44528803" ]
0.8871579
0
Return a boolean indicating whether the given item is visible.
def is_visible(self, name): return self.q(css="div.{}".format(name)).first.visible
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsItemVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def IsVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def is_visible(self):\n return self.container['is_visible']", "def is_visible(self):\n return self._visible", "def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value", "def is_visible ( self ):\n return not self.is_hidden and (\n self.priority is None or self.priority >= 0\n )", "def has_visible_entity(self):\n ret = False\n for e in self:\n if e.is_visible() == True:\n ret = True\n break\n return ret", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def is_visible(self):\n return self._currently_shown", "def is_visible(self):\n return self.window.active_panel() == self.full_name", "def _is_visible(self, key) -> bool:\n return self._get_DecoSetting(key).visible", "def visible(self):\n return self._visible", "def visible(self):\n return self._visible", "def is_visible(self):\n try:\n return self.element.is_displayed()\n except (NoSuchElementException,\n ElementNotVisibleException,\n StaleElementReferenceException):\n return False", "def is_visible(self):\n return self.visible_date < timezone.now()", "def is_visible(self, url=''):\n return bool(url)", "def is_visible(self):", "def get_visible(self):\n return self._visible", "def isVisible( self ):\n layer = self.layer()\n if ( layer and not layer.isVisible() ):\n return False\n# \n# if ( self.isIsolateHidden() ):\n# return False\n# \n return self._visible", "def isVisible(self):\n\t\treturn True", "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def is_visible(self, path):\n return True", "def visible(self):\n return ctypes.windll.user32.IsWindowVisible(self.hwnd)", "def visible(self):\n return self._turtle.isvisible()", "def visible(self):\n return self._turtle.isvisible()", "def is_visible(self):\n return self.real > 0", "def visible(self):\r\n return self.column.visible", "def isPlayed(self, item):\n userState = self.userState(item)\n return bool(userState.viewCount > 0) if userState.viewCount else False", "def 
is_attribute_visible(self, key):\n if self.has_key(key):\n attribute_status = getattr(self, key)\n if isinstance(attribute_status, bool) and attribute_status == True:\n return True\n elif isinstance(attribute_status, self.__class__) and \\\n attribute_status.are_any_attributes_visible():\n return True\n\n return False", "def is_visible(self, timeout=None):\n try:\n self.visibility_of_element_located(timeout)\n except TimeoutException:\n return False\n return True", "def inspectedNodeIsVisible(self):\n return self._inspected_node_is_visible", "def is_visible_to(self, user):\n return True", "def IsVisible(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_IsVisible(self, *args)", "def IsExpanded(self, item):\r\n\r\n return item.IsExpanded()", "def visible(self) -> bool:\n try:\n return bool(self.driver.wait_until_all_visible(*self.ROOT_LOCATOR))\n except WebDriverException:\n return False", "def IsVisibleInView(object_id, view=None):\n rhobj = rhutil.coercerhinoobject(object_id, True, True)\n viewport = __viewhelper(view).MainViewport\n bbox = rhobj.Geometry.GetBoundingBox(True)\n return rhobj.Visible and viewport.IsVisible(bbox)", "def IsItemEnabled(self, item):\r\n\r\n return item.IsEnabled()", "def IsItemEnabled(self, item):\r\n\r\n return item.IsEnabled()", "def IsItemChecked(self, item):\r\n\r\n return item.IsChecked()", "def IsShown(self):\r\n\r\n return self._shown", "def is_visible(self, x, y) :\n\t\tres_x = (x > self.x_min) and (x < self.x_max)\n\t\t# print 'res_x : {0}, x : {1}, x_min : {2}, x_max:{3}'.format(res_x, x, self.x_min, self.x_max)\n\t\tres_y = (y > self.y_min) #and (y < self.y_max)\n\t\treturn res_x and res_y", "def _is_visible(self, point):\n return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1", "def is_visible(self):\n if self._namespace and self._namespace.is_anonymous():\n return True\n return self._rawdoc.get_inherited_visibility() != DocType.none", "def is_displayed(self, unit):\n try:\n field_is_displayed = getattr(self.unit.get_model_name()+'_is_displayed')\n if field_is_displayed:\n return field_is_displayed(unit)\n except AttributeError:\n pass\n if not self.displayed and not self.excluded:\n return True\n elif self.displayed and self.excluded:\n return unit.get_model_name() in self.displayed \\\n and unit.get_model_name() not in self.excluded\n elif self.excluded:\n return unit.get_model_name() not in self.excluded\n elif self.displayed:\n return unit.get_model_name() in self.displayed\n else:\n return True", "def isShown(self):\n return self.shown", "def is_view_dropdown_visible(self):\n return self.is_element_visible(self.view_dropdown_locator)", "def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True", "def is_visible(self, locator, timeout=15):\n try:\n ui.WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))\n return True\n except TimeoutException:\n return False", "def isItemVisible(self, itemName, touchType=True, contentType=None, index=1, area=None, refresh=True, containerObject=None, relatedAreaEnd=None):\r\n\r\n item=0\r\n result = None\r\n\r\n #if index==None:\r\n # area = (0,0,self.getScreenWidth(),self.getScreenHeight())\r\n\r\n item=self.searchItem(itemName, touchType, contentType, index=index, area=area, refresh=refresh, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if item:\r\n if index is not None:\r\n x,y,w,h = [int(p) for p in 
item.getAttribute('coords').split(\",\")]\r\n\r\n x_center = x+(w/2)\r\n y_center = y+(h/2)\r\n\r\n topLeft = (x,y)\r\n topRight = (x+w,y)\r\n bottomLeft = (x,y+h)\r\n bottomRight = (x+w,y+h)\r\n top = int(item.getAttribute('top'))\r\n bottom = int(item.getAttribute('bottom'))\r\n left = int(item.getAttribute('left'))\r\n right = int(item.getAttribute('right'))\r\n\r\n if (item.getAttribute('visible') != 'hidden') and (self.__isPointOnScreen(topLeft) or self.__isPointOnScreen(topRight) or self.__isPointOnScreen(bottomLeft) or self.__isPointOnScreen(bottomRight) or (top < 0 and bottom > self.getScreenHeight()) or (left < 0 and right > self.getScreenWidth()) ):\r\n result = (self.VISIBLE,(x_center,y_center), item)\r\n else:\r\n result = (self.HIDDEN,(x_center,y_center), item)\r\n else:\r\n resultList = []\r\n for i in item:\r\n x,y,w,h = [int(p) for p in i.getAttribute('coords').split(\",\")]\r\n\r\n x_center = x+(w/2)\r\n y_center = y+(h/2)\r\n\r\n topLeft = (x,y)\r\n topRight = (x+w,y)\r\n bottomLeft = (x,y+h)\r\n bottomRight = (x+w,y+h)\r\n\r\n top = int(i.getAttribute('top'))\r\n bottom = int(i.getAttribute('bottom'))\r\n left = int(i.getAttribute('left'))\r\n right = int(i.getAttribute('right'))\r\n\r\n if (i.getAttribute('visible') != 'hidden') and (self.__isPointOnScreen(topLeft) or self.__isPointOnScreen(topRight) or self.__isPointOnScreen(bottomLeft) or self.__isPointOnScreen(bottomRight) or (top < 0 and bottom > self.getScreenHeight()) or (left < 0 and right > self.getScreenWidth()) ):\r\n return (self.VISIBLE,(x_center,y_center), i)\r\n else:\r\n resultList.append((self.HIDDEN,(x_center,y_center), i))\r\n result = resultList[0]\r\n else:\r\n result = (self.NOT_FOUND,(0,0), item)\r\n\r\n return result", "def IsColumnShown(self, column):\r\n\r\n return self._header_win.GetColumn(column).IsShown()", "def _is_task_visible(context, task):\n # Is admin == task visible\n if context.is_admin:\n return True\n\n # No owner == task visible\n if task['owner'] is None:\n return True\n\n # Perform tests based on whether we have an owner\n if context.owner is not None:\n if context.owner == task['owner']:\n return True\n\n return False", "def IsShown(self):\r\n \r\n return not self.HasFlag(self.optionHidden)", "def GetNextVisible(self, item):\r\n\r\n id = item\r\n\r\n while id:\r\n id = self.GetNext(id)\r\n if id and self.IsVisible(id):\r\n return id\r\n \r\n return None", "def is_viewed(self):\n return self.has_label(VIEWED_LABEL)", "def top_visible(self) -> bool:\n return self.vertical_scroll == 0", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def is_ruler_visible(self):\n return self.container['is_ruler_visible']", "def is_alive(self):\r\n return self.visible", "def IsVisible(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_IsVisible(self, *args)", "def IsWindowVisible(hwnd):\r\n return bool(__IsWindowVisible(hwnd))", "def visible(self, hipid):\n s = self.hip_stars[hipid]\n if s[3]<min(self.inner_dec, self.outer_dec): return False\n return s[3]<=max(self.inner_dec, self.outer_dec)", "def has_item(self, item):\n return item in self.set", "def has_item(self, item):\n return item in self.set", "def IsSelected(self, item):\r\n\r\n return item.IsSelected()", "def has_item(self, item):\n return item in self.cache", "def is_visible(self, is_visible):\n\n self.container['is_visible'] = is_visible", "def is_displayed(self):\n return len(self._find_all_by_locator()) > 0", "def is_obj_visible(obj, scene, context=None, 
is_dupli=False):\r\n if is_dupli:\r\n return True\r\n\r\n # Mimic Blender behaviour: if object is duplicated via a parent, it should be invisible\r\n if obj.parent and obj.parent.dupli_type != \"NONE\":\r\n return False\r\n\r\n # Check if object is used as camera clipping plane\r\n if is_valid_camera(scene.camera) and obj == scene.camera.data.luxcore.clipping_plane:\r\n return False\r\n\r\n render_layer = get_current_render_layer(scene)\r\n if render_layer:\r\n # We need the list of excluded layers in the settings of this render layer\r\n exclude_layers = render_layer.layers_exclude\r\n else:\r\n # We don't account for render layer visiblity in viewport/preview render\r\n # so we create a mock list here\r\n exclude_layers = [False] * 20\r\n\r\n on_visible_layer = False\r\n for lv in [ol and sl and not el for ol, sl, el in zip(obj.layers, scene.layers, exclude_layers)]:\r\n on_visible_layer |= lv\r\n\r\n hidden_in_outliner = obj.hide if context else obj.hide_render\r\n return on_visible_layer and not hidden_in_outliner", "def IsColumnShown(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n\r\n return self._columns[column].IsShown()", "def elementIsVisible(self, element_tuple):\n result = self.CORE.find_element(*self.format_element(element_tuple)).is_displayed()\n self.log_info(f\"Browser.elementIsVisible: {element_tuple} is {'' if result else 'not '}present\")\n return result", "def IsInstanceVisible(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_IsInstanceVisible(self, *args)", "def IsHidden(self):\r\n\r\n return self._hidden", "def visible(self, show):", "def __checkCenterVisibility(self, itemNode, itemXc, itemYc):\r\n for sibling in itemNode.findall('following-sibling::*[@is-in-tab-area=\"true\"]'):\r\n name = sibling.getAttribute('image')\r\n siblingX, siblingY, siblingW, siblingH = [int(c) for c in sibling.getAttribute('coords').split(\",\")]\r\n if itemXc>=siblingX and itemXc <=(siblingX + siblingW) and itemYc>=siblingY and itemYc <= (siblingY + siblingH):\r\n return (self.HIDDEN,(itemXc,itemYc), itemNode)\r\n\r\n return (self.VISIBLE,(itemXc,itemYc), itemNode)", "def has_item(self, item: Inventory) -> bool:\n return (item.pk,) in self.orderitem_set.values_list('item')", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def is_visible_in_classroom(self):\n if self.is_block():\n return False\n elif self.is_lesson():\n if self.is_published_in_class:\n return True\n elif self.is_step():\n if self.is_published_in_class or self.parent.is_published_in_class:\n return True\n return False", "def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False", "def is_visible_on_course_page(self):\n return self.catalog_visibility != CourseRunCatalogVisibility.HIDDEN", "def XPIsWidgetVisible(inWidget):\n pass", "def GetItemWindowEnabled(self, item):\r\n\r\n return item.GetWindowEnabled()", "def __contains__(self, key):\n return key in self._tagged_values_dict and self._is_visible(key)", "def is_visible(self, position, size=0):\n # return True\n size /= self.scale # size is in pixel\n in_x = (self.focus.x + self.offset.x / self.scale - size <=\n position.x <=\n self.focus.x - self.offset.x / self.scale + size)\n in_y = (self.focus.y + self.offset.y / self.scale - size <=\n position.y <=\n self.focus.y - self.offset.y / self.scale + 
size)\n # if name == \"earth\":\n # print(\"{:+e} {:+e} {}\".format(self.focus.y + self.offset2.y\n # , position.y, in_y))\n # print(\"{:+e} {:+e}\".format(self.focus.x, self.focus.y))\n return in_x and in_y", "def is_window_visible(h_wnd):\n _is_window_visible = WINDLL.user32.IsWindowVisible\n _is_window_visible.argtypes = [HWND]\n _is_window_visible.restype = bool\n\n return _is_window_visible(h_wnd)", "def __bool__(self):\n return bool(self._items)", "def __bool__(self):\n return bool(self._items)", "def GetGripperVisible(self):\r\n\r\n return self._gripper_visible", "def has_item(self, item_name):\n if item_name in self.item_list:\n return True\n return False", "def existsitem(self,item,listwidgets):\n exists = listwidgets.findItems(item, Qt.MatchExactly)\n if exists:\n return True\n else:\n return False", "def isstart(self) -> bool:\n if len(self._pile) != self._pos + 1:\n return False\n visible_count = 0\n hidden_count = 0\n for c_card in self._pile:\n if c_card.visible:\n visible_count += 1\n else:\n hidden_count += 1\n return hidden_count == self._pos and visible_count == 1", "def is_auto_expire_checkbox_visible(self):\n return self.is_element_visible(self.auto_expire_checkbox_locator)", "def has(self, item):\n return item in self.mut", "def is_gridlines_visible(self):\n return self.container['is_gridlines_visible']", "def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True", "def is_shown(self):\n return self.page.q(css=self.MODAL_SELECTOR).present", "def fl_object_is_visible(ptr_flobject):\n _fl_object_is_visible = library.cfuncproto(\n library.load_so_libforms(), \"fl_object_is_visible\",\\\n cty.c_int, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"int fl_object_is_visible(FL_OBJECT * obj)\"\"\")\n library.check_if_flinitialized()\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_object_is_visible(ptr_flobject)\n return retval", "def can_show(self):\n return self.can_show", "def revealItem(self, item, timeout=None, index=1, containerObject=None, relatedAreaEnd=None):\r\n\r\n item = scripting.escape(scripting.unescape(item)) # unescape is needed if user gives already escaped characters\r\n\r\n result = self.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if result[0]==self.VISIBLE:\r\n return result[1]\r\n else:\r\n # If result is Hidden then scroll display to get xml which has result which is visible\r\n if result[0]==self.HIDDEN and self.isItemScrollable(item,containerObject=containerObject, relatedAreaEnd=relatedAreaEnd):\r\n #Calling scrolling of screen and it returns new result when item is visible\r\n result=self.__scroll(result, item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #self.tc.delay(500,False)\r\n #Verifying that item is visible before pressing coordinates\r\n if result[0]==self.VISIBLE:\r\n return result[1]\r\n\r\n # Item not found from the dump yet, try again\r\n else:\r\n if timeout==None:\r\n timeout=core.FW_conf['settings'].System.ExpectTimeout\r\n\r\n if self.waitForItem(item, timeout, index=index,containerObject=containerObject, relatedAreaEnd=relatedAreaEnd):\r\n result = self.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n if result[0]==self.VISIBLE:\r\n return result[1]\r\n else:\r\n # If result is Hidden 
then scroll display to get xml which has result which is visible\r\n if result[0]==self.HIDDEN:\r\n #Calling scrolling of screen and it returns new result when item is visible\r\n result=self.__scroll(result, item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #self.tc.delay(500,False)\r\n #Verifying that item is visible before pressing coordinates\r\n if result[0]==self.VISIBLE:\r\n return result[1]\r\n\r\n else:\r\n return None\r\n else:\r\n return None" ]
[ "0.8459566", "0.84089726", "0.77478164", "0.7686081", "0.75969285", "0.73080444", "0.7240645", "0.7220778", "0.7220778", "0.72025424", "0.71858346", "0.7166278", "0.7160818", "0.7160818", "0.71175337", "0.7071212", "0.7057632", "0.70267516", "0.70217055", "0.70079386", "0.69779074", "0.6918357", "0.6902314", "0.6889049", "0.6824733", "0.67953753", "0.67953753", "0.678513", "0.6728026", "0.66976076", "0.6646748", "0.6644287", "0.65377", "0.6522658", "0.6512327", "0.649474", "0.64552337", "0.6430606", "0.6387321", "0.6387321", "0.6369524", "0.63631374", "0.6332243", "0.6327067", "0.63096154", "0.6300225", "0.62844914", "0.6277942", "0.62714994", "0.62536913", "0.62453085", "0.62357676", "0.62110513", "0.61992323", "0.61986077", "0.61915547", "0.61600596", "0.6157579", "0.6152747", "0.6145217", "0.612351", "0.60814357", "0.6050228", "0.6040926", "0.6040926", "0.6030692", "0.6007971", "0.600308", "0.59995544", "0.59886444", "0.59768", "0.59693336", "0.5961736", "0.5950519", "0.5947153", "0.593002", "0.59294504", "0.5921968", "0.5909139", "0.59051883", "0.5902769", "0.5889668", "0.5889239", "0.5888803", "0.5854089", "0.5851852", "0.5847033", "0.5847033", "0.58356327", "0.58301896", "0.58087194", "0.58055943", "0.5792925", "0.57802165", "0.5763016", "0.5758176", "0.5751656", "0.57343954", "0.5734345", "0.57317966" ]
0.7075357
15
Return a boolean indicating whether the given element is present, but not visible.
def is_invisible(self, name): return self.q(css="div.{}".format(name)).first.invisible
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False", "def is_visible(self):\n try:\n return self.element.is_displayed()\n except (NoSuchElementException,\n ElementNotVisibleException,\n StaleElementReferenceException):\n return False", "def tag_visible(element):\n\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True", "def _is_element_present():\r\n return self.q(css=element_selector).present", "def tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:\n return False\n if isinstance(element, bs4.element.Comment):\n return False\n if re.match(r\"[\\n]+\", str(element)):\n return False\n return True", "def elementIsVisible(self, element_tuple):\n result = self.CORE.find_element(*self.format_element(element_tuple)).is_displayed()\n self.log_info(f\"Browser.elementIsVisible: {element_tuple} is {'' if result else 'not '}present\")\n return result", "def is_element_available(self, locator):\r\n if self.driver.is_element_present(locator):\r\n if self.driver.is_visible(locator):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def is_not_present(self):\n logging.getLogger(__name__).info(\"Element is not present\\nby = {}\\nvalue = {}\".format(self.by, self.value))\n return not(self.driver.find_element(self.by, self.value))", "def waitForElementNotVisible(self, element_tuple, *, timeout=5):\n try:\n WebDriverWait(self.CORE, timeout).until(EC.invisibility_of_element_located(self.format_element(element_tuple))) # Don't unpack, use function to parse out first 2 items\n self.log_info(f\"Browser.waitForElementNotVisible: {element_tuple} is invisible within {timeout} seconds\")\n return True\n except SeleniumExceptions.TimeoutException:\n self.log_warning(f\"Browser.waitForElementNotVisible: {element_tuple} did not become invisible after {timeout} seconds\")\n return False", "def is_visible(self, timeout=None):\n try:\n self.visibility_of_element_located(timeout)\n except TimeoutException:\n return False\n return True", "def is_element_in_view(self, element: Element) -> bool:\n return self.find_element_view(element=element) is not None", "def is_element_display(self, selector):\n return True if self.get_element(selector).is_displayed() else False", "def _is_element_present():\r\n is_present = self.q(css=selector).present\r\n return is_present, is_present", "def _is_element_present(xpath, timeout=prompt_timeout):\n start_time = time.time()\n try:\n element = WebDriverWait(driver, timeout).until(\n EC.visibility_of_element_located(\n (By.XPATH, xpath)))\n except:\n logger.warning(f\"Element with XPATH {xpath} not available within {timeout} seconds\")\n return False\n while time.time() - start_time <= timeout:\n if element.is_displayed():\n return True\n else:\n logger.warning(f\"Element with XPATH {xpath} was available but not displayed within {timeout} seconds\")\n return False", "def waitForElementNotPresent(self, element_tuple, *, timeout=5):\n try:\n\n self.disable_logging()\n if self.waitForElementNotVisible(element_tuple, timeout=timeout) is False or self.elementIsPresent(element_tuple) is True:\n self.revert_logging()\n raise SeleniumExceptions.TimeoutException()\n self.revert_logging()\n self.log_info(f\"Browser.waitForElementNotPresent: {element_tuple} is present within {timeout} seconds\")\n return True\n except SeleniumExceptions.TimeoutException:\n 
self.log_warning(f\"Browser.waitForElementNotPresent: {element_tuple} did not become not present after {timeout} seconds\")\n return False", "def field_is_not_hidden_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n log_to_file('Hidden field displayed test failed', 'WARNING')\n return False\n else:\n print \"Hidden field displayed = true\"\n return True", "def is_element_present(self, xpath):\n try:\n self.driver.find_element_by_css_selector(xpath).is_displayed()\n return True\n except NoSuchElementException:\n return False", "def is_visible(self, locator, timeout=15):\n try:\n ui.WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))\n return True\n except TimeoutException:\n return False", "def is_element_displayed(self, locator=\"\", locator_type=\"id\", element=None):\n is_displayed = False\n try:\n if locator: # This means if locator is not empty\n element = self.get_element_(locator, locator_type)\n if element is not None:\n is_displayed = element.is_displayed()\n self.log.info(\"Element is displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n else:\n self.log.info(\"Element not displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n return is_displayed\n except:\n print(\"Element not found\")\n return False", "def is_visible(self, name):\n return self.q(css=\"div.{}\".format(name)).first.visible", "def is_element_only(self) -> bool:\n raise NotImplementedError()", "def visible(self) -> bool:\n try:\n return bool(self.driver.wait_until_all_visible(*self.ROOT_LOCATOR))\n except WebDriverException:\n return False", "def wait_for_invisible(self, locator, timeout=2):\n try:\n WebDriverWait(self.driver, timeout).until(\n ec.invisibility_of_element_located(locator)\n )\n except (NoSuchElementException, TimeoutException):\n return False\n return True", "def elementIsPresent(self, element_tuple):\n try:\n self.CORE.find_element(*self.format_element(element_tuple))\n result = True\n except SeleniumExceptions.NoSuchElementException:\n result = False\n self.log_info(f\"Browser.elementIsPresent: {element_tuple} is {'' if result else 'not '}present\")\n return result", "def is_visible ( self ):\n return not self.is_hidden and (\n self.priority is None or self.priority >= 0\n )", "def assert_visible(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)\r\n assert e.is_displayed()", "def isElementDisplayed(self, locator=\"\",locatorType='id', element=None):\n isDisplayed=False\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n if element is not None:\n isDisplayed=element.is_displayed()\n self.logger.info(\"Element is displayed with locator\" + locator + \"LocatorType\" + locatorType)\n\n else:\n self.logger.info(\"Element is not displayed with locator\" + locator + \"LocatorType\" + locatorType)\n return isDisplayed\n\n except:\n print(\"Element not found\")\n return False", "def isVisible( self ):\n layer = self.layer()\n if ( layer and not layer.isVisible() ):\n return False\n# \n# if ( self.isIsolateHidden() ):\n# return False\n# \n return self._visible", "def has_visible_entity(self):\n ret = False\n for e in self:\n if e.is_visible() == True:\n ret = True\n break\n return ret", "def is_displayed(self):\n return len(self._find_all_by_locator()) > 0", "def 
isElementOnly(self):\n return _libsbml.SBaseExtensionPoint_isElementOnly(self)", "def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value", "def hasElement(self, name, excludeNullElements=False):\n return self.asElement().hasElement(name, excludeNullElements)", "def field_is_hidden_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n print \"Hidden field = true\"\n return True\n else:\n log_to_file('Hidden field test failed', 'WARNING')\n return False", "def field_is_not_hidden_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n log_to_file('Hidden field displayed test failed', 'WARNING')\n return False\n else:\n print \"Hidden field displayed = true\"\n return True", "def is_visible(self):\n if self._namespace and self._namespace.is_anonymous():\n return True\n return self._rawdoc.get_inherited_visibility() != DocType.none", "def is_win_dispute_button_present(self):\n return self.is_element_present(self.win_dispute_button_locator)", "def is_visible(self):\n return self.container['is_visible']", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def isVisible(self, p_int): # real signature unknown; restored from __doc__\n return False", "def is_present(self):\n self._browser.implicitly_wait(1)\n try:\n if self._root is None:\n self._browser.find_element(*self._locator)\n else:\n self._root.find_element(*self._locator)\n except (NoSuchElementException,\n StaleElementReferenceException):\n return False\n finally:\n self._browser.implicitly_wait(TMO)\n return True", "def isElement(self, elementXpath):\r\n try:\r\n self.browser.find_element_by_xpath(elementXpath)\r\n return True\r\n except:\r\n return False", "def is_element_present(self, web_element, timeout):\n try:\n WebDriverWait(self.web_driver, timeout).until(web_element)\n return True\n except TimeoutException:\n return False", "def wait_for_not_displayed_xpath(self, xpath):\n\n wait = ui.WebDriverWait(self.driver, self.wait_timeout)\n try:\n wait.until_not(lambda bool: self.driver.find_element_by_css_selector(xpath))\n return True\n except TimeoutException:\n return False", "def wait_for_visible(self, locator, timeout=2):\n try:\n WebDriverWait(self.driver, timeout).until(\n ec.visibility_of_element_located(locator)\n )\n except (NoSuchElementException, TimeoutException) as err:\n logging.error(f\"Exception Type: {type(err)}\")\n logging.info(f\"Element does not exist: {(locator, )} \")\n return False\n return True", "def is_visible(self, url=''):\n return bool(url)", "def is_visible(self):", "def inspectedNodeIsVisible(self):\n return self._inspected_node_is_visible", "def is_element_visible_by_css(self, css_selector, timeout=1):\n try:\n wait = WebDriverWait(self.web_driver, timeout)\n wait.until(lambda driver: self.web_driver.find_element_by_css_selector(css_selector))\n return True\n except TimeoutException:\n return False", "def wait_for_element_not_present(self, locator):\r\n for i in range(timeout_seconds):\r\n if self.driver.is_element_present(locator):\r\n time.sleep(1)\r\n else:\r\n break\r\n else:\r\n raise ElementVisiblityTimeout(\"%s presence timed out\" % locator)\r\n return True", "def displayed(self, locator, timeout=0):\n try:\n WebDriverWait(self.browser, timeout).until(EC.visibility_of_element_located(locator))\n return True\n except 
ex.TimeoutException:\n return False", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def is_element_displayed(self, locator, delay=5):\n return WebDriverWait(self.driver, delay).until(\n lambda x: self.get_element(locator).is_displayed())", "def is_visible(self):\n return self._visible", "def is_visible(self):\n return self.window.active_panel() == self.full_name", "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def exists(self, selector):\n return not self.main_frame.findFirstElement(selector).isNull()\n\n\n #TODO: Still not work.", "def isElementPresent(self,locator=\"\",locatorType='id', element=None):\n\n\n\n\n try:\n if locator:\n element = self.getElement(locator, locatorType)\n\n if element is not None:\n self.logger.info(\"Element found with locator \"+locator+\" LocatorType \"+locatorType)\n return True\n\n else:\n self.logger.info(\"Element not found with locator \" + locator + \" LocatorType \" + locatorType)\n return False\n\n except:\n print(\"Element not found\")\n return False", "def wait_for_component_to_not_be_visible(self, wait_time=None) -> bool:\n try:\n self.driver.wait_until_all_not_visible(*self.ROOT_LOCATOR, wait_time=wait_time)\n return True\n except WebDriverException:\n return False", "def XPIsWidgetVisible(inWidget):\n pass", "def isVisible(self):\n\t\treturn True", "def is_analyze_and_complete_present(self):\n return self.is_element_present(self.analyze_and_complete_locator)", "def doWaitNotVisibleElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n\n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector, \n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout, \"wait-until-value\": False}\n\n cmdId = self.displayedElement(elementId=elementId, more= more)\n rsp = self.isElementDisplayed(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret", "def is_present(self):\n logging.getLogger(__name__).info(\"Element is present\\nby = {}\\nvalue = {}\".format(self.by, self.value))\n return self.driver.find_element(self.by, self.value)", "def visible(self):\n return self._turtle.isvisible()", "def visible(self):\n return self._turtle.isvisible()", "def waitForElementVisible(self, element_tuple, *, timeout=5):\n try:\n WebDriverWait(self.CORE, timeout).until(EC.visibility_of_element_located(self.format_element(element_tuple))) # Don't unpack, use function to parse out first 2 items\n self.log_info(f\"Browser.waitForElementVisible: {element_tuple} is visible within {timeout} seconds\")\n return True\n except SeleniumExceptions.TimeoutException:\n self.log_warning(f\"Browser.waitForElementVisible: 
{element_tuple} did not become visible after {timeout} seconds\")\n return False", "def invisibility_of_element_located(self, timeout=None):\n if timeout is None:\n timeout = Control.default_timeout\n return WebDriverWait(self.driver, timeout).until(\n EC.invisibility_of_element_located(self)\n )", "def field_is_hidden_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n print \"Hidden field = true\"\n return True\n else:\n log_to_file('Hidden field test failed', 'WARNING')\n return False", "def check_if_ask_question_page_is_presented(self):\n return self.if_element_displayed(by_locator=self.__ASK_QUESTION_PAGE)", "def is_present(self):\n return self._is_present()", "def wait_until_element_visible(self, element):\n LOG.info(\"Waiting for '%s' element to get visible\" % element[1])\n try:\n self.wait.until(EC.visibility_of_element_located(element))\n except TimeoutException:\n raise NoSuchElementException(\"UI Element %s not found\" % element[1])\n except Exception as exce:\n raise exce", "def is_displayed(self, unit):\n try:\n field_is_displayed = getattr(self.unit.get_model_name()+'_is_displayed')\n if field_is_displayed:\n return field_is_displayed(unit)\n except AttributeError:\n pass\n if not self.displayed and not self.excluded:\n return True\n elif self.displayed and self.excluded:\n return unit.get_model_name() in self.displayed \\\n and unit.get_model_name() not in self.excluded\n elif self.excluded:\n return unit.get_model_name() not in self.excluded\n elif self.displayed:\n return unit.get_model_name() in self.displayed\n else:\n return True", "def wait_for_invisible(self, timeout=None):\n wait_until(lambda: not self.is_displayed(),\n \"Element '%s' still visible after <TIMEOUT>.\" % self._locator,\n timeout)", "def is_element_enabled(self):\n if self.web_element.is_enabled():\n return True\n else:\n return False", "def IsVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def is_hidden(self):\n return self.has_label(HIDDEN_LABEL)", "def is_element_present(self, how, what):\n try: driver.find_element(by=how, value=what)\n except NoSuchElementException: return False\n return True", "def wait_until_displayed(self, locator, timeout=5):\n try:\n WebDriverWait(self.browser, timeout).until(EC.visibility_of_element_located(locator))\n return True\n except ex.TimeoutException:\n return False", "def IsWindowVisible(hwnd):\r\n return bool(__IsWindowVisible(hwnd))", "def IsHidden(self):\n return self._common_type.IsHidden()", "def is_element_present(self, how, what):\n try:\n self.driver.find_element(by=how, value=what)\n except NoSuchElementException:\n return False\n return True", "def is_visible(self):\n return self.real > 0", "def wait_for_invisibility(self, crucial_element_css_selector, timeout=None):\n self.wait_for_existence(\n EC.invisibility_of_element_located,\n 
crucial_element_css_selector,\n timeout=timeout,\n )", "def is_lose_dispute_button_present(self):\n return self.is_element_present(self.lose_dispute_button_locator)", "def wait_for_hidden(self, locator):\r\n for i in range(timeout_seconds):\r\n if self.driver.is_visible(locator):\r\n time.sleep(1)\r\n else:\r\n break\r\n else:\r\n raise ElementVisiblityTimeout(\"%s visibility timed out\" % locator)\r\n return True", "def _is_visible(self, key) -> bool:\n return self._get_DecoSetting(key).visible", "def empty(self) -> bool:\n \n return not self.elements", "def is_visible(self):\n return self.visible_date < timezone.now()", "def is_element_exist(self, locator):\r\n t1 = time.time()\r\n try:\r\n self.driver.find_element(locator)\r\n self.my_print(\"{0} Element: <{1}> is exist, Spend {2} seconds\".format(success,locator, time.time() - t1))\r\n return True\r\n except TimeoutException:\r\n self.my_print(\"{0} Element: <{1}> is not exist, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n return False", "def isElementDisplayed(self, timeout=20.0, commandId=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n return self.isActionAccepted(timeout=timeout, commandName=Command.IS_ELEMENT_DISPLAYED, \n commandId=commandId)", "def is_discrepancy_dropdown_visible(self):\n return self.is_element_visible(self.discrepancy_dropdown_locator)", "def is_element_present_by_class(self, element_class, timeout=10):\n return self.is_element_present(\n EC.presence_of_element_located((By.CLASS_NAME, str(element_class))),\n timeout\n )", "def is_hidden():\n return False", "def is_hidden():\n return False", "def element_not_present(self, target: ElementType, timeout: TimeoutType = DEFAULT_TIMEOUT):\n\n def no_wrapped_webelement_in_dom():\n try:\n target.get_web_element_by_timeout(self.PULL_FREQUENCY)\n if target.web_element.is_enabled():\n return False\n # return False even element isn't enabled, but still present\n return False\n except (NoSuchElementException, StaleElementReferenceException):\n return target\n\n return self.wait_fluently(no_wrapped_webelement_in_dom, timeout,\n TIMEOUT_BASE_ERR_MSG.format(timeout, target.selector, \"not be present in DOM\"))", "def is_shown(self):\n return self.page.q(css=self.MODAL_SELECTOR).present", "def is_visible(self):\n return self._currently_shown", "def IsItemVisible(self, item):\r\n\r\n # An item is only visible if it's not a descendant of a collapsed item\r\n parent = item.GetParent()\r\n\r\n while parent:\r\n \r\n if not parent.IsExpanded():\r\n return False\r\n \r\n parent = parent.GetParent()\r\n \r\n startX, startY = self.GetViewStart()\r\n clientSize = self.GetClientSize()\r\n\r\n rect = self.GetBoundingRect(item)\r\n \r\n if not rect:\r\n return False\r\n if rect.GetWidth() == 0 or rect.GetHeight() == 0:\r\n return False\r\n if rect.GetBottom() < 0 or rect.GetTop() > clientSize.y:\r\n return False\r\n if rect.GetRight() < 0 or rect.GetLeft() > clientSize.x:\r\n return False\r\n\r\n return True", "def is_element_present_by_xpath(self, x_path, timeout=10):\n try:\n wait = WebDriverWait(self.web_driver, timeout)\n wait.until(lambda driver: self.web_driver.find_element(By.XPATH, x_path))\n return True\n except TimeoutException:\n return False" ]
[ "0.79927605", "0.76851815", "0.7514392", "0.73334104", "0.72232956", "0.7165965", "0.7094234", "0.704868", "0.70152134", "0.6991982", "0.6934526", "0.6922148", "0.6860868", "0.67825663", "0.6755709", "0.6745916", "0.67344236", "0.6696617", "0.66683865", "0.6668148", "0.65603226", "0.6559192", "0.65313894", "0.65044045", "0.64672184", "0.64582884", "0.64563364", "0.64494693", "0.64466995", "0.63765466", "0.63707286", "0.6346023", "0.63384074", "0.63298184", "0.63294387", "0.6318493", "0.6310647", "0.63039684", "0.6287944", "0.6287944", "0.6264747", "0.62570053", "0.6242441", "0.62187654", "0.62082064", "0.6173784", "0.6151602", "0.61384714", "0.61307126", "0.6125902", "0.6109781", "0.61088336", "0.60713553", "0.60697544", "0.6047821", "0.60454774", "0.603569", "0.6035155", "0.6018621", "0.6018443", "0.60122955", "0.6005333", "0.59806126", "0.5977531", "0.5966212", "0.5966212", "0.5944484", "0.5915738", "0.5909879", "0.59067875", "0.58993906", "0.5877448", "0.587407", "0.587107", "0.58693624", "0.5862828", "0.5851368", "0.58450335", "0.58444893", "0.58425623", "0.583523", "0.5833628", "0.58261585", "0.58106863", "0.57810134", "0.5773769", "0.5771273", "0.57643163", "0.57623464", "0.5760378", "0.57550263", "0.57528436", "0.5741263", "0.5726437", "0.5726437", "0.5724433", "0.5721982", "0.5720244", "0.57153755", "0.5714793" ]
0.64932275
24
Click a button which will only work once RequireJS finishes loading.
def trigger_output(self): self.q(css='div#fixture button').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click_button(self):\n self.widgets.get('button').click()", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def __on_click(self):\n if self.enable:\n self.__function_to_activate()", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def run_button(self):\n if self.run.label == 'Run':\n self.run.label = 'Stop'\n self.run.button_type = 'danger'\n self.callback_obj = self.doc.add_periodic_callback(self.unlocked_task, 1000)\n\n else:\n self.run.label = 'Run'\n self.run.button_type = 'success'\n self.doc.remove_periodic_callback(self.callback_obj)", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def click(self):\r\n pass", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def Click(self):\n if self.function == None:\n return\n \n self.function()", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def regression_pressed(self):\n\t\tregr_button = self.ui.findChild(QWidget, \"regr_button\")\n\t\tif regr_button.checkState():\n\t\t\tprint \"regression activated\"\n\t\telse:\n\t\t\tprint \"regression deactivated\"", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def on_run_clicked(self, button):\n active_tab = self.get_active_tab()\n active_tab.save() # enables auto-save before running\n active_tab.execute()", "def on_run_button(self, event):\n text = _(u\"Run button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.run_command()", "def on_click(self) -> None:\n pass", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def click_download_button(self):\n self._basket.click_download_button()", "def OnButtonClick(self):\n self.choice()", "def _clicked_yes_button(self):\n self.yes = True", "def Button(request):\n params = {\n 'mimetype': 'text/javascript',\n 'fn': request.GET.get('fn', '_bRunTest'),\n 'btn_text': request.GET.get('btn_text', 'Run the test'),\n 'cb_text': request.GET.get('cb_text',\n 'and send my results to Browserscope (anonymously)'),\n }\n return util.Render(request, 'user_test_button.js', params)", "def click_modal_button(self, title):\n locator = lex_locators[\"modal\"][\"button\"].format(title)\n 
self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n self._jsclick(locator)", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def on_click(self) -> None:\n self.cycle()", "def run_user_code(self, button):\n button.setEnabled(False)\n self.user_thread.start()", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")", "def ready(self):\n self.btnAdd.setEnabled(True)", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def clicar_no_botao_start():\r\n # terceiro\r\n try:\r\n start_button = _browser.find_element_by_xpath(\r\n \"//button[@class='waves-effect col s12 m12 l12 btn-large uiColorButton']\")\r\n except:\r\n start_button = None\r\n\r\n start_button.click()\r\n assert start_button", "def click_login_button(self):", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()", "def checkout_btn(self):\n self._checkout_btn.click()", "def clickonbutton(titleobj, buttontoclick):\n try:\n ldtp.click(titleobj,buttontoclick)\n logging.info(\"Clicked on : %s\" % buttontoclick)\n except Exception as er:\n print (\"Not able to click on button\")", "def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def _click(self):\n if hasattr(self.canvas[\"items\"][self.index], 'commandFunc'):\n self.canvas[\"items\"][self.index].commandFunc(None)", "def on_run_clicked(self):\n self.start_threading()\n self.stepping = False\n self.step_event.set()", "def poll(self):\n\tself.met = self.button.poll()", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def on_pushButton_clicked(self):\n print(\"hello\")", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n 
yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def initialize(window):\n MY.restart_button.location = window / 2", "def activateRefreshButton(self):\r\n \r\n self.refreshButton.show()", "def exec_(self):\n super().exec_()\n return self.clicked_button", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def toggle_run_button(self, event):\n if not self.running:\n self.start_thread()\n else:\n self.stop_thread()", "def click_continue(self):\n self.click_element(self.continue_button_selector)", "def gt_helper_clicked(self):\n if not self.gt_helper_open:\n self.gt_helper_open = True\n self.gt_helper.show()", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def click(self, element):\n element.click()", "def click(self, pos):\n # Confirm the setup\n if (self.setup_type != None):\n self.start_setup(None)", "def _resubmit_button_fired(self):\n self.resubmit()", "def test_update_custom_button(self):\n pass", "def action_done(self):", "def on_toolButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def click_and_close(self, button_name):\r\n self.clicked = button_name\r\n self.root.destroy()", "def on_projectButton_clicked(self):\n self.__enableFindButton()", "def continue_to_grading(self):\r\n self.q(css='input.calibration-feedback-button').first.click()", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def click_exit_button(self):\n self.click_img(target_img=SETTINGS['img_paths']['buttons']['exit'])", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)", "def testButtonCB(self, testId):\n button = self.test_buttons[testId]\n if self.result:\n self.showTestOutput(testId)\n return", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def click_volver(self):\n self.button.click(liquidaciones_historicas_catalog.BOTON_VOLVER)", "def click_next_month(self):\n self.action.click(self.calendar_next)\n time.sleep(3)", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: 
self.Process_Button.setEnabled(True))", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def make_run_button(self):\n\n run_button = Button(\n self.master, text=\"Run\", command=self.run_simulator)\n run_button.grid(row=6, column=1)\n\n return run_button", "def _module_toggled(self, module, required):\n\n self._set_implicit_requirements()\n\n if required:\n self.project.pyqt_modules.append(module)\n else:\n self.project.pyqt_modules.remove(module)\n\n self.project.modified = True", "def click_search_button(self):\n self.click_element(self.generic_search_button_locator)", "def press(button_id: str) -> None:\n try:\n self.query_one(f\"#{button_id}\", Button).press()\n except NoMatches:\n pass", "def on_click(self) -> None:\n os.startfile(self.url) # noqa: S606", "def test_Analytics1(self):\n\n self.delayDisplay(\"We don't have a test\")", "def toggle_test():\n path = path_test\n if (os.path.isfile(path)):\n os.remove(path)\n button_test.configure(text=\"Appuyer sur le bouton de test\")\n print(\"Bouton test relâché\")\n\n else:\n open(path, 'a').close()\n button_test.configure(text=\"Relâcher le bouton de test\")\n print(\"Bouton test enfoncé\")", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def chooseAction(self):\n print \"nothing\"\n pass", "def m_press(self, button: MButton):\n pass", "def on_pushButton_start_strategy_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def click_create_vendor_button(self):\n create_vendor_element = self.wait().until(EC.element_to_be_clickable(self.create_vendor_locator), \"create vendor locator not found before specified time out\")\n create_vendor_element.click()\n self.wait_for_ajax_spinner_load()", "def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])", "def activate_button(self, e):\n self.serv_but.config(state=\"normal\")", "def click_upload_button(self):\n self.click_element(self.upload_button_locator)", "def on_next_turn_click(self, button):\n if self.referee.is_game_over():\n Gtk.main_quit()\n else:\n self.do_next_turn(button)\n # if the game is over after this turn, we will shutdown on the next click,\n # so visually alert the player with the button label\n if self.referee.is_game_over():\n button.set_label(GAME_OVER_MSG)", "def setup_button_run(self):\n run_icon = tk.PhotoImage(file = self.run_icon)\n self.button_run = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = run_icon,\n command = self.run_world)\n self.button_run.image = run_icon\n self.button_run.grid(row = 0, column = 2, sticky = tk.W)", "def back_button(self):\r\n self.update_settings()\r\n self.is_action = True\r\n if self.back_call is not None:\r\n self.back_call()" ]
[ "0.680854", "0.66910315", "0.64084864", "0.6336114", "0.6120209", "0.61122954", "0.61030865", "0.6091507", "0.6059248", "0.5980037", "0.59742665", "0.5892647", "0.5888682", "0.5856755", "0.58410084", "0.5814442", "0.581181", "0.5751456", "0.57503045", "0.5749024", "0.5719854", "0.5696186", "0.56758994", "0.5629732", "0.5557732", "0.5556676", "0.55492127", "0.5539981", "0.5522274", "0.5517732", "0.5514842", "0.54890496", "0.54819345", "0.5464257", "0.54564244", "0.54507387", "0.5449843", "0.54326326", "0.54265785", "0.54258263", "0.54253733", "0.53865826", "0.53779536", "0.53689325", "0.53564614", "0.53551006", "0.53551006", "0.5323211", "0.52943665", "0.52756083", "0.52595127", "0.52402365", "0.5235121", "0.5232569", "0.5227601", "0.52266777", "0.5226345", "0.5220445", "0.51994693", "0.5196376", "0.5180749", "0.51738554", "0.51726437", "0.51651746", "0.5163287", "0.51622516", "0.51541656", "0.5146739", "0.5143461", "0.5119492", "0.511681", "0.51116085", "0.5107037", "0.50804454", "0.50708866", "0.50668484", "0.50577855", "0.5053031", "0.50465554", "0.50395596", "0.5036401", "0.5020591", "0.501232", "0.50108725", "0.5002263", "0.49875066", "0.49834707", "0.49802506", "0.49706063", "0.49649897", "0.49649656", "0.49632758", "0.4962652", "0.49623194", "0.4955854", "0.49558407", "0.4955074", "0.49535", "0.49530908" ]
0.59564984
12
Reload the page, wait for JS, then trigger the output.
def reload_and_trigger_output(self):
    self.browser.refresh()
    self.wait_for_js()  # pylint: disable=no-member
    self.q(css='div#fixture button').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_page(self):\n self.m_driver.refresh()\n time.sleep(30)", "def refresh(self):\n self.log_info(f\"Browser.refresh: Refreshing the page\")\n self.CORE.refresh()\n return", "def refresh_page(self, callback=None):\n if callback is not None:\n callback()\n return True", "def Reload(self):\n self._inspector_backend.Navigate(self.url, None, 10)", "def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)", "def refresh(self):\n\n self.driver.implicitly_wait(5)\n self.driver.refresh()", "def trigger_reloading(self) -> None:\n self.trigger_signal(\"reloading\")", "def wait_for_page_load(self):\n pass", "def execute_js(self, script):\n self.driver.execute_script(script)", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def onReload(self, event):\n\n\t\tself.wv.Reload()", "async def async_trigger_reloading(self) -> None:\n await self.async_trigger_signal(\"reloading\")", "def refresh_page(self, check=True):\n url = self.app.page_base.url\n self.app.page_base.refresh()\n\n if check:\n assert_that(self.app.page_base.url, equal_to(url))", "def refresh(self):\n\t\tself.driver.refresh()", "def trigger_reload(server):\n log.info(\"Triggering /reload on %s\", server)\n screenCmd(server, 'reload')", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def refresh(self, id):\n exports.execute_export.delay(id)\n return render({\"id\": id})", "def step_impl(context):\r\n context.browser.get('https://opensource-demo.orangehrmlive.com/')\r\n time.sleep(10)", "def _on_dom_ready(self):\n logger.debug('_on_dom_ready')\n self._status = self.WindowStatus.SHOWN\n\n # Subscribe current browser for javascript value returned\n RuntimeManager.get_instance().JavascriptReturned.subscribe(self.browser)\n\n # Get callback on engine ready\n RuntimeManager.get_instance().JavascriptReturned\\\n .on_value('_event__engine_ready', lambda *_: self._on_engine_ready())\n\n # Inject puithonJS the engine\n self.browser.ExecuteJavascript(open(self.JS_ENGINE_FILE, 'r').read())", "def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))", "def postloop(self):\n print 'Bye!'", "def view(self):\n\t\tself.done(1)", "def wait_for_load(driver):\n html = driver.page_source\n time.sleep(0.5)\n while html != driver.page_source:\n html = driver.page_source\n time.sleep(0.5)", "def html_redirect(self):\n soup = BeautifulSoup(self.contents, \"lxml\")\n meta = soup.find('meta', **{'http-equiv': 'refresh'})\n assert meta is not None, 'No <meta http-equiv=\"refresh\" /> tag found.'\n url = meta.get('content').partition(';url=')[2]\n self.open(url)", "def ReturnReload():\r\n return _hiew.ReturnReload()", "def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()", "def reload(self):\n puts('Reloading application...')\n local('touch ../reload.txt')", "def refreshPageAndGoToWatchlist(self):\n try:\n self.sleep_approx(1)\n self.user_requests_made += 1\n self.driver.refresh()\n\n wait_for_shield_invisibility(self.driver)\n\n WebDriverWait(self.driver, 30).until(\n 
EC.visibility_of_element_located(\n (By.CLASS_NAME, 'icon-transfer'))\n )\n\n wait_for_shield_invisibility(self.driver)\n\n self.sleep_approx(3)\n\n log_event(self.queue, \"Going back to watchlist\")\n self.go_to_watchlist()\n except:\n log_event(self.queue, \"Exception retrying refreshPageGoToWatchlist\")\n # TODO could be dangerous when stuck in infinite loop\n self.refreshPageAndGoToWatchlist()", "def test_reload_parameter_starts_populateed(self, simulate_reload_one_day_main,\n caplog, qtbot):\n QtTest.QTest.qWaitForWindowShown(simulate_reload_one_day_main.form)\n qtbot.wait(3000)", "def relaunch(self, delay):\r\n\r\n # close the browser window previously opened\r\n self.sel_driver.quit()\r\n\r\n # wait for a while\r\n time.sleep(delay)\r\n\r\n # reopen browser window\r\n self.sel_driver = webdriver.Firefox(executable_path=\"C:\\\\geckodriver.exe\")", "def wait_for_ajax_complete():\r\n javascript = \"\"\"\r\n var callback = arguments[arguments.length - 1];\r\n if(!window.jQuery) {callback(false);}\r\n var intervalID = setInterval(function() {\r\n if(jQuery.active == 0) {\r\n clearInterval(intervalID);\r\n callback(true);\r\n }\r\n }, 100);\r\n \"\"\"\r\n # Sometimes the ajax when it returns will make the browser reload\r\n # the DOM, and throw a WebDriverException with the message:\r\n # 'javascript error: document unloaded while waiting for result'\r\n for _ in range(5): # 5 attempts max\r\n try:\r\n result = world.browser.driver.execute_async_script(dedent(javascript))\r\n except WebDriverException as wde:\r\n if \"document unloaded while waiting for result\" in wde.msg:\r\n # Wait a bit, and try again, when the browser has reloaded the page.\r\n world.wait(1)\r\n continue\r\n else:\r\n raise\r\n return result", "def refresh_browser(timeout=None): \n if timeout is None:\n restart_browser()\n return\n\n else:\n echo_highlight('Checking server status...')\n for i in range(5):\n time.sleep(timeout)\n try:\n response = get('http://localhost:8000/health_check')\n except ConnectionError:\n echo_highlight('Still checking...')\n else:\n if response and response.status_code == 200:\n restart_browser()\n return\n\n echo_warning('Maximum attempts reached! 
Something might be wrong.')", "def evaluate_in_page(self, js_string: str) -> Awaitable[Any]:", "def syncrepl_refreshdone(self):\n pass", "def get(self, url):\n\t\ttry:\n\t\t\tassert(type(url)) == str\n\t\t\tself.driver.get(url)\n\t\t\t# sleep(1) # Even tho driver.get is blocking, it returns as soon as DOM loads, without waiting for JS to run and update the DOM with the new elements\n\t\t\t# wait(self.driver, 10).until( EC.visibility_of_element_located() ) # Not sure how to wait here efficiently\n\t\t\tsleep(5) # A little long, but without a conditional variable to tell us when the page is ready us when to go our only choice is to nap\n\t\t\tself.bsource = bs( self.viewSource(), \"lxml\" ) # Update internal BeautifulSoup source with new javascript-encriched code (\"lxml\" is faster that \"html.parser\")\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to GET page {}\\n{}\".format(url, e))\n\t\t\treturn -1", "def wait_for_page_load(self):\n # For right now, just wait for 2 seconds since webdriver returns when loaded.\n # TODO: switch to waiting for network idle\n time.sleep(2)", "def refresh_screen(self):\n stdscr = self.stdscr\n stdscr.refresh()", "def receive_reload_request(self, _: EmptyMsg):\n self.update()", "def run_javascript(self, code_str):\n self.browser.ExecuteJavascript(str(code_str))", "def reload_job(self):\n if self.ui['main_window'].widgets['live_preview'].get_active():\n self._update_preview()", "def run_javascript_with_result(self, code_str):\n assert self._status is self.WindowStatus.READY, 'This method can only be called after the window is ready'\n\n _uniq_key = hex(hash(f'{time.time()}-{random()}'))\n self.call_engine_function('executeThenPoll', _uniq_key, code_str)\n return RuntimeManager.get_instance().JavascriptReturned.wait_for_value(_uniq_key)", "def update():\n print(\"current page is \", wikiPageStackTrace[-1].getTitle())\n if wikiPageStackTrace[-1].getUrl() != goalPage.getUrl(): # no victory\n eel.addRoundNumber()\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(\n wikiPageStackTrace[-1].getFirstSentence())\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.hideLoader()\n elif wikiPageStackTrace[-1].getUrl() == goalPage.getUrl(): # victory\n eel.hideLoader()\n eel.addRoundNumber()\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.showVictory()\n # we need to do this because overwise the JS is not fat egoth to respond so we get an infinit loading\n time.sleep(0.5)\n eel.hideLoader()", "def wait_and_refresh_static_page_until_text(self, search_text, wait_time, loc_frame, loc_text):\n self.selenium.driver.refresh()\n self.selenium.select_frame(loc_frame)\n text_portion = self.selenium.get_text(loc_text)\n while text_portion != search_text:\n self.selenium.driver.refresh()\n self.selenium.select_frame(loc_frame)\n text_portion = self.selenium.get_text(loc_text)", "def reload():\n xd = display.XKCDDisplayService()\n if xd.is_running():\n click.echo(\"gracefully reloading changes\")\n xd.send_signal(signal.SIGHUP)\n else:\n click.echo(\"xkcd service not running\")", "def execute_javascript(self, code):\n return self.loop.run_until_complete(self.async_func.execute_javascript_async(code))", "def prepare_work(self):\n self.driver.get(self.BaseUrl)\n self.driver.add_cookie(cookie)\n self.driver.refresh()\n self.base_handle = self.driver.current_window_handle", "def refresh(self, 
new_content):\n pass", "def send_output(self):\n self.__status_handler.io.async_refresh()", "def wait_for_ajax(self):\n return self.driver.execute_script(\n \"return typeof(jQuery)!='undefined' && jQuery.active==0\")", "def reload_info(self):\n self.__loop.run_until_complete(self.__reload_info())", "def second_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.third_page.wait_for_page()", "def fourth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fifth_page.wait_for_page()", "def load_next(self, page, delay_sec):\n time.sleep(delay_sec)\n page.visit()", "def wati_until_page_change(driver, url):\n while driver.current_url == url:\n time.sleep(10)", "def refresh_view():\n pass", "def third_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fourth_page.wait_for_page()", "def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()", "def XeprGUIrefresh(self):\n with self._lock:\n self._API.XeprRefreshGUI()", "def on_click_reload(self):\n with suppress_errors():\n self.load_imdb()\n self.load_exp()", "def seventh_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.eighth_page.wait_for_page()", "def do_GET(self):\n query = self.path.split('?', 1)[-1]\n query = dict(urllib.parse.parse_qsl(query))\n self.server.query_params = query\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n html = \"<html><head><script>setTimeout(function(){\" \\\n \"window.open('','_self','');window.close()},10)\" \\\n '</script></head><body>You may close this window.</body></html>'\n self.wfile.write(html)", "def TestPeriodic(self):\n LiveWindow.Run()", "def execute_javascript(self, code):\n return self.loop.run_until_complete(self.get_async_keyword_group().execute_javascript(code))", "def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()", "def update_html(shell=False):\n\n if shell: tell.info(\"Rendering the HTML.\")\n html.generate()\n if shell: tell.done(\"Updated `html/index.html`.\")", "def run_if_refresh(self):\n if self.is_finished():\n self.status.collect = True\n self.run() # self.run_if_collect()\n elif (\n self.server.run_mode.non_modal\n or self.server.run_mode.queue\n or self.server.run_mode.modal\n ):\n self.run_static()\n else:\n self.refresh_job_status()\n if self.status.refresh:\n self.status.suspended = True\n if self.status.busy:\n self.status.refresh = True\n self.run_if_refresh()", "def __work__(self):\n while not self.is_done:\n self.refreshSignal.emit()\n time.sleep(0.05)", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def start(self):\n self._test_email()\n while True:\n try:\n current_div = self._get_relevant_div_as_text()\n if current_div != self.active_div:\n print(\"Div has changed!!\")\n self._send_email_changed()\n self.active_div = current_div\n else:\n print(\"[%s] Div has not changed...\" % datetime.datetime.now().strftime(\"%c\"))\n except Exception as e:\n print(\"Something went 
wrong... More luck next time...\")\n finally:\n time.sleep(300)", "def refresh_alarm() -> str:\r\n logging.info(\"Page refreshed by refresh_alarm()\")\r\n notifications = create_notifications()\r\n s.enter(3600,1,refresh_alarm)\r\n return \"Alarm refreshed\"", "def run(self) -> None:\n self._render()\n print(self.sio.getvalue())", "def reloadfile(self, ):\n self.loadfile()", "def reload(self):", "def reload(self):", "def first_page_execution(self):\n self.errors_and_correct_input_values_helper(wrong_pattern_error=True)\n self.utility_page.click_next_button()\n self.utility_page.click_next_button()\n self.second_page.wait_for_page()", "async def start_html(self):\n self.add_header('Content-Type', 'text/html')\n await self._send_headers()", "def finalize_script(self):\n self.interrupt_script()", "def load(self, url):\n # Download the raw dom to our local build folder so we can load it quickly.\n self._save_raw_dom_to_local(url)\n # Randomized content check:\n # Load it multiple times and see if any content has changed.\n # This is not foolproof. Sometimes we load a page three times and won't find\n # all the randomized content, or any at all.\n if self._driver is None:\n self._create_driver(self._config)\n load_count = 0\n template = HtmlTemplate()\n longest_load_time = 0\n while load_count < self._config.PAGE_CHANGE_NUM_LOADS:\n load_count += 1\n logger.info(f\"Page load #{load_count}\")\n # Load it back into the browser so the javascript will run.\n if not self._load_raw_dom_from_local():\n return False # We can't proceed if we can't load the page.\n self.reset_state()\n load_time, template2 = self.wait_for_stable_template(seconds_threshold=self._config.PAGE_CHANGE_THRESHOLD,\n seconds_timeout=self._config.PAGE_CHANGE_TIMEOUT)\n longest_load_time = max(longest_load_time, load_time)\n if template2.is_stable():\n logger.info(f\"{url} took {load_time} seconds to stabilize.\")\n else:\n logger.warning(f\"{url} loaded in {load_time} seconds except for unstable xpaths: {template2.get_unstable_xpaths()}\")\n template.add_template(template2)\n for el_xpath in sorted(template.get_unstable_xpaths()):\n # When we do comparisons, we should remove or ignore these elements.\n # Maybe do: If there is a lot of changing content under a particular ancestor, ignore the whole ancestor?\n logger.info(f\"Found unstable content {el_xpath}\")\n\n self.wait_for_animation() # Have to do this to freeze the styles and positions.\n self._current_state_data = self._create_state_data()\n # Explore all reachable elements.\n self._current_state_data.elements_to_explore = self.query_xpath('html/body//*[@demod_reachable=\"true\"]')\n self._current_state_data.load_time = longest_load_time\n self._current_state_data.template = template\n return True", "async def handle_live(request: web.Request) -> web.Response:\n # pylint: disable=unused-argument\n return web.Response(text=\"OK\")", "def xj(self, script):\n self.pbug(\"Executing > {0}\".format(script))\n script = \"try {{ {0} }} catch(e) {{ miteErrorCallback(String(e), e, 'execute', '', '');}}\".format(script)\n return self.browser.ExecuteJavascript(script)", "def on_post_page(self, out, **kwargs):\n\n # Inject jQuery in the header\n jquery = \"https://code.jquery.com/jquery-3.4.1.min.js\"\n out = out.replace(\"</head>\", \n \"<script src=\\\"{}\\\">\".format(jquery) \n + \"</script></head>\")\n\n return out", "def _resubmit_button_fired(self):\n self.resubmit()", "def test_visualisations_perform_visualisation_render_data_refresh_synchronously(self):\n pass", "async def 
poll_refresh(self) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.poll_refresh))", "def wait_ajax(self, lib='JQUERY', timeout=30):\n page_logger.debug('Waiting for AJAX using %s' % lib)\n js = self.wait_ajax_script.get(lib, 'return true;')\n WebDriverWait(self.driver, timeout).until(\n lambda driver: driver.execute_script(js))", "def on_browser_exit(self, info: \"BrowserExitInfo\") -> None:", "def sixth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.utility_page.click_next_button()\n self.seventh_page.wait_for_page()", "def exec_script_and_interact(self):\r\n self.exec_script(set_focus=True)", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def main():\n time.sleep(0.1)", "def refresh_dialog(self):\n self._client.update_elements()", "def nav(self, url):\r\n\r\n self.driver.get(url)\r\n time.sleep(3) # wait for page load\r", "def _handle_popup_close(self):\n self._refresh()", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def update_main_page():\n\n line = house_keeping + '/acis_gain.html'\n text = open(line, 'r').read()\n\n today = tcnv. currentTime('Display')\n\n text = text.replace('#DATE#', today)\n\n file = web_dir + '/acis_gain.html'\n fo = open(file, 'w')\n fo.write(text)\n fo.close()", "def render():\n html = request.get_data().decode('utf-8')\n sio.emit('render', html)\n return 'OK'", "def on_reload_button_cicked_(self):\n self.pause_subscriber = True\n\n self._load_robot_description()\n controllers = self.get_current_controllers()\n\n self.joints = self._create_joints(controllers)\n\n self.synergy = self._create_synergy(controllers)\n\n self.delete_old_sliders_()\n\n # self._widget.sliderReleaseCheckBox.setCheckState(Qt.Unchecked)\n\n self.load_new_synergy_sliders_()\n\n # self.load_new_sliders_()\n\n self._update_synergy_viewer()\n\n self.pause_subscriber = False", "def fifth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.sixth_page.wait_for_page()", "def init_job_page(self, base_url):\n self.driver.get(base_url)\n self.driver.implicitly_wait(100)" ]
[ "0.61143285", "0.6020823", "0.5730542", "0.5707968", "0.5650479", "0.5597226", "0.5587973", "0.5548144", "0.55204254", "0.55047655", "0.54316336", "0.53578436", "0.5348964", "0.5344503", "0.52896523", "0.5284119", "0.5284119", "0.5281825", "0.52662414", "0.5262159", "0.52440184", "0.52349263", "0.5234214", "0.52057993", "0.5190605", "0.51883143", "0.5174814", "0.51740783", "0.5151051", "0.51439023", "0.5133373", "0.5116038", "0.5114709", "0.5112387", "0.51093894", "0.51014996", "0.5084173", "0.5079214", "0.50768083", "0.50631034", "0.5062993", "0.5059159", "0.50590444", "0.50543714", "0.50408286", "0.50385094", "0.50329906", "0.5003276", "0.49761254", "0.49678326", "0.49558216", "0.49508637", "0.49419385", "0.49368873", "0.49340555", "0.4931812", "0.4920166", "0.49128565", "0.4908229", "0.49013284", "0.4894713", "0.4890244", "0.4874857", "0.48711768", "0.48196843", "0.48030224", "0.4799783", "0.47707367", "0.47702685", "0.4767801", "0.47653997", "0.47647536", "0.47589684", "0.47566414", "0.47566414", "0.47542164", "0.4752434", "0.47500646", "0.47467443", "0.47339195", "0.47271597", "0.47103533", "0.47090945", "0.47020507", "0.47004405", "0.46972448", "0.46850747", "0.46789837", "0.4678941", "0.46780476", "0.46665207", "0.46618897", "0.46517095", "0.4648912", "0.46463194", "0.4640839", "0.46350905", "0.46348202", "0.46327764", "0.4631042" ]
0.77481604
0
Click a button which will only work once RequireJS finishes loading.
def trigger_output(self): self.q(css='div#fixture button').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click_button(self):\n self.widgets.get('button').click()", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def __on_click(self):\n if self.enable:\n self.__function_to_activate()", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def run_button(self):\n if self.run.label == 'Run':\n self.run.label = 'Stop'\n self.run.button_type = 'danger'\n self.callback_obj = self.doc.add_periodic_callback(self.unlocked_task, 1000)\n\n else:\n self.run.label = 'Run'\n self.run.button_type = 'success'\n self.doc.remove_periodic_callback(self.callback_obj)", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def click(self):\r\n pass", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def Click(self):\n if self.function == None:\n return\n \n self.function()", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def regression_pressed(self):\n\t\tregr_button = self.ui.findChild(QWidget, \"regr_button\")\n\t\tif regr_button.checkState():\n\t\t\tprint \"regression activated\"\n\t\telse:\n\t\t\tprint \"regression deactivated\"", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def on_run_clicked(self, button):\n active_tab = self.get_active_tab()\n active_tab.save() # enables auto-save before running\n active_tab.execute()", "def on_run_button(self, event):\n text = _(u\"Run button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.run_command()", "def on_click(self) -> None:\n pass", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def click_download_button(self):\n self._basket.click_download_button()", "def OnButtonClick(self):\n self.choice()", "def _clicked_yes_button(self):\n self.yes = True", "def Button(request):\n params = {\n 'mimetype': 'text/javascript',\n 'fn': request.GET.get('fn', '_bRunTest'),\n 'btn_text': request.GET.get('btn_text', 'Run the test'),\n 'cb_text': request.GET.get('cb_text',\n 'and send my results to Browserscope (anonymously)'),\n }\n return util.Render(request, 'user_test_button.js', params)", "def click_modal_button(self, title):\n locator = lex_locators[\"modal\"][\"button\"].format(title)\n 
self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n self._jsclick(locator)", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def on_click(self) -> None:\n self.cycle()", "def run_user_code(self, button):\n button.setEnabled(False)\n self.user_thread.start()", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")", "def ready(self):\n self.btnAdd.setEnabled(True)", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def clicar_no_botao_start():\r\n # terceiro\r\n try:\r\n start_button = _browser.find_element_by_xpath(\r\n \"//button[@class='waves-effect col s12 m12 l12 btn-large uiColorButton']\")\r\n except:\r\n start_button = None\r\n\r\n start_button.click()\r\n assert start_button", "def click_login_button(self):", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()", "def checkout_btn(self):\n self._checkout_btn.click()", "def clickonbutton(titleobj, buttontoclick):\n try:\n ldtp.click(titleobj,buttontoclick)\n logging.info(\"Clicked on : %s\" % buttontoclick)\n except Exception as er:\n print (\"Not able to click on button\")", "def press(self):\n self.clicked = True\n if self.command:\n self.command(self.name)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def _click(self):\n if hasattr(self.canvas[\"items\"][self.index], 'commandFunc'):\n self.canvas[\"items\"][self.index].commandFunc(None)", "def on_run_clicked(self):\n self.start_threading()\n self.stepping = False\n self.step_event.set()", "def poll(self):\n\tself.met = self.button.poll()", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def on_pushButton_clicked(self):\n print(\"hello\")", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n 
yes else DOM.DownloadManager.download_confirm_no, msg)\n btn.tap()", "def exec_(self):\n super().exec_()\n return self.clicked_button", "def initialize(window):\n MY.restart_button.location = window / 2", "def activateRefreshButton(self):\r\n \r\n self.refreshButton.show()", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def toggle_run_button(self, event):\n if not self.running:\n self.start_thread()\n else:\n self.stop_thread()", "def click_continue(self):\n self.click_element(self.continue_button_selector)", "def gt_helper_clicked(self):\n if not self.gt_helper_open:\n self.gt_helper_open = True\n self.gt_helper.show()", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def click(self, element):\n element.click()", "def click(self, pos):\n # Confirm the setup\n if (self.setup_type != None):\n self.start_setup(None)", "def test_update_custom_button(self):\n pass", "def _resubmit_button_fired(self):\n self.resubmit()", "def action_done(self):", "def on_toolButton_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def click_and_close(self, button_name):\r\n self.clicked = button_name\r\n self.root.destroy()", "def on_projectButton_clicked(self):\n self.__enableFindButton()", "def continue_to_grading(self):\r\n self.q(css='input.calibration-feedback-button').first.click()", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def click_exit_button(self):\n self.click_img(target_img=SETTINGS['img_paths']['buttons']['exit'])", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)", "def testButtonCB(self, testId):\n button = self.test_buttons[testId]\n if self.result:\n self.showTestOutput(testId)\n return", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def click_volver(self):\n self.button.click(liquidaciones_historicas_catalog.BOTON_VOLVER)", "def click_next_month(self):\n self.action.click(self.calendar_next)\n time.sleep(3)", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: 
self.Process_Button.setEnabled(True))", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def make_run_button(self):\n\n run_button = Button(\n self.master, text=\"Run\", command=self.run_simulator)\n run_button.grid(row=6, column=1)\n\n return run_button", "def _module_toggled(self, module, required):\n\n self._set_implicit_requirements()\n\n if required:\n self.project.pyqt_modules.append(module)\n else:\n self.project.pyqt_modules.remove(module)\n\n self.project.modified = True", "def click_search_button(self):\n self.click_element(self.generic_search_button_locator)", "def press(button_id: str) -> None:\n try:\n self.query_one(f\"#{button_id}\", Button).press()\n except NoMatches:\n pass", "def on_click(self) -> None:\n os.startfile(self.url) # noqa: S606", "def test_Analytics1(self):\n\n self.delayDisplay(\"We don't have a test\")", "def toggle_test():\n path = path_test\n if (os.path.isfile(path)):\n os.remove(path)\n button_test.configure(text=\"Appuyer sur le bouton de test\")\n print(\"Bouton test relâché\")\n\n else:\n open(path, 'a').close()\n button_test.configure(text=\"Relâcher le bouton de test\")\n print(\"Bouton test enfoncé\")", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def chooseAction(self):\n print \"nothing\"\n pass", "def m_press(self, button: MButton):\n pass", "def click_create_vendor_button(self):\n create_vendor_element = self.wait().until(EC.element_to_be_clickable(self.create_vendor_locator), \"create vendor locator not found before specified time out\")\n create_vendor_element.click()\n self.wait_for_ajax_spinner_load()", "def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])", "def on_pushButton_start_strategy_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError", "def activate_button(self, e):\n self.serv_but.config(state=\"normal\")", "def click_upload_button(self):\n self.click_element(self.upload_button_locator)", "def on_next_turn_click(self, button):\n if self.referee.is_game_over():\n Gtk.main_quit()\n else:\n self.do_next_turn(button)\n # if the game is over after this turn, we will shutdown on the next click,\n # so visually alert the player with the button label\n if self.referee.is_game_over():\n button.set_label(GAME_OVER_MSG)", "def setup_button_run(self):\n run_icon = tk.PhotoImage(file = self.run_icon)\n self.button_run = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = run_icon,\n command = self.run_world)\n self.button_run.image = run_icon\n self.button_run.grid(row = 0, column = 2, sticky = tk.W)", "def back_button(self):\r\n self.update_settings()\r\n self.is_action = True\r\n if self.back_call is not None:\r\n self.back_call()" ]
[ "0.6809526", "0.6691951", "0.6409184", "0.63370335", "0.61212236", "0.611287", "0.6103311", "0.6092065", "0.606103", "0.59802353", "0.5975364", "0.5893435", "0.58887005", "0.5856853", "0.5842421", "0.5815631", "0.5811794", "0.5753243", "0.57503897", "0.5748913", "0.5719881", "0.56971335", "0.5677009", "0.5628113", "0.5558209", "0.5556658", "0.55496645", "0.55408645", "0.5523833", "0.5519146", "0.551543", "0.54898196", "0.5481497", "0.5464406", "0.5455469", "0.54513663", "0.5450767", "0.54342055", "0.5426409", "0.5426193", "0.5425633", "0.53865564", "0.5378192", "0.5369868", "0.53571373", "0.53549886", "0.53549886", "0.53230345", "0.5295172", "0.52767557", "0.5257381", "0.5240568", "0.52354777", "0.52327955", "0.5226595", "0.52262163", "0.52256626", "0.52203447", "0.52005756", "0.51970357", "0.5181735", "0.517433", "0.5173699", "0.51669616", "0.51620454", "0.5161393", "0.5152454", "0.5146581", "0.5144083", "0.5119645", "0.5116903", "0.51117545", "0.5107454", "0.5079767", "0.507198", "0.5068207", "0.5056983", "0.50533545", "0.50473547", "0.5039884", "0.5037201", "0.5021105", "0.5012736", "0.5011777", "0.50023514", "0.49888772", "0.49840546", "0.49801934", "0.49703923", "0.4964703", "0.49645883", "0.49635357", "0.496348", "0.49631572", "0.49563348", "0.4955969", "0.49547598", "0.49535108", "0.49532387" ]
0.59564173
11
Wait for scripts to finish and then return the contents of the ``output`` div on the page.
def output(self): return super(RequireJSPage, self).output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def wait_for_ajax_complete():\r\n javascript = \"\"\"\r\n var callback = arguments[arguments.length - 1];\r\n if(!window.jQuery) {callback(false);}\r\n var intervalID = setInterval(function() {\r\n if(jQuery.active == 0) {\r\n clearInterval(intervalID);\r\n callback(true);\r\n }\r\n }, 100);\r\n \"\"\"\r\n # Sometimes the ajax when it returns will make the browser reload\r\n # the DOM, and throw a WebDriverException with the message:\r\n # 'javascript error: document unloaded while waiting for result'\r\n for _ in range(5): # 5 attempts max\r\n try:\r\n result = world.browser.driver.execute_async_script(dedent(javascript))\r\n except WebDriverException as wde:\r\n if \"document unloaded while waiting for result\" in wde.msg:\r\n # Wait a bit, and try again, when the browser has reloaded the page.\r\n world.wait(1)\r\n continue\r\n else:\r\n raise\r\n return result", "def wait(self):\n\n for output in self.proc.communicate():\n if output is not None:\n self.output += output", "def waitOutput( self, verbose=False ):\n log = info if verbose else debug\n output = ''\n while self.waiting:\n data = self.monitor()\n output += data\n log( data )\n return output", "def output(self):\n\t\tif (self.isLoaded()):\n\t\t\treturn self.loader.output()", "def wait_ajax(self, lib='JQUERY', timeout=30):\n page_logger.debug('Waiting for AJAX using %s' % lib)\n js = self.wait_ajax_script.get(lib, 'return true;')\n WebDriverWait(self.driver, timeout).until(\n lambda driver: driver.execute_script(js))", "def run_javascript_with_result(self, code_str):\n assert self._status is self.WindowStatus.READY, 'This method can only be called after the window is ready'\n\n _uniq_key = hex(hash(f'{time.time()}-{random()}'))\n self.call_engine_function('executeThenPoll', _uniq_key, code_str)\n return RuntimeManager.get_instance().JavascriptReturned.wait_for_value(_uniq_key)", "def finished(self):\n qbytearray = self.process.readAllStandardOutput()\n locale_codec = QTextCodec.codecForLocale()\n output = to_text_string(locale_codec.toUnicode(qbytearray.data()))\n testresults = self.load_data()\n self.sig_finished.emit(testresults, output)", "def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_script(program_folder)\n return None", "def wait_for_page_load(self):\n pass", "def await_simulation(self):\n\n # Test if overall statistics can be requested\n while True:\n d = self.BIVAS_API.get_output_overallstatistics(self.scenarioID)\n\n if (d is not None) and (d.status_code == 200):\n break\n\n logger.info('Waiting for BIVAS to finish...')\n time.sleep(60)\n\n logger.info(d.text)\n\n logger.info('Finished!')\n\n # Close BIVAS\n logger.info('Closing BIVAS')\n os.system('taskkill /f /im Bivas.exe')\n time.sleep(5)", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = 
None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def complete_output(self):\n if self.stdout0:\n sys.stdout = self.stdout0\n sys.stderr = self.stderr0\n self.stdout0 = None\n self.stderr0 = None\n return self.outputBuffer.getvalue()", "def execute_script(self):\n\n # render script variables\n script = self.replable.render_script_from_flo(self.flo, **self.template_engine_kwargs)\n\n # run over script lines\n for cmd in script.split(\"\\n\"):\n\n # no empty lines\n if cmd:\n\n self.brief_logger.info(cmd)\n if self.verbose_logger and self.log_file_echo_command:\n self.verbose_logger.info(\"$> '%s'\", cmd)\n\n # execute command\n cmd = cmd + \"\\n\"\n self.sock.send(cmd.encode())\n\n res = self.wait_for_command_execution(timeout=self.timeout)\n # read all data which is not covered by the regex used for stream searching\n # TODO: use loop here?!\n res += read_remaining_data(self.sock, SOCKET_READ_BUF_SIZE)\n\n # apply the custom check function\n if self.return_value_checker is not None:\n try:\n self.return_value_checker(cmd, res)\n except Exception as e:\n raise REPLUnexpectedResult(\n \"The following output is unexpected to the method `return_value_checker`:\\n%s\" % res,\n caused_by=e)\n\n yield res", "def execute():\n # print('Wow')\n result = gui.controller.main('execute')\n print(result)\n\n return render_template('results.html', data=json.dumps(result))", "def execute_script(self, script, asynchronous=False):\n pass", "def wait(self):\n self.mainloop().wait()", "def execute_javascript(self, code):\n return self.loop.run_until_complete(self.async_func.execute_javascript_async(code))", "def evaluate_in_page(self, js_string: str) -> Awaitable[Any]:", "def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()", "def script_content(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"script_content\")", "def end_rendering(self, output):\n if self.wrapper_to_generate:\n output = self.div(output, id=self.id, class_='nagare-generated nagare-async-view')\n\n return output", "def wait_for_requirejs(dependencies=None):\r\n if not dependencies:\r\n dependencies = [\"jquery\"]\r\n # stick jquery at the front\r\n if dependencies[0] != \"jquery\":\r\n dependencies.insert(0, \"jquery\")\r\n\r\n javascript = \"\"\"\r\n var callback = arguments[arguments.length - 1];\r\n if(window.require) {{\r\n requirejs.onError = callback;\r\n var unloadHandler = function() {{\r\n callback(\"unload\");\r\n }}\r\n addEventListener(\"beforeunload\", unloadHandler);\r\n addEventListener(\"unload\", unloadHandler);\r\n require({deps}, function($) {{\r\n setTimeout(function() {{\r\n removeEventListener(\"beforeunload\", unloadHandler);\r\n removeEventListener(\"unload\", unloadHandler);\r\n callback(true);\r\n }}, 50);\r\n }});\r\n }} else {{\r\n callback(false);\r\n }}\r\n \"\"\".format(deps=json.dumps(dependencies))\r\n for _ in range(5): # 5 attempts max\r\n try:\r\n result = world.browser.driver.execute_async_script(dedent(javascript))\r\n except WebDriverException as wde:\r\n if \"document unloaded while waiting for result\" in wde.msg:\r\n result = \"unload\"\r\n else:\r\n raise\r\n if result == \"unload\":\r\n # we ran this on the wrong page. 
Wait a bit, and try again, when the\r\n # browser has loaded the next page.\r\n world.wait(1)\r\n continue\r\n elif result not in (None, True, False):\r\n # We got a require.js error\r\n # Sometimes requireJS will throw an error with requireType=require\r\n # This doesn't seem to cause problems on the page, so we ignore it\r\n if result['requireType'] == 'require':\r\n world.wait(1)\r\n continue\r\n\r\n # Otherwise, fail and report the error\r\n else:\r\n msg = \"Error loading dependencies: type={0} modules={1}\".format(\r\n result['requireType'], result['requireModules'])\r\n err = RequireJSError(msg)\r\n err.error = result\r\n raise err\r\n else:\r\n return result", "def wait_for_load(driver):\n html = driver.page_source\n time.sleep(0.5)\n while html != driver.page_source:\n html = driver.page_source\n time.sleep(0.5)", "def waitUntilSuccess():", "def output(self):\n text_list = self.q(css='#output').text\n\n if len(text_list) < 1:\n return None\n return text_list[0]", "def getscript(self, name):\n code, data, content = self.__send_command(\n \"GETSCRIPT\", [name.encode(\"utf-8\")], withcontent=True)\n if code == \"OK\":\n lines = content.splitlines()\n if self.__size_expr.match(lines[0]) is not None:\n lines = lines[1:]\n return u\"\\n\".join([line.decode(\"utf-8\") for line in lines])\n return None", "async def wait_until_done(self) -> None:\n ...", "def get_content(self):\n url = self.build_url()\n try:\n self.content_page = requests.get(url)\n if not(self.content_page.status_code == requests.codes.ok):\n self.content_page.raise_for_status()\n except requests.exceptions.RequestException as ex:\n logging.info('A requests exception has ocurred: ' + str(ex))\n logging.error(traceback.format_exc())\n sys.exit(0)", "def get_results_from_script(self, script):\n raise NotImplementedError()", "def wait(self, timeout: float = None) -> CompletedProcess: # type: ignore\n if self.stdout is None:\n return CompletedProcess(self.args, returncode=super().wait(timeout=timeout), stdout=None)\n else:\n stdout = []\n while self.poll() is None:\n stdout.append(line := self.stdout.readline())\n\n if self.verbose:\n print(line, end=\"\")\n\n return CompletedProcess(self.args, returncode=self.poll(), stdout=\"\".join(stdout))", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "def wait():\n pass", "def run_async(self) -> StoryHolderDict:\n self.add_futures(self.j_dict)\n loop = asyncio.get_event_loop()\n get_url_futures = asyncio.gather(\n *[f for f in self.responses.values()])\n find_text_futures = asyncio.gather(\n *[f for f in self.find_futures_list])\n\n final_future = asyncio.gather(get_url_futures, find_text_futures)\n\n if not run_from_ipython:\n loop.run_until_complete(final_future)\n else:\n asyncio.ensure_future(final_future)\n return NewsDump.story_dump", "def wait_until_loading_is_complete(self, locator=None):\n locator = lex_locators[\"body\"] if locator is None else locator\n try:\n self.selenium.wait_until_page_contains_element(locator)\n self.wait_for_aura()\n # this knowledge article recommends waiting a second. I don't\n # like it, but it seems to help. 
We should do a wait instead,\n # but I can't figure out what to wait on.\n # https://help.salesforce.com/articleView?id=000352057&language=en_US&mode=1&type=1\n time.sleep(1)\n\n except Exception:\n try:\n self.selenium.capture_page_screenshot()\n except Exception as e:\n self.builtin.warn(\"unable to capture screenshot: {}\".format(str(e)))\n raise", "def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_CaVEMan_scripts(\n program_folder, self.path2exe, self.ref_fai, self.file1, self.file2,\n self.config_file, self.qsub_dir, self.mstep_script, self.merge_script, self.estep_script\n )\n return None", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # ESRCH means the process finished/died between last check and now\n if e.errno != errno.ESRCH:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"TaskWarrior to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"TaskWarrior stopped responding and couldn't be killed\")", "def _exec_and_wait(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc.wait()\n return proc.stdout.read()", "def get_test_results(self):\n element = self.find_element_by_id(self.results_id, wait=True)\n\n if element:\n return element.text\n else:\n return False", "def wait() -> None:\n\n process_input(input())", "def Wait(self):\n try:\n # Flush stdout and stderr to be sure no output is interleaved.\n sys.stdout.flush()\n sys.stderr.flush()\n\n # File position pointers are shared across processes, so we must open\n # our own file descriptor to ensure output is not lost.\n self._WaitForStartup()\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n results = []\n with open(self._output.name, 'r') as output:\n pos = 0\n running, exited_cleanly, task_errors, all_errors = (True, False, [], [])\n while running:\n # Check whether the process is still alive.\n running = self.is_alive()\n\n try:\n errors, results = \\\n self._queue.get(True, self.PRINT_INTERVAL)\n if errors:\n task_errors.extend(errors)\n all_errors.extend(errors)\n\n running = False\n exited_cleanly = True\n except Queue.Empty:\n pass\n\n if not running:\n # Wait for the process to actually exit. 
If the child doesn't exit\n # in a timely fashion, kill it.\n self.join(self.EXIT_TIMEOUT)\n if self.exitcode is None:\n msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))\n self._KillChildren([self])\n elif not exited_cleanly:\n msg = ('%r exited unexpectedly with code %s'\n % (self, self.exitcode))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))\n\n # Read output from process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n\n if len(buf) > 0:\n silent_death_time = time.time() + self.SILENT_TIMEOUT\n elif running and time.time() > silent_death_time:\n msg = ('No output from %r for %r seconds' %\n (self, self.SILENT_TIMEOUT))\n all_errors.extend(\n failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))\n self._KillChildren([self])\n\n # Read remaining output from the process.\n output.seek(pos)\n buf = output.read(_BUFSIZE)\n running = False\n\n # Print output so far.\n while len(buf) > 0:\n sys.stdout.write(buf)\n pos += len(buf)\n if len(buf) < _BUFSIZE:\n break\n buf = output.read(_BUFSIZE)\n\n # Print error messages if anything exceptional occurred.\n if len(all_errors) > len(task_errors):\n logging.PrintBuildbotStepFailure()\n msg = '\\n'.join(x.str for x in all_errors if x)\n logging.warning(msg)\n traceback.print_stack()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n # Propagate any results.\n for result in results:\n results_lib.Results.Record(*result)\n\n finally:\n self.Cleanup(silent=True)\n\n # If an error occurred, return it.\n return all_errors", "def second_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.third_page.wait_for_page()", "def third_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fourth_page.wait_for_page()", "def _wait_for_output(self):\n # Here we should get an empty list or list with a tuple [(fd, event)]\n # When we get list with a tuple we can use readline method on\n # the file descriptor.\n poll_result = self.poll_obj.poll(0)\n\n if poll_result:\n line = self.output().readline()\n if self._banner.match(line):\n return True\n\n return False", "def runScript(self, commands):\n sem = defer.DeferredSemaphore(1)\n dl = [sem.run(self.runCommand, command) for command in commands]\n return defer.gatherResults(dl)", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def wait_for_version_reply(self):\n frontends = self.get_frontends()\n for frontend in frontends:\n # we abuse this function:\n while frontend.get_afo_state() != AfoServerState.LEADER:\n progress(\".\")\n time.sleep(0.1)", "def make_all_html_results(cmd, folder_names = [], jobs=[]):\n run = cmd.results.current['run_name']\n if not os.path.exists(pjoin(cmd.me_dir, 'HTML', run)):\n os.mkdir(pjoin(cmd.me_dir, 'HTML', run))\n \n unit = cmd.results.unit\n P_text = \"\" \n Presults = collect_result(cmd, folder_names=folder_names, jobs=jobs)\n \n for P_comb in Presults:\n P_text += P_comb.get_html(run, unit, cmd.me_dir) \n P_comb.compute_values()\n if cmd.proc_characteristics['ninitial'] == 1:\n P_comb.write_results_dat(pjoin(cmd.me_dir, 'SubProcesses', P_comb.name,\n '%s_results.dat' % run))\n \n Presults.write_results_dat(pjoin(cmd.me_dir,'SubProcesses', 'results.dat')) \n \n fsock = open(pjoin(cmd.me_dir, 'HTML', run, 'results.html'),'w')\n fsock.write(results_header)\n fsock.write('%s <dl>' % 
Presults.get_html(run, unit, cmd.me_dir))\n fsock.write('%s </dl></body>' % P_text)\n\n return Presults.xsec, Presults.xerru", "def wait():\n time.sleep(1)", "def run(self):\n\n try:\n # Get the content from this page\n if self.verbose:\n print \"Getting page content for '%s'\" % self.url.strip()\n \n content = getPageContent(self.url)\n\n # Verify that this is not binary data\n if content is not None and isHTML(content):\n\n\n # Extract basic data about this result\n content = content.lower()\n title, keywords, description = parseMetaDataFromContent(content)\n headers = parseHeaderInformationFromContent(content)\n\n # Add this result data\n self.resultDictionary['title'] = title\n self.resultDictionary['keywords'] = keywords\n self.resultDictionary['description'] = description\n self.resultDictionary['content'] = content\n self.resultDictionary['headers'] = headers\n\n # Run the extensions\n for extension in self.extensions:\n extension.run(self.resultDictionary)\n\n\n except URLError:\n\n # Skip this URL, and register it as an error on the cache\n if self.verbose:\n print(\"Error accessing '%s', %s\" % (self.url.strip(), str(sys.exc_info()[1]).strip()))", "def get_page_content(self, url, delay):\r\n\r\n # if browser cannot connect to the server, repeat it infinitely.\r\n while True:\r\n try:\r\n # load the page\r\n self.sel_driver.get(url)\r\n\r\n # if the page is loaded, wait for delay seconds until loading would finish.\r\n # this delay is also to avoid being blocked by upwork due to so frequent access\r\n time.sleep(delay)\r\n\r\n # read and parse the page contents\r\n soup = BeautifulSoup(self.sel_driver.page_source, 'html.parser')\r\n\r\n # page loading succeeded. escape from the endless iteration\r\n break\r\n except (WebDriverException, TimeoutException):\r\n # error occurred, do it again\r\n print(\"(ERROR) Driver could't be load: \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n self.relaunch(60)\r\n\r\n # check if the page is ACCESS DENIED\r\n # get the title of the page\r\n elements = soup.find_all(\"title\")\r\n if len(elements) == 0:\r\n return soup # if it has no title, it's may be a normal page\r\n\r\n # if the title is UPWORK ACCESS DENIED, I deal with it\r\n title = elements[0].text\r\n if 'access denied' in title.lower():\r\n print(\"(ERROR) UPWORK DENIED at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n\r\n self.relaunch(200) # relaunch after about 3 minutes\r\n\r\n return self.get_page_content(url, delay)\r\n\r\n # if the title is Upwork - Maintenance, let it wait\r\n if title == 'Upwork - Maintenance':\r\n print(\"(ERROR) UPWORK is under the Maintenance - \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n time.sleep(random.randint(200, 400)) # We don't need relaunch browser.\r\n return self.get_page_content(url, delay)\r\n\r\n return soup", "def run(self, script, **kwargs):\r\n # don't return a value from a script\r\n kwargs['nout'] = 0\r\n return self.call(script, **kwargs)", "def wait(self):\n self.__prcs.wait()\n return self.poll()", "def get(self, url):\n\t\ttry:\n\t\t\tassert(type(url)) == str\n\t\t\tself.driver.get(url)\n\t\t\t# sleep(1) # Even tho driver.get is blocking, it returns as soon as DOM loads, without waiting for JS to run and update the DOM with the new elements\n\t\t\t# wait(self.driver, 10).until( EC.visibility_of_element_located() ) # Not sure how to wait here efficiently\n\t\t\tsleep(5) # A little long, but without a conditional variable to tell us when the page is ready us when to go our only choice 
is to nap\n\t\t\tself.bsource = bs( self.viewSource(), \"lxml\" ) # Update internal BeautifulSoup source with new javascript-encriched code (\"lxml\" is faster that \"html.parser\")\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to GET page {}\\n{}\".format(url, e))\n\t\t\treturn -1", "def wait(t,p):\n\toutput_list = []\n\tc = ''\n\td = ''\n\twhile p not in d:\n\t\tc = t.read_very_eager()\n\t\tif len(c) > 0:\n\t\t\td += c\n\t\t\tprint c\n\t\t\toutput_list.append(c)\n\t\tif \"Press any key to continue\" in c or \"--More--\" in c:\n\t\t\tt.write(\" \")\n\toutput_list = ((''.join(output_list)).replace('\\r\\n','\\n')).split('\\n')\n\treturn output_list", "def run(self):\n yield from self._dts.ready.wait()", "def fourth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fifth_page.wait_for_page()", "def wait_for_page_load(self, timeout=30):\n old_page = self.driver.find_element_by_tag_name('html')\n yield\n WebDriverWait(self.driver, timeout).until(\n staleness_of(old_page)\n )", "def wait_page_loaded(self, timeout=10):\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as ec\n\n old_page = self.selenium.find_element(By.TAG_NAME, \"html\")\n yield\n # Wait for the next page to be loaded\n self.wait_until(ec.staleness_of(old_page), timeout=timeout)\n self.wait_page_ready(timeout=timeout)", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def get_stdout(self):\n _ = self.get() # force finished wait\n if self._stdout is not None:\n if wait_until_exists(self._stdout):\n with open(self._stdout) as f:\n self._out = f.read()\n return self._out", "def split_graph_output(output):\n html, js = output.split(\"<script\")\n js = \"<script\" + js\n return html, js", "def process(self):\n while not self.halted:\n self.step()\n return self.outputs", "def wait_page_ready(self, timeout=10):\n self.wait_until(\n lambda driver: driver.execute_script(\"return document.readyState;\")\n == \"complete\",\n timeout,\n )", "def execute_javascript(self, code):\n return self.loop.run_until_complete(self.get_async_keyword_group().execute_javascript(code))", "def run_module(self):\n info(\"Searching for cross site scripting (reflected)...\")\n\n # load in a list of lfi attach strings\n #self.attack_strings = self.main.db.get_wordlist(\n # self.info['wordlist_name'])\n\n self.attack_strings = ['<script>alert(1)</script>',\n '<img srx=\"x\" onerror=\"alert(1)>\"']\n\n # the search strings will be the attack strings themselves\n # because python will not interpret any javascript\n self.re_search_strings = self.attack_strings\n\n injectable_params = self._get_previous_results('HTMLParser')\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n results = executor.map(self._run_thread, injectable_params)\n\n final = []\n for r in results:\n final.extend(r)\n\n # save the results\n self._save_scan_results(final)", "def wait(self):\n self.Popen.wait()", "def wonder():\n copy()\n get_soup()\n get_text()\n change_write_text()\n Check_status_time_stamp()", "def download_scripts(parsed_args, scripts, client):\n print(\"INFO: Fetching available scanning scripts...\")\n for script_object in scripts:\n script = client.fetch_airgapped_script(str(script_object.id))\n if script is None:\n continue\n file_name = script.type.split('::')\n if \"Linux\" in file_name:\n file_name[-1] += '.sh'\n elif \"Windows\" in file_name:\n file_name[-1] += '.ps1'\n path = os.path.join(os.path.dirname(__file__), \"/\".join(file_name))\n 
with open(path, 'w') as filehandle:\n filehandle.write(script.contents)\n if script.attachment and parsed_args.no_attachment:\n download_attachment(file_name, script.attachment)\n print(\"INFO: Script saved in {}\".format(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Scripts')))", "def run_results(self):\n calculation_band = self.ctx.workchain_bands.get_outputs(link_type=LinkType.CALL)[0]\n\n self.report('workchain succesfully completed'.format())\n self.out('band_parameters', calculation_band.out.output_parameters)\n self.out('bandstructure', calculation_band.out.output_band)", "def getResults(self, cleanup=True):\n self.wait_on_job()\n stdout_str = self.ofile_string()\n stderr_str = self.efile_string()\n if cleanup:\n self.erase_files()\n return (stdout_str, stderr_str)", "def _get_output(arguments, timeout=None):\n # NOTE Increase this value if tests fail with None being received as\n # stdout/stderr instead of the expected content\n output_timeout = 0.1 # seconds\n\n pidq = Queue()\n outputq = Queue()\n\n t = Thread(target=_queue_output, args=(arguments, pidq, outputq))\n t.daemon = True\n t.start()\n\n try:\n pid = pidq.get(timeout=timeout)\n except Empty:\n pid = None\n\n # Process crashed or timed out for some reason\n if pid is None:\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to start\")\n\n # Wait for process to finish (normal execution)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program thread to join\")\n\n # If we reach this point we assume the process got stuck or timed out\n for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):\n # Start with lower signals and escalate if process ignores them\n try:\n os.kill(pid, signal.SIGABRT)\n except OSError as e:\n # 3 means the process finished/died between last check and now\n if e.errno != 3:\n raise\n\n # Wait for process to finish (should die/exit after signal)\n state = wait_process(pid, timeout)\n\n if state:\n # Process finished\n return _retrieve_output(t, output_timeout, outputq,\n \"Program to die\")\n\n # This should never happen but in case something goes really bad\n raise OSError(\"Program stopped responding and couldn't be killed\")", "def run_with_output(self, cmd, end_strs, timeout=310):\n self.write(cmd)\n out = self.gather_output(cmd, end_strs, timeout)\n return out", "def run_script_block(json_response):\r\n scripts = request.values['scripts']\r\n exec_id = request.values['exec_id']\r\n ds_name = request.values['datasource']\r\n user_id = current_user.login_name\r\n result = executor.run_script_block(exec_id, scripts, ds_name, user_id)\r\n return json_response(result=result)", "def print_contents(self):\n try:\n # We only wait for 0.001 seconds.\n self.print_all_contents(indef_wait=False)\n except NotYourTurnError:\n # It's not our turn, so try again the next time this function is called.\n pass", "def handleContentComplete():", "def wait_for_response(self, request_id):\n url = \"{}/{}/{}\".format(self.url, self.url_dir, request_id)\n while True:\n response = requests.get(url)\n if response.text == \"done\\n\":\n return", "def execute(self):\n\n while True:\n\n neighbours, script, location = self.queue.get()\n\n if neighbours is None and script is None:\n self.queue.task_done()\n return\n\n self.run_script(neighbours, script, location)\n self.queue.task_done()", "def wait(self):\n pass", "def wait(self):\n pass", "def wait_complete(self):\n self.join()", "def 
test_scripts_inside_content_block(self):\n c = Client()\n resp = c.get('/books/')\n self.assertNotIn(b'<div id=\"recent_reviews\"></div>', resp.content)\n self.assertNotIn(b'<script crossorigin src=\"https://unpkg.com/react@16/umd/react.development.js\"></script>',\n resp.content)\n self.assertNotIn(\n b'<script crossorigin src=\"https://unpkg.com/react-dom@16/umd/react-dom.development.js\"></script>',\n resp.content)\n self.assertNotIn(b'<script src=\"https://unpkg.com/babel-standalone@6/babel.min.js\"></script>', resp.content)\n self.assertNotIn(b'<script src=\"/static/recent-reviews.js\" type=\"text/babel\"></script>', resp.content)\n self.assertNotIn(b'ReactDOM.render(<RecentReviews url=\"/api/reviews/?limit=6\" />,', resp.content)\n self.assertNotIn(b'document.getElementById(\\'recent_reviews\\')', resp.content)", "def run(url, output, loglevel, logfile):\n # Logging setup\n loader.logging.setup(level=loglevel, logfile=logfile)\n\n # Download page and get DOM\n dom = BeautifulSoup(loader.network.download(url), DEFAULT_PARSER)\n\n # Split URL to fragments\n scheme, net_loc, *_ = list(urlparse(url))\n\n # Get resource objects from DOM\n resources = loader.handler.get_resources(dom)\n\n if resources:\n # Build resource dirname\n local_dirname = loader.path.for_resource_dir(url)\n # Create dir for resource inside 'output'\n loader.storage.mkdir(os.path.join(output, local_dirname))\n\n web_resource_paths = []\n for resource in resources:\n # Get resource path from resource object\n web_resource_path = loader.handler.get_path(resource)\n # Build resource local path\n local_resource_path = os.path.join(\n local_dirname,\n loader.path.for_resource(web_resource_path),\n )\n # Set local path in resource object\n loader.handler.update_resource(\n resource=resource,\n new_link=local_resource_path,\n )\n web_resource_paths.append(web_resource_path)\n # Save modified DOM\n loader.storage.save(\n f_content=dom.encode(),\n output=output,\n filename=loader.path.for_page(url),\n )\n # Download resources\n for resource_path in tqdm(web_resource_paths, desc=BAR_DESC):\n resource_url = urlunsplit(\n [scheme, net_loc, resource_path, None, None],\n )\n try:\n loader.storage.save(\n f_content=loader.network.download(resource_url),\n output=os.path.join(output, local_dirname),\n filename=loader.path.for_resource(resource_path),\n )\n except loader.network.NetworkError as error:\n logging.debug(error, exc_info=sys.exc_info())", "def wait_for_ajax(self):\n return self.driver.execute_script(\n \"return typeof(jQuery)!='undefined' && jQuery.active==0\")", "def finish(self):\r\n self.start_finish()\r\n self.wait_finish()", "def get(self, url=None, script=None, key=None):\n self.base_url = self.base_url or url # set base URL if not set\n html = self.cache.get(key)\n if html:\n if self.debug: print 'load cache', key \n self.setHtml(html, QUrl(self.base_url))\n elif url:\n self.load(QUrl(url))\n elif script:\n self.js(script)\n\n loop = QEventLoop()\n timer = QTimer()\n timer.setSingleShot(True)\n timer.timeout.connect(loop.quit)\n self.loadFinished.connect(loop.quit)\n timer.start(self.timeout * 1000)\n loop.exec_() # delay here until download finished or timeout\n \n if timer.isActive():\n # downloaded successfully\n timer.stop()\n html = self.current_html()\n if key:\n self.cache[key] = html\n self.inject_jquery()\n else:\n # didn't download in time\n print 'Download timeout'\n html = ''\n return html", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def get(self):\n if 
not self.finished():\n self.wait()\n return self._result", "def get_results_from_script(self, script):\n result = script.scaler.work_free_stats\n return result", "def handle_execution_response(self, data, *, wait):\n ...", "def collect_output(self):\n pass", "def collect_output(self):\n pass", "def include_content_html():\n\n# <div id=\"content\"> \n root_div = etree.Element(\"div\", id=\"content\")\n \n for initial_condition in initial_conditions:\n for flux in fluxes:\n # content_id identifies the results of a particular computation in the HTML document \n content_id = initial_condition + \"_\" + flux\n # <div id=\"content_id\">\n div = etree.SubElement(root_div, \"div\", id=content_id)\n # JQuery function to include content dynamically\n # <script> = include_content(content_id)</script>\n etree.SubElement(div, \"script\").text = \"include_content(\\\"\" + content_id + \"\\\")\"\n #</div> \n# </div>\n\n# Write the generated HTML document to a file\n output_html_file = open(html_path + \"html/computations/include_content.html\", \"w\") \n output_html_file.write(etree.tostring(root_div, pretty_print=True).decode(\"utf-8\"))", "def run_next_action():\n os.environ[\"BROWSER\"] = 'echo %s'\n result = subprocess.run(context.arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=\"utf-8\")\n return result.stdout + result.stderr" ]
[ "0.58988714", "0.5896934", "0.5896934", "0.5896934", "0.5896934", "0.5818085", "0.5690976", "0.56339425", "0.5543673", "0.5390721", "0.5287882", "0.52217585", "0.5208909", "0.51837784", "0.5157287", "0.5146636", "0.51380193", "0.51380193", "0.51380193", "0.51380193", "0.51246214", "0.50950605", "0.50584346", "0.50428325", "0.501126", "0.50033057", "0.50016296", "0.5001629", "0.500133", "0.5001176", "0.49920604", "0.49898067", "0.4980966", "0.4938505", "0.49365634", "0.49348938", "0.49305403", "0.4911703", "0.48998383", "0.48989302", "0.48979533", "0.48949262", "0.4889959", "0.48642418", "0.48610294", "0.48433548", "0.48398426", "0.48384643", "0.48214427", "0.4816019", "0.48154774", "0.48017773", "0.47952098", "0.4784768", "0.47800678", "0.47712675", "0.47692594", "0.47393233", "0.4730804", "0.4722589", "0.4697666", "0.46956542", "0.46955723", "0.46955082", "0.469297", "0.46922916", "0.46913755", "0.4690477", "0.46857592", "0.46854463", "0.46773377", "0.46747455", "0.46707627", "0.46591407", "0.4653245", "0.46522498", "0.46521986", "0.46521592", "0.46499422", "0.46452317", "0.4641753", "0.463313", "0.46298158", "0.46253136", "0.46206844", "0.46203747", "0.46203747", "0.4617286", "0.46102887", "0.45996612", "0.4598194", "0.45967522", "0.45952183", "0.45903826", "0.45903563", "0.4589297", "0.4588863", "0.457745", "0.457745", "0.45772776", "0.4577204" ]
0.0
-1
Click the button on the page, which triggers an ajax call that updates the output div.
def click_button(self): self.q(css='div#fixture button').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def click_button(self):\n self.widgets.get('button').click()", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def handle_ajax(self):\r\n pass", "def Button(request):\n params = {\n 'mimetype': 'text/javascript',\n 'fn': request.GET.get('fn', '_bRunTest'),\n 'btn_text': request.GET.get('btn_text', 'Run the test'),\n 'cb_text': request.GET.get('cb_text',\n 'and send my results to Browserscope (anonymously)'),\n }\n return util.Render(request, 'user_test_button.js', params)", "def save(self):\n self.click(\".action-save\")\n self.page.wait_for_ajax()", "def execbox(response, url=\"/exec/\"):\n response.out.write(\"\"\"\n <form action=\"\" method=\"GET\">\n <b>enter command:</b><input type=\"commit\" name=\"input\" value=\"\">\n // <input type=\"button\" value=\"go\" onClick=\"makePOSTRequest(this.form)\"\n </form>\n \"\"\")", "def click(self):\n self.dispatch['elementClick'] = self.clickJsFnc", "def click_download_button(self):\n self._basket.click_download_button()", "def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()", "def view(self):\n\t\tself.done(1)", "def do(self, jQuery):", "def on_click(self) -> None:\n pass", "def handle_remote_button(self, request):\n self._verify_auth_parameters(request)\n content = yield from request.content.read()\n parsed = dmap.parse(content, tag_definitions.lookup_tag)\n self.last_button_pressed = dmap.first(parsed, 'cmbe')\n return web.Response(status=200)", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def click(self):\r\n pass", "def on_click(self) -> None:\n os.startfile(self.url) # noqa: S606", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def OnButtonSubmitterPageButton(self, event):\r\n\t\twebbrowser.open(self._configtmp[\"imageurl\"])", "def checkout_btn(self):\n self._checkout_btn.click()", "def commandbox(response, url=\"/dispatch/\"):\n response.out.write(\"\"\"\n <form action=\"%s\" method=\"post\">\n <div><b>enter command:</b> <input type=\"commit\" name=\"content\"></div>\n </form>\n \"\"\" % url)", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n 
self.sleep_approx(1)", "def do_POST(self):\r\n self.do_GET()", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def handle_ajax(self, dispatch, data):\r\n pass", "def click_documents_grid_inline_action_button(self, reference_number):\n self.click_inline_action_button(self.documents_grid_div_id, reference_number, self.documents_grid_inline_action_column_number)\n self.wait_for_ajax_spinner_load()", "def move_confirm_btn(self):\n self.wait_for_ajax()\n move_confirm_btn_sitem = self.locator_finder_by_id(self.move_confirm_btn_id, 20)\n move_confirm_btn_sitem.click()\n time.sleep(1)", "def refresh(self):\n #self.find('counter-label').text = 'Counter: %i' % self.counter\n\n #@on('increment-button', 'click')\n #def on_button(self):\n \"\"\"\n This method is called every time a child element\n with ID 'increment-button' fires a 'click' event\n \"\"\"\n #self.counter += 1\n #self.refresh()", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def submit_response(self):\r\n self.q(css='input.submit-button').first.click()\r\n\r\n # modal dialog confirmation\r\n self.q(css='button.ok-button').first.click()\r\n\r\n # Ensure that the submission completes\r\n self._wait_for_submitted(self.assessment_type)", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))", "def pagemainred():\n return render_template('do_action.html')", "def answer_problem(self):\r\n self.q(css='input.check').first.click()\r\n self.wait_for_ajax()", "def _link_clicked(self, href):\n\n self.main_frame.load(href)", "def execute():\n # print('Wow')\n result = gui.controller.main('execute')\n print(result)\n\n return render_template('results.html', data=json.dumps(result))", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n 
time.sleep(0.1) # Just some pacing.\n element.click()", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def btn_follow_clicked(self, widget, data=None):\n print \"follow clicked\"\n #Going to put random stuff here.", "def update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))", "def click_upload_button(self):\n self.click_element(self.upload_button_locator)", "def button1_press(self):\n\n ext = nuke_link(str(self.lineEdit.text()))\n url = 'https://learn.foundry.com/nuke/developers/70/pythonreference/{}'.format(ext)\n webbrowser.open(url)", "def on_click(self) -> None:\n self.cycle()", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def on_click ( self, object ):\n pass", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def click(self, element):\n element.click()", "def clickedAction(self, events):\n print(\"The {} button was clicked!\".format(self.imgname))", "def submit(self):\n self.driver.find_element(*BaseLocators.SUBMIT_BUTTON).click()", "def click_on_submit(context):\n submit_for_approval = context.browser.find_elements_by_css_selector(\n \"input[type='button'][value='Submit for Approval']\")\n for item in submit_for_approval:\n item.click()\n time.sleep(10)", "def update_task_states_ajax(self, return_html):\r\n changed = self.update_task_states()\r\n if changed:\r\n pass\r\n return return_html", "def fetch_page(classnum=0): # ! This will generate web requests\r\n r = requests.post('http://deshalit.iscool.co.il/default.aspx',\r\n data={'__EVENTTARGET': 'dnn$ctr11396$TimeTableView$btnChangesTable',\r\n '__VIEWSTATE': post_viewstate,\r\n 'dnn$ctr11396$TimeTableView$ClassesList': str(classnum)})\r\n return r.text", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def Click(self):\n if self.function == None:\n return\n \n self.function()", "def _resubmit_button_fired(self):\n self.resubmit()", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def click_save_changes_button(self):\n self.click_element(self.save_changes_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click_view_price_list_detail_first_row_inline_action_button(self):\n self.click_inline_action_button(self.view_price_list_div_id, None, self.view_price_list_column_number, True)\n self.wait_for_ajax_spinner_load()", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def index_html(self,REQUEST): \n return self.posting_html(self,REQUEST)", "def _(event):\n try:\n self.view_model.update_results()\n except Exception:\n 
self.view_model.status_textcontrol.text = \"(no results available)\"", "def go(self, url):\n self.driver.get(url)", "def refresh_page(self):\n self.m_driver.refresh()\n time.sleep(30)", "def double_clicked_to_view(self):\n\n # TODO need this method? better in init to go to view_file\n self.view_file()", "def do_response(data):\n def on_done(i):\n if i == -1:\n return\n\n cite_key = data[i][2]\n view = sublime.active_window().active_view()\n view.run_command(\"dblp_insert_result\", {\"text\": cite_key})\n\n sublime.active_window().show_quick_panel(data, on_done)", "def click(self, wait_load_page = True):\n\t\tif self.__element.tag == 'a':\n\t\t\tself.__browser.load_page(self.get_property('href'))", "def select_upload_btn(self):\n select_upload_btn_sitem = self.locator_finder_by_xpath(self.select_upload_btn_id)\n select_upload_btn_sitem.click()\n time.sleep(3)", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def execPushButton(self):\n\t\t# verbose.detail(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))\n\t\tprint(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))", "def clickDashboard(self):\n self.waitForElement(locator=self._dashboardBtn, locatorType=\"xpath\")\n self.elementClick(locator=self._dashboardBtn, locatorType=\"xpath\")", "def OnButton(self, event):\n\n\n event_id = event.GetId()\n event_obj = event.GetEventObject()\n print(\"Button 1 Clicked:\")\n print(\"ID=%d\" % event_id)\n print(\"object=%s\" % event_obj.GetLabel())", "def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()", "def click(self, element_tuple):\n current_state = self.change_monitor()\n self.log_info(f\"Browser.click: Clicking {element_tuple}\")\n self.CORE.find_element(*self.format_element(element_tuple)).click()\n self.change_monitor(previous_data=current_state)\n return", "def action_done(self):", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click_login_button(self):", "def refresh(self, id):\n exports.execute_export.delay(id)\n return render({\"id\": id})", "def press_entry(self, button):\r\n buttonText = button.text\r\n selectedPlace = Place()\r\n for place in self.place_list.list_places:\r\n placeDisplayText = self.generateDisplayText(place.name, place.country, place.priority, place.is_required)\r\n if buttonText == placeDisplayText:\r\n selectedPlace = place\r\n break\r\n\r\n selectedPlace.mark_visited() # Mark the place visited\r\n self.root.ids.entriesBox.clear_widgets() # Apply to GUI\r\n self.create_widget()\r\n\r\n self.news = \"You have visited {}\".format(selectedPlace.name) # Display change in news\r", "def click_view_price_list_detail_page_inline_action_button(self, price_list_item):\n self.click_inline_action_button(self.view_price_list_div_id, price_list_item, self.view_price_list_column_number)\n self.wait_for_ajax_spinner_load()", "def 
get_submit(self):\r\n return self.driver.find_element(*SinginPage.submit).click()", "def do_click(self, xpath):\n e = self._find_element_by_xpath(xpath)\n e.click()", "def mainWebActions(self, **kwargs):\n # If the dictionary item value is the required opens the webpage\n if kwargs['button']=='docs':\n # Only 1 click at every 5 seconds\n self.docs_Button.setDown(True)\n QTimer.singleShot(5000, lambda: self.docs_Button.setDown(False))\n webbrowser.open('https://italorenan.gitbook.io/roc/')", "def click_upload_statement_button(self):\n self.click_element(self.upload_statement_button_locator)", "def on_NodeServiceRefresh_clicked(self):\n # TODO: not implemented yet\n #raise NotImplementedError\n output=\"Refresh Finished ! Press button to Get more text!\"\n if self.NodeServer!=\"\" :\n try:\n output=self.NodeServer.readcmdResult(1000)\n print(f\"cmd execute result:\\n{output}\")\n\n except:\n print(\"No More Result!\")\n\n self.NodeServiceText.append(output)\n print(output)\n\n self.NodeServiceText.reload()\n ## should add some code here for server and client", "def handle_playback_button(self, request):\n self._verify_auth_parameters(request)\n self.last_button_pressed = request.rel_url.path.split('/')[-1]\n return web.Response(status=200)", "def _do_load_page(self, **kwargs): # pylint: disable=unused-argument\n _return = False\n\n _function = self._dtc_data_controller.request_do_select(\n self._function_id)\n\n self.txtAvailability.set_text(\n str(self.fmt.format(_function.availability_logistics)))\n self.txtMissionAt.set_text(\n str(self.fmt.format(_function.availability_mission)))\n self.txtMissionHt.set_text(\n str(self.fmt.format(_function.hazard_rate_mission)))\n self.txtPredictedHt.set_text(\n str(self.fmt.format(_function.hazard_rate_logistics)))\n\n self.txtMMT.set_text(str(self.fmt.format(_function.mmt)))\n self.txtMCMT.set_text(str(self.fmt.format(_function.mcmt)))\n self.txtMPMT.set_text(str(self.fmt.format(_function.mpmt)))\n\n self.txtMissionMTBF.set_text(\n str(self.fmt.format(_function.mtbf_mission)))\n self.txtMTBF.set_text(str(self.fmt.format(_function.mtbf_logistics)))\n self.txtMTTR.set_text(str(self.fmt.format(_function.mttr)))\n\n self.txtTotalCost.set_text(str(locale.currency(_function.cost)))\n self.txtModeCount.set_text(\n str('{0:d}'.format(_function.total_mode_count)))\n self.txtPartCount.set_text(\n str('{0:d}'.format(_function.total_part_count)))\n\n return _return", "def click_buy_page_inline_action_button(self, vendor):\n self.click_inline_action_button(self.vendors_div_id, vendor, self.grid_column_number)", "def on_click():\n action = self.screens[self.curr_screen].on_click()\n\n if screen_actions.CHANGE_SCREEN == action[\"screen action\"]:\n self.curr_screen = action[\"value\"]\n self.screens[self.curr_screen].update_screen()\n self.lcd_display.show()\n elif screen_actions.UPDATE_REDIS == action[\"screen action\"]:\n self.redis_client.set(action[\"redis key\"], action[\"value\"])\n self.redis_dict[action[\"redis key\"]] = action[\"value\"]\n print(\n \"Key: {}, Value: {}\".format(\n action[\"redis key\"],\n self.redis_client.get(action[\"redis key\"]).decode(\"UTF-8\"),\n )\n )", "def update():\n print(\"current page is \", wikiPageStackTrace[-1].getTitle())\n if wikiPageStackTrace[-1].getUrl() != goalPage.getUrl(): # no victory\n eel.addRoundNumber()\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(\n 
wikiPageStackTrace[-1].getFirstSentence())\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.hideLoader()\n elif wikiPageStackTrace[-1].getUrl() == goalPage.getUrl(): # victory\n eel.hideLoader()\n eel.addRoundNumber()\n eel.updateRoundNumber()\n eel.updateHistory(getHistoryTitles())\n eel.showVictory()\n # we need to do this because overwise the JS is not fat egoth to respond so we get an infinit loading\n time.sleep(0.5)\n eel.hideLoader()", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def handle_ajax(self, dispatch, data):\r\n if dispatch == 'get_hint':\r\n out = self.get_hint(data)\r\n elif dispatch == 'get_feedback':\r\n out = self.get_feedback(data)\r\n elif dispatch == 'vote':\r\n out = self.tally_vote(data)\r\n elif dispatch == 'submit_hint':\r\n out = self.submit_hint(data)\r\n else:\r\n return json.dumps({'contents': 'Error - invalid operation.'})\r\n\r\n if out is None:\r\n out = {'op': 'empty'}\r\n elif 'error' in out:\r\n # Error in processing.\r\n out.update({'op': 'error'})\r\n else:\r\n out.update({'op': dispatch})\r\n return json.dumps({'contents': self.runtime.render_template('hinter_display.html', out)})" ]
[ "0.6879377", "0.6879377", "0.6518393", "0.6406209", "0.62057817", "0.61877733", "0.61342907", "0.6104341", "0.6008106", "0.5944604", "0.5852779", "0.5775311", "0.5756694", "0.57321346", "0.5669197", "0.56490207", "0.5648505", "0.5631494", "0.5623206", "0.5592204", "0.5576342", "0.5566358", "0.55594367", "0.55564535", "0.55080074", "0.5476524", "0.54727453", "0.54709107", "0.5456298", "0.5435267", "0.5434026", "0.54186463", "0.53939545", "0.5375304", "0.53720325", "0.53566235", "0.5332291", "0.532446", "0.53056866", "0.52915126", "0.52871877", "0.52753294", "0.5272342", "0.5259273", "0.5240635", "0.5234333", "0.52195156", "0.52075624", "0.5195786", "0.519165", "0.51895595", "0.51861227", "0.51788056", "0.51783425", "0.5142036", "0.51336914", "0.5120227", "0.51149786", "0.51114345", "0.5108813", "0.5108", "0.50991327", "0.5096193", "0.508725", "0.507345", "0.50693053", "0.5068556", "0.5061156", "0.5053562", "0.5053419", "0.5048917", "0.50416785", "0.5036405", "0.5034545", "0.50276726", "0.50215346", "0.50205636", "0.50166994", "0.5006557", "0.50034857", "0.49945503", "0.4993499", "0.49872002", "0.49843043", "0.49807367", "0.49784157", "0.49776748", "0.4967158", "0.49589187", "0.4951538", "0.49492145", "0.4947437", "0.4942619", "0.49423155", "0.49421054", "0.49368542", "0.4933463", "0.4931957", "0.49222824", "0.49210706" ]
0.641759
3
Click button and wait until output id appears in DOM.
def is_button_output_present(self): self.wait_for_element_presence('div#ready', 'Page is Ready') self.q(css='div#fixture button').first.click() self.wait_for_element_presence('div#output', 'Button Output is Available')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def is_button_output_visible(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_visibility('div#output', 'Button Output is Visible')", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def click_button(self):\n self.widgets.get('button').click()", "def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def testButtonCB(self, testId):\n button = self.test_buttons[testId]\n if self.result:\n self.showTestOutput(testId)\n return", "def click_the_save_button_which_should_be_returned_to_the_storage_page(driver):\n assert wait_on_element(driver, 5, '//button[contains(.,\"Save Access Control List\")]', 'clickable')\n driver.find_element_by_xpath('//button[contains(.,\"Save Access Control List\")]').click()\n time.sleep(1)\n assert wait_on_element_disappear(driver, 30, '//h6[contains(.,\"Please wait\")]')", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def onButton(self):\n \n s = self.id_entry.get().strip()\n if len(s) < 3: # Require at least 3 characters in an id.\n return\n \n self.answer = g.app.leoID = s\n self.top.destroy() # terminates wait_window", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def wait(self, _id):\n while not self._actions[_id].done:\n sleep(1e-3)", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def wait_for_input(self):\n pass", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def poll(self):\n\tself.met = self.button.poll()", "def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog 
popped up\")\n return", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def doWaitVisibleClickElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n # locate the element\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector,\n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False\n return ret\n \n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n \n if elementId is None:\n self.error(\"element id is missing in response\")\n \n # checking if visible\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout, \"wait-until-value\": True}\n cmdId = self.displayedElement(elementId=elementId, more= more)\n rsp = self.isElementDisplayed(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret\n \n # finally click on it\n cmdId = self.clickElement(elementId=elementId)\n if self.isElementClicked(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret", "def wait_for_tag():\n time.sleep(1.1)", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)", "def wait_until_transfers_displayed(self):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).wait_until_displayed()", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for 
i in range(200):\n display.read()\n assert callback == 1", "def press(button_id: str) -> None:\n try:\n self.query_one(f\"#{button_id}\", Button).press()\n except NoMatches:\n pass", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()", "def select_upload_btn(self):\n select_upload_btn_sitem = self.locator_finder_by_xpath(self.select_upload_btn_id)\n select_upload_btn_sitem.click()\n time.sleep(3)", "def check_TEAMS_exit_edit_mode_Button(driver = None,intervalWaitForPage = None,output = None):\r\n\tglobal verify, log_path\r\n\tpageLoadWaitInterval = intervalWaitForPage if intervalWaitForPage != None else 5\r\n\tif (driver == None or output == None):\r\n\t\tprint \"ERROR in check_TEAMS_exit_edit_mode_Button(): Please send webdriver, and output as arguments.\"\r\n\telse:\r\n\t\tdriver.set_page_load_timeout(pageLoadWaitInterval)\r\n\t\ttry:\r\n\t\t\tverify = 0\r\n\t\t\t#Admin Gear test\r\n\t\t\ttry:\r\n\t\t\t\teditButton = WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.ID,\"QA:CentricView:exitEditButton\")))\r\n\t\t\t\tif editButton.is_displayed() == False:\r\n\t\t\t\t\toutput = writer(\"VERIFY:\\texitEditButton Absent\\tFAIL\",output)\r\n\t\t\t\telif editButton.is_displayed() == True:\r\n\t\t\t\t\toutput = writer(\"VERIFY:\\texitEditButton Present\\tPASS\",output)\r\n\t\t\t\t\tverify = 1\r\n\t\t\texcept TimeoutException:\r\n\t\t\t\toutput = writer(\"INFO:\\tCatastrophic DOM Error\",output)\r\n\t\t\t#-------------------------\r\n\t\texcept TimeoutException:\r\n\t\t\toutput = writer(\"INFO:\\tgo to Admin iframe failed\",output)\t\r\n\treturn verify", "def click_upload_button(self):\n self.click_element(self.upload_button_locator)", "def wait():\n time.sleep(1)", "def test1(stopEvent: Event):\n auto.InitializeUIAutomationInCurrentThread()\n n = 0\n child = None\n auto.Logger.WriteLine('Use UIAutomation in another thread:', auto.ConsoleColor.Yellow)\n while True:\n if stopEvent.is_set():\n break\n if not child:\n n = 1\n child = auto.GetRootControl().GetFirstChildControl()\n auto.Logger.WriteLine(n, auto.ConsoleColor.Cyan)\n auto.LogControl(child)\n child = child.GetNextSiblingControl()\n n += 1\n stopEvent.wait(1)\n auto.UninitializeUIAutomationInCurrentThread()\n print('test1 exits')", "def click_download_button(self):\n self._basket.click_download_button()", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def move_confirm_btn(self):\n self.wait_for_ajax()\n move_confirm_btn_sitem = self.locator_finder_by_id(self.move_confirm_btn_id, 20)\n move_confirm_btn_sitem.click()\n time.sleep(1)", "def wait_click_element(self, locator):\n try:\n return WebDriverWait(self.driver, 10).until(ec.element_to_be_clickable(locator))\n except AttributeError as e:\n loger.error('元素不可点击定位出错')\n self.save_screen_shot()\n raise e", "def select_ok_pop_up_item(self):\n if self.driver.wait_for_object(\"retargeting_data_ok_pop_up_btn\", raise_e=False):\n self.driver.click(\"retargeting_data_ok_pop_up_btn\")", "def testcase1(self):\r\n\r\n self.driver.find_element_by_xpath('//*[@id=\"screenshotContainer\"]/div/div/div/div/div/div[14]').click()\r\n self.driver.find_element_by_xpath('//*[@id=\"screenshotContainer\"]/div/div/div/div/div/div[23]').is_displayed()\r\n 
self.driver.find_element_by_xpath('//*[@id=\"screenshotContainer\"]/div/div/div/div/div/div[23]').is_enabled()", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def log_in_button_click(self):\n waiter.find_element(self.driver, LOG_IN_BUTTON_XPATH, by=XPATH).click()", "def Wait(p_question: str):\n input(p_question)\n return", "def doWaitClickElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector,\n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False\n return ret\n \n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n \n cmdId = self.clickElement(elementId=elementId)\n if self.isElementClicked(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret", "def submit_response(self):\r\n self.q(css='input.submit-button').first.click()\r\n\r\n # modal dialog confirmation\r\n self.q(css='button.ok-button').first.click()\r\n\r\n # Ensure that the submission completes\r\n self._wait_for_submitted(self.assessment_type)", "def card_success(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n self.driver.switch_to_window(handles[2])\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success'))) \n self.driver.find_element_by_class_name(\"success\").click()\n self.driver.switch_to_window(handles[0])", "def click(self, element):\n element.click()", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def wait_until_element_is_clickable(self, element):\n try:\n self.wait.until(EC.element_to_be_clickable(element))\n except TimeoutException:\n raise NoSuchElementException(\"UI Element %s not found\" % element[1])\n except Exception as exce:\n raise exce", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def homerunascan(window,referenceid):\n try:\n allbuttons = getAppButtons(window)\n print allbuttons\n 
atomacclick(allbuttons[0])\n atomacclick(allbuttons[20])\n time.sleep(4)\n Runwindow = getChildwindows(referenceid)\n buttons = getAppButtons(Runwindow)\n atomacclick(buttons[0])\n newb = getAllObjects(Runwindow)\n time.sleep(3)\n atomacclick(newb[2])\n except Exception as er:\n print(\"Not able to click on homerunascan\")\n return False", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def click_upload_statement_button(self):\n self.click_element(self.upload_statement_button_locator)", "def wait(self):\n self.event.wait()", "def batch_test_open():\n try:\n WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.CLASS_NAME, \"cdk-overlay-pane\")))\n ActionChains(browser).send_keys(Keys.ESCAPE).perform()\n except:\n print(\"No migration pop-up\")\n\n WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.LINK_TEXT, config.app_name)))\n browser.find_element_by_link_text(config.app_name).click()\n WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.CLASS_NAME, 'nav-section')))\n buttons = browser.find_elements_by_class_name('nav-section')\n buttons[1].click()\n WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//button[contains(text(), '\n '\"Batch testing\")]')))\n browser.find_element_by_xpath('//button[contains(text(), \"Batch testing\")]').click()", "def doWaitVisibleElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n\n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector,\n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout, \"wait-until-value\": True}\n\n cmdId = self.displayedElement(elementId=elementId, more= more)\n rsp = self.isElementDisplayed(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret", "def wait(self):\n time.sleep(0.010)", "def termin_suchen(driver: webdriver.Chrome):\n\n driver.find_element_by_xpath(\"//button[contains(text(),'suchen')]\").click()", "def executeButton(self, nodeId):\n identifierPropertie = self.getNodeProperties(\n nodeId, [{\"name\": \"identifier\", \"value\": \"\"}])\n identifier = identifierPropertie['properties'][0]['value']\n if self.existSession(self.client_session.session_key):\n if not self.isInBackground(identifier):\n # start new executeButtonThread\n thread = threading.Thread(\n target=self._executeButtonThread, args=[nodeId])\n thread.daemon = True\n thread.start()\n # Node is already running in background\n return True\n raise exceptions.NotAcceptable(\n \"executeButton - There's no session\")", "def answer_problem(self):\r\n self.q(css='input.check').first.click()\r\n self.wait_for_ajax()", "def 
simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def _wait_for_output(self):\n # Here we should get an empty list or list with a tuple [(fd, event)]\n # When we get list with a tuple we can use readline method on\n # the file descriptor.\n poll_result = self.poll_obj.poll(0)\n\n if poll_result:\n line = self.output().readline()\n if self._banner.match(line):\n return True\n\n return False", "def wait(self):\n self.mainloop().wait()", "def WaitForTest(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('waitForTest', payload=payload, response_object=None)", "def click_display(self) -> None:\n logging.info(f\"Click on the displayed element. {self.desc}\")\n js = 'var elm = document.querySelector(\"' + self.css + '\");' \\\n ' if(elm != null){elm.style.border=\"2px solid red\";elm.click();}'\n self._execute_javascript(js)", "def clickDashboard(self):\n self.waitForElement(locator=self._dashboardBtn, locatorType=\"xpath\")\n self.elementClick(locator=self._dashboardBtn, locatorType=\"xpath\")", "def wait() -> None:\n\n process_input(input())", "def execPushButton(self):\n\t\t# verbose.detail(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))\n\t\tprint(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))", "def test_acceptance_checkout_button_was_instantiated(self):\r\n pattern = re.compile(\r\n r\"document.getElementById\\('checkout-button-sku_\\w{14}'\\);\", re.I | re.M)\r\n res = re.search(pattern, self.dom_str)\r\n self.assertTrue(hasattr(res, 'group'),\r\n msg=\"You didn't add a checkout button.\")", "def click_login_button(self):\n submit_button = self.locate_element_by_css_selector(LOGIN_BUTTON_SELECTPR)\n submit_button.click()", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))", "def click_add_resolution_save_button(self):\n self.click_element(self.add_resolution_save_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_locator), 'success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def wait_for_response(self, request_id):\n url = \"{}/{}/{}\".format(self.url, self.url_dir, request_id)\n while True:\n response = requests.get(url)\n if response.text == \"done\\n\":\n return", "def wait_and_click(self, locator_type, locator):\n self.wait.until(EC.element_to_be_clickable((locator_type, locator)))\n return self.driver.find_element(by=locator_type, value=locator).click()", "def click_on_submit(context):\n submit_for_approval = context.browser.find_elements_by_css_selector(\n \"input[type='button'][value='Submit for Approval']\")\n for item in submit_for_approval:\n item.click()\n time.sleep(10)", "def wait(self):\n time.sleep(self.next())", "def wait():\n pass", "def check_upgrade_button():\n try:\n upgrade = driver.find_element_by_link_text(\"Upgrade Now!\")\n upgrade.click()\n except:\n print \"Upgrade Now! 
button is not available.\"", "def click_save_changes_button(self):\n self.click_element(self.save_changes_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.confirmation_popup_locator), 'confirmation popup locator not found before specified time out')\n self.click_element(self.ok_button_locator, True)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()", "def click(self):\r\n pass", "def shared_logic():\n driver.implicitly_wait(5)\n driver.get(f'{APP_PATH}{id_num}')\n q_element = driver.find_elements_by_id(\"user\")\n if len(q_element) > 0:\n result = q_element[0].text\n print(f'->\\tResponse: got {result} from web app interface.')\n return result\n else:\n raise Exception(\"test failed\")", "def click(self):\n end_time = time.time() + self.parent.wait_time\n error = None\n while time.time() < end_time:\n try:\n return self._element.click()\n except (\n ElementClickInterceptedException,\n WebDriverException,\n ) as e:\n error = e\n\n raise error", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def click_the_edit_button_that_appears(driver):\n driver.find_element_by_xpath(xpaths.users.eric_Edit_Button).click()", "def wait_until_element_visible(self, element):\n LOG.info(\"Waiting for '%s' element to get visible\" % element[1])\n try:\n self.wait.until(EC.visibility_of_element_located(element))\n except TimeoutException:\n raise NoSuchElementException(\"UI Element %s not found\" % element[1])\n except Exception as exce:\n raise exce", "def manual_entry(self):\n # Bring up the customer ID screen\n self.log.info(\"Clicking the customer ID button...\")\n if pos.is_element_present(self.customer_id_button, timeout = self.wait_time):\n pos.click('customer id')\n else:\n tc_fail(\"Customer ID button did not appear.\")\n\n self.log.info(\"Entering the customer ID...\")\n if pos.is_element_present(self.manual_button, timeout = self.wait_time):\n # TODO: Change this to the actual account number\n pos.enter_keypad('9999', after='enter')\n else:\n tc_fail(\"Did not change to the customer ID screen.\")\n\n # Make sure we returned to the right screen\n if pos.is_element_present(self.customer_id_button, timeout = self.wait_time):\n self.log.info(\"Successfully entered customer ID manually on HTML POS!\")\n else:\n tc_fail(\"Did not return from customer ID screen.\")\n\n # Pay so that the customer ID will show up in a PJR to check\n pos.pay()" ]
[ "0.7427304", "0.7427304", "0.73659456", "0.69929504", "0.6745575", "0.6593339", "0.62362456", "0.6128253", "0.5971103", "0.59329253", "0.59204847", "0.5899554", "0.5845453", "0.58411074", "0.5840437", "0.5811588", "0.58096135", "0.58032614", "0.5756631", "0.57544845", "0.5751007", "0.5714853", "0.57095635", "0.56864905", "0.5648061", "0.5627916", "0.56080407", "0.5592354", "0.5579457", "0.5560939", "0.55408376", "0.5540494", "0.55314183", "0.5487884", "0.5452423", "0.5446425", "0.54210204", "0.5403817", "0.54002583", "0.5396843", "0.53908306", "0.5376322", "0.5368073", "0.53664607", "0.53638047", "0.5361773", "0.5361085", "0.53575486", "0.53542334", "0.5336743", "0.5332712", "0.5324441", "0.531047", "0.52944994", "0.52899265", "0.52894944", "0.52848184", "0.52770114", "0.5270308", "0.52695215", "0.5268235", "0.5268235", "0.5268235", "0.5268235", "0.5264461", "0.52632827", "0.5248484", "0.52448344", "0.52443755", "0.5238286", "0.5237833", "0.52302873", "0.52257884", "0.5225677", "0.5223537", "0.52224547", "0.5204401", "0.5200095", "0.5192227", "0.518505", "0.51741666", "0.5169365", "0.51652014", "0.51631266", "0.5159891", "0.5153074", "0.51511014", "0.51486695", "0.51427543", "0.5130274", "0.51275694", "0.5126969", "0.5115305", "0.51111174", "0.51089597", "0.51071477", "0.5100265", "0.5097981", "0.5096711", "0.5092541" ]
0.71300626
3
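Several of the negative snippets in the list that closes above (the wait_and_click and clickButton style helpers) revolve around Selenium's explicit-wait API. As a self-contained reference for that pattern, and not a row of this dataset, the sketch below shows a minimal click-when-clickable helper; the locator tuple and the 10-second timeout are placeholder assumptions.

# Illustrative reference only, not a dataset row; locator and timeout are assumed.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_and_click(driver, locator, timeout=10):
    # Block until the element is visible and enabled, then click it.
    element = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable(locator)
    )
    element.click()
    return element

A typical call would pass a (By.CSS_SELECTOR, 'div#fixture button') style locator tuple, matching the selectors used throughout these rows.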
Click button and wait until playing class disappeared from DOM
def is_class_absent(self):
        self.q(css='#spinner').first.click()
        self.wait_for_element_absence('.playing', 'Animation Stopped')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def poll(self):\n\tself.met = self.button.poll()", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def _check_play_button(self, mouse_pos):\n # checking if button is clicked while there's no game active\n # else the button would be clickable even after turning invisible\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # resets games speed\n self.settings.initialize_dynamic_settings()\n\n # reset stats / level / ships and changing game state\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n\n # getting rid of alien ships and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # creating new fleet and centering players ship\n self._create_fleet()\n self.ship.center_ship()\n\n # making mouse pointer invisible\n pygame.mouse.set_visible(False)", "def _check_play_button(self, mouse_pos): \n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # Reset game settings\n self.settings.initialize_dynamic_settings()\n\n # Reset game stats\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n\n # Remove any remaining aliends and bullets\n self.aliens.empty() \n self.bullets.empty()\n\n # Create new fleet and center the ship\n self._create_fleet()\n self.ship.center_ship() \n\n # Hide the mouse cursor when inside of game window\n pygame.mouse.set_visible(False)", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def _check_play_button(self, mouse_pos):\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n # Reset the game settings.\n self.settings.initialize_dynamic_settings()\n # Reset the game statistics.\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_score()\n self.sb.prep_level()\n self.sb.prep_ships()\n # Get rid of any remaining stars and bullets.\n self.stars.empty()\n self.bullets.empty()\n # Create a new galaxy and center the ship.\n self._create_galaxy()\n self.ship.center_ship()\n pygame.mouse.set_visible(False)", "def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n ai_settings.initialize_dynamic_settings()\n #hiding mouse cursor\n start_game(ai_settings, screen, stats, ship, aliens, bullets)\n\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()", "def play_video(self):\n\n self.wait.until(self.visible((By.ID, \"video-title\")))\n self.driver.find_element_by_xpath(\"//button[@class='ytp-large-play-button ytp-button']\").click()", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def _check_play_button(self, mouse_pos):\n\t\tbutton_clicked = self.play_button.rect.collidepoint(mouse_pos)\n\t\tif button_clicked and not self.stats.game_active:\n\t\t\t# Reset the game 
settings.\n\t\t\tself.settings.initialize_dynamic_settings()\n\t\t\tself.stats.reset_stats()\n\t\t\tself.stats.game_active = True\n\t\t\tself.sb.prep_score()\n\t\t\tself.sb.prep_pigeons()\n\t\t\t# Hide the cursor.\n\t\t\tpygame.mouse.set_visible(False)\n\n\t\t# Get rid of any remaining autos and droppings.\n\t\tself.autos.empty()\n\t\tself.droppings.empty()\n\n\t\t# Create a new fleet and center the pigeon\n\t\tself._create_fleet()\n\t\tself.pigeon.center_pigeon()", "def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])", "def wait_for_start(self):\n while True:\n ev = self.scene.waitfor('click')\n game_type = self.on_click(ev)\n if game_type:\n return game_type", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def wait_to_play(self):\n\n\t\tself.player_model.current_player = self.player_model.rival_player\n\t\tself.player_frame.prepare_to_wait_turn(self.player_model.rival_player.name, self.player_model.available_cells)", "def wait_for_video_class(self):\r\n self.wait_for_ajax()\r\n\r\n video_selector = '{0}'.format(CSS_CLASS_NAMES['video_container'])\r\n self._wait_for_element(video_selector, 'Video is initialized')", "def play(self, event):\n if self.num_clicks == 1:\n self.clickable(event)\n if len(self.canvas.find_withtag(\"selected\")) == 2:\n self.num_of_tries += 1\n print(f'Number of tries {self.num_of_tries}')\n if self.num_of_tries > 13:\n self.score -= 10\n self.score_label.config(text=f'Score: {self.score}')\n self.check_match(self.click_tiles)\n self.canvas.after(self.delay, self.flip_back)\n self.click_tiles.clear()\n self.num_clicks = 0\n else:\n self.clickable(event)", "def click_music(self, button):\n if cf.music_on is True:\n cf.music_on = False\n elif cf.music_on is False:\n cf.music_on = True\n # Remove old button.\n self.remove_button()\n # Re-add the button.\n self.add_button()", "def pause():\n click.pause()", "def _check_play_button(self, mouse_pos):\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n\n #Hide mouse cursor\n pygame.mouse.set_visible(False)\n\n # Get rid of any leftover aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n #Create a new fleet and center the ship.\n self._create_fleet()\n self.ship.center_ship()\n\n self.scoreboard.prep_score()\n self.scoreboard.prep_high_score()\n self.scoreboard.prep_ships()", "def update(self):\n\n self.play_button.update()", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def check_play_button(\n ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y\n):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n\n # The play button needs to deactivate each time the game is active.\n if button_clicked and not stats.game_active:\n # Reset game settings.\n ai_settings.initialize_dynamic_settings()\n # Hide the cursor.\n pygame.mouse.set_visible(False)\n # Reset the game stats.\n stats.reset_stats()\n stats.game_active = True\n\n # Reset 
the scoreboard images.\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n\n # Empty aliens and bullets.\n aliens.empty()\n bullets.empty()\n\n # Create new fleet.\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def _check_play_button(self, mouse_pos):\n\n # If the player clicks the play button AND the game isn't going\n if self.play_button.rect.collidepoint(mouse_pos) and not self.stats.game_active:\n\n # reset the game stats and dynamic settings\n self.stats.reset_stats()\n self.settings.initialize_dynamic_settings()\n self.stats.game_active = True\n self.sb.prep_score()\n\n # get rid of any remaining aliens and bullets.\n self.aliens.empty()\n self.bullets.empty()\n\n # recenter player\n self.ship.center_ship()\n\n # hide the mouse cursor\n pygame.mouse.set_visible(False)", "def on_play_btn(self):\n if self.state == self.READY:\n self.send_rtsp_request(self.PLAY)", "def press_button_play(self):\n \n global is_playing\n global my_thread\n is_playing = False\n if not is_playing:\n is_playing = True\n my_thread = threading.Thread(target=self.play_audio)\n my_thread.start()", "def button_click(self, btn, mbtn):\n self.last_action_ts = pygame.time.get_ticks() # update last action timestamp (idle shutdown countdown restarts)\n self.show_time = pygame.time.get_ticks() # refresh show time timestamp, so countdown restarts\n\n status = self.player.get_status()\n\n # which button was pressed?\n if btn is self.btn_play:\n logger.debug(\"button_click: btn_play \")\n player.pause() # toggle play/pause\n elif btn is self.btn_prev:\n logger.debug(\"button_click: btn_prev \")\n try:\n if int(status['song']) > 0: # only accept 'prev' button push if this is not the first song\n player.prev()\n except Exception as e:\n logger.error(e, exc_info=True) # log any exceptions\n elif btn is self.btn_next:\n logger.debug(\"button_click: btn_next \")\n try:\n if int(status['song']) < (int(status['playlistlength']) - 1):\n player.next()\n except Exception as e:\n logger.error(e, exc_info=True) # log any exceptions\n elif btn is self.background:\n logger.debug(\"button_click: background \")\n if status['state'] == 'play' or status['state']== 'pause':\n self.show_buttons()\n else:\n logger.debug(\"button_click: <unknown>\")", "def check_play_button(ai_settings,screen,stats,play_button,ship,aliens,bullets,\n\tmouse_x,mouse_y,sb):\n\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x,mouse_y)\n\n\tif button_clicked and not stats.game_active:\n\t\t# Reinicia as configurações no jogo\n\t\tai_settings.initialize_dynamic_settings()\n\n\n\t\t# Oculta cursor do mouse quando o mouse estiver sobre a janela\n\t\tpygame.mouse.set_visible(False)\n\t\t\n\n\t\t# Reinicia o jogo\n\t\tstats.reset_stats()\n\t\tstats.game_active = True\n\n\t\t# Reinicia as imagems do painel de pontuação\n\t\tsb.prep_score()\n\t\tsb.prep_high_score()\n\t\tsb.prep_level()\n\t\tsb.prep_ship()\n\n\t\t# Esvazia a lista de alienígenas e de projéteis\n\t\taliens.empty()\n\t\tbullets.empty()\n\n\t\t# Cria uma ova frota e centraliza a espaçonave\n\t\tcreate_fleet(ai_settings,screen,ship,aliens)\n\t\tship.center_ship()", "def 
check_play_button(si_settings,screen,stats,sb,play_button,ship,aliens,bullets,mouse_x,mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x,mouse_y)\n if button_clicked and not stats.game_active:\n #Hides mouse\n pygame.mouse.set_visible(False)\n #reset stats\n si_settings.initalize_dynamic_settings()\n stats.reset_stats()\n stats.game_active = True\n #reset Scoreboard\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()\n #Empty aliens and bullets\n aliens.empty()\n bullets.empty()\n #creates new fleet and centers ship\n create_fleet(si_settings,screen,ship,aliens)\n ship.center_ship()", "def check_replay_button(self, mouse_x, mouse_y):\r\n for button in self._replay_button_list:\r\n if button.get_button_rect().collidepoint(mouse_x, mouse_y):\r\n button_clicked = button\r\n break\r\n else:\r\n button_clicked = None\r\n\r\n if button_clicked is not None and button_clicked.get_num_atom() == 1:\r\n self.setup_new_game()\r\n elif button_clicked is not None and button_clicked.get_num_atom() == 2:\r\n sys.exit()", "def wait(self):\n time.sleep(self.pause_time)", "def press_play(self, playTime):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Playback Content for 30s',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.press_rc_key(self.rc.PLAY)\n self.tv.wait_in_second(playTime)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")", "def _playAgain(self):\n self._click()\n if self._last is None and self._touch is not None:\n self._state = STATE_RESET", "def check_play_button(si_settings, screen, stats, sb, play_button, ship, aliens, bullets, alienBullets, images, mouse_x, mouse_y):\r\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\r\n if button_clicked and not stats.game_active:\r\n # Reset the game settings.\r\n si_settings.initialize_dynamic_settings()\r\n\r\n # Hide the mouse cursor.\r\n pygame.mouse.set_visible(False)\r\n\r\n # Reset the game statistics.\r\n stats.reset_stats()\r\n stats.game_active = True\r\n\r\n # Reset the scoreboard images.\r\n sb.prep_score()\r\n sb.prep_high_score()\r\n sb.prep_level()\r\n sb.prep_ships()\r\n\r\n # Empty the list of aliens and bullets.\r\n aliens.empty()\r\n bullets.empty()\r\n alienBullets.empty()\r\n\r\n # Create a new fleet and center the ship.\r\n create_fleet(si_settings, screen, ship, aliens, images)\r\n ship.center_ship()", "def _callback(self):\n self.is_playing = False\n self.play_next()", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def pushbutton_play_clicked(self):\n\n self.frame_player_start_signal.emit()", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def handle_button(self, button):\n last_run = self.last_seen[button] if button in self.last_seen else 0\n diff = time.time() - last_run\n\n if diff <= 1:\n logging.warning(\"duplicate: %s, %d, %d\", 
button, last_run, diff)\n return\n\n try:\n cmd = buttons.COMMANDS[button]\n except KeyError:\n logging.warning(\"No instructions found for button %s.\", button)\n return\n\n self.last_seen[button] = time.time()\n\n try:\n function, music, zone = cmd\n except ValueError, ex:\n logging.warning(\"Couldn't parse instructions from %s: %s\", cmd, ex)\n return\n\n device = self.player.zone(zone)\n if not device:\n logging.warning(\"Can't find a device called %s\", zone)\n return\n\n # If this is the same button we saw last, pause or unpause it.\n if button == self.last_button:\n device.toggle()\n return\n\n if function == \"play_local\":\n self.play_local(music, device)\n self.last_button = button\n else:\n logging.warning(\"Don't know how to %s.\", cmd)", "def check_play_button(ai_settings,screen,stats,play_button,ship,bullets,mouse_x,mouse_y):\n\te_pressed = False\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n\tif (button_clicked or e_pressed) and not stats.game_active: \n\t\t#play the music\n\t\tpygame.mixer.music.load('sounds/lvl1.mp3')\n\t\tpygame.mixer.music.set_volume(1.5)\n\t\tpygame.mixer.music.play()\n\t\t# Hide the mouse cursor\n\t\tpygame.mouse.set_visible(False)\n\t\t# Reset the game statistics\n\t\tstats.reset_stats()\n\t\tstats.game_active = True\n\t\tship.second_stage = False\n\t\tai_settings.boss_health = ai_settings.boss_health_default\n\t\t#empty the list of bullets\n\t\tbullets.empty()\n\t\t#center the ship\n\t\tship.center_ship()", "def wait_for_tag():\n time.sleep(1.1)", "def handle_playback_button(self, request):\n self._verify_auth_parameters(request)\n self.last_button_pressed = request.rel_url.path.split('/')[-1]\n return web.Response(status=200)", "def click_nav_mp3_players(self):\n self.driver.find_element(*BasePageLocators.MP3S).click()\n return self", "def continue_button(self):\r\n self.update_settings()\r\n self.is_pause = False\r\n self.is_step = False\r\n if self.continue_call is not None:\r\n self.wm.after(1, self.continue_call)", "def game_play(self):", "def play(self):\n pass", "def _wait_for_video_play(self, video_display_name=None):\r\n playing_selector = self.get_element_selector(video_display_name, CSS_CLASS_NAMES['video_container'])\r\n pause_selector = self.get_element_selector(video_display_name, VIDEO_BUTTONS['pause'])\r\n\r\n def _check_promise():\r\n \"\"\"\r\n Promise check\r\n\r\n Returns:\r\n bool: Is promise satisfied.\r\n\r\n \"\"\"\r\n return 'is-playing' in self.q(css=playing_selector).attrs('class')[0] and self.q(css=pause_selector).present\r\n\r\n EmptyPromise(_check_promise, 'Video is Playing', timeout=200).fulfill()", "def on_worker_started(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)", "def clicar_no_botao_start():\r\n # terceiro\r\n try:\r\n start_button = _browser.find_element_by_xpath(\r\n \"//button[@class='waves-effect col s12 m12 l12 btn-large uiColorButton']\")\r\n except:\r\n start_button = None\r\n\r\n start_button.click()\r\n assert start_button", "def _control_pause(self):\n self.player.pause()", "def pause_button(self):\r\n self.is_action = True\r\n self.update_settings()\r\n self.is_pause = True\r\n if self.pause_call is not None:\r\n self.wm.after(1, self.pause_call)", "def check_game_mode_button(self, mouse_x, mouse_y):\r\n for button in self._play_mode_button_list:\r\n if button.get_button_rect().collidepoint(mouse_x, mouse_y):\r\n button_clicked = button\r\n break\r\n else:\r\n button_clicked = None\r\n\r\n if button_clicked is not None and \\\r\n 
self._stats.get_status() == \"Start_game\":\r\n self.start_game(button_clicked.get_num_atom())", "def check_play_button(ai_settings, screen, stats, sb, play_button, ship, \n\t\taliens, bullets, mouse_x, mouse_y, alien_bullets, long_bullets,\n\t\thealth, shield):\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n\tbutton_pressed = pygame.key.get_pressed()[pygame.K_RETURN]\n\tif (button_clicked or button_pressed) and not stats.game_active:\n\t\t#Reset the game settings\n\t\tai_settings.initialize_dynamic_settings()\n\t\t\n\t\t#Hide the mouse cursor\n\t\tpygame.mouse.set_visible(False)\n\t\t\n\t\t#Reset the game statistics\n\t\tstats.reset_stats() \n\t\tstats.game_active = True\n\t\t\n\t\t#Reset the scoreboard images\n\t\tsb.prep_score()\n\t\tsb.prep_high_score()\n\t\tsb.prep_level()\n\t\tsb.prep_ships()\n\n\t\t#Empty the list of aliens, bullets, long bullets, health\n\t\taliens.empty()\n\t\tbullets.empty()\n\t\tlong_bullets.empty()\n\t\thealth.empty()\n\t\tshield.empty()\n\t\t\n\t\t#Create a new fleet and center the ship\n\t\tcreate_fleet(ai_settings, screen, ship, aliens) \n\t\t#create_alien_bullet(ai_settings, screen, aliens, alien_bullets)\n\t\tship.center_ship()", "def checkPlayerSelection(self):\n starting = True\n for button in self.model.buttons.sprites():\n if button.rect.collidepoint(mouse.get_pos()) and mouse.get_pressed()[0]:\n if button.function == \"1P\":\n self.model.playernum = 1\n starting = False\n else:\n self.model.playernum = 2\n starting = False\n\n\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n return \"Quit\"\n\n return starting", "def gallery_prev():\n try:\n prev = driver.find_element_by_class_name('slick-prev')\n prev.click()\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def testPlayback(self):\n \n pass", "def player_ans(self, ans):\n\n # Received a click, don't end game because of timeout\n Fl.remove_timeout(self.gameover)\n \n # Check validity of click\n if self.is_correct(ans):\n self.player_seq.append(ans)\n \n # Sequence completed, play next sequence\n if len(self.player_seq) == len(self.sequence): \n \n self.deactivate_buts()\n\n # Brief delay before playing sequence because it sounds nicer\n Fl.add_timeout(0.5, self.play_seq)\n \n else:\n # Start another 5 second time limit to click next button\n Fl.add_timeout(5, self.gameover, True)\n \n else: # End the game if it's not part of the sequence\n self.gameover()", "def animate():\n global callback_id\n if button.label == '► Play':\n button.label = '❚❚ Pause'\n callback_id = doc.add_periodic_callback(animate_update, 100)\n else:\n button.label = '► Play'\n doc.remove_periodic_callback(callback_id)", "def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return", "def wait_for_load(browser):\n loader = browser.find_element_by_class_name('ui-loader')\n while loader.is_displayed():\n time.sleep(0.1)", "def pushbutton_stop_clicked(self):\n\n if self.frame_player.run_player:\n self.frame_player.run_player = False", "def wait(self):\n time.sleep(self.next())", "async def pause_behaviors(self) -> None:", "def click_continue(self):\n self.click_element(self.continue_button_selector)", "def wait(self):\n try:\n confirm_modal_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'btn-default'))\n WebDriverWait(self.web_element, 
2).until(confirm_modal_dialog)\n except TimeoutException:\n confirm_ajs_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'ajs-cancel'))\n WebDriverWait(self.web_element, 2).until(confirm_ajs_dialog)", "def load_buttons(self):\n self.playing_buttons.append(Button(20, 40, 100, 40, \"New Game\"))", "def is_button_output_present(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_presence('div#output', 'Button Output is Available')", "def is_button_output_visible(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_visibility('div#output', 'Button Output is Visible')", "def on_worker_unpaused(self):\n self.playing = True\n self.enable_video_buttons(False, True, True)\n self.unpausing = False", "def play(self):\n print('Playing game...')", "def pause(self):\n if self.status()['state'] == \"playing\":\n self.toggle_pause()", "def click(self, mouse_pos):\n for button in self.enabled_buttons(): # type: Button\n if button.is_position_on_button(mouse_pos):\n self.sound.play_sound(self.click_sound)\n button.click()", "def _button_stop_fired(self):\n self.taking_spectra = False", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def testPlayback(self):\n\t\tc = Controller()\n\t\taction = BaseAction('x')\n\t\tc.actions.append(action)\n\t\tc.playback(action)\n\t\tself.failIf(c.actions.contains(action))\n\t\tself.failUnless(action.playbackPolicy.hasBeenPlayedBack)\n\t\tself.failUnless(action.playbackPolicy.isReadyForRemoval)", "def on_worker_paused(self):\n self.playing = False\n self.pausing = False\n self.enable_video_buttons(True, False, True)", "def card_success(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n self.driver.switch_to_window(handles[2])\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success'))) \n self.driver.find_element_by_class_name(\"success\").click()\n self.driver.switch_to_window(handles[0])", "async def skip(self):\n await self.play()", "def play_game():\n pass", "def on_next_turn_click(self, button):\n if self.referee.is_game_over():\n Gtk.main_quit()\n else:\n self.do_next_turn(button)\n # if the game is over after this turn, we will shutdown on the next click,\n # so visually alert the player with the button label\n if self.referee.is_game_over():\n button.set_label(GAME_OVER_MSG)", "def gallery_next():\n try:\n next = driver.find_element_by_class_name('slick-next')\n next.click()\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def _inactive(self):\n self._click()\n if self._last is None and self._touch is not None:\n self._state = STATE_COUNTDOWN\n self._game = Gameplay()\n self._last = self._touch", "def create_play_button(self):\n play_button = 
Button(self.littleFrame, text=\"Rejouer\", font=(\"Arial\", 25), bg='white', relief='groove',\n fg='lightblue',\n command=self.start_game, width=8, activebackground='white',\n activeforeground='lightblue')\n play_button.grid(column=0, row=0)\n invisible_widget = Label(self.littleFrame, text=\" \", bg=\"lightblue\")\n invisible_widget.grid(column=1, row=0)", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def wait(self):\n self.event.wait()", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def Pause():\n\tDmg.enableButton.SetOff()", "def click(self, x, y, button, press):\n\n if self.is_in_screen(x, y) and not self.pause:\n self.get_color(x, y)\n self.record(x, y, button, press)", "def pause(self):\n pass", "def pause(self):\n pass", "def spinupplayer():\n if __name__ == '__main__':\n _playthread = ImmediatePlayer(PLAYER_SETTINGS, COLOR_SETTINGS)\n PROCESSES.append(_playthread)\n _playthread.start()", "def click_button(self):\n self.widgets.get('button').click()", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def send_thumbs_up():\n try:\n thumbs_up = driver.find_element_by_class_name('la-thumbs-o-up')\n thumbs_up.click()\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def wait(self):\n time.sleep(0.010)", "def _is_finished_loading():\r\n return not self.q(css=CSS_CLASS_NAMES['video_spinner']).visible", "def waitUntilFinished():", "def waitUntilFinished():" ]
[ "0.6262264", "0.6247249", "0.6172894", "0.6123498", "0.6084997", "0.6075153", "0.60310066", "0.6026245", "0.60012144", "0.5995387", "0.5967095", "0.59577924", "0.5948899", "0.59429574", "0.592757", "0.5881039", "0.5876616", "0.5875502", "0.58591527", "0.5838041", "0.58163005", "0.5808849", "0.5791393", "0.57863444", "0.5770776", "0.57666916", "0.575722", "0.5756134", "0.57365084", "0.57210803", "0.57064503", "0.56896156", "0.5679448", "0.5661153", "0.5648074", "0.5647299", "0.564441", "0.56439614", "0.56238836", "0.56109184", "0.56107527", "0.56049603", "0.56043893", "0.5593312", "0.55803883", "0.55715954", "0.5570285", "0.5554192", "0.55335367", "0.55332273", "0.5508286", "0.5505781", "0.5491083", "0.5485017", "0.5477956", "0.5477092", "0.545722", "0.54527473", "0.54497164", "0.54485786", "0.5444393", "0.54438967", "0.54331076", "0.5426218", "0.5424371", "0.5419201", "0.5387427", "0.5375411", "0.53673756", "0.5359791", "0.5352512", "0.53482103", "0.53447056", "0.5342982", "0.5333324", "0.53324234", "0.5322392", "0.5317241", "0.5315856", "0.5315236", "0.5313008", "0.52979386", "0.52894956", "0.5288134", "0.528195", "0.52791095", "0.52701944", "0.52677864", "0.5253454", "0.52518374", "0.52518374", "0.52454114", "0.5237881", "0.52325755", "0.52293384", "0.52290714", "0.52232856", "0.5222631", "0.52221334", "0.52221334" ]
0.6424565
0
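The row above pairs the query 'Click button and wait until playing class disappeared from DOM' with a bok-choy style page-object method as the positive document. For reference only, a plain-Selenium sketch of the same click-then-wait-for-absence idea follows; the driver fixture and the timeout are assumptions, while the '#spinner' and '.playing' selectors are taken from the row's document field.

# Illustrative only, not part of the dataset; driver and timeout are assumed.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait

def click_and_wait_for_class_absence(driver, timeout=10):
    # Click the control that stops the animation.
    driver.find_element(By.CSS_SELECTOR, "#spinner").click()
    # Succeeds once no element carrying the 'playing' class is left in the DOM.
    WebDriverWait(driver, timeout).until(
        lambda d: not d.find_elements(By.CSS_SELECTOR, ".playing")
    )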
Click button and wait until output is displayed.
def is_button_output_visible(self):
        self.wait_for_element_presence('div#ready', 'Page is Ready')
        self.q(css='div#fixture button').first.click()
        self.wait_for_element_visibility('div#output', 'Button Output is Visible')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def click_button(self):\n self.widgets.get('button').click()", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def is_button_output_present(self):\n self.wait_for_element_presence('div#ready', 'Page is Ready')\n self.q(css='div#fixture button').first.click()\n self.wait_for_element_presence('div#output', 'Button Output is Available')", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def poll(self):\n\tself.met = self.button.poll()", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = _canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def wait(self):\n self.mainloop().wait()", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def wait_for_input(self):\n pass", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def wait():\n time.sleep(1)", "def wait(self):\n time.sleep(0.010)", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def testButtonCB(self, testId):\n button = self.test_buttons[testId]\n if self.result:\n self.showTestOutput(testId)\n return", "def pop_up(self):\n sleep(2)\n self.driver.find_element_by_link_text('Got It').click()\n self.get_search_results()", "def simulate_button_clicked(self):\n self.simulate_bool = True\n self.update_change()", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return", "def wait(self):\n self.event.wait()", "def run(self):\n run=0\n wx.CallAfter(Publisher().sendMessage, \"update\", \"\")\n time.sleep(10)\n while (run==0):\n wx.CallAfter(Publisher().sendMessage, \"updatebuttons\", \"\")\n time.sleep(10)", "def click_process(self):\n # TODO implement print function for verbosity\n\n # Create Worker Thread\n self.worker = Worker(self)\n\n self.worker.start()\n self.worker.finished.connect(self.worker.deleteLater)\n self.worker.log.connect(self.update_log)\n\n # Safety Lock\n self.Process_Button.setEnabled(False)\n self.worker.finished.connect(lambda: self.Process_Button.setEnabled(True))", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn 
self.result", "def wait():\n pass", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def execPushButton(self):\n\t\t# verbose.detail(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))\n\t\tprint(\"%s %s\" %(self.sender().objectName(), self.sender().property('exec')))", "def show_result():\n print(\"I win!!\")", "def press(button):\r\n if button == 'Process':\r\n src_file = app.getEntry('Input_File')\r\n dest_dir = app.getEntry('Output_Directory')\r\n out_file = app.getEntry('Output_name')\r\n points = app.getEntry('Points')\r\n out_file = out_file + '.csv'\r\n plot = app.getCheckBox('Plot Results')\r\n iter(src_file, Path(dest_dir, out_file), points,plot)\r\n else:\r\n app.stop()", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "def pause():\n click.pause()", "def cb_something_1(self, button):\n print(\"Do Something 1\")", "def click_button(button_to_click):\n try:\n button_to_click.click()\n except:\n print(\"Button not found\")", "def wait() -> None:\n\n process_input(input())", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def Wait(p_question: str):\n input(p_question)\n return", "def reload_and_trigger_output(self):\n self.browser.refresh()\n self.wait_for_js() # pylint: disable=no-member\n self.q(css='div#fixture button').first.click()", "def okClicked(self):\n try:\n self.enablePackage()\n except MissingPackage:\n debug.critical(\"The controlflow package is not available\")\n return\n\n # Verify that at least one input and one output have been chosen\n input_ports_info = self.getInputPortsInfo()\n output_ports_info = self.getOutputPortsInfo()\n if len(input_ports_info) == 0:\n show_info('No Input Ports Selected', 'No Input Ports have been selected. You must select at least one to proceed.')\n elif len(output_ports_info) == 0:\n show_info('No Output Port Selected', 'No Output Port has been selected. 
You must select one to proceed.')\n else:\n self.createControlFlow(input_ports_info, output_ports_info)\n self.close()", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def cb_something_3(self, button):\n print(\"Do Something 3\")", "def run_command(self):\r\n self.update_settings()\r\n self.run = True\r\n self.pause = False\r\n if self.run_call is not None:\r\n self.wm.after(1, self.run_call)", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def start_button_action(self):\n if self.dynamic.output_file.text() and os.path.isdir(\n self.dynamic.output_directory.text()\n ):\n\n additional_settings = {\n \"Save_data\": True,\n \"Filepath\": self.dynamic.output_directory.text(),\n \"Filename\": self.dynamic.output_file.text(),\n \"skip_init\": False,\n }\n\n # Generate a Lookuptable for the plots\n steps = (\n int(\n abs(\n float(self.dynamic.max_voltage_IV.value())\n / float(self.dynamic.voltage_steps_IV.value())\n )\n )\n + 1\n )\n self.cmapLookup = self.cmap.getLookupTable(1.0, 3.0, steps)\n self.variables.reset_plot_data()\n\n self.generate_dynamicwaiting_job(additional_settings)\n # self.variables.reset_plot_data()\n\n else:\n reply = QMessageBox.information(\n None,\n \"Warning\",\n \"Please enter a valid filepath and filename.\",\n QMessageBox.Ok,\n )", "def on_pushButton_clicked(self):\r\n # TODO: not implemented yet\r\n print 1", "def wait(self):\n pass", "def wait(self):\n pass", "def view(self):\n\t\tself.done(1)", "def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()", "def on_click(self, event):\n if event['button'] == 1 and 'button1' in self.options:\n subprocess.call(self.options['button1'].split())\n elif event['button'] == 2 and 'button2' in self.options:\n subprocess.call(self.options['button2'].split())\n elif event['button'] == 3 and 'button3' in self.options:\n subprocess.call(self.options['button3'].split())", "def cb_something_2(self, button):\n print(\"Do Something 2\")", "def click(self):\r\n pass", "def run(self):\n while self.__running:\n enum = self.__gui_app.pollButtonEvent()\n if enum != '':\n print enum\n if int(enum, 16) == 4:\n self.__qf.tick()\n else:\n 
self._publish(enum)\n\n print \"Exit: %s\\n\" % self", "def wait(self):\n time.sleep(self.next())", "def run_button(self):\n if self.run.label == 'Run':\n self.run.label = 'Stop'\n self.run.button_type = 'danger'\n self.callback_obj = self.doc.add_periodic_callback(self.unlocked_task, 1000)\n\n else:\n self.run.label = 'Run'\n self.run.button_type = 'success'\n self.doc.remove_periodic_callback(self.callback_obj)", "def click(self, agent):\n self.grab(agent)\n #eventlet.sleep(5)\n self.degrab(agent)", "def OnButtonClick(self):\n self.choice()", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def test_ProstateReporting1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n self.delayDisplay('Test passed!')", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click_download_button(self):\n self._basket.click_download_button()", "def on_run_button(self, event):\n text = _(u\"Run button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.run_command()", "def run(self):\n self.window.mainloop()", "def go(self):\n self.driver.go()\n self.last_control = time.time()", "def cb_something_4(self, button): \n print(\"Do Something 4\")", "def wait_until_transfers_displayed(self):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).wait_until_displayed()", "def run_user_code(self, button):\n button.setEnabled(False)\n self.user_thread.start()", "def main():\n time.sleep(0.1)", "def launch_tv_input(self):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Launch Channel Input',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.press_rc_key(self.rc.TV)\n \n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")", "def do_wait(self):\n pass", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def batch_test_open():\n try:\n WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.CLASS_NAME, \"cdk-overlay-pane\")))\n ActionChains(browser).send_keys(Keys.ESCAPE).perform()\n except:\n print(\"No migration pop-up\")\n\n WebDriverWait(browser, 2).until(EC.element_to_be_clickable((By.LINK_TEXT, config.app_name)))\n browser.find_element_by_link_text(config.app_name).click()\n WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.CLASS_NAME, 'nav-section')))\n buttons = 
browser.find_elements_by_class_name('nav-section')\n buttons[1].click()\n WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//button[contains(text(), '\n '\"Batch testing\")]')))\n browser.find_element_by_xpath('//button[contains(text(), \"Batch testing\")]').click()", "def wait(self, ms=None):\r\n raw_input(\"\\n\\nPress a key...\")", "def show_results ():\n #Total volume\n vol = tkinter.Label\\\n (text= (\"Total volume: \" + str (ice_volume) + \" m\\u00b2\"))\n vol.pack ()\n #Total mass\n mass = tkinter.Label\\\n (text= (\"Total mass: \" + str (ice_mass) + \" kg\"))\n mass.pack ()\n #Towability\n print (\"Calculating towability\")\n if ice_mass > 36000000:\n tow = tkinter.Label (text = \"Iceberg cannot be towed\")\n else:\n tow = tkinter.Label (text = \"Iceberg can be towed\")\n print (\"Towability calculated\")\n tow.pack ()\n #Disable button after 1 click\n #Code based on https://www.youtube.com/watch?v=QfTo3rK3e48\n results_btn ['state'] = 'disabled'", "def test1(stopEvent: Event):\n auto.InitializeUIAutomationInCurrentThread()\n n = 0\n child = None\n auto.Logger.WriteLine('Use UIAutomation in another thread:', auto.ConsoleColor.Yellow)\n while True:\n if stopEvent.is_set():\n break\n if not child:\n n = 1\n child = auto.GetRootControl().GetFirstChildControl()\n auto.Logger.WriteLine(n, auto.ConsoleColor.Cyan)\n auto.LogControl(child)\n child = child.GetNextSiblingControl()\n n += 1\n stopEvent.wait(1)\n auto.UninitializeUIAutomationInCurrentThread()\n print('test1 exits')", "def showTestBegin(self, test):\n self.test_buttons[test.id()].setState('running')\n self.showMessage('busy', test.id())\n self.update_idletasks()\n return", "def exit_out():\r\n print(\"Exiting...\")\r\n time.sleep(3)\r\n os.system(\"pause\")", "def click_green_button(self):\n self.driver.sleep(2)\n self.driver.find_or_raise(\n \"//div/a[text()='My Usage']/following-sibling::span\", xpath=True\n ).click() # Clicks the expand icon next to \"My Usage\"\n self.driver.sleep(1)\n self.driver.find(\"//a[.='My Green Button Data']\", xpath=True).click()\n self.driver.screenshot(BaseWebScraper.screenshot_path(\"select green button\"))", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global entered_code\n\n if button_text == \"Enter Code\":\n self.answer = self.input_box.text\n entered_code = self.answer\n self.convert_string_to_int(self.answer)\n self.ui_manager.purge_ui_elements()\n self.minigame.window.show_view(FakeCodeGame.MyView(self.minigame.main_view))\n print(f\"EnterCode button. 
{self.answer}\")\n elif button_text == \"Exit Terminal\":\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n\n \"\"\"\n The following functions check the submitted answer\n \"\"\"", "def cb_gui_test_1( self, ):\r\n print( \"cb_gui_test_1\" )\r\n self.helper_thread.toggle_lock()", "def jupyter_run_button(self, update_rate: float = 1E-21):\n if not _in_jupyter():\n raise RuntimeError('This method is reserved for Jupyter environments')\n\n import asyncio\n import ipywidgets\n\n _running = False\n\n async def _run():\n while True:\n if _running:\n self.step()\n await asyncio.sleep(update_rate)\n\n asyncio.ensure_future(_run())\n\n def _run_cb(change):\n if change['name'] == 'value':\n nonlocal _running\n _running = change.new\n\n _run_button = ipywidgets.ToggleButton(value=False, description='Run Simulation')\n _run_button.observe(_run_cb, names='value')\n return _run_button", "def wait(self):\n time.sleep(self.pause_time)", "def wait(self) -> None:\n\n self.event_.wait()", "def click_the_save_button_which_should_be_returned_to_the_storage_page(driver):\n assert wait_on_element(driver, 5, '//button[contains(.,\"Save Access Control List\")]', 'clickable')\n driver.find_element_by_xpath('//button[contains(.,\"Save Access Control List\")]').click()\n time.sleep(1)\n assert wait_on_element_disappear(driver, 30, '//h6[contains(.,\"Please wait\")]')", "def run(self):\n if has_GUI:\n self.GUI(self.buffer)\n else:\n while True:\n message = input(\"Write your command:\\n\")\n # print(message)\n self.buffer.append(message)" ]
[ "0.7492994", "0.7492994", "0.74046254", "0.73170245", "0.6892896", "0.67912996", "0.6658468", "0.66095304", "0.66076696", "0.6600129", "0.64885914", "0.6422199", "0.63829", "0.63829", "0.6375364", "0.63732177", "0.6356924", "0.63511723", "0.6327557", "0.62831944", "0.62803334", "0.62401396", "0.6229682", "0.6228761", "0.61933845", "0.61933845", "0.618299", "0.61780655", "0.6164878", "0.6160755", "0.612186", "0.6117805", "0.60768753", "0.6069506", "0.60629284", "0.6038843", "0.6037128", "0.6021135", "0.60022366", "0.5994713", "0.59771776", "0.5954694", "0.59533757", "0.5953304", "0.5951689", "0.5947443", "0.594479", "0.59442914", "0.59406465", "0.59329945", "0.59265053", "0.5924148", "0.59218436", "0.59218436", "0.59218436", "0.5921607", "0.58865994", "0.58745563", "0.58745563", "0.5864452", "0.5854653", "0.5849963", "0.58375764", "0.5826434", "0.58243144", "0.5822217", "0.582056", "0.58148235", "0.58059937", "0.5799946", "0.5795001", "0.5788225", "0.5775925", "0.57714045", "0.5732524", "0.57305914", "0.57187665", "0.57141846", "0.5697528", "0.56931454", "0.568934", "0.56742716", "0.5671859", "0.5671859", "0.5671859", "0.5671859", "0.5668868", "0.566793", "0.56491214", "0.56465614", "0.5645993", "0.56413156", "0.5630996", "0.56246936", "0.5619114", "0.56190974", "0.5619055", "0.56159675", "0.5604974", "0.5596099" ]
0.67361134
6
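The row above covers click-then-wait-for-visible-output. For reference only, a plain-Selenium version of the same three steps (wait for readiness, click, wait for the output to become visible) is sketched below; it reuses the 'div#ready', 'div#fixture button' and 'div#output' selectors from the document field, while the explicit-wait calls and the timeout stand in for the page object's own wait helpers and are assumptions.

# Illustrative only, not part of the dataset; driver and timeout are assumed.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_and_wait_for_output(driver, timeout=10):
    # Wait until the page reports readiness before interacting with it.
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "div#ready"))
    )
    driver.find_element(By.CSS_SELECTOR, "div#fixture button").click()
    # Block until the output element is rendered and visible.
    WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located((By.CSS_SELECTOR, "div#output"))
    )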
Click button and wait until spinner is disappeared.
def is_spinner_invisible(self):
        self.q(css='#spinner').first.click()
        self.wait_for_element_invisibility('#anim', 'Button Output is Visible')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_spinner_disappear(self):\n self.wait_for_element_disappear(loadings_catalog.LOADING_SPINNER)\n self.wait_for_element_disappear(loadings_catalog.LOADING)", "def wait_for_button(self, button, message=True):\n if message:\n rospy.loginfo(\"Waiting for xbox button: \" + button)\n \n wait_for(lambda: not self.get_button(button) == 0)", "def click_on_browse_button(self):\n self.kill_all_opened_file_browsing_dialogs()\n browse_button_element = self.wait().until(EC.element_to_be_clickable(self.browse_button_locator), 'browse button not found before specified time')\n browse_button_element.click()\n self.wait_for_ajax_spinner_load()", "def click_entry_complete_button(self):\n self.click_element(self.entry_complete_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_entry_success_message_locator), 'statement entry success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def poll(self):\n\tself.met = self.button.poll()", "def click_on_upload_button(self):\n upload_button_element = self.wait().until(EC.visibility_of_element_located(self.upload_button_locator), 'upload button not found before specified time')\n upload_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.visibility_of_element_located(self.success_message_popup_title), 'success popup message not found before specified time')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def wait_for_buttons(self, threaded=True):\n\t\tRPIO.wait_for_interrupts(threaded)", "def click_button(self):\n self.widgets.get('button').click()", "def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return", "def click_received_charges_cancel_changes_button(self):\n self.click_element(self.received_charges_cancel_changes_button_locator)\n self.wait_for_ajax_spinner_load()", "def is_class_absent(self):\n self.q(css='#spinner').first.click()\n self.wait_for_element_absence('.playing', 'Animation Stopped')", "def _done_button_cb(self, widget=None):\n if self.lastTestResult:\n self._trigger_event(\"success\")\n else:\n self._launch_click_through_dialog()", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def click_win_dispute_cancel_button(self):\n self.click_element(self.win_dispute_cancel_button_locator)\n try:\n self.dismiss_alert_pop_up()\n except:\n pass\n self.wait_for_ajax_spinner_load()", "def handler(signum, frame, spinner):\n spinner.red.fail(\"✘\")\n spinner.stop()", "def wait_for_load(browser):\n loader = browser.find_element_by_class_name('ui-loader')\n while loader.is_displayed():\n time.sleep(0.1)", "def hide(self) -> None:\n self.spinner.stop()\n self.hidden = True", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def click(self):\n element = self.element\n WebDriverWait(self._browser, TMO).until(\n lambda browser: element.is_displayed())\n time.sleep(0.1) # Just some pacing.\n element.click()", "def wait_for_click():\r\n global _canvas\r\n global _cue\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n while True:\r\n _cue = 
_canvas.wait()\r\n if _cue.getDescription() == 'mouse release': break", "def wait(self):\n time.sleep(0.010)", "def wait_until_transfers_displayed(self):\n BaseElement(self.driver, locators.TRANSFER_MONEY_BUTTON).wait_until_displayed()", "def spinner(self):\n return None", "def wait():\n time.sleep(1)", "def select_settings_unload_btn(self):\n select_settings_unload_btn_sitem = self.locator_finder_by_id(self.select_settings_unload_btn_id)\n select_settings_unload_btn_sitem.click()\n time.sleep(2)\n self.wait_for_ajax()", "def click_the_submit_button(self):\n with self._wait_for_page_refresh():\n self.selib.click_button(self.locator.submit_button)", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def wait(self):\n self.mainloop().wait()", "def click_submit_payment_button(self):\n self.click(self.submit_payment_locator)\n time.sleep(2)", "def select_upload_btn(self):\n select_upload_btn_sitem = self.locator_finder_by_xpath(self.select_upload_btn_id)\n select_upload_btn_sitem.click()\n time.sleep(3)", "def continue_to_grading(self):\r\n self.q(css='input.calibration-feedback-button').first.click()", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "def wait(self):\n self.event.wait()", "def spin(self):\n spinner = self._spinner_dict.get(self._current_gui, lambda: None)\n spinner()", "def handler(signum, frame, spinner):\n spinner.fail()\n spinner.stop()", "def wait(self):\n try:\n confirm_modal_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'btn-default'))\n WebDriverWait(self.web_element, 2).until(confirm_modal_dialog)\n except TimeoutException:\n confirm_ajs_dialog = EC.presence_of_all_elements_located((By.CLASS_NAME, 'ajs-cancel'))\n WebDriverWait(self.web_element, 2).until(confirm_ajs_dialog)", "def move_confirm_btn(self):\n self.wait_for_ajax()\n move_confirm_btn_sitem = self.locator_finder_by_id(self.move_confirm_btn_id, 20)\n move_confirm_btn_sitem.click()\n time.sleep(1)", "def busyWait(self):\n time.sleep(0.0)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def run_button(self):\n if self.run.label == 'Run':\n self.run.label = 'Stop'\n self.run.button_type = 'danger'\n self.callback_obj = self.doc.add_periodic_callback(self.unlocked_task, 1000)\n\n else:\n self.run.label = 'Run'\n self.run.button_type = 'success'\n self.doc.remove_periodic_callback(self.callback_obj)", "def wait_for_tag():\n time.sleep(1.1)", "def wait(self):\n time.sleep(self.next())", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def _state_main(self, gui):\n gui.pack_button.wait_variable(gui.buttons_on)", "def click_create_vendor_button(self):\n create_vendor_element = self.wait().until(EC.element_to_be_clickable(self.create_vendor_locator), \"create vendor locator not found before specified time out\")\n create_vendor_element.click()\n self.wait_for_ajax_spinner_load()", "def pause():\n click.pause()", "def continue_button(self):\r\n self.update_settings()\r\n self.is_pause = False\r\n self.is_step = False\r\n if self.continue_call is not None:\r\n self.wm.after(1, self.continue_call)", "def wait(self):\n pass", "def wait(self):\n pass", "def run_and_wait():\n self.busy.put(True)\n 
action()\n self.busy.put(False)\n status._finished(success=True)", "def click_the_save_button_which_should_be_returned_to_the_storage_page(driver):\n assert wait_on_element(driver, 5, '//button[contains(.,\"Save Access Control List\")]', 'clickable')\n driver.find_element_by_xpath('//button[contains(.,\"Save Access Control List\")]').click()\n time.sleep(1)\n assert wait_on_element_disappear(driver, 30, '//h6[contains(.,\"Please wait\")]')", "def show(self) -> None:\n self.spinner.start()\n self.hidden = False\n self.bring_to_front()", "def ToggleSpinner(event, state, widget):\n if state == True:\n widget.Enable()\n else:\n widget.Disable()\n event.Skip()", "def wait(self):\n time.sleep(self.pause_time)", "def wait_for_non_loading_screen():\n imagesearch_loop(image=SETTINGS['img_paths']['screens']['nav_box'])", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def clickButton(self, xpath):\n WebDriverWait(self.driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, xpath))).click()\n self.sleep_approx(1)", "def select_collection_delete_btn(self):\n select_collection_delete_btn_sitem = self.locator_finder_by_id(self.select_collection_delete_btn_id)\n select_collection_delete_btn_sitem.click()\n time.sleep(1)", "def cb_gui_test_2( self, ):\r\n pass\r\n # TASK list is gone self.task_list.stop_auto( )\r", "def do_wait(self):\n pass", "def wait_for_shield_invisibility(driver, duration=0.25):\n WebDriverWait(driver, 10).until(\n EC.invisibility_of_element_located(\n (By.CLASS_NAME, 'ut-click-shield showing interaction'))\n )\n sleep(.25)", "def click_on_vendor_price_list_upload_search_button(self):\n vendor_price_list_upload_search_button_element = self.wait().until(EC.element_to_be_clickable(self.vendor_price_list_upload_search_button_locator), 'vendor price list upload search button locator not found before specified time')\n vendor_price_list_upload_search_button_element.click()\n self.wait_for_ajax_spinner_load()", "def click(self, browser, locator, sleep_time=3, expl_time=20):\n\n time.sleep(sleep_time)\n try:\n browser.implicitly_wait(5)\n WebDriverWait(browser, expl_time, ignored_exceptions=StaleElementReferenceException).until(\n ec.presence_of_element_located(locator))\n except (NoSuchElementException, TimeoutException, ElementNotInteractableException, StaleElementReferenceException):\n # additional check were deleted, cause of some unexpected timeout exceptions on it\n browser.implicitly_wait(5)\n WebDriverWait(browser, 10).until(ec.element_to_be_clickable(locator))\n self.waiting_loading_element(browser)\n browser.find_element(*locator).click()\n self.waiting_loading_element(browser)", "def click_upload_statement_upload_button(self):\n self.click_element(self.statement_upload_button_locator)\n try:\n self.wait().until(EC.visibility_of_element_located(self.statement_upload_success_message_locator), 'statement upload success message locator not found before specified time out')\n self.click_element(self.ok_button_locator)\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def on_click(self, event):\n if self.click_job is not None:\n self.after_cancel(self.click_job)\n item = self.identify(\"item\", event.x, event.y)\n if item:\n self.click_job = self.after(200, self.clicked, item)\n return \"break\"", "def wait():\n pass", "def showTestBegin(self, test):\n self.test_buttons[test.id()].setState('running')\n self.showMessage('busy', test.id())\n self.update_idletasks()\n return", "def 
_is_finished_loading():\r\n return not self.q(css=CSS_CLASS_NAMES['video_spinner']).visible", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def click_remove_dispute_button_without_selection(self):\n remove_dispute_button_element = self.wait().until(EC.element_to_be_clickable(self.remove_dispute_button_locator), 'remove dispute button locator not found before specified time out')\n remove_dispute_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n self.wait().until(EC.presence_of_element_located(self.error_message_locator), 'error message locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time out')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def card_success(self): \n handles = self.driver.window_handles\n while len(handles) != 3:\n handles = self.driver.window_handles\n self.driver.switch_to_window(handles[2])\n WebDriverWait(self.driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR,'.success'))) \n self.driver.find_element_by_class_name(\"success\").click()\n self.driver.switch_to_window(handles[0])", "def clicar_no_botao_start():\r\n # terceiro\r\n try:\r\n start_button = _browser.find_element_by_xpath(\r\n \"//button[@class='waves-effect col s12 m12 l12 btn-large uiColorButton']\")\r\n except:\r\n start_button = None\r\n\r\n start_button.click()\r\n assert start_button", "def collection_delete_confirm_btn(self):\n collection_delete_confirm_btn_sitem = self.locator_finder_by_xpath(self.collection_delete_confirm_btn_id)\n collection_delete_confirm_btn_sitem.click()\n time.sleep(1)", "def wait_for_input(self):\n pass", "def select_status_loaded(self):\n select_status_loaded_sitem = self.locator_finder_by_xpath(self.select_status_loaded_id)\n select_status_loaded_sitem.click()\n time.sleep(2)", "def smart_wait(self,locator = None, wait_seconds=10, locator_type = None):\n try:\n loc = locator\n if locator_type == 'button':\n WebDriverWait(self.driver, wait_seconds).until(EC.element_to_be_clickable((By.XPATH, loc)))\n else:\n WebDriverWait(self.driver, wait_seconds).until(EC.presence_of_element_located((By.XPATH,loc)))\n except Exception as e:\n print(e + 'Exception')\n return False\n return True", "def _jsclick(self, locator):\n\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.wait_until_element_is_enabled(locator)\n for should_retry in (True, False):\n try:\n # Setting the focus first seems to be required as of Spring'20\n # (read: without it, tests started failing in that release). 
I\n # suspect it's because there is a focusOut handler on form\n # fields which need to be triggered for data to be accepted.\n element = self.selenium.get_webelement(locator)\n self.selenium.driver.execute_script(\n \"arguments[0].focus(); arguments[0].click()\", element\n )\n return\n except StaleElementReferenceException:\n if should_retry:\n time.sleep(1)\n else:\n raise", "def select_truncate_btn(self):\n select_truncate_btn_sitem = self.locator_finder_by_id(self.select_truncate_btn_id)\n select_truncate_btn_sitem.click()\n time.sleep(1)\n select_truncate_confirm_btn_sitem = self.locator_finder_by_xpath(self.select_truncate_confirm_btn_id)\n select_truncate_confirm_btn_sitem.click()\n time.sleep(2)\n self.wait_for_ajax()", "def wait(self, _id):\n while not self._actions[_id].done:\n sleep(1e-3)", "def wait(self, seconds):\n time.sleep(seconds)", "def click(self) -> None:\n if self.is_enabled():\n try:\n self.element.click()\n logging.info(\"Class booked!\")\n except:\n logging.info(\"The button could not be clicked, trying to execute the element.\")\n self.driver.execute_script(\"arguments[0].click();\", self.element)\n finally:\n logging.info(\"Could not book the class\")\n\n else:\n warnings.warn('The Button cannot be clicked.')", "def click_non_traffic_charges_delete_button(self):\n self.get_non_traffic_charges_grid_row_count()\n self.click_element(self.non_traffic_charges_delete_button_locator, True)\n try:\n self.wait().until(EC.visibility_of_element_located(self.charges_delete_success_message_locator), 'charges delete success message locator not found before specified time out')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def block_waiting( self ):\n while self.num_waiting > 0:\n time.sleep( 1 )", "def click_on_analyze_and_complete_inline_action(self, inline_item):\n self.select_inline_action_item(inline_item)\n self.wait_for_ajax_spinner_load(300)\n try:\n self.wait().until(EC.presence_of_element_located(self.analyze_and_complete_confirmation_popup_locator), 'analyze and complete confirmation popup locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time')\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def cb_gui_test_1( self, ):\r\n print( \"cb_gui_test_1\" )\r\n self.helper_thread.toggle_lock()", "def check_control_loop(self):\n state = win32api.GetAsyncKeyState(win32con.VK_MENU)\n self.click_control(state != 0)\n self.after(100, self.check_control_loop)", "def wait_for_hidden(self, locator):\r\n for i in range(timeout_seconds):\r\n if self.driver.is_visible(locator):\r\n time.sleep(1)\r\n else:\r\n break\r\n else:\r\n raise ElementVisiblityTimeout(\"%s visibility timed out\" % locator)\r\n return True", "def click_continue(self):\n self.click_element(self.continue_button_selector)", "def click_submit_button(self):\n self.click(by_locator=self.__ASK_QUESTION_PAGE_ASK_QUESTION_BUTTON)", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def 
cb_something_1(self, button):\n print(\"Do Something 1\")", "def click_non_recurring_charge_save_button(self):\n non_recurring_charge_save_button_element = self.wait().until(EC.element_to_be_clickable(self.non_recurring_charge_save_button_locator), 'non recurring charge save button locator not found before specified time out')\n non_recurring_charge_save_button_element.click()\n self.wait_for_ajax_spinner_load()\n try:\n try:\n self.wait().until(EC.visibility_of_element_located(self.do_you_wish_to_continue_locator))\n self.click_element(self.ok_button_locator)\n except:\n pass\n self.wait().until(EC.visibility_of_element_located(self.success_message_locator), 'success message locator not found before specified time out')\n ok_button_element = self.wait().until(EC.element_to_be_clickable(self.ok_button_locator), 'ok button locator not found before specified time out')\n self.wait_for_ajax_spinner_load()\n ok_button_element.click()\n except:\n raise\n self.wait_for_ajax_spinner_load()", "def click_vendor_price_lists_search_button(self):\n search_button_element = self.wait().until(EC.element_to_be_clickable(self.search_button_locator), 'search button not found before specified time')\n self.script_executor_click(search_button_element)\n self.wait_for_ajax_spinner_load(300)", "def cb_something_4(self, button): \n print(\"Do Something 4\")" ]
[ "0.7126271", "0.6884283", "0.66924036", "0.65854484", "0.654948", "0.64384645", "0.63156706", "0.62573993", "0.62523603", "0.6224479", "0.61823416", "0.61524755", "0.6134778", "0.61076003", "0.6089265", "0.6077746", "0.6075494", "0.606039", "0.6058926", "0.6032544", "0.6031637", "0.6020459", "0.60119134", "0.6008281", "0.59909135", "0.5972173", "0.5954425", "0.59371537", "0.5932139", "0.59140754", "0.59129614", "0.5896673", "0.5881144", "0.58553684", "0.5841519", "0.5833602", "0.5822374", "0.58148366", "0.5791918", "0.5791918", "0.5791918", "0.5791918", "0.5783304", "0.57742804", "0.577193", "0.57434005", "0.57311803", "0.57311803", "0.57286805", "0.57248425", "0.572087", "0.57090634", "0.57090634", "0.57034236", "0.5699807", "0.5696847", "0.5688821", "0.56862086", "0.5679579", "0.5670727", "0.56518847", "0.5651031", "0.5645564", "0.56437856", "0.56406", "0.5626635", "0.56227696", "0.56153846", "0.5603009", "0.5597138", "0.5597096", "0.55812067", "0.55738264", "0.55738264", "0.55679274", "0.55447966", "0.5543772", "0.5539883", "0.5539848", "0.5538125", "0.55226386", "0.5519875", "0.55175513", "0.5508439", "0.5504803", "0.5503627", "0.5490316", "0.5486503", "0.5483152", "0.548225", "0.5471775", "0.54686064", "0.5467968", "0.5465598", "0.5463569", "0.5461722", "0.546163", "0.54594916", "0.54584116", "0.54566026" ]
0.69422036
1
Determine if a given blockchain is valid
def valid_chain(self, chain):
    last_block = chain[0]
    current_index = 1

    while current_index < len(chain):
        block = chain[current_index]
        # print(f'{last_block}')
        # print(f'{block}')
        # print("\n-----------\n")
        # Check that the hash of the block is correct
        last_block_hash = self.hash(last_block)
        if block['previous_hash'] != self.hash(last_block):
            return False

        # Check that the Proof of Work is correct
        if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):
            return False

        last_block = block
        current_index += 1

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validChain(bc):\n #If the first block of chain is not equal to the genesis block, then chain is invalid\n if (json.dumps(Block.to_dict(bc.chain[0])) != json.dumps(Block.to_dict(Block.genesis()))):\n return False\n #check validity for all blocks in the blockchain\n for i in range(1,len(bc.chain)):\n curr_block = bc.chain[i]\n prev_block = bc.chain[i-1]\n #if lastHash of curr_block is not equal to the hash value of prev_block then chain is not valid\n #if the hash value of curr_block is not equal to the hash value produced from hashBlock then chain is not valid\n if((curr_block.lastHash != prev_block.hashVal) or (curr_block.hashVal != Block.hashBlock(curr_block))):\n return False\n return True", "def verify_chain(cls, blockchain):\n # When enumerating a list, it returns a pair of index and value (tuple)\n for (index, block) in enumerate(blockchain):\n if index == 0: # Genesis block\n continue\n if block.previous_hash != hash_util.hash_block(blockchain[index - 1]):\n return False\n if not cls.valid_proof(block.transactions[:-1], block.previous_hash, block.proof):\n print(\"Proof of Work is invalid!\")\n return False\n return True", "def is_valid(self):\n\n chain = blockchain.chaindb.chain # This object of type Blockchain may be useful\n\n # Solution for (1a)\n\n # (checks that apply to all blocks)\n # Check that Merkle root calculation is consistent with transactions in block (use the calculate_merkle_root function) [test_rejects_invalid_merkle]\n if not (self.merkle == self.calculate_merkle_root()):\n return False, \"Merkle root failed to match\"\n # Check that block.hash is correctly calculated [test_rejects_invalid_hash]\n if not (self.hash == self.calculate_hash()):\n return False, \"Hash failed to match\"\n # Check that there are at most 900 transactions in the block [test_rejects_too_many_txs]\n if len(self.transactions) > 900:\n return False, \"Too many transactions\"\n\n # (checks that apply to genesis block)\n if self.is_genesis:\n # Check that height is 0 and parent_hash is \"genesis\" [test_invalid_genesis]\n if not (self.height == 0):\n return False, \"Invalid genesis\"\n if not (self.parent_hash == \"genesis\"):\n return False, \"Invalid genesis\"\n\n # (checks that apply only to non-genesis blocks)\n if not self.is_genesis:\n # Check that parent exists [test_nonexistent_parent]\n if not self.parent_hash in chain.blocks:\n return False, \"Nonexistent parent\"\n parent_block = chain.blocks[self.parent_hash]\n # Check that height is correct w.r.t. 
parent height [test_bad_height]\n if not (self.height == parent_block.height + 1):\n return False, \"Invalid height\"\n # Check that timestamp is non-decreasing [test_bad_timestamp]\n if self.timestamp < parent_block.timestamp:\n return False, \"Invalid timestamp\"\n # Check that seal is correctly computed and satisfies \"target\" requirements [test_bad_seal]\n if not self.seal_is_valid():\n return False, \"Invalid seal\"\n # Check that all transactions within are valid (use tx.is_valid) [test_malformed_txs]\n for tx in self.transactions:\n if not tx.is_valid():\n return False, \"Malformed transaction included\"\n\n # Check that for every transaction\n txs_in_block = {}\n inputs_spent_in_block = []\n blocks_in_chain = chain.get_chain_ending_with(self.parent_hash)\n for tx in self.transactions:\n user_transacting = None\n # the transaction has not already been included on a block on the same blockchain as this block [test_double_tx_inclusion_same_chain]\n if nonempty_intersection(blocks_in_chain, chain.blocks_containing_tx.get(tx.hash, [])):\n return False, \"Double transaction inclusion\"\n # (or twice in this block; you will have to check this manually) [test_double_tx_inclusion_same_block]\n if tx.hash in txs_in_block:\n return False, \"Double transaction inclusion\"\n # for every input ref in the tx\n total_amount_input = 0\n for input_ref in tx.input_refs:\n input_tx_hash = input_ref.split(\":\")[0]\n input_index = int(input_ref.split(\":\")[1])\n\n # each input_ref is valid (aka can be looked up in its holding transaction) [test_failed_input_lookup]\n if input_tx_hash in txs_in_block:\n candidate_tx = txs_in_block[input_tx_hash]\n elif input_tx_hash in chain.all_transactions:\n candidate_tx = chain.all_transactions[input_tx_hash]\n else:\n return False, \"Required output not found\"\n if not input_index < len(candidate_tx.outputs):\n return False, \"Required output not found\"\n output_for_input = candidate_tx.outputs[input_index]\n total_amount_input += output_for_input.amount\n\n # every input was sent to the same user (would normally carry a signature from this user; we leave this out for simplicity) [test_user_consistency]\n if user_transacting == None:\n user_transacting = output_for_input.receiver\n else:\n if not output_for_input.receiver == user_transacting:\n return False, \"User inconsistencies\"\n\n # no input_ref has been spent in a previous block on this chain [test_doublespent_input_same_chain]\n if nonempty_intersection(blocks_in_chain, chain.blocks_spending_input.get(input_ref, [])):\n return False, \"Double-spent input\"\n # (or in this block; you will have to check this manually) [test_doublespent_input_same_block]\n if input_ref in inputs_spent_in_block:\n return False, \"Double-spent input\"\n # each input_ref points to a transaction on the same blockchain as this block [test_input_txs_on_chain]\n if nonempty_intersection(blocks_in_chain, chain.blocks_containing_tx.get(input_tx_hash, [])):\n inputs_spent_in_block.append(input_ref)\n continue\n # (or in this block; you will have to check this manually) [test_input_txs_in_block]\n if input_tx_hash in txs_in_block:\n inputs_spent_in_block.append(input_ref)\n continue\n return False, \"Input transaction not found\"\n\n total_amount_output = 0\n for output in tx.outputs:\n # every output was sent from the same user (would normally carry a signature from this user; we leave this out for simplicity)\n # (this MUST be the same user as the outputs are locked to above) [test_user_consistency]\n if (output.sender != 
user_transacting):\n return False, \"User inconsistencies\"\n total_amount_output += output.amount\n # the sum of the input values is at least the sum of the output values (no money created out of thin air) [test_no_money_creation]\n if total_amount_output > total_amount_input:\n return False, \"Creating money\"\n txs_in_block[tx.hash] = tx\n return True, \"All checks passed\"\n\n # Placeholder for (1a)\n return True, \"All checks passed\"", "def is_valid(self):\n\n chain = blockchain.chain # This object of type Blockchain may be useful\n\n # Placeholder for (1a)\n\n # (checks that apply to all blocks)\n # Check that Merkle root calculation is consistent with transactions in block (use the calculate_merkle_root function) [test_rejects_invalid_merkle]\n # On failure: return False, \"Merkle root failed to match\"\n if self.merkle != self.calculate_merkle_root():\n return False, \"Merkle root failed to match\"\n\n # Check that block.hash is correctly calculated [test_rejects_invalid_hash]\n # On failure: return False, \"Hash failed to match\"\n if self.hash != self.calculate_hash():\n return False, \"Hash failed to match\"\n\n # Check that there are at most 900 transactions in the block [test_rejects_too_many_txs]\n # On failure: return False, \"Too many transactions\"\n if len(self.transactions) > 900:\n return False, \"Too many transactions\"\n\n # (checks that apply to genesis block)\n if self.is_genesis:\n # Check that height is 0 and parent_hash is \"genesis\" [test_invalid_genesis]\n # On failure: return False, \"Invalid genesis\"\n if self.height != 0 or self.parent_hash != \"genesis\":\n return False, \"Invalid genesis\"\n\n # (checks that apply only to non-genesis blocks)\n else:\n # Check that parent exists (you may find chain.blocks helpful) [test_nonexistent_parent]\n # On failure: return False, \"Nonexistent parent\"\n if self.parent_hash not in chain.blocks:\n return False, \"Nonexistent parent\"\n\n # Check that height is correct w.r.t. 
parent height [test_bad_height]\n # On failure: return False, \"Invalid height\"\n if self.height != chain.blocks[self.parent_hash].height + 1:\n return False, \"Invalid height\"\n\n # Check that timestamp is non-decreasing [test_bad_timestamp]\n # On failure: return False, \"Invalid timestamp\"\n if self.timestamp < chain.blocks[self.parent_hash].timestamp:\n return False, \"Invalid timestamp\"\n\n # Check that seal is correctly computed and satisfies \"target\" requirements; use the provided seal_is_valid method [test_bad_seal]\n # On failure: return False, \"Invalid seal\"\n if self.seal_is_valid() == False:\n return False, \"Invalid seal\"\n\n # Check that all transactions within are valid (use tx.is_valid) [test_malformed_txs]\n # On failure: return False, \"Malformed transaction included\"\n for tx in self.transactions:\n if tx.is_valid() == False:\n return False, \"Malformed transaction included\"\n\n # Check that for every transaction\n tx_in_block = {}\n input_refs_in_block = {}\n for tx in self.transactions:\n # the transaction has not already been included on a block on the same blockchain as this block [test_double_tx_inclusion_same_chain]\n # (or twice in this block; you will have to check this manually) [test_double_tx_inclusion_same_block]\n # (you may find chain.get_chain_ending_with and chain.blocks_containing_tx and util.nonempty_intersection useful)\n # On failure: return False, \"Double transaction inclusion\"\n if nonempty_intersection(chain.get_chain_ending_with(self.parent_hash), chain.blocks_containing_tx.get(tx.hash, [])):\n return False, \"Double transaction inclusion\"\n\n # If the two same transactions in this block\n if tx.hash in tx_in_block:\n return False, \"Double transaction inclusion\"\n else:\n # If not, add to the dict.\n tx_in_block[tx.hash] = tx\n\n # for every input ref in the tx\n input_user = None\n output_sum=0\n input_sum = 0\n for input_ref in tx.input_refs:\n\n # (you may find the string split method for parsing the input into its components)\n input_tx = input_ref.split(\":\")\n input_tx_hash = input_tx[0]\n input_tx_index = int(input_tx[1])\n\n # each input_ref is valid (aka corresponding transaction can be looked up in its holding transaction) [test_failed_input_lookup]\n # (you may find chain.all_transactions useful here)\n # On failure: return False, \"Required output not found\"\n if (input_tx_hash not in chain.all_transactions or input_tx_index >= len(chain.all_transactions[input_tx_hash].outputs)) and input_tx_hash not in tx_in_block:\n return False, \"Required output not found\"\n\n # every input was sent to the same user (would normally carry a signature from this user; we leave this out for simplicity) [test_user_consistency]\n # On failure: return False, \"User inconsistencies\"\n input_tx_ref = None\n if input_tx_hash in chain.all_transactions:\n input_tx_ref = chain.all_transactions[input_tx_hash]\n else:\n input_tx_ref = tx_in_block[input_tx_hash]\n\n if input_user != None and input_user != input_tx_ref.outputs[input_tx_index].receiver:\n return False, \"User inconsistencies\"\n else:\n input_user = input_tx_ref.outputs[input_tx_index].receiver\n\n # no input_ref has been spent in a previous block on this chain [test_doublespent_input_same_chain]\n # (or in this block; you will have to check this manually) [test_doublespent_input_same_block]\n # (you may find nonempty_intersection and chain.blocks_spending_input helpful here)\n # On failure: return False, \"Double-spent input\"\n if input_tx_hash in input_refs_in_block:\n return 
False, \"Double-spent input\"\n else:\n input_refs_in_block[input_tx_hash] = input_ref\n\n if input_ref in chain.blocks_spending_input and nonempty_intersection(chain.get_chain_ending_with(self.parent_hash), chain.blocks_spending_input[input_ref]):\n return False, \"Double-spent input\"\n\n # each input_ref points to a transaction on the same blockchain as this block [test_input_txs_on_chain]\n # (or in this block; you will have to check this manually) [test_input_txs_in_block]\n # (you may find chain.blocks_containing_tx.get and nonempty_intersection as above helpful)\n # On failure: return False, \"Input transaction not found\"\n if input_tx_hash not in tx_in_block and nonempty_intersection(chain.get_chain_ending_with(self.parent_hash), chain.blocks_containing_tx.get(input_tx_hash)) == False:\n return False, \"Input transaction not found\"\n input_sum += input_tx_ref.outputs[input_tx_index].amount\n\n # for every output in the tx\n for output in tx.outputs:\n\n # every output was sent from the same user (would normally carry a signature from this user; we leave this out for simplicity)\n # (this MUST be the same user as the outputs are locked to above) [test_user_consistency]\n # On failure: return False, \"User inconsistencies\"\n if output.sender != input_user:\n return False, \"User inconsistencies\"\n output_sum += output.amount\n\n # the sum of the input values is at least the sum of the output values (no money created out of thin air) [test_no_money_creation]\n # On failure: return False, \"Creating money\" \n if output_sum > input_sum:\n return False, \"Creating money\"\n\n return True, \"All checks passed\"", "def verify_chain(cls, blockchain: List[Block]) -> bool:\n\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n logger.debug(\n \"Checking index %s previous hash with the block hash of index %s\",\n index,\n index - 1,\n )\n\n computed_previous_hash = cls.hash_block_header(blockchain[index - 1].header)\n if block.header.previous_hash != computed_previous_hash:\n logger.error(\n \"Previous block hashed not equal to previous hash stored in current block\"\n )\n return False\n\n logger.debug(\n \"Checking the Block hash for index %s is correct with the nonce attached\",\n index,\n )\n if not cls.valid_nonce(block.header):\n logger.error(\"Proof of work is invalid\")\n return False\n logger.info(\"Chain is valid\")\n return True", "def is_blockchain_valid(self, last_block=[]):\n if last_block:\n last_block = [last_block.get_block_obj(True)]\n if len(self.blockchain) == 0:\n return False\n i = 0\n for block in self.blockchain + last_block:\n if block[\"hash\"] == \"0\":\n # the first block\n continue\n if self.blockchain[i][\"hash\"] != block[\"previous_hash\"]:\n return False\n i += 1\n return True", "def isValidBlock(self, block, unSpentTransactions):\n\n prevBlock = self.getBlock(self.tailBlockHash)\n if prevBlock.index+1 != block.index:\n return False\n elif prevBlock.currHash != block.prevHash:\n return False\n elif block.calculateHash() != block.currHash:\n return False\n return block.isValid(unSpentTransactions)", "def verify_chain(blockchain):\n for index, block in enumerate(blockchain):\n if index == 0:\n continue\n\n if block.previous_hash != hash_block(blockchain[index - 1]):\n return False\n\n if not Verification.valid_proof(block.transactions[:-1], block.previous_hash, block.proof):\n return False\n\n return True", "def verify_chain():\n for (index,block) in enumerate(blockchain):\n if index ==0:\n continue\n if block['previous_hash'] != 
hash_block(blockchain[index-1]):\n return False\n if not valid_proof(block['transactions'][:-1],block['previous_hash'],block['proof']):\n print('Proof of Work is Invalid')\n return False\n return True", "def verify_chain():\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n if block['previous_hash'] != hash_block(blockchain[index - 1]):\n return False\n # Here [:-1] excludes the reward from being a part of validation\n if not valid_proof(block['transactions'][:-1], block['previous_hash'], block['proof']):\n print('Proof of work is invalid.')\n return False\n return True", "def is_valid_proof(self, block, block_hash): \n return (block_hash.startswith('0' * Blockchain.difficulty) and block_hash == compute_hash())", "def verify_chain():\n\n block_index = 0\n is_unchanged = True\n\n if namoto_length < 1:\n print('Blockchain is empty!')\n return None\n\n for block in namoto_blockchain:\n\n if block[0] == namoto_blockchain[block_index -1]:\n is_unchanged = True\n block_index += 1\n\n else:\n is_unchanged = False\n break\n\n return is_unchanged", "def is_valid_proof(cls, block, block_hash):\n return (block_hash.startswith('0' * Blockchain.difficulty) and\n block_hash == block.compute_hash())", "def is_valid_proof(cls, block, block_hash):\n return (block_hash.startswith('0' * Blockchain.difficulty) and\n block_hash == block.compute_hash())", "def valid_proof(block):\n return Blockchain.hash(block) [:4] == \"0000\"", "def is_chain_valid(self, chain):\r\n previous_block = chain[0]\r\n block_index = 1\r\n while block_index < len(chain):\r\n block = chain[block_index]\r\n if block['previous_hash'] != self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof = block['proof']\r\n hash_operation = self.hash(block)\r\n if hash_operation[:4] != '0000':\r\n return False\r\n previous_block = block\r\n block_index += 1\r\n return True", "def is_valid_proof(self, block, block_hash):\r\n return (block_hash.startswith('0' * Blockchain.difficulty) and\r\n block_hash == block.compute_hash())", "def is_valid_block(last_block, block):\n if block.last_hash != last_block.hash:\n raise Exception('Incorrect last_hash')\n if hex_to_binary(block.hash)[0:block.difficulty] != '0' * block.difficulty:\n raise Exception('Proof of Work not fulfilled')\n if abs(block.difficulty - last_block.difficulty) > 1:\n raise Exception('Block difficulty must only adjust by 1')\n\n reconstructed_hash = crypto_hash(\n block.timestamp,\n block.last_hash,\n block.data,\n block.nonce,\n block.difficulty\n )\n\n if block.hash != reconstructed_hash:\n raise Exception('Incorrect Block hash')", "def isValid(self):\n currBlock = self.getBlock(self.tailBlockHash)\n while currBlock != self.genesisBlock:\n if not self.isValidBlock(currBlock):\n return False\n currBlock = self.getBlock(currBlock.prevHash)\n return True", "def validation_check():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'Blockchain is valid',\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)}\n else:\n response = {'error': 'There are errors in the Blockchain',\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)}\n return jsonify(response), 200", "def chainIsValid(self):\n for i in range(1, len(self.blocks)):\n prev_block = self.blocks[i-1]\n cur_block = self.blocks[i]\n if cur_block.header['prevBlockH'] != getHashBlock(prev_block):\n return False\n return True", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n 
while current_index < len(chain):\n block = chain[current_index]\n\n # Check that the hash of block is correct\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check the Proof of Work\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n \n last_block = block\n current_index += 1\n return True", "def valid_chain(self, chain):\n\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n last_block_hash = self.hash(last_block)\n if block['previous_hash'] != last_block_hash:\n return False\n\n # Check that the Proof of Work is correct\n if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def validate_all_transactions_and_blocks(blockchain):\n\n # 1. Recreate blockchain from scratch...\n new_blockchain = None\n\n # 2. ...which will run all necessary checks as it builds and adds blocks...\n i = 0\n for block in blockchain.blocks:\n if i == 0:\n new_blockchain = Blockchain().init_with_genesis_block(block)\n else:\n try:\n new_blockchain.validate_and_add_block(block)\n except:\n print(\"Blockchain contains invalid blocks!\")\n return False\n i += 1\n return True", "def validate_chain(self):\n chain_length = len(self.chain)\n isValid = None\n\n if(chain_length == 1):\n return \"Add blocks and then validate chain.\"\n\n for x in range(0, chain_length, 1):\n if(x < chain_length-1):\n # Hash the current block\n hash = self.hash(self.chain[x])\n next_block_prev_hash = self.chain[x+1]['prev_hash']\n\n if(hash == \"0\" and x == 0):\n isValid = True\n\n if(hash != next_block_prev_hash):\n isValid = False\n return isValid\n else:\n isValid = True\n\n return isValid", "def valid_chain(self, chain):\n\n last_block = chain[0]\n current_index = 1\n \n while current_index < len(chain):\n block = chain[current_index]\n # Check correctness of last block's hash\n if block['previous_hash'] != self.hash(last_block): \n return False\n # Check correctness of proof-of-work\n if not self.valid_proof(last_block['proof'], block['proof'], block['previous_hash']):\n return False\n last_block = block \n current_index += 1\n\n return True", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n----------------\\n\")\n # verify hash integrity\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # verify proof integrity\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def validate(bitcoin_address):\n try:\n bcbytes = decode_base58(bitcoin_address, 25)\n except IllegalCharacterError:\n return False\n # Compare checksum\n checksum = sha256(sha256(bcbytes[:-4]).digest()).digest()[:4]\n if bcbytes[-4:] != checksum:\n return False\n # Encoded bytestring should be equal to the original address\n # For example '14oLvT2' has a valid checksum, but is not a valid btc address\n return bitcoin_address == encode_base58(bcbytes)", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(last_block)\n print(block)\n print(\"\\n--------\\n\")\n \n #check that 
the hash of the previous block is correct\n\n if block[\"previous_hash\"] != self.hash(last_block):\n print(\"Previous hash does not match\")\n return False\n\n if not self.valid_proof(block):\n print(\"Block proof of work is invalid\")\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def is_valid_proof(cls, block, block_hash):\n return (cls.ifsatisfy_diff(block_hash) and block_hash == block.compute_hash())", "def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):\n guess = (str(transactions)+str(last_hash)+str(nonce)).encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:difficulty] == '0'*difficulty", "def test_validate_empty_chain(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n block = blockchain.mine(miner_address)\n\n self.assertTrue(blockchain.validate_chain(blockchain.full_chain))", "def validateChain(self, toValidateChain):\n # First validate both firsts blocks\n if toValidateChain[0].hashBlock() != self.__chain[0].hashBlock():\n return False\n\n # Then compare each block with previous \n for x in range(1, len(toValidateChain)):\n if not self.validateBlock(toValidateChain[x], toValidateChain[x - 1]):\n return False\n\n return True", "def validateBlock(self, currentBlock, previousBlock): \n \n # Check the block index\n if currentBlock.index != previousBlock.index + 1:\n return False\n if currentBlock.previousHash != previousBlock.hash:\n return False\n if currentBlock.hash != currentBlock.hashBlock():\n return False\n if not self.validateNonce(previousBlock.nonce, previousBlock.hash, currentBlock.nonce):\n return False\n return True", "def test_invalid_balance_genesis(self):\n db = MockDatabase()\n prev = TestBlock(block_type=BlockTypes.TRANSFER, transaction={'balance': 1, 'amount':5})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.invalid)\n self.assertIsInstance(errors[0], EuroTokenBlock.InvalidBalance)", "def valid_chain(chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n #print(last_block)\n #print(block)\n #print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n if block['previous_hash'] != hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n #Delete the reward transaction\n transactions = block['transactions'][:-1]\n # Need to make sure that the dictionary is ordered. 
Otherwise we'll get a different hash\n transaction_elements = ['sender_address', 'recipient_address', 'value']\n transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in transactions]\n\n if not valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def is_hashed_base58_valid(base58):\n try:\n a2b_hashed_base58(base58)\n except EncodingError:\n return False\n return True", "def valid_chain(self, block, prev_block):\n self.stop_mine()\n\n print('\\n //// MINING STOPPED\\n')\n\n print('\\n //// block entering valid_chain')\n pprint(block)\n\n if block is not None and block['message'] != 'mining stopped':\n if block['previous_hash'] == self.hash(prev_block):\n \n # Check that the Proof of Work is correct\n if self.valid_proof(prev_block['proof'], block['proof']):\n if block['index'] == self.last_block['index']:\n if self.last_block['timestamp'] > block['timestamp']:\n del self.chain[-1]\n self.chain.append(block)\n print('\\n //// true from equal index but older timestamp')\n return True\n\n elif self.last_block['timestamp'] == block['timestamp']:\n print('\\n //// true from timestamps are equal block isnt added')\n return True\n else:\n print('\\n //// true timestamp is newer not added but sending false')\n return False\n\n elif block['index'] > self.last_block['index']:\n print('\\n //// true from index is greater and block is added')\n self.chain.append(block)\n return True\n else:\n print('\\n //// false from adding block had index less than block already there')\n else:\n print('\\n //// false from not a valid proof')\n\n else:\n print('\\n //// false from hashes arent equal')\n if (block['timestamp'] < self.last_block['timestamp']):\n if (block['index'] == self.last_block['index']):\n print('\\n //// hashes arent equal but block is older, subtracting and adding')\n del self.chain[-1]\n self.chain.append(block)\n return True\n\n elif (block['timestamp'] > self.last_block['timestamp']):\n if(block['index'] > self.last_block['index']):\n self.chain.append(block)\n return True\n else:\n return True\n\n return False\n\n else:\n return 'reject'", "def consensus():\n global blockchain\n\n longest_chain = None\n current_len = len(blockchain.chain)\n\n for node in peers:\n response = requests.get('{}chain'.format(node))\n length = response.json()['length']\n chain = response.json()['chain']\n if length > current_len and blockchain.check_chain_validity(chain):\n current_len = length\n longest_chain = chain\n\n if longest_chain:\n blockchain = longest_chain\n return True\n\n return False", "def consensus():\n global blockchain\n\n longest_chain = None\n current_len = len(blockchain.chain)\n\n for node in peers:\n response = requests.get('{}chain'.format(node))\n length = response.json()['length']\n chain = response.json()['chain']\n if length > current_len and blockchain.check_chain_validity(chain):\n current_len = length\n longest_chain = chain\n\n if longest_chain:\n blockchain = longest_chain\n return True\n\n return False", "def _is_valid_message(tx_message: TransactionMessage) -> bool:\n # TODO check the hash matches the terms of the transaction, this means dm requires knowledge of how the hash is composed\n tx_hash = tx_message.signing_payload.get(\"tx_hash\")\n is_valid = isinstance(tx_hash, bytes)\n return is_valid", "def validate_transaction(self, tx, throw_exception=False):\n\n # 1. 
Validate signature\n isValid = signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature)\n if not isValid:\n error_msg = \"Signature not valid!\"\n if throw_exception:\n print(error_msg)\n raise Exception(error_msg)\n else:\n print(error_msg)\n return False\n\n # 2. Validate sender balance\n balance = get_balance(tx.from_pk, self.blocks)\n if tx.amount > balance:\n error_msg = \"Insufficient funds for this transaction!\"\n if throw_exception:\n print(error_msg)\n raise Exception(error_msg)\n else:\n print(error_msg)\n return False\n return True", "def verify_stored_block(leading_zeros: int) -> bool:\n try:\n with open(\"block.json\", \"r\") as file:\n block_json = json.load(file)\n block = Block(block_json[\"data\"], block_json[\"nonce\"], block_json[\"hash\"])\n hash_to_verify = block.hash\n recalculated_hash = block.get_hash()\n\n binary_hash_to_verify = get_binary_sha256_hash(hash_to_verify)\n binary_recalculated_hash = get_binary_sha256_hash(recalculated_hash)\n\n return (\n binary_hash_to_verify == binary_recalculated_hash\n and binary_recalculated_hash[:leading_zeros] == \"0\" * leading_zeros\n )\n except FileNotFoundError:\n print(\"No stored block found. Create one first\")\n return False", "def verify_block(block_data, block_hash, block_nonce):\n hashing_value = block_data + str(block_nonce)\n new_hash = hashlib.sha256(hashing_value.encode()).hexdigest()\n if new_hash == block_hash:\n return True\n else:\n return False", "def _is_valid_tx(self, tx_message: TransactionMessage) -> bool:\n tx = tx_message.signing_payload.get(\"tx\")\n is_valid = tx is not None\n return is_valid", "def verify_chain(self, new_block=None):\n if new_block and (not new_block.is_valid()\n or self.get_last().hash_block() != new_block.prev_block_hash):\n return False, -2\n\n i = len(self.chain)-1\n for block in reversed(self.chain):\n prev_hash = self.chain[i-1].hash_block()\n if block.index == 0 or i == 0:\n break\n # block's header_hash property is already recalculated in is_valid() method\n elif block.is_valid() and prev_hash == block.prev_block_hash:\n i -= 1\n else:\n return False, block.index\n\n return True, -1", "def is_valid(self):\n # Check blocks\n for block in self.blocks.values():\n # Non-optional blocks must be enabled\n if (\n block.structure.number_non_optional_data() > 0\n and not block.enabled\n and block.is_allowed()\n ):\n self.last_error = (\n f'Required block \"{block.block_header.name}\" not enabled'\n )\n return False\n # Enabled blocks must be valid\n if block.enabled and not block.is_valid:\n self.last_error = f'Invalid block \"{block.block_header.name}\"'\n return False\n\n return True", "def test_validate_chain_with_tempered_block_nonce(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n last_block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(last_block)\n\n chain = blockchain.full_chain\n\n # Hack a block\n chain.append(Block(1, [], 1, last_block.hash))\n\n self.assertFalse(blockchain.validate_chain(blockchain.full_chain))", "def verify_miner(self):\n for transaction in self.transaction_list:\n if(transaction.verify_miner()):\n return True\n return False", "def test_valid_balance_genesis(self):\n db = MockDatabase()\n prev = TestBlock(block_type=BlockTypes.CHECKPOINT, transaction={'balance': 0})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(prev)", "def 
verify_transaction(transaction):\n sender_balance = get_balance(transaction['sender'])\n return sender_balance >= transaction['amount']", "def isValid(self, public_key):\n if self.sentFrom is None:\n return True\n\n if not self.signature or not len(str(self.signature)):\n raise Exception(\"Transaction Not Signed\")\n\n valid = public_key.verify(self.signature, self.txHash.encode())\n return valid", "def verify(self):\n\n # The boolean will be used to see if there are already a miner's\n # transaction\n exist_miner = False\n # The index will store the input to see if there are no duplicates\n index_input = {}\n\n for transaction in self.transaction_list:\n # We verify if an input of a transaction is already in the\n # transaction's list\n for input in transaction.list_input:\n if(input in index_input):\n print(transaction.list_input)\n print(input)\n return False\n else:\n index_input[input] = None\n\n # We verify is there are the central bank in the transaction\n if transaction.verify_bank():\n # If it is in the transaction it can be just the miner's\n # transaction (without duplicates)\n if(exist_miner or not(transaction.verify_miner())):\n print(\"TADATA\")\n return False\n else:\n exist_miner = True\n else:\n # Otherwise, we verify normaly the transaction\n if(not(transaction.verify())):\n print(\"TADATA2\")\n return False\n return True", "def validate_address(network, address):\n try:\n address = Address.import_address(address=address, network=(\n 'litecoin' if network == 'mainnet' else 'litecoin_testnet'))\n return True\n except:\n return False", "def is_valid(self):\r\n return self.circuit.is_valid", "def verify(blocknumber, trx, use_api):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n b = Blockchain(morphene_instance=stm)\n i = 0\n if not blocknumber:\n blocknumber = b.get_current_block_num()\n try:\n int(blocknumber)\n block = Block(blocknumber, morphene_instance=stm)\n if trx is not None:\n i = int(trx)\n trxs = [block.json_transactions[int(trx)]]\n else:\n trxs = block.json_transactions\n except Exception:\n trxs = [b.get_transaction(blocknumber)]\n blocknumber = trxs[0][\"block_num\"]\n wallet = Wallet(morphene_instance=stm)\n t = PrettyTable([\"trx\", \"Signer key\", \"Account\"])\n t.align = \"l\"\n if not use_api:\n from morphenepythonbase.signedtransactions import Signed_Transaction\n for trx in trxs:\n if not use_api:\n # trx is now identical to the output of get_transaction\n # This is just for testing porpuse\n if True:\n signed_tx = Signed_Transaction(trx.copy())\n else:\n tx = b.get_transaction(trx[\"transaction_id\"])\n signed_tx = Signed_Transaction(tx)\n public_keys = []\n for key in signed_tx.verify(chain=mph.chain_params, recover_parameter=True):\n public_keys.append(format(Base58(key, prefix=mph.prefix), mph.prefix))\n else:\n tx = TransactionBuilder(tx=trx, morphene_instance=stm)\n public_keys = tx.get_potential_signatures()\n accounts = []\n empty_public_keys = []\n for key in public_keys:\n account = wallet.getAccountFromPublicKey(key)\n if account is None:\n empty_public_keys.append(key)\n else:\n accounts.append(account)\n new_public_keys = []\n for key in public_keys:\n if key not in empty_public_keys or use_api:\n new_public_keys.append(key)\n if isinstance(new_public_keys, list) and len(new_public_keys) == 1:\n new_public_keys = new_public_keys[0]\n else:\n new_public_keys = json.dumps(new_public_keys, indent=4)\n if isinstance(accounts, list) and len(accounts) == 1:\n accounts = accounts[0]\n else:\n accounts = 
json.dumps(accounts, indent=4)\n t.add_row([\"%d\" % i, new_public_keys, accounts])\n i += 1\n print(t)", "def isValid(self) :\n try :\n pos = 0\n while self.firstblock[pos] == chr(0) :\n pos += 1\n except IndexError : \n return False\n else : \n firstblock = self.firstblock[pos:]\n if firstblock.startswith(\"\\033E\\033\") or \\\n firstblock.startswith(\"\\033%1BBPIN;\") or \\\n ((pos == 11000) and firstblock.startswith(\"\\033\")) or \\\n (firstblock.startswith(\"\\033*rbC\") and (not self.lastblock[-3:] == \"\\f\\033@\")) or \\\n firstblock.startswith(\"\\033*rB\\033\") or \\\n firstblock.startswith(\"\\033%8\\033\") or \\\n (firstblock.find(\"\\033%-12345X\") != -1) or \\\n (firstblock.find(\"@PJL ENTER LANGUAGE=PCL\\012\\015\\033\") != -1) or \\\n (firstblock.startswith(chr(0xcd)+chr(0xca)) and (firstblock.find(\"\\033E\\033\") != -1)) :\n return True\n else : \n return False", "def test_missing_validated_balance(self):\n db = MockDatabase()\n\n G = db.owner\n A = TestWallet()\n\n G1 = TestBlock(\n key = G,\n block_type=BlockTypes.CREATION,\n transaction={'amount': 10},\n links=A\n )\n result, errors = G1.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(G1)\n\n A1 = TestBlock(\n key=A,\n block_type=BlockTypes.CREATION,\n transaction={'amount': 10},\n linked = G1\n )\n result, errors = A1.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(A1)\n\n A2 = TestBlock(\n block_type=BlockTypes.TRANSFER,\n transaction={'balance':5, 'amount': 5},\n previous=A1\n )\n result, errors = A2.validate_transaction(db)\n self.assertEqual(result, ValidationResult.invalid)\n self.assertIsInstance(errors[0], EuroTokenBlock.InsufficientValidatedBalance)", "def valid(self):\n return self.hash.to_int('little') < self.target", "def validate(smartAddress):\n\n addressLen = len(smartAddress)\n\n if addressLen < 27 or addressLen > 35:\n return None\n\n try:\n decoded = decode_base58(smartAddress, 25)\n except ValueError:\n return None\n\n # Compare checksum\n checksum = HashKeccak(decoded[:-4])[:4]\n if decoded[-4:] != checksum:\n return None\n\n if smartAddress != encode_base58(decoded):\n return None\n\n return smartAddress", "def valid_proof(transactions, last_hash, proof):\n # Creates a String containing all the hash inputs.\n guess = (str([transaction.to_ordered_dict() for transaction in transactions]) + str(last_hash) + str(\n proof)).encode()\n # Hashes the String.\n guess_hash = hash_util.hash_string_256(guess)\n # Only a hash based on the above inputs that starts with two 0s is valid for the algorithm.\n # This condition can be changed, but once adding more characters to validate, the more time consuming it is.\n return guess_hash[0:2] == '00'", "def valid_bet(self, amount: int) -> bool:\n return MINIMUM_BET() <= amount <= self.balance", "def verify_transaction(transaction, get_balance, check_funds=True):\n if check_funds:\n sender_balance = get_balance(transaction.sender)\n return sender_balance >= transaction.amount and Wallet.verify_transaction(transaction)\n else:\n return Wallet.verify_transaction(transaction)", "def validate_blockchain(chain):\n assert isinstance(chain, list)\n\n for hook in chain[::-1]:\n pass", "def check_valid(self, store, txn):\n LOGGER.debug('check update %s from %s', str(self), self.whitelist_name)\n\n # Check name\n if not self.is_valid_name():\n raise InvalidTransactionError(\n 'Illegal whitelist name 
{}'.format(self.whitelist_name[:64]))\n\n# try:\n# with open(\"/home/vagrant/sawtooth/whitelist.json\")\\\n# as whitelist_fd:\n# whitelist_dic = json.load(whitelist_fd)\n# except IOError, ex:\n# raise InvalidTransactionError(\n# 'could not open /home/vagrant/sawtooth/whitelist.json {}'\n# .format(str(ex)))\n\n if not PermissionedValidators:\n raise InvalidTransactionError('No Permissioned Validators')\n\n whitelist_dic = PermissionedValidators\n if 'PermissionedValidatorPublicKeys' in whitelist_dic:\n permissioned_public_keys =\\\n whitelist_dic['PermissionedValidatorPublicKeys']\n for public_key in self.permissioned_public_keys:\n if public_key not in permissioned_public_keys:\n raise InvalidTransactionError(\n 'Illegal public key {}'\n .format(str(public_key)))\n\n if 'PermissionedValidatorAddrs' in whitelist_dic:\n permissioned_addrs = whitelist_dic['PermissionedValidatorAddrs']\n for addr in self.permissioned_addrs:\n if addr not in permissioned_addrs:\n raise InvalidTransactionError(\n 'Illegal public addr {}'\n .format(str(addr)))\n\n return True", "def verify_transaction(\n transaction: SignedRawTransaction,\n get_balance: Callable,\n get_last_tx_nonce: Callable,\n check_funds: bool = True,\n ) -> bool:\n if check_funds:\n logger.debug(\n \"Checking the sender's balance can cover the amount being transferred\"\n )\n sender_balance = get_balance(transaction.details.sender)\n if sender_balance >= transaction.details.amount:\n logger.info(\"Sender has enough coin to create this transaction\")\n\n return Wallet.verify_transaction(transaction, get_last_tx_nonce)", "def test_block_bad_consensus(self):\n pass", "def test_missing_balance(self):\n db = MockDatabase()\n\n prev = TestBlock(block_type=BlockTypes.CHECKPOINT, transaction={})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.invalid)\n self.assertIsInstance(errors[0], EuroTokenBlock.MissingBalance)", "def check_block(self, block):\n pass", "def valid(self) -> bool:\n return True", "def validateNonce(lastNonce, lastHash, nonce):\n sha = hashlib.sha256(f'{lastNonce}{lastHash}{nonce}'.encode())\n return sha.hexdigest()[:4] == '0000'", "def validate_checksum(self):\n return self.calculate_checksum() == self.checksum()", "def IsValid(*args):\n return _BRepAlgo.brepalgo_IsValid(*args)", "def _validate_tx(self, tx: payloads.Transaction) -> None:\n if tx.network_fee == 0 or tx.system_fee == 0:\n raise ValueError(\"Transaction validation failure - \"\n \"a transaction without network and system fees will always fail to validate on chain\")\n\n if len(tx.signers) == 0:\n raise ValueError(\"Transaction validation failure - Missing sender\")\n\n if len(tx.script) == 0:\n raise ValueError(\"Transaction validation failure - script field can't be empty\")\n\n if self.is_watchonly:\n raise ValueError(\"Cannot sign transaction using a watch only account\")", "def is_spent(tx_hash, index):\n try:\n response = make_request('http://tbtc.blockr.io/api/v1/tx/info/' + tx_hash)\n data = json.loads(response)\n result = bool(data['data']['vouts'][index]['is_spent'])\n except Exception as e:\n result = True\n\n return result", "def test_chain(mocker):\n transaction = Transaction(\n chain=-1,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n 
transaction.validate(raise_exception=True)\n\n transaction.chain = 15\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.chain = 257\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_CHAIN\n ):\n transaction.validate(raise_exception=True)", "def valid(self) -> bool:\n pass", "def is_valid(self, key: Bits, verbose=True):\n ivk = wep_make_ivk(key, self.iv)\n if verbose:\n debug(verbose, fun_name + \" : ivk = \" + str(ivk))\n\n decrypted = rc4_crypt(self.payload, ivk, verbose)\n if verbose:\n debug(verbose, fun_name + \" : decrypted = \" + str(ivk))\n\n decrypted_message = decrypted[:-len(self.crc)]\n if verbose:\n debug(verbose, fun_name + \" : decrypted_message = \" + str(decrypted_message))\n\n decrypted_crc = decrypted[-len(self.crc):]\n if verbose:\n debug(verbose, fun_name + \" : decrypted_crc = \" + str(decrypted_crc))\n\n int_computed_crc, computed_crc = crc32(decrypted_message)\n if verbose:\n debug(verbose, fun_name + \" : computed_crc = \" + str(computed_crc))\n debug(verbose, fun_name + \" : computed_crc = \" + str(int_computed_crc))\n debug(verbose, fun_name + \" : frame_crc = \" + str(self.crc))\n\n return decrypted_crc == computed_crc", "def brepalgo_IsValid(*args):\n return _BRepAlgo.brepalgo_IsValid(*args)", "def _is_valid(self):\n self._is_allows_valid()\n self._is_denies_valid()", "def is_valid(self): # -> bool:\n ...", "def is_valid_entity(self):\n return is_correct_cve_id(self.cve_id)", "def verify(self, commitment, index_range):\n if not all([self.P, self.P2, self.s, self.e]):\n raise PoDLE(\"Verify called without sufficient data\")\n if not self.get_commitment() == commitment:\n return False\n for J in [getNUMS(i) for i in index_range]:\n sig_priv = secp256k1.PrivateKey(self.s, raw=True, ctx=ctx)\n sG = sig_priv.pubkey\n sJ = J.tweak_mul(self.s)\n e_int = decode(self.e, 256)\n minus_e = encode(-e_int % N, 256, minlen=32)\n minus_e_P = self.P.tweak_mul(minus_e)\n minus_e_P2 = self.P2.tweak_mul(minus_e)\n KG = dummy_pub.combine([sG.public_key, minus_e_P.public_key])\n KJ = dummy_pub.combine([sJ.public_key, minus_e_P2.public_key])\n KGser = secp256k1.PublicKey(KG, ctx=ctx).serialize()\n KJser = secp256k1.PublicKey(KJ, ctx=ctx).serialize()\n #check 2: e =?= H(K_G || K_J || P || P2)\n e_check = hashlib.sha256(\n KGser + KJser + self.P.serialize() + self.P2.serialize()).digest()\n if e_check == self.e:\n return True\n #commitment fails for any NUMS in the provided range\n return False", "def _is_valid_trade(self, trade):\n if not trade:\n return False\n\n if trade.Status() in VALID_TRADE_STATUSES:\n if acm.Time().AsDate(trade.TradeTime()) > self.start_date:\n return False\n print '1'\n ins_type = trade.Instrument().InsType()\n if ins_type == 'Curr':\n if trade.ValueDay() > self.start_date:\n return True\n elif ins_type == 'Combination':\n for comb_ins in trade.Instrument().Instruments():\n trades = comb_ins.Trades()\n if trades and trades[0] in VALID_TRADE_STATUSES:\n trade = trades[0]\n ins_type = trade.Instrument().InsType()\n if (self._is_basis_trade(trade) and\n ins_type in ('Swap', 'FRA')):\n return True\n elif ins_type == 'CurrSwap':\n if trade.Instrument().ExpiryDateOnly() > self.start_date:\n return True\n else:\n if trade.Instrument().ExpiryDateOnly() > self.start_date:\n if (self._is_basis_trade(trade) and\n ins_type in ('Swap', 'FRA')):\n return True\n\n return False", "def is_valid(self):\n try:\n self.validate()\n return True\n 
except (TypeError, ValueError) as e:\n return False", "def is_valid(self, card):\n # type: (str, Card) -> bool\n if card.version == \"3.0\":\n return False\n fingerprint = self.crypto.calculate_fingerprint(\n Utils.strtobytes(card.snapshot)\n )\n fingerprint_hex = fingerprint.to_hex\n if fingerprint_hex != card.id:\n return False\n verifiers = self.verifiers.copy()\n card_public_key = self.crypto.import_public_key(card.public_key)\n verifiers[fingerprint_hex] = card_public_key\n for key in verifiers:\n if key not in card.signatures:\n return False\n is_valid = self.crypto.verify(\n fingerprint.value,\n Utils.b64tobytes(card.signatures[key]),\n verifiers[key]\n )\n if not is_valid:\n return False\n return True", "def verify(self, _base_block_hash: bytes) -> None:\n from hathor.merged_mining import MAGIC_NUMBER\n from hathor.transaction.exceptions import AuxPowError\n if not self.coinbase_head.endswith(MAGIC_NUMBER):\n raise AuxPowError('cannot find MAGIC_NUMBER')\n if MAGIC_NUMBER in self.coinbase_head[42:len(MAGIC_NUMBER)]: # 42 first bytes can be ignored\n raise AuxPowError('multiple instances of MAGIC_NUMBER')\n if len(self.merkle_path) > 12:\n raise AuxPowError('`merkle_path` too long')\n # XXX: is there anything else that needs to be verified?", "def _is_valid_sbl_fee_settlement(settlement):\n if not settlement.IsKindOf(acm.FSettlement):\n return False\n\n if not settlement.Trade():\n return False\n\n if settlement.AdditionalInfo().Call_Confirmation():\n return False\n \n if settlement.Status() != 'Authorised':\n return False\n acquirer = settlement.Trade().Acquirer().Name()\n instrument = settlement.Trade().Instrument().InsType()\n if acquirer not in ('SECURITY LENDINGS DESK', 'PRIME SERVICES DESK'):\n return False\n\n if acquirer == 'SECURITY LENDINGS DESK':\n if instrument != 'SecurityLoan':\n return False\n\n if acquirer == 'PRIME SERVICES DESK':\n if instrument != 'Deposit':\n return False\n if settlement.Trade().Portfolio().Name() != 'Call_SBL_Agency_Collateral':\n return False\n\n if settlement.Type() not in ['Cash', 'Loan Fee', 'Finder Fee', 'Call Fixed Rate Adjustable']:\n return False\n\n return True", "def validate(self, encrypted_token: str) -> bool:\n payload, timestamp_ms, crc = self.unsleeve(encrypted_token)\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n computed_crc = zlib.crc32(payload + ts_bytes)\n\n if crc == computed_crc:\n return in_range(timestamp_ms, deadline=self.token_life_ms)\n\n return False", "def _is_valid_sbl_settlement(settlement):\n if not settlement.IsKindOf(acm.FSettlement):\n return False\n\n if not settlement.Trade():\n return False\n \n trade = settlement.Trade()\n acquirer = trade.Acquirer().Name()\n instrument = trade.Instrument().InsType()\n delivery_type = trade.AddInfoValue(\"SL_SWIFT\")\n if instrument != 'SecurityLoan':\n return False\n if acquirer != 'SECURITY LENDINGS DESK':\n return False\n if delivery_type != 'SWIFT':\n return False\n if settlement.Type() not in ['Security Nominal', 'End Security']:\n return False\n\n return True", "def is_valid(self):\n # check data sets\n for dataset in self.datasets.values():\n # Non-optional datasets must be enabled\n if not dataset.structure.optional and not dataset.enabled:\n return False\n # Enabled blocks must be valid\n if dataset.enabled and not dataset.is_valid:\n return False\n # check variables\n for block_header in self.block_headers:\n for dataset in block_header.data_items:\n # Non-optional datasets must be enabled\n if not dataset.structure.optional and not dataset.enabled:\n return 
False\n # Enabled blocks must be valid\n if dataset.enabled and not dataset.is_valid():\n return False", "def validate(btctxstore, cfg):\n\n # is a dict\n if not isinstance(cfg, dict):\n raise InvalidConfig(\"Config must be a dict!\")\n\n # correct version\n if cfg.get(\"version\") != VERSION:\n msg = \"Invalid version: {0} expected, got {1}\"\n raise InvalidConfig(msg.format(VERSION, cfg.get(\"version\")))\n\n # has valid payout address\n if not btctxstore.validate_address(cfg.get(\"payout_address\")):\n raise InvalidConfig(\"Missing entry 'payout_address'!\")\n\n # has valid wallet\n if not btctxstore.validate_wallet(cfg.get(\"wallet\")):\n msg = \"Invalid 'wallet' entry: {0}!\"\n raise InvalidConfig(msg.format(cfg.get(\"wallet\")))\n\n return True", "async def check_trustline(\n cls, transaction: Transaction, server: Server, locks: Dict\n ):\n try:\n _, account = await get_account_obj_async(\n Keypair.from_public_key(transaction.to_address), server\n )\n except BaseRequestError:\n logger.exception(f\"Failed to load account {transaction.to_address}\")\n transaction.pending_execution_attempt = False\n await sync_to_async(transaction.save)()\n return\n trustline_found = False\n for balance in account[\"balances\"]:\n if balance.get(\"asset_type\") == \"native\":\n continue\n if (\n balance[\"asset_code\"] == transaction.asset.code\n and balance[\"asset_issuer\"] == transaction.asset.issuer\n ):\n trustline_found = True\n break\n if trustline_found:\n logger.debug(\n f\"detected transaction {transaction.id} is no longer pending trust\"\n )\n await cls.process_deposit(transaction, server, locks)\n else:\n transaction.pending_execution_attempt = False\n await sync_to_async(transaction.save)()", "def check(self, txid=None, amount=None, confirmation_height=None):\n\n txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)\n current_height = self.node.getblockcount()\n assert_equal(len(txs), self.expected_txs)\n\n addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])\n\n if self.expected_txs:\n assert_equal(len(addresses[0][\"txids\"]), self.expected_txs)\n\n if txid is not None:\n tx, = [tx for tx in txs if tx[\"txid\"] == txid]\n assert_equal(tx[\"label\"], self.label)\n assert_equal(tx[\"address\"], self.address[\"address\"])\n assert_equal(tx[\"amount\"], amount)\n assert_equal(tx[\"category\"], \"receive\")\n assert_equal(tx[\"label\"], self.label)\n assert_equal(tx[\"txid\"], txid)\n\n # If no confirmation height is given, the tx is still in the\n # mempool.\n confirmations = (1 + current_height - confirmation_height) if confirmation_height else 0\n assert_equal(tx[\"confirmations\"], confirmations)\n if confirmations:\n assert \"trusted\" not in tx\n\n address, = [ad for ad in addresses if txid in ad[\"txids\"]]\n assert_equal(address[\"address\"], self.address[\"address\"])\n assert_equal(address[\"amount\"], self.expected_balance)\n assert_equal(address[\"confirmations\"], confirmations)\n # Verify the transaction is correctly marked watchonly depending on\n # whether the transaction pays to an imported public key or\n # imported private key. 
The test setup ensures that transaction\n # inputs will not be from watchonly keys (important because\n # involvesWatchonly will be true if either the transaction output\n # or inputs are watchonly).\n if self.data != Data.priv:\n assert_equal(address[\"involvesWatchonly\"], True)\n else:\n assert_equal(\"involvesWatchonly\" not in address, True)", "def _ecssa_verify(ec: EC, hf, m: bytes, P: Point, sig: ECSS) -> bool:\n\n # the bitcoin proposed standard is only valid for curves\n # whose prime p = 3 % 4\n if not ec.pIsThreeModFour:\n errmsg = 'curve prime p must be equal to 3 (mod 4)'\n raise ValueError(errmsg)\n\n # Let r = int(sig[ 0:32]); fail if r is not [0, p-1].\n # Let s = int(sig[32:64]); fail if s is not [0, n-1].\n r, s = to_ssasig(ec, sig)\n\n # The message m: a 32-byte array\n if len(m) != hf().digest_size:\n errmsg = f'message of wrong size: {len(m)}'\n errmsg += f' instead of {hf().digest_size}'\n raise ValueError(errmsg)\n\n # Let P = point(pk); fail if point(pk) fails.\n ec.requireOnCurve(P)\n if P[1] == 0:\n raise ValueError(\"public key is infinite\")\n\n # Let e = int(hf(bytes(r) || bytes(P) || m)) mod n.\n e = _ecssa_e(ec, hf, r, P, m)\n\n # Let R = sG - eP.\n R = DblScalarMult(ec, s, ec.G, -e, P)\n\n # Fail if infinite(R).\n if R[1] == 0:\n raise ValueError(\"sG - eP is infinite\")\n\n # Fail if jacobi(y(R)) ≠ 1.\n if legendre_symbol(R[1], ec._p) != 1:\n raise ValueError(\"y(sG - eP) is not a quadratic residue\")\n\n # Fail if x(R) ≠ r.\n return R[0] == r", "def check_validity(self):", "def _is_hash_valid(self):\n downloaded_hash = sha1(self._downloaded_bytes).digest()\n return downloaded_hash == self.hash", "def isStakeBase(tx):\n # A stake base (SSGen) must only have two transaction inputs.\n if len(tx.txIn) != 2:\n return False\n\n # The previous output of a coin base must have a max value index and\n # a zero hash, as well as null fraud proofs.\n if not isNullOutpoint(tx):\n return False\n\n if not isNullFraudProof(tx):\n return False\n\n return True", "def ec_verify(ec, digest, signature):\n assert len(signature) == ec_signature_length(ec)\n length = len(signature) / 2\n prefix = pack(\"!L\", length)\n try:\n return bool(ec.verify_dsa(digest, prefix + signature[:length], prefix + signature[length:]))\n except:\n return False", "def validate(msg, pubkey: dict, signature):\n if signature is None:\n print(\"Signature is None. probably cause something other than a string or byte being passed to signer\")\n return False\n try:\n x_int = base64.b85decode(pubkey[\"x\"].encode())\n x_int = int.from_bytes(x_int, \"big\")\n\n y_int = base64.b85decode(pubkey[\"y\"].encode())\n y_int = int.from_bytes(y_int, \"big\")\n except KeyError:\n return False\n\n signature = signature.encode()\n signature = base64.b85decode(signature)\n\n # if it a string\n try:\n hash_of_message = SHA256.new(msg)\n except TypeError:\n hash_of_message = SHA256.new(msg.encode())\n\n try:\n pubkey = ECC.construct(point_x=x_int, point_y=y_int, curve=\"P-256\").public_key()\n verifier = DSS.new(pubkey, mode=\"fips-186-3\")\n verifier.verify(hash_of_message, signature=signature)\n except ValueError:\n return False\n else:\n return True" ]
[ "0.763013", "0.7623251", "0.76183456", "0.7577117", "0.7567591", "0.7553642", "0.74016935", "0.7393162", "0.7381024", "0.7346507", "0.73355687", "0.7174818", "0.71236986", "0.71236986", "0.7094909", "0.70591253", "0.7049671", "0.69781476", "0.69487953", "0.694765", "0.6893976", "0.68448675", "0.6833846", "0.68165153", "0.676778", "0.6743761", "0.6743592", "0.67270905", "0.6693863", "0.66810644", "0.66767865", "0.66335213", "0.65815896", "0.6522641", "0.6496396", "0.64830923", "0.63982147", "0.63820845", "0.63777983", "0.63777983", "0.6354926", "0.63228035", "0.63082623", "0.6292136", "0.62920296", "0.62672937", "0.6219631", "0.6197709", "0.61971956", "0.61905324", "0.61375254", "0.6124766", "0.6105763", "0.6105683", "0.6093925", "0.59414554", "0.5898845", "0.58806366", "0.5872197", "0.5860194", "0.58592486", "0.58503693", "0.58256704", "0.58210415", "0.5806244", "0.5805369", "0.580378", "0.5796525", "0.5784159", "0.57623583", "0.5760195", "0.57575166", "0.57492185", "0.57399344", "0.5739673", "0.5739058", "0.57368624", "0.57160604", "0.5698114", "0.5679023", "0.5671219", "0.56683916", "0.565526", "0.5652473", "0.56505346", "0.56448674", "0.5642269", "0.5637223", "0.5624799", "0.5624334", "0.5623247", "0.561988", "0.55961525", "0.5594925", "0.5594746", "0.5586692", "0.5585733", "0.55782026", "0.5570052", "0.5566606" ]
0.69277084
20
Creates a SHA256 hash of a Block
def hash(block): # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes block_string = json.dumps(block, sort_keys=True).encode() return hashlib.sha256(block_string).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash_block(self):\n sha = hasher.sha256()\n sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).endswith('utf-8'))\n return sha.hexdigest()", "def compute_hash(block):\n block_string = json.dumps(self.__dict__, sort_keys= True)\n return sha256(block_string.encode()).hexdigest()", "def hash(block):\r\n block_string = json.dumps(block, sort_keys=True).encode()\r\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(self, block):\n block_string = json.dumps(block, sort_keys=True).encode()\n\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n # hashes a block\n #we must make sure that the dictionary is ordered, or we will have inconsistent hashes\n block_string = json.dumps(block, sort_keys = True).encode()\n return hashlib.sha256(block_string).hexdigest()\n #pass", "def hash(block):\n\n # Dictionary must be ordered, else hashes will be inconsistent\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def hash(block):\n # The dictionary MUST be ordered, or we can have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def test_hash_sha256(self):\n block = self.blockchain.new_block(self.proof, self.previous_hash)\n hash_ = self.blockchain.hash(block)\n\n self.assertIsInstance(hash_, str)\n self.assertEqual(hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest(), hash_)", "def hash(self, block):\r\n # Convert Dictionary To String\r\n\r\n encoded_block = json.dumps({'nonce': block['nonce'], # Create a string from the required fields\r\n 'transaction': block['transactions'],\r\n 'previous_hash': block['previous_hash']}, sort_keys=True).encode()\r\n\r\n # Hash The String And Return It\r\n return hashlib.sha256(encoded_block).hexdigest() # Return the hash\r", "def hash(block):\n\t\t#Make sure the Dictionnary is ordered to have consistent hashes\n\t\tblock_string = json.dumps(block, sort_keys=True).encode()\n\t\treturn hashlib.sha256(block_string).hexdigest()", "def hash_block(self):\n # TODO : Refactor the algorithm and improve it. 
This method only does basic things\n block_string = pickle.dumps(self)\n block_hash = hashlib.sha3_256(block_string).digest()\n # The above lines converts the object into __str__() representation and hashes it using sha3_256 algorithm.\n return block_hash", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def hash_block_header(header: Header) -> str:\n hashable_block_header = header.SerializeToString()\n return Verification.hash_bytes_256(hashable_block_header)", "def compute_hash(self) -> str:\r\n #block_dict = self.__dict__.pop('hash', None) # Remove hash field value before calculating hash\r\n block_dict = self.__dict__.copy()\r\n block_dict.pop('hash', None) # Remove hash field value before calculating hash\r\n block_string = json.dumps(block_dict, sort_keys=True).encode('utf-8')\r\n return sha256(block_string).hexdigest()", "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def compute_hash(self):\n '''\n s = \"\"\n s += str(self.index)\n for i in range(len(self.transactions)):\n s += self.transactions[i]\n s += str(self.timestamp)\n s += self.previous_hash\n s += str(self.nonce)\n\n s_json = json.dumps(s)\n x = sha256()\n x.update(s_json.encode())\n h = x.hexdigest()\n return h\n '''\n\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def hash_block_content(index: int, prev_bhash: str, timestamp: int,\n data: List[Transaction], difficulty: int, nonce: int):\n return hash_sha256([index, prev_bhash, timestamp, data, difficulty, nonce])", "def hash(last_block):\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(last_block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()", "def get_block_hash(header_bin):\n _hash = hashlib.sha256(hashlib.sha256(header_bin).digest()).digest()\n return reverse_hash(_hash.hex())", "def hash_block_header(self):\r\n # concatenate all the fields into string\r\n string = str(self.timestamp)\r\n string += self.hash_merkle_root\r\n string += str(self.bits)\r\n string += str(self.nonce)\r\n string += self.hash_prev_block_header\r\n hash_value = sha256(sha256(string.encode()).hexdigest().encode()).hexdigest()\r\n return hash_value", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def build_block_hash(index, timestamp, data, previous_hash):\n str_to_hash = str(index) + str(timestamp) + str(data) + 
str(previous_hash)\n block_hash = sha256(\n bytes(str_to_hash, encoding='utf-8')\n ).hexdigest()\n return block_hash", "def _Hash(content: bytes) -> str:\n return hashlib.sha256(content).hexdigest()", "def hash_for_file(file_name, block_size=2 ** 20):\n hasher = SHA256.new()\n source_file = open(file_name, \"r\")\n\n while True:\n data = source_file.read(block_size)\n if not data:\n break\n hasher.update(data.encode('utf-8'))\n\n source_file.close()\n return hasher.hexdigest()", "def hashFile(filename):\n\tblocks = []\n\twith open(filename, 'rb') as f:\n\t\tblock = f.read(1024)\n\t\twhile block:\n\t\t\tblocks.append(block)\n\t\t\tblock = f.read(1024)\n\t\n\tprevHash = b''\n\tfor block in reversed(blocks):\n\t\thash = sha256(block + prevHash)\n\t\tprevHash = hash\n\treturn prevHash", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def calculate_hash(self, base_block_hash: bytes) -> bytes:\n from hathor.merged_mining.bitcoin import build_merkle_root_from_path, sha256d_hash\n coinbase_tx_hash = sha256d_hash(self.coinbase_head + base_block_hash + self.coinbase_tail)\n merkle_root = bytes(reversed(build_merkle_root_from_path([coinbase_tx_hash] + self.merkle_path)))\n return sha256d_hash(self.header_head + merkle_root + self.header_tail)", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def sha256_stream(stream, block_size=65536):\n sha256 = hashlib.sha256()\n for block in iter(lambda: stream.read(block_size), b''):\n sha256.update(block)\n return sha256.hexdigest()", "def hashfile(file: str, block_size: int = 65536) -> str:\n with open(file, 'rb') as message:\n m = hashlib.sha256()\n block = message.read(block_size)\n while len(block) > 0:\n m.update(block)\n block = message.read(block_size)\n digest = m.hexdigest()\n\n return digest", "def compute_payload_block_hash(this):\n\n return hmac.new(\n hashlib.sha512(\n struct.pack('<Q', this._index) +\n hashlib.sha512(\n this._.header.value.dynamic_header.master_seed.data +\n this._.transformed_key + b'\\x01'\n ).digest()\n ).digest(),\n struct.pack('<Q', this._index) +\n struct.pack('<I', len(this.block_data)) +\n this.block_data, hashlib.sha256\n ).digest()", "def compute_block_id(self):\n digest = hashes.Hash(hashes.SHA256())\n if(self.previous):\n digest.update(self.previous)\n if type(self.miner) == type('str'):\n self.miner = bytes.fromhex(self.miner)\n digest.update(self.miner)\n\n for transaction in self.transactions:\n digest.update(transaction.txid)\n\n digest.update(Transaction.littleEndian(self.timestamp))\n digest.update(Transaction.littleEndian16Bytes(self.difficulty))\n digest.update(Transaction.littleEndian(self.nonce))\n\n return digest.finalize()", "def hashfile(file):\n\n hasher = hashlib.sha256()\n\n with open(file, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n hasher.update(buf)\n\n return(hasher.hexdigest())", "def hash_file_sha256(file_path, binary=False, buffer_size=65536):\n return hash_file(file_path, hash_type=hashlib.sha256, binary=binary, buffer_size=buffer_size)", "def hash(self) -> bytes:", "def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return 
result", "def hashing(word) :\r\n ans = hashlib.sha256(word.encode())\r\n return ans.hexdigest()", "def hash(password):\n return sha256_crypt.encrypt(password)", "def calculateHash(self):\n hashData = str(self.index) + str(self.data) + self.timestamp + self.previousHash + str(self.nonce)\n return hashlib.sha256(hashData.encode(encoding='UTF-8',errors='strict')).hexdigest()", "def sha256(ctx, salt=\"\"):\n if ctx.data:\n salted_input_value = salt + \":\" + ctx.data\n ctx.data = hashlib.sha256(salted_input_value.encode()).hexdigest()\n else:\n raise RefError(\n \"Ref error: eval_func: nothing to sha256 hash; try \" \"something like '|random:str|sha256'\"\n )", "def hash(self) -> types.UInt256:\n with serialization.BinaryWriter() as bw:\n bw.write_uint32(settings.network.magic)\n self.serialize_unsigned(bw)\n data_to_hash = bytearray(bw._stream.getvalue())\n data = hashlib.sha256(hashlib.sha256(data_to_hash).digest()).digest()\n return types.UInt256(data=data)", "def hash(self):\n return self.block_header.hash", "def sha256(message: bytes):\n # convert message bitarray\n bit_msg = bitarray(endian='big')\n bit_msg.frombytes(message)\n L = len(bit_msg)\n\n # additions done mod 2^32\n pow2 = pow(2,32)\n\n # append 1 followed by K 0s where K is the minimum number >= 0 such that \n # len(bit_msg) + 1 + K + 64 is a multiple of 512\n bit_msg = bit_msg + bitarray('1') + (bitarray('0') * ((-L-65) % 512))\n # append len(bit_msg) as a 64-bit int to bit_msg\n bit_msg = bit_msg + util.int2ba(L, length=64, endian='big')\n\n # initialize hash to predefined values\n current_hash = [h for h in initial_hash]\n\n # operate on each 512-bit chunk\n for chunk_index in range(len(bit_msg)//512):\n chunk = bit_msg[chunk_index * 512 : (chunk_index+1) * 512]\n # w is array of 64 32-bit words with first 16 equal to chunk\n w = [chunk[i*32 : (i+1)*32] for i in range(16)]\n w.extend([bitarray(32) for _ in range(48)])\n # create last 48 words in w from first 16\n for i in range(16, 64):\n s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ rightshift(w[i-15], 3)\n s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ rightshift(w[i-2], 10)\n w[i] = int2ba32(sum(map(util.ba2int, [w[i-16], s0, w[i-7], s1])) % pow2)\n\n # copy current hash (stored in hex) into working list v as bitarrays\n v = list(map(int2ba32, current_hash))\n # compression\n for i in range(64):\n S1 = rightrotate(v[4], 6) ^ rightrotate(v[4], 11) ^ rightrotate(v[4], 25)\n ch = (v[4] & v[5]) ^ ((~v[4]) & v[6])\n temp1 = (constants[i] + sum(map(util.ba2int, [v[7], S1, ch, w[i]]))) % pow2\n S0 = rightrotate(v[0], 2) ^ rightrotate(v[0], 13) ^ rightrotate(v[0], 22)\n maj = (v[0] & v[1]) ^ (v[0] & v[2]) ^ (v[1] & v[2])\n temp2 = (util.ba2int(S0) + util.ba2int(maj)) % pow2\n\n # shift elements of v by 1\n for j in reversed(range(1, len(v))):\n v[j] = v[j-1]\n v[0] = int2ba32((temp1 + temp2) % pow2)\n v[4] = int2ba32((util.ba2int(v[4]) + temp1) % pow2)\n\n # add compressed values (which are bitarrays) to current_hash (which are ints)\n current_hash = list(map(lambda a,b: (a + util.ba2int(b)) % pow2, current_hash, v))\n\n # each entry of current_hash is a 32-bit integer so convert to 4 bytes \n # adding bytes appends them\n return b''.join(x.to_bytes(4, 'big') for x in current_hash)", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def convert_to_SHA256(x):\n result = hashlib.sha256(x.encode())\n result = result.hexdigest()\n return result", "def hash256(the_file, blocksize=32768, 
force=False):\n if not force and the_file.sha256 is not None:\n # If we already have a hash, use it.\n return the_file.sha256\n # We only do the expensive job of hashing if it doesn't exist, or we're asked to force it.\n try:\n with open(the_file.full_path, 'rb') as f:\n hasher = hashlib.sha256()\n while True:\n buf = f.read(blocksize)\n if not buf:\n break\n hasher.update(buf)\n except:\n return None\n return hasher.hexdigest()", "def get_block_hash(index):\n # TODO: Require implementation\n pass", "def hash_sbox(f):\n hf = sha256()\n for x in f:\n hf.update(hex(x).encode('utf-8'))\n return hf.hexdigest()", "def sha256(content):\n content = content.encode('utf-8')\n return hashlib.sha256(content).hexdigest()", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def get_256_hash_from_string(string):\n\n sha256 = hashlib.sha256()\n sha256.update(string.encode('utf-8'))\n\n return sha256.hexdigest()", "def sha256(value):\n return hashlib.sha256(value).hexdigest()", "def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest", "def get_sha256_file(filename):\n BLOCKSIZE = 65536\n hasher = hashlib.sha256()\n with open(filename, 'rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def get_partial_sha256(self, nbytes):\n return sha256file(abspath=self.abspath, nbytes=nbytes)", "def sha256(data):\n\n d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)\n d.update(data)\n return d.digest()", "def sha3_256(x):\n return hashlib.sha3_256(x).digest()", "def get_blockHash(self, data):\n blockHash = data['blockHash']\n return blockHash", "def file_hash(file_to_hash: Path) -> str:\n sha256_hash = hashlib.sha256()\n with file_to_hash.open(\"rb\") as f:\n for block in iter(lambda: f.read(4096), b\"\"):\n sha256_hash.update(block)\n return sha256_hash.hexdigest()", "def sha256(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha256\")", "def proof_of_work(self, block):\r\n block.nonce = 0\r\n\r\n computed_hash = block.compute_hash()\r\n while not computed_hash.startswith('0' * Blockchain.difficulty):\r\n block.nonce += 1\r\n computed_hash = block.compute_hash()\r\n\r\n return computed_hash", "def hash_transaction(transaction: SignedRawTransaction) -> str:\n hashable_transaction = transaction.SerializeToString()\n return Verification.hash_bytes_256(hashable_transaction)", "def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return sha256(data_str.encode('utf-8')).hexdigest()", "def hash(self) -> str:\r\n ...", "def proof_of_work(block):\n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not Blockchain.ifsatisfy_diff(computed_hash):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def GetBlockHash(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def 
sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()", "def calculateRoot(blockObject: Block) -> str:\n txList = blockObject.transactions\n msgList = [str(hash(tx)) for tx in txList]\n msg = \"\".join(msgList)\n return sha3_256(msg.encode(\"ascii\")).hexdigest()", "def proof_of_work(self, block):\n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('0' * Blockchain.difficulty):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def get_hash(self):\n source = \"\"\n for cell in self.original_cells:\n source += \"\\n\".join(get_source(cell))\n return hashlib.sha256(source.encode(\"utf-8\")).hexdigest()", "def Sha256(data: Union[bytes, str]) -> bytes:\n return hashlib.sha256(AlgoUtils.Encode(data)).digest()", "def compute_message_hash(message_bytes):\n hash_obj = SHA256.new()\n hash_obj.update(message_bytes)\n return hash_obj.digest()", "def dense_hash(hash_, block_size=16):\n results = list()\n for index in range(0, len(hash_), block_size):\n\n block = hash_[index:index + block_size]\n\n total = 0\n for i in block:\n total ^= i\n\n results.append(total)\n\n return results", "def sha256(s: str) -> str:\n return hashlib.sha256(s.encode()).hexdigest()", "def sha256(self):\n return self._sha256", "def sha256(self):\n return sha256file(self.abspath)", "def proof_of_work(block):\n block.nonce = 0\n\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('0' * Blockchain.difficulty):\n block.nonce += 1\n computed_hash = block.compute_hash()\n\n return computed_hash", "def generate_sha256_hash(fpath, sig_key=None):\n return run(fpath, sig_key)", "def hash_me(cls, p_str, p_len=64):\n v_hash = str()\n v_len = EC.SHA256 if p_len is None else EC.SHA256 if p_len not in EC.HASH_ALGO else p_len\n if v_len == EC.SHA512:\n v_hash = hashlib.sha512()\n elif v_len == EC.SHA256:\n v_hash = hashlib.sha256()\n elif v_len == EC.SHA224:\n v_hash = hashlib.sha224()\n elif v_len == EC.SHA1:\n v_hash = hashlib.sha1()\n\n v_hash.update(p_str.encode(\"utf-8\"))\n return v_hash.hexdigest()", "def sha3_256(data=None):\n return SpongeHash(512, 256, data, \"SHA3-256\", KeccakSponge, PAD_SHA3)", "def computeHash(filename):\n fileHash = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n fileHash.update(chunk)\n return fileHash.hexdigest()", "def _electrum_script_hash(script: bytes) -> str:\n bytes = bytearray(scripts.sha256(script))\n bytes.reverse()\n return bytes.hex()", "def generate_hash(password):\n return pbkdf2_sha256.hash(password)", "def HashAlgorithm(self) -> _n_7_t_0:", "def fast_hash(infile):\n\n m = hashlib.sha256()\n with open(infile, 'rb', 1024 * 1024) as f:\n l = f.read(1024 * 1024)\n while (len(l) > 0):\n m.update(l)\n f.seek(1024 * 1024 * (512 - 1), 1)\n l = f.read(1024 * 1024)\n return m.hexdigest()", "def default_sha256(key: KeyT, *args, **kwargs) -> bytes:\n return sha256(key).digest() # type: ignore" ]
[ "0.79220486", "0.79210114", "0.7906467", "0.7823901", "0.7823901", "0.78018546", "0.7720867", "0.76859254", "0.7677476", "0.7630424", "0.7623705", "0.7534247", "0.7505899", "0.7446964", "0.74002504", "0.74002504", "0.73705757", "0.73611194", "0.7283995", "0.72734344", "0.72014606", "0.71950984", "0.718575", "0.7172557", "0.71227175", "0.70380634", "0.70197177", "0.69963586", "0.6871818", "0.6856275", "0.68426216", "0.6806712", "0.675739", "0.6712361", "0.6712361", "0.6690595", "0.66676533", "0.66274136", "0.66143095", "0.6613235", "0.6591244", "0.65731657", "0.65669763", "0.6559439", "0.65412974", "0.65397346", "0.6538512", "0.65379435", "0.65093213", "0.6503823", "0.6503508", "0.6503508", "0.64879924", "0.64755225", "0.6435043", "0.6425054", "0.63718224", "0.6358856", "0.6328645", "0.63285995", "0.6324685", "0.632014", "0.6291863", "0.6282327", "0.62708455", "0.6267278", "0.62639", "0.6248934", "0.6244761", "0.62404037", "0.6224621", "0.62240577", "0.6207618", "0.6169515", "0.6166688", "0.61594516", "0.6157693", "0.61539394", "0.61394244", "0.61344516", "0.6127854", "0.6122387", "0.6121123", "0.6118602", "0.61168385", "0.6116808", "0.6116414", "0.61020535", "0.60936546", "0.6077827", "0.6068172", "0.60629356", "0.6055617", "0.6053939", "0.6047739", "0.6043871" ]
0.7548614
15
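Illustrative note (not part of the dataset rows above or below): the positive document for the "Creates a SHA256 hash of a Block" query hashes a canonically serialized dict. The sketch below only demonstrates how that document would be exercised; the sample block fields are assumptions for illustration and do not come from the dataset.

import hashlib
import json

def hash(block):
    # Sort keys so the serialized form (and therefore the digest) is deterministic.
    block_string = json.dumps(block, sort_keys=True).encode()
    return hashlib.sha256(block_string).hexdigest()

# Hypothetical block used only to demonstrate the call; field names are illustrative.
example_block = {"index": 1, "timestamp": 1700000000, "proof": 35293, "previous_hash": "0" * 64}
print(hash(example_block))  # prints a 64-character hex digest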
Check if value is prime
def is_prime(value: int) -> bool: if value == 1: return False if value <= 0: raise ValueError("Value must be greater than zero") for i in range(2, int(value**(1/2)) + 1): if value % i == 0: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_prime(value):\n\n if value < 2: raise ValueError\n\n for i in range(2, value):\n if value % i == 0:\n return False\n\n return True", "def is_prime(self):\n pass", "def is_prime(value):\n if value < 4:\n return True\n \n lower_bound = 2\n upper_bound = value-1\n \n prime = True\n test_value = lower_bound\n \n while test_value < upper_bound:\n #print \"testing divisibility of %d for %d\" % (value, test_value)\n if value % test_value == 0:\n prime = False\n test_value += 1\n return prime", "def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False", "def is_prime(a):\n return all(a % i for i in xrange(2, a))", "def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True", "def isprime(n):\r\n\treturn is_prime(n)", "def isprime(x):\n if x <= 1: return False \n if x % 2 == 0: return x == 2\n for k in range(3, int(sqrt(x))+1, 2): \n if x % k == 0: return False\n return True", "def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True", "def isprime(number):\n\n if number == 1:\n return False\n for i in range(2, int(number**0.5) + 1):\n if number % i == 0:\n return False\n return True", "def isprime(number: int) -> bool:\n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n return True", "def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True", "def isprime(n):\n\treturn is_prime(n)", "def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))", "def test_prime(n):\n if SIEVE[n]:\n return True\n else:\n return False", "def is_prime(n):\n return mr_prime(n)", "def is_prime(x):\n if x < 2:\n return False\n for i in range(2, x // 2 + 1):\n if x % i == 0:\n return False\n return True", "def is_prime(num):\n if num < 2:\n return False\n\n for i in range(2, num):\n if num % i == 0:\n return True", "def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True", "def is_prime(num):\n\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True", "def isPrime(x):\n for i in range(2,int(x**0.5)+1):\n if (x % i == 0):\n return False\n\n return True", "def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True", "def is_prime(x: int) -> bool:\n if x < 2:\n return False\n if x != 2 and x % 2 == 0:\n return False\n for i in range(3, x // 2 + 1):\n if x % i == 0:\n return False\n return True", "def is_prime(num: int) -> bool:\n return factorial(num - 1) % num != 0", "def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True", "def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n 
return True", "def primenumber(x):\n if x >= 2:\n for y in range(2,x):\n if not (x % y):\n return False\n else:\n return False\n return True", "def is_prime(number):\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True", "def is_prime(number):\n\tif number < 0:\n\t\treturn False\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True", "def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res", "def is_prime(num):\n\tif num is 1:\n\t\treturn False\n\tif num % 2 is 0:\n\t\treturn num is 2\n\n\tdivision = 3\n\twhile (division * division) <= num:\n\t\tif num % division is 0:\n\t\t\treturn False\n\t\tdivision += 2\n\treturn True", "def is_prime(num):\n for x in range(2, num + 1):\n if num % x == 0:\n return False\n return True", "def is_prime(x: int) -> bool:\n return not any(x % i == 0 for i in range(2, int(math.sqrt(x)+1)))", "def is_prime(number):\n if number <= 1:\n return False\n\n max_element = int(math.ceil(math.sqrt(number)))\n # iterate through all elements from 2 through sqrt(n)\n for element in range(2,max_element + 1):\n if number % element == 0:\n return False\n\n return True", "def is_prime(number):\n\t\n\tif number < 2: return False\n\telif number == 2: return True\n\telif number % 2 == 0: return False\n\telse:\n\t\tfor x in range(2, number):\n\t\t\tif number % x == 0:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def is_prime(num):\n if num == 0 or num == 1:\n return False\n for x in range(2, num):\n if num % x == 0:\n return False\n else:\n return True", "def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True", "def good_prime(p):\n return p % 4 == 3 and probablyPrime(p, accuracy=100)", "def is_prime(n):\n k = 2\n while n % k != 0:\n k += 1\n if k < n:\n return False\n else:\n return True", "def is_prime(num):\n for i in range(2, num):\n if num % i == 0:\n return False\n return True", "def is_prime(num):\n if not isinstance(num, int):\n return False\n if num <= 1:\n return False\n if num == 2 or num == 3:\n return True\n if num % 6 in [0, 2, 3, 4]:\n return False\n div_max = int(math.sqrt(num))\n for div in range(5, div_max + 1, 2):\n if num % div == 0:\n return False\n return True", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def is_prime(num):\n for n in range(2, num):\n if num % n == 0:\n return False\n\n else:\n return True", "def prime_with(x, s):\n for i in s:\n if x % i == 0:\n return False\n return True", "def is_prime(num1):\n num2 = 2\n while num2 < num1:\n if num1 % num2 == 0:\n return False\n num2 += 1\n return True", "def isprime(x):\n # 1 and 0 are not primes\n if( x < 2):\n return False\n if( x == 2):\n return True\n # All evens are not prime\n if (x % 2 == 0):\n return False\n\n # check others, up x / 2\n else:\n for y in range(3, int(x**(0.5)+1), 2):\n ##print(y)\n if( x % y == 0):\n return False\n return True", "def is_prime(p):\n 
if p == 1:\n return False\n for n in range(2, int(math.sqrt(p))+1):\n if p % n == 0:\n return False\n return True", "def is_prime(num):\n if num < 2:\n return False\n elif num == 2:\n return True\n\n for i in range(2, int(num**(1/2))+1):\n if num % i == 0:\n return False\n\n return True", "def is_prime(self, it):\n return it > 0 \\\n and (it == 2 or it % 2 != 0) \\\n and (it == 1 or not (any(it % number == 0 for number in range(3, it // 2, 2))))", "def is_prime(num):\r\n if num == 0 or num == 1:\r\n return False\r\n for i in range(2, num):\r\n if num % i == 0:\r\n return False\r\n else:\r\n return True", "def isPrime(num):\r\n if num < 1:\r\n return False\r\n elif num == 2:\r\n return True\r\n else:\r\n for i in range(2, num):\r\n if num % i == 0:\r\n return False\r\n return True", "def is_prime(num):\n\n assert num >= 0, \"Num should be a positive integer!\"\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n while n * n <= num:\n if num % n == 0:\n return False\n n += 2\n\n return True", "def is_prime(number):\n if number == 0 or number == 1:\n return False\n\n isprime = True\n for test in range(2, int(math.sqrt(number) + 1)): # +1 since we have to test up to the square root value\n if number % test == 0:\n isprime = False\n break\n return isprime", "def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo", "def is_prime(k: int) -> bool:\n if k < 2 or k % 2 == 0:\n return False\n elif k == 2:\n return True\n else:\n for x in range(3, int(math.sqrt(k) + 1), 2):\n if k % x == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 or n == 3:\n return True\n elif n % 2 == 0:\n return False\n else:\n x = 0\n for i in range(3, n, 2):\n if n % i == 0:\n x = 1\n return x == 0", "def is_prime(n):\n if n == 1:\n return False\n else:\n i = 2\n while i < n:\n if n % i == 0:\n return False\n i += 1\n return True", "def prime_checker(num):\n\n assert num > 0\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n\n while n * n <= num:\n\n if num % n == 0:\n return False\n\n else:\n num += 2\n\n return True", "def is_prime(n):\n x = 2\n def divide_x(x):\n if x > round(pow(n, 0.5)):\n return True\n elif n % x == 0:\n return False\n else:\n return divide_x(x + 1)\n return divide_x(x)", "def is_prime(n):\n for i in range(2,n):\n if n % i == 0:\n return False\n return True", "def isprime(checknumber):\n isprime = 0\n if checknumber % 2 == 0:\n if checknumber != 2:\n return False\n else:\n x = 3\n while x <= int(math.sqrt(checknumber)):\n if checknumber % x == 0:\n return False\n x += 2\n return True", "def isprime(n):\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n\tb = 2\n\twhile b <= math.sqrt(n):\n\t\tif n % b == 0:\n\t\t\treturn False\n\t\tb += 1\n\treturn True", "def isPrime(x):\n \n # your code here\n Prime_num = False\n \n if x > 1:\n # Iterate from 2 to n / 2\n for i in range(2, int(x/2)+1):\n\n # If num is divisible by any number between\n # 2 and n / 2, it is not Prime_num\n if (x % i) == 0:\n Prime_num = False\n break\n else:\n Prime_num = True\n else:\n Prime_num = False\n \n return Prime_num", "def esPrimo(self, x):\r\n divisor = 0\r\n for i in range(2, x+1):\r\n if x%i == 0:\r\n divisor = divisor + 1\r\n if divisor > 1:\r\n return False\r\n return True", "def is_prime(k):\n\n for i in xrange(2, int(k / 2) + 1):\n if k % i == 0:\n return False\n\n 
return True", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n i, count = 2, 0\n while i < n:\n if n % i == 0:\n count += 1\n break\n i += 1\n if count == 0 and n != 1:\n return True\n else:\n return False", "def is_prime(n: int) -> bool:\n if n <= 1:\n return False\n\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n \n return True", "def is_prime(n: int) -> bool:\n if n <= 3:\n return n > 1\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i ** 2 <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def isprime(n):\n if n == 2: return True\n if n == 3: return True\n if n % 2 == 0: return False\n if n % 3 == 0: return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def is_prime(n):\n \n for i in range(3, int(n**0.5+1), 2):\n if n % i == 0:\n print(n,'is not prime')\n return False\n\n print(n,'is prime') \n return True", "def is_prime(n):\n assert n >= 1, \"n is not a positive integer\"\n k = 2\n if n == 1:\n flag = False\n else:\n flag = True\n while k <= sqrt(n):\n if n % k == 0:\n flag = False\n break\n k += 1\n return flag", "def return_prime(x):\n \n for m in range(x+1):\n if m!=0 and x%m==0 and m!=1 and x!=m:\n return 'not prime'\n return 'prime'", "def isPrime(n, DEBUG=False):\n if n < 2:\n return False\n else:\n return _sieve(n, DEBUG=DEBUG)[n]", "def is_prime(n):\n if n <= 1:\n return False\n elif n <= 2:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3, int(n**.5) + 1, 2):\n if n % i == 0:\n return False\n return True", "def test_is_prime_valid(self):\n sol = solution.Solution();\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n #self.assertTrue(sol.isPrime(863))", "def prime_checker(num):\n if num <= 0:\n return \"Error: num must be a positive nonzero integer\"\n elif num <= 3:\n return num > 1\n elif num % 2 == 0 or num % 3 == 0:\n return False\n else:\n k = 5\n while k * k < num:\n if (num % k == 0) or (num % (k+2) == 0):\n return False\n k += 6\n return True", "def basicIsPrime(n,K=100):\n if n % 2 == 0:\n return n == 2\n if n in primesList.lessThanHundredThousand:\n return True\n return None", "def is_prime(n):\n if n <= 1: return False\n if n <= 3: return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def is_prime(num):\n for n in range(2,num):\n if num % n == 0:\n print \"Not Prime\"\n break\n else: \n print 'The number is prime'", "def check_prime(p):\n # type: (int) -> RE\n if not gmpy2.is_prime(p):\n return RSAPublicKeyResult.NON_PRIME\n return RSAPublicKeyResult.OK", "def is_prime(n):\n if n == 2:\n return True\n if n == 0 or n == 1 or n % 2 == 0:\n return False\n for i in range(3, int(math.sqrt(n))+1, 2):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n == 2 or n == 3: return True\n if n < 2 or n % 2 == 0: return False\n if n < 9: return True\n if n % 3 == 0: return False\n r = int(sqrt(n))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f + 2) == 0: return False\n f += 6\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n in [2,3]:\n return True\n if n % 2 == 0:\n return False\n\n for factor in range(3, 
int(math.sqrt(n))+1, 2):\n if n % factor == 0:\n return False\n return True", "def isPrime(n: int):\n if n <= 1:\n return False\n\n for i in range(2, n-1):\n if n % i == 0:\n # print(\"{} is divisable by {}\".format(n, i))\n return False\n\n return True", "def isPrime(n):\r\n # Znamo da 1 nije prost broj\r\n if n == 1:\r\n return False\r\n\r\n i = 2\r\n # Petlja se vrti od 2 do int(sqrt(x)) \r\n while i*i <= n:\r\n # Provjera da li i dijeli x bez ostatka\r\n if n % i == 0:\r\n # To znači da n ima faktor između 2 i sqrt(n)\r\n # Stoga nije prost broj\r\n return False\r\n i += 1\r\n # Ako nismo pronašli nijedan faktor u gornjoj petlji\r\n # onda je n prost broj\r\n return True", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n\n return True", "def is_prime(n):\n if n < 1 or n % 1 > 0:\n return False\n if n == 1 or n == 2:\n return True\n for i in range(3, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True", "def prime(n: int) -> bool:\n if len(divisors(n)) > 2 or n < 1:\n return False\n else:\n return True", "def is_prime(n):\n if n <= 1:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(num):\n # 2 is prime; exclude\n if num == 2: \n return True\n \n # exclude all other even numbers and numbers less than 2\n if num % 2 == 0 or num < 2:\n return False\n \n # Only need to count up to the the square root of num\n sqrt = int(num ** 0.5 +1) # int rounds down; correct by +1\n \n # Loop through all odd numbers\n for i in range(3, sqrt, 2):\n if num % i == 0:\n return False\n return True", "def isprime(n):\n if n == 1:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def isPrime(n):\n for i in range (2, n/2+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n == 2:\n return True\n\n if n < 2 or n % 2 == 0:\n return False\n\n for i in range(3, int(sqrt(n)+1), 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(inpt:int) -> bool:\n inpt_sqrt = int(math.sqrt(inpt))\n prime_flag = True\n for i in range(2,inpt_sqrt):\n if is_divisible(inpt,i):\n prime_flag = False\n break\n return prime_flag", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def isPrime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n\n return True", "def is_prime(n):\r\n if n in (2, 3, 5, 7, 11, 13, 17, 19): return(True)\r\n if (n<=1 or n%2==0 or n%3==0): return(False)\r\n # determine upper limit of test range =>\r\n ulimit = (int(math.ceil(math.sqrt(n)))+1)\r\n return(not any(n%k==0 for k in range(3, ulimit, 2)))" ]
[ "0.8251294", "0.7981401", "0.794535", "0.78773165", "0.7870162", "0.7779645", "0.77755505", "0.77661145", "0.77396506", "0.77324396", "0.77158386", "0.77105474", "0.7694114", "0.7672864", "0.7660135", "0.7650814", "0.76269066", "0.76167756", "0.7599302", "0.75861603", "0.7584424", "0.75835097", "0.7577305", "0.7570747", "0.7545516", "0.7541722", "0.7530172", "0.7525161", "0.75243926", "0.7520725", "0.7515851", "0.7504046", "0.75000113", "0.7499382", "0.74853706", "0.7474431", "0.7461341", "0.74372786", "0.74341345", "0.7432573", "0.7426603", "0.7425776", "0.74130476", "0.73828083", "0.7374941", "0.7362141", "0.734937", "0.73461616", "0.7338399", "0.7329647", "0.7307399", "0.72983986", "0.7278367", "0.72743994", "0.7272283", "0.7249055", "0.72441465", "0.72264624", "0.72245926", "0.7224098", "0.7211653", "0.71941215", "0.71937567", "0.7190316", "0.7188377", "0.7180787", "0.7173092", "0.71708715", "0.71297127", "0.71290237", "0.71224463", "0.71215165", "0.71203965", "0.71156013", "0.71107656", "0.7107491", "0.71062917", "0.7102367", "0.7093732", "0.7091128", "0.7070077", "0.7069965", "0.7067709", "0.7065804", "0.7063077", "0.7058535", "0.7057392", "0.7054165", "0.705186", "0.70437115", "0.70433444", "0.7042543", "0.7042438", "0.70345277", "0.7021664", "0.7019075", "0.7015921", "0.7007321", "0.70062125", "0.70028836" ]
0.81013525
1
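Illustrative note (again outside the dataset rows): the positive document for "Check if value is prime" performs trial division up to the square root of the value. A minimal usage sketch, reproducing that document verbatim and adding only an assumed sanity check:

def is_prime(value: int) -> bool:
    if value == 1:
        return False
    if value <= 0:
        raise ValueError("Value must be greater than zero")
    for i in range(2, int(value ** 0.5) + 1):
        if value % i == 0:
            return False
    return True

# Quick sanity check mirroring the retrieval query's intent (assumed, not from the dataset).
assert [n for n in range(2, 20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]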
Get all prime factors of the given value
def factors(value: int) -> list: prime_factors: list = [] for i in range(2, value + 1): if i > 2 and i % 2 == 0 or not is_prime(i): continue while value % i == 0: value = int(value / i) prime_factors.append(i) if value == 1: break return prime_factors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prime_factors_of(value):\n\n # Okay, so we need to \"solve\" two problems here:\n # is a given number a factor of `value`?\n # and\n # is a given number PRIME?\n\n # I think the simplest non-stupid approach is to generate all \n # FACTORS OF VALUE, and then check to see which are prime!\n # actually, a cute approach would be to start from the top down\n # and just return the first one. we'll see if i need that optimization.\n # (don't optimize prematurely!)\n\n # WELP. I tried to generate all primes up to value//2! what a mistake.\n # or was it? maybe it was just a bad implementation of prime-finding?\n\n factors = []\n\n for i in range(2, value//2):\n if value % i == 0:\n factors.append(i)\n\n prime_factors = []\n \n for i in factors:\n if is_prime(i):\n prime_factors.append(i)\n\n return prime_factors", "def generate_prime_factors(value):\n if not isinstance(value, int):\n raise ValueError()\n\n primes = []\n factor = 2\n while factor <= value:\n while value % factor == 0:\n primes.append(factor)\n value /= factor\n\n factor += 1\n\n return primes", "def factor_primes(x, iter):\n factors = []\n for factor in prime:\n while x % factor == 0:\n x = x / factor\n factors.append(factor)\n if x == 1:\n break\n return factors", "def primefactors(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])", "def prime_factors(number):\n factors = []\n\n if number == 0 : return factors\n\n # first round factors by two\n while number % 2 == 0:\n factors.append(2)\n number /= 2\n\n # other rounds goes by odd numbers only (no other even is prime)\n divisor = 3\n while divisor <= number:\n while number % divisor == 0:\n factors.append(divisor)\n number /= divisor\n divisor += 2\n\n return factors", "def prime_factors(num):\n if prime_checker(num):\n return num\n if num > 10^5:\n maxPrime = round(num**0.5) + 1\n else:\n maxPrime = round(num/2)+1\n primelist = prime_generator(maxPrime)\n factors = []\n\n while num > 1 and num not in primelist:\n for prime in primelist:\n if num % prime == 0:\n factors.append(prime)\n num = int(num / prime)\n break\n if not num == 1:\n factors.append(num)\n \n return factors", "def get_prime_factors(self, number):\n for prime in self.get_primes():\n while number % prime == 0:\n yield prime\n number /= prime\n \n if number == 1:\n break", "def prime_factors(number):\n all_factors = factors(number)\n return list(filter(lambda x: is_prime(x), all_factors))", "def getallprimefactors(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n print(n)\n n /= d\n d += 1\n return factors", "def factor(number):\n\tdividing_primes = sieve(number/2 + 1)\n\tfactors = []\n\t\n\twhile number != 1:\t\n\t\tif not dividing_primes:\n\t\t\treturn [number]\n\n\t\tnext_divisor = min(dividing_primes)\n\n\t\tif not number % next_divisor:\n\t\t\tfactors.append(next_divisor)\n\t\t\tnumber /= next_divisor\n\t\telse:\n\t\t\tdividing_primes.remove(next_divisor)\n\n\treturn factors", "def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)", "def primeFactors(number):\n factorlist=[]\n loop=2\n while loop<=number:\n if number%loop==0:\n number/=loop\n factorlist.append(loop)\n else: \n loop+=1\n return factorlist", "def primefactors(n):\n seq = []\n val = 2\n while val <= n:\n if VERBOSE: print \"val: %s n: %s\" % (val, n)\n if n % val == 0:\n # Found a 
factor, shrink n by that factor \n # ie. n = 60, val = 2\n # Next pass n = 30, val = 2\n seq.append(val)\n n /= val\n else:\n # Not (or no longer) a factor\n val += 1\n\n return seq", "def prime_factors(n):\n\n prime_set = primes(n)\n factors = []\n for prime in prime_set:\n if n % prime == 0:\n factors.append(prime)\n return factors", "def prime_factors(n) -> []:\n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors", "def get_prime_factors(number):\n if number == 1:\n return []\n\n # We have to begin with 2 instead of 1 or 0\n # to avoid the calls infinite or the division by 0\n for i in range(2, number):\n # Get remainder and quotient\n rd, qt = divmod(number, i)\n if not qt: # if equal to zero\n return [i] + get_prime_factors(rd)\n\n return [number]", "def factors(n):\n _factors = []\n p = 1\n\n # Loop until half of n\n while p <= n // 2:\n p += 1\n if div_by(p, _factors):\n continue\n if not n % p:\n _factors.append(p)\n\n # Number given is a prime\n if not _factors:\n _factors.append(n)\n\n return _factors", "def factor(cls, number):\n factors = []\n for prime in cls():\n if prime > number:\n break\n # print 'Checking to see if %d is a factor of %d' % (prime, number)\n # reduce the total iterations\n if prime > math.sqrt(number):\n factors.append(number)\n break\n while not number % prime:\n number /= prime\n factors.append(prime)\n return factors", "def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1", "def prime_factors(n):\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]", "def get_factors(num):\n factors = []\n\n # Extend range by 1 to include num\n for i in range(1, num+1):\n if num % i == 0:\n factors.append(i)\n return factors", "def factors(n, cache=None):\n if cache is None or max(cache) < n:\n potential_factors = primes(n + 1)\n else:\n potential_factors = cache\n prime_factors = []\n i = 0\n while n != 1:\n while n % potential_factors[i] == 0:\n n /= potential_factors[i]\n prime_factors.append(potential_factors[i])\n i += 1\n return prime_factors", "def prime_factors(n):\r\n factors = defaultdict(int)\r\n d = 2\r\n while n > 1:\r\n while n % d == 0:\r\n factors[d]+=1\r\n n /= d\r\n d = d + 1\r\n if d*d > n:\r\n if n > 1: factors[n]+=1\r\n break\r\n return factors", "def prime_factors(num):\n prime_factors = []\n for i in range(2, num + 1):\n if (num % i) == 0 and is_prime(i) == True:\n prime_factors.append(i)\n return prime_factors", "def get_factors(val):\n N = np.sqrt(val)\n N = np.floor(N)\n M = val/N\n\n while (val % N != 0):\n N = N-1\n M = val/N\n\n return int(M), int(N)", "def prime_factors(n):\n\n factors = []\n lastresult = n\n c = 2\n while lastresult != 1:\n if lastresult % c == 0 and c % 2 > 0:\n factors.append(c)\n lastresult /= c\n c += 1\n else:\n c += 1\n return factors[0], factors[1]", "def factorize(num):\n factors = []\n while num not in primes_list:\n for prime in primes_list:\n if num % prime == 0:\n factors.append(prime)\n num /= prime\n break\n factors.append(num)\n factors = sorted(factors)\n return factors", "def factors(number):\n\n if not (isinstance(number, int)):\n raise TypeError(\n \"Incorrect number type provided. 
Only integers are accepted.\")\n\n factors = []\n for i in range(1, number + 1):\n if number % i == 0:\n factors.append(i)\n return factors", "def prime_factorization(x, sieve=None):\n if x == 0:\n return []\n if x in [1, 2]:\n return [x]\n if sieve is None:\n sieve = prime_sieve(x + 1)\n factors = []\n if sieve[x]:\n return [x]\n for i in range(2, int(math.sqrt(x) + 1)):\n if sieve[x]:\n break\n if not sieve[i]:\n continue\n if x % i == 0:\n factors.append(i)\n x //= i\n return factors + prime_factorization(x, sieve)", "def get_unique_factors(num):\n a = num\n m = int(num ** 0.5) if num > 100 else num\n factors = []\n primes = sieve(m)\n # Divide the number by compatible primes until it is 1\n # (or we run out of primes...)\n for p in primes:\n if a % p == 0:\n a = a / p\n factors.append(p)\n if a == 1:\n break\n return factors", "def find_factors(number):\n \n i = 2\n prod = 1\n factors = []\n sqrt = math.sqrt(number)\n num = number\n \n while i < num:\n div = check_divisbility(number, i)\n if div == 'divisible':\n factors.append(i)\n number /= i\n prod *= i\n recurse = find_factors(number)\n \n #I recurse here because it prevents us wasting time playing with large numbers\n for fac in recurse:\n factors.append(fac)\n number /= fac\n prod *= fac\n #stop if we find a factor greater tha sqrt(number)\n if i >= sqrt:\n break\n #make sure we're not looking once we find all the factors \n if prod == num:\n break\n else:\n if i> sqrt:\n if len(factors)==0:\n factors.append(num)\n prod *= num\n else: \n print i\n recurse = find_factors(number)\n for fac in recurse:\n factors.append(fac)\n prod *= fac\n if prod == num:\n break\n i = i+1\n if prod != num:\n raise ValueError (\"This isn't right\")\n return factors", "def factors(n: int) -> List[int]:\n k = 1\n while k**2 < n:\n if n % k == 0:\n yield k\n k += 1\n\n k = int(n**(1/2))\n while k > 0:\n if n % k == 0:\n yield n // k\n k -= 1", "def prime_factors(number):\n prime_factors = []\n while ( smallest_factor(number) ):\n smallest = smallest_factor(number)\n prime_factors.append(smallest)\n number /= smallest\n prime_factors.append(number)\n #return prime_factors\n return number", "def _prime_factorization(n):\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def factorize(n):\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct", "def find_factors(num):\n factors = set()\n i = 1\n while i*i < num:\n if num % i == 0:\n factors.add(i)\n factors.add(int(num/i))\n i+=1\n factors = list(factors)\n factors.sort()\n return factors", "def primefactors_with_multiplicity(n):\n factors = []\n primes = prime_sieve(n)\n\n for p in primes:\n while n % p == 0:\n factors.append(p)\n n /= p\n if n == 1:\n return(factors)\n return([n])", "def prime_factors_set(n):\n factors = []\n d = 2\n while n > 1:\n while n % d == 0:\n factors.append(d)\n n /= d\n d = d + 1\n if d*d > n:\n if n > 1: factors.append(n)\n break\n return list(set(factors))", "def factors(num):\n\tif is_prime(num) == True:\n\t\tfactors = [1, num]\n\t\treturn factors\n\telse:\n\t\tfactors = [1]\n\t\tsquare_root = int(math.ceil(math.sqrt(num)))\n\t\t\n\t\tfor n in range(2, square_root+1):\n\t\t\tif num % n == 
0:\n\t\t\t\tfactors.append(n)\n\n\t\tfor n in range(1, len(factors)):\n\t\t\tnew_n = num / factors[n]\n\t\t\tif new_n not in factors:\n\t\t\t\tfactors.append(num / factors[n])\n\n\t\tfactors.append(num)\n\t\treturn factors", "def prime_factors(n):\n if n < 2 or n - round(n) != 0:\n print('Numbers smaller than 2 and non-integers do not have prime',\n 'factors')\n L = []\n while n >= 2:\n i = low_prime(n)\n L.append(i)\n n //= i\n return L", "def factorize(n):\n it = factorize._prime_iterator\n factors = []\n it.reset()\n for p in it:\n if n == 1 or n < p * p:\n break\n if n % p == 0:\n n //= p\n m = 1\n while n % p == 0 and n > 1:\n n //= p\n m += 1\n factors.append((p, m))\n if n > 1:\n factors.append((n, 1))\n return factors", "def prime_factors(num):\n result = []\n for i in range(2, num):\n if (is_prime(i)) and (num % i == 0):\n result.append(i)\n if not result:\n print(\"No prime factors\")\n else:\n return result", "def factorization(n):\n pf = []\n for p in primeslist:\n if p*p > n : break\n count = 0\n while not n % p:\n n //= p\n count += 1\n if count > 0: pf.append((p, count))\n if n > 1: pf.append((n, 1))\n return pf", "def getPrimeFactors(num):\n n = num\n primes = {}\n\n p = 2\n sqrt = math.sqrt(num)\n\n def checkAndUpdate(inc):\n nonlocal n\n nonlocal p\n nonlocal primes\n if n % p == 0:\n if str(p) in primes.keys():\n primes[str(p)] += 1\n else:\n primes[str(p)] = 1\n n /= p\n else:\n p += inc\n \n while p == 2 and p <= n:\n checkAndUpdate(1)\n while p <= n and p <= sqrt:\n checkAndUpdate(2)\n if len(primes.keys()) == 0:\n primes[str(num)] = 1\n elif n != 1:\n primes[str(n)] = 1\n return primes", "def generate_prime_factors(number):\n if not isinstance(number, int):\n raise ValueError\n list_of_ints = []\n if number > 1:\n remainder = number\n divisor = 2\n while remainder != 1:\n if remainder % divisor == 0:\n list_of_ints.append(divisor)\n remainder = remainder / divisor\n else:\n divisor += 1\n return list_of_ints", "def get_prime_factors(num: int, prime_list: list = None) -> list:\n upper_bound = math.ceil(num / 2) + 1\n if not prime_list:\n prime_list = [prime for prime in primes.Primes(upper_bound)]\n\n prime_factors = []\n for prime in prime_list:\n temp = num\n multiplicity = 0\n temp, remainder = divmod(temp, prime)\n while remainder == 0 and temp >= 1:\n multiplicity += 1\n temp, remainder = divmod(temp, prime)\n if multiplicity > 0:\n prime_factors.append((prime, multiplicity))\n if prime > upper_bound:\n break\n\n if not prime_factors:\n prime_factors = [(num, 1)]\n\n return prime_factors", "def _factors(n):\n gen = ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)\n return set(sum(gen, []))", "def prime_factorization(n):\r\n result = []\r\n for i in xrange(2, n+1):\r\n s = 0;\r\n while n / float(i) == floor(n/float(i)):\r\n n = n / float(i)\r\n s += 1\r\n if s > 0:\r\n for k in range(s):\r\n result.append(i)\r\n if n == 1:\r\n return result", "def get_prime_factors(n):\n factors = {}\n if n <= 1: return {}\n \n while n != 1:\n if is_prime(n):\n factors[n] = 1\n break\n \n i = 2\n while i <= n:\n j = 0\n while n % i == 0 and n != 1:\n j += 1\n n //= i\n \n if j > 0:\n factors[i] = j\n break\n i += 1\n \n return factors", "def primeFactors(n):\n\n ps = primes(n)\n rest = n\n factors = {}\n for p in ps:\n if rest == 1:\n break\n\n if p ** 2 > n:\n if len(factors.keys()) > 0:\n factors[p] = 1\n else:\n factors[n] = 1\n break\n\n power = 0\n while rest % p == 0:\n power += 1\n rest = rest / p\n\n if power > 0:\n factors[p] = power\n\n return factors", 
"def factors(n):\n factors = []\n for x in range(1, int(sqrt(n)+1)):\n if (n % x) == 0:\n factors += [x, n/x]\n \n return sorted(set(factors))", "def calculate_factors(x):\n print(\"The factors of\", x, \"are:\")\n for i in range(1, x + 1):\n if x % i == 0:\n print(i)", "def factorize(primes, n):\n factor = []\n for prime in primes:\n ex = 0\n while n % prime == 0:\n ex += 1\n n = n // prime\n if ex != 0:\n factor.append((prime, ex))\n\n return factor if n == 1 else None", "def get_prime_factors_by_number(self, number):\n if int(number) < 2:\n print \"this method needs number >= 2\"\n return {}\n ret = {}\n import math\n # use math.sqrt for speedup\n if number >= 4:\n number_sqrt = math.sqrt(number)\n else:\n number_sqrt = 2\n primes = self.get_primes_by_limit_number(number_sqrt)\n num = number\n for p in primes:\n if num == 1:\n break\n while num % p == 0:\n num /= p\n if p in ret:\n ret[p] = ret[p] + 1\n else:\n ret[p] = 1\n if num == number:\n # in this case, number is prime\n ret[number] = 1\n elif num != 1:\n ret[num] = 1\n return ret", "def factor(n):\n\n f = []\n\n for i in xrange(1, int(round(sqrt(n)))+1):\n if n%i == 0:\n f.append(i)\n f.append(n/i)\n\n return f", "def prime_factors_p(num, primes):\n if num > primes[len(primes) - 1]:\n raise Exception('num is larger than the largest prime in the list: '\n '{} > {}'.format(num, primes[len(primes) - 1]))\n factors = {}\n if num < 0:\n factors[-1] = 1\n num = -num\n\n limit = math.floor(math.sqrt(num))\n\n current = num\n for i in primes:\n if i > current or i > limit:\n if current != 1:\n factors[current] = 1\n break\n power = 0\n while current % i == 0:\n power += 1\n current //= i\n\n if power > 0:\n factors[i] = power\n\n return factors", "def primefactors(num):\n\n while num % 2 == 0:\n print(2)\n num = num / 2\n for i in range(3,int(math.sqrt(num))+1,2):\n while ( num % i == 0 ):\n print (i)\n num = num / i\n if num > 2:\n print (num)", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def primish(n):\n\n factors = set()\n for i in range(n, 1, -1):\n\n # Find the smallest divisor of i.\n smallest = 2\n while (i % smallest) != 0:\n smallest += 1\n\n # Divide by that divisor until we have 1 or something else.\n remainder = i\n while (remainder % smallest) == 0:\n remainder /= smallest\n\n # Keep it if needed.\n if remainder == 1:\n factors.add(i)\n\n return factors", "def factors(n):\n f = list(reduce(list.__add__, ([i, n // i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)))\n return sorted(f)", "def prime_factorization(number):\n global primes_under_1M\n\n assert number <= 1_000_000\n\n factors = []\n running_product = 1\n current_number = number\n\n # Loop through the primes, iteratively dividing our\n # number by each prime `p` so long as `p` exactly\n # divides `current_number`\n for p in primes_under_1M:\n while (current_number % p) == 0:\n current_number = current_number // p\n factors.append(p)\n running_product *= p\n if running_product == number:\n return set(factors)", "def factors_s(n, ret=False):\n f = set()\n if n < 4:\n return f\n limit = int(n / 2 + 1)\n for i in primeList:\n if i > limit:\n break\n while n != 1:\n if n % i:\n break\n else:\n n //= i\n f.add(i)\n else:\n break\n if ret:\n return (n, f)\n return f", "def prime_factors(number: int) -> dict:\n f = {}\n i = 2\n while number > 1 and number >= i:\n if number % i == 0:\n if i not in f:\n f[i] = 1\n else:\n f[i] += 1\n number //= i\n else:\n i += 1\n return f", "def primefacs(num):\n facs=set()\n fac=2\n while (fac*fac <= num):\n if 
num%fac == 0:\n facs.add(fac)\n num = num//fac\n else:\n fac += 1\n if num != 1:\n facs.add(num)\n return facs", "def factors(n):\n\tif n<0: n=-n # Only deal with positive integers\n\tif (is_prime(n)):\n\t\treturn [n]\n\tfact = factorone(n)\n\tif ((abs(n) == 1) or (n == 0)): raise ValueError('Unable to factor \\\"{0}\\\"'.format(n))\n\tfacts = factors(n//fact) + factors(fact)\n\tfacts.sort()\n\treturn facts", "def prime_factorization(num):\n return prime_factors_p(num, _sieve)", "def factors(n):\r\n output = []\r\n for i in range(1,n+1):\r\n if n % i == 0:\r\n output.append(i)\r\n output.append(-i)\r\n return output", "def primeFactorsGivenPrimes(n, primes):\n factors = {}\n for p in primes: \n while n % p == 0:\n n //= p\n factors[p] = factors.get(p,0)+1\n if n < p*p:\n if n > 1:\n factors[n] = factors.get(n,0)+1\n return factors\n return factors", "def factors(n):\r\n\tif n<0: n=-n # Only deal with positive integers\r\n\tif (is_prime(n)):\r\n\t\treturn [n]\r\n\tfact = factorone(n)\r\n\tif (fact == 1): return \"Unable to factor \"+str(n) # Can't deal with units\r\n\tfacts = factors(n/fact) + factors(fact)\r\n\tfacts.sort()\r\n\treturn facts", "def factors2(n):\n\tfactors = []\n\ti = 1\n\twhile i <= math.sqrt(n):\n\t\tif n%i == 0:\n\t\t\tfactors.append(i)\n\t\t\tfactors.append(n/i)\n\t\ti += 1\n\treturn factors", "def GetNFactors(n, primes, n_pfactors, _):\n sqrtn = int(n ** 0.5) + 1\n\n for p in primes:\n if p > sqrtn:\n break\n if n % p == 0:\n n //= p\n if n % p == 0:\n return n_pfactors[n]\n else:\n return n_pfactors[n] + 1\n\n # n is primes\n primes.append(n)\n return 1", "def prime_factorization(n):\n\t\n\tprimes = []\n\t\n\twhile not n % 2:\n\t\tprimes.append(2)\n\t\tn //= 2\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\twhile not n % possible_factor:\n\t\t\tprimes.append(i)\n\t\t\tn //= possible_factor\n\t\n\tif n > 1:\n\t\tprimes.append(n)\n\treturn primes", "def problem3():\n def _prime_factorization(n):\n \"\"\"Returns the list of prime factors of a number n\"\"\"\n factors = []\n f = 2\n # Use trial division to add factors\n while f**2 <= n:\n while (n % f) == 0:\n factors.append(f)\n n //= f\n f += 1\n\n if n > 1:\n factors.append(n)\n\n return factors\n\n return max(_prime_factorization(600851475143))", "def divisors(n):\n divs = [1]\n for p, e in factorization(n):\n divs += [x*p**k for k in range(1,e+1) for x in divs]\n return divs", "def factorize(n):\n\n if n in (0, 1):\n return [(n, 1)]\n\n factors = []\n\n if n < 0:\n factors.append((-1, 1))\n n = -n\n\n # check 2, 3, then all integers in form q = 6k +- 1\n for q in chain((2, 3), range(5, isqrt(n) + 1, 6)):\n # q = 6k - 1\n a = 0\n while n % q == 0:\n # q is prime because n already divided by its prime factors\n n //= q\n a += 1\n if a > 0:\n factors.append((q, a))\n\n # 6k + 1\n q += 2\n a = 0\n while n % q == 0:\n # q is prime because n already divided by its prime factors\n n //= q\n a += 1\n if a > 0:\n factors.append((q, a))\n\n if n != 1:\n factors.append((n, 1))\n\n return factors", "def factors(n):\n for x in range(1,n+1):\n if n % x == 0:\n print(x)", "def get_factors():", "def factor(N):\n\n factors = []\n sqrtN = math.sqrt(N)\n for x in range(2, int(sqrtN)+1):\n (d, r) = divmod(N, x)\n if r == 0:\n factors.append(x)\n if x != d: factors.append(d)\n return [1, N] + factors", "def prime_factorization(n):\n # Code taken directly from \"Prime factorization - list\" at\n # http://stackoverflow.com/a/16996439.\n primfac = []\n d = 2\n while d*d <= n:\n while (n % d) == 0:\n primfac.append(d) # 
supposing you want multiple factors repeated\n n //= d\n d += 1\n if n > 1:\n primfac.append(n)\n return Multiset(primfac)", "def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac", "def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key", "def prime_factor(x):\n thelist=get_factors(x)\n newlist=return_primelist(thelist)\n result=newlist[-1]\n return result", "def prime_factor(n):\n while n > 1:\n k = 2 \n while n % k != 0:\n k = k+1\n n = n // k\n print(k)", "def factor_naive(n):\n factors = []\n\n for factor in range(2, n // 2):\n q, r = divmod(n, factor)\n power = 0\n while r == 0:\n power += 1\n n = q\n q, r = divmod(q, factor)\n if power != 0:\n factors.append((factor, power))\n\n if factors == []:\n factors = [(n, 1)]\n\n return factors", "def primefac(n, aprimes = []):\n if not aprimes: aprimes = primes(n)\n ps = list(filter(lambda x : x <= n, aprimes))\n facs = []\n for p in ps:\n nn = n\n d = 0\n while nn % p == 0:\n nn = nn // p\n d += 1\n if d != 0:\n facs.append((p, d))\n return facs", "def factorize(num: int) -> [int, ]:\n # assert isinstance(num, int)\n primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29,\n 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,\n 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,\n 127, 131, 137, 139, 149, 151, 157, 163, 167, 173,\n 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,\n 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,\n 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409,\n 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,\n 467, 479, 487, 491, 499, 503, 509, 521, 523, 541)\n factors = []\n if num == 0:\n return [0, ]\n\n # Generate a list of prime factors:\n for prime in primes:\n if prime > num:\n break\n while num % prime == 0:\n factors.append(prime)\n num = int(round(num / prime))\n if num != 1:\n # TODO: make it find larger primes to avoid this problem.\n raise ArithmeticError(\n f'num is {num}. did not finish prime factorization.')\n return factors", "def factorize(it):\n assert it > 0, \"cannot factorize %s\" % it\n while it % 2 == 0: # special case 2 - allows exclusion of even numbers later\n yield 2\n it //= 2\n factor = 3\n while factor * factor <= it:\n while it % factor == 0:\n yield factor\n it //= factor\n factor += 2\n if it > 1: # remainder is a prime\n yield it", "def factors(n):\n nfactors = 0 # the number of factors of n\n for divisor in range(1, n+1): # divisors: {1,2,3,4...,n}\n if n%divisor == 0: # divides with no remainder\n nfactors += 1 # i.e. 
one new factor found\n return nfactors", "def factor(n: int) -> List[Tuple[int, int]]:\n if n <= 1:\n raise ValueError\n\n factors = list()\n\n ml = 0\n p = 2\n while n % p == 0:\n n //= p\n ml += 1\n if ml > 0:\n factors.append((p, ml,))\n\n p = 3\n while p ** 2 <= n:\n ml = 0\n while n % p == 0:\n n //= p\n ml += 1\n if ml > 0:\n factors.append((p, ml,))\n p += 2\n\n if n > 2:\n factors.append((n, 1,))\n\n return factors", "def prime_factorisation(n):\n prime_numbers = []\n integers = []\n for i in range(n+1):\n if is_prime(i):\n prime_numbers.append(i)\n if n in prime_numbers:\n return f'{n} is prime'\n k = 0\n while k < len(prime_numbers):\n if n % prime_numbers[k] == 0:\n integers.append(prime_numbers[k])\n n //= prime_numbers[k]\n else:\n k += 1\n return integers", "def findPrimeFactors(n: int, exponent: bool = False):\n s = []\n\n # Number of 2s that divide n\n while n % 2 == 0:\n s.append(2)\n n = n // 2\n\n nroot = integer_sqrt(n)\n\n # n must be odd at this point. So we can\n # skip one element (Note i = i +2)\n for i in range(3, nroot, 2):\n\n # While i divides n, print i and divide n\n while n % i == 0:\n s.append(i)\n n = n // i\n\n # This condition is to handle the case\n # when n is a prime number greater than 2\n if n > 2:\n s.append(n)\n\n uniqSorted = sorted(list(set(s)))\n\n if exponent:\n # using set to get unique list\n return dict(zip(uniqSorted, [s.count(e) for e in uniqSorted]))\n\n return uniqSorted", "def factors(self, X):\r\n return (lambda fd: [X] if not fd else fd + self.factors(X // fd[0])) (self.firstdiv(X))", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def factor(checknumber):\n if checknumber < 0:\n checknumber *= -1\n factorlist = [-1]\n else:\n factorlist = [1]\n for x in xrange(2, int(math.sqrt(checknumber))+1):\n if checknumber % x == 0:\n factorlist.append(x)\n templist = factorlist\n for x in templist:\n test = checknumber/x \n if test not in factorlist:\n factorlist.append(test)\n return factorlist", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def setFactors(self, number):\n self.number = number\n length = len(self.primes)\n p = self.primes[:self.closestPrimeIndex(self.primes, self.number**0.5) + 1]\n\n start = clock()\n self.facts = serial_factor(self.number, p)\n print \"Time taken ======================> \", clock() - start\n\n c = 1\n for fact in self.facts:\n c = c * fact\n\n if c != self.number:\n num = self.number / c\n for fact in self.facts:\n while num % fact == 0:\n num = num / fact\n\n if num != 1:\n self.facts.append(num)", "def factors(self):\n self.assert_sampled()\n return self._factors", "def factorize(self,num):\n def sieveOfEratosthenes(N, s): \n prime = [False] * (N+1) \n for i in range(2, N+1, 2): \n s[i] = 2\n for i in range(3, N+1, 2): \n if (prime[i] == False): \n s[i] = i \n for j in range(i, int(N / i) + 1, 2): \n if (prime[i*j] == False): \n prime[i*j] = True\n s[i * j] = i \n\n\n def generatePrimeFactors(N): \n ans=[]\n s = [0] * (N+1) \n sieveOfEratosthenes(N, s) \n curr = s[N] \n cnt = 1\n while (N > 1): \n N //= s[N]\n if (curr == s[N]): \n cnt += 1\n continue\n\n 
ans.append((str(curr),str(cnt))) \n\n curr = s[N] \n cnt = 1\n return ans\n \n return generatePrimeFactors(num)" ]
[ "0.8195275", "0.8172038", "0.78768766", "0.78615814", "0.78161514", "0.7815678", "0.78074765", "0.7803163", "0.7796986", "0.7784412", "0.77701026", "0.7747795", "0.77219456", "0.7687131", "0.766128", "0.76611435", "0.76522994", "0.7594097", "0.75423145", "0.7516391", "0.7508749", "0.7505096", "0.7502163", "0.747645", "0.7472635", "0.7472505", "0.74684477", "0.74524325", "0.7440694", "0.74284333", "0.74233997", "0.7404005", "0.7402889", "0.7401704", "0.73907995", "0.7384289", "0.7375334", "0.73610914", "0.73490167", "0.7335199", "0.7333431", "0.73173136", "0.72922754", "0.7279312", "0.72311455", "0.7212408", "0.7203941", "0.7196483", "0.71939635", "0.7172848", "0.7149165", "0.7149036", "0.7116035", "0.70840055", "0.7070148", "0.70497304", "0.704367", "0.7038368", "0.70367974", "0.70231026", "0.7019566", "0.70059687", "0.7003308", "0.6973457", "0.6948054", "0.6945844", "0.69449306", "0.6936307", "0.69291025", "0.6916203", "0.6896528", "0.6894769", "0.68818223", "0.68631077", "0.68499154", "0.6812173", "0.6791088", "0.6784915", "0.6783941", "0.67557853", "0.6735627", "0.67328495", "0.6702939", "0.6697444", "0.6696848", "0.6690844", "0.6690312", "0.66882914", "0.6687484", "0.6671743", "0.66714877", "0.6666527", "0.6649537", "0.6642531", "0.66327256", "0.66309273", "0.66309273", "0.66053885", "0.65952843", "0.65886223" ]
0.85676354
0
Load the image located at the specified path
def _setup_image(self, image_path): if not os.access(image_path, os.R_OK): rospy.logerr("Cannot read file at '{0}'".format(image_path)) return None img = cv2.imread(image_path) # Return msg return cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(path) -> Image:\n return Image.open(path)", "def load_image(self, path):\n if path:\n self.original_image = cv2.imread(path, 1)\n self.prepare_images()", "def load_image(file_path):\r\n return Image.open(file_path)", "def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)", "def load(path):\n img = plt.imread(path)\n dimensions = f\"{img.shape[0]} x {img.shape[1]}\"\n print(f\"Loaded image at {path} of dimensions {dimensions}\")\n return img", "def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_file():\n img = image.imread(path)\n print(f\"Loading image of dimensions {img.shape[0]} x \"\n f\"{img.shape[1]}\")\n return np.array(img)\n raise FileNotFoundError", "def load_img(path):\n img = cv2.imread(path)\n return img", "def load(cls, path):\n assert os.path.exists(path), \"No such file: %r\" % path\n\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n image = Image(None)\n image._path = path\n image._format = Image.image_format(extension)\n\n return image", "def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")", "def loadImage(self, path: str) -> ndarray:\n try:\n self.img = np.asarray(Image.open(path))\n\n except FileNotFoundError:\n\n print(\"NO such File {}\".format(path))\n return None\n return self.img", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def load_image(image_path):\n image = io.imread(image_path)\n io.imshow(image)\n io.show()\n print(\"Size of the image is {} KB\".format(round(os.path.getsize(image_path)/1024,2)))\n return image", "def __load(self, node, path):\n\n self.firstgid = node['firstgid']\n self.margin = node['margin']\n self.spacing = node['spacing']\n\n # convierte la ruta de la imagen en una ruta relativa al proyecto\n directory = os.path.dirname(path)\n self.image_path = os.path.join(directory, *node['image'].split(r'\\/'))\n self.image_path = os.path.normpath(self.image_path)", "def read_image(path):\n img = misc.imread(path)\n return img", "def _load_image(file: str) -> pyglet.image.AbstractImage:\n\n return pyglet.image.load(Config.RES_DIR + \"img\" + Config.FILE_SEPARATOR + file)", "def _load_image(path):\r\n image = Image.open(path)\r\n size = image.size\r\n \r\n image = image.resize((550,550), Image.ANTIALIAS)\r\n# image = image.thumbnail((200,200), Image.ANTIALIAS)\r\n return image", "def pil_loader(path):\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n sqrWidth = np.ceil(np.sqrt(img.size[0]*img.size[1])).astype(int)\n return img.convert('L').resize((sqrWidth, sqrWidth))", "def load_image(self, path):\n\n image = cv2.imread(path) / 255\n h, w, _ = image.shape\n image = cv2.resize(image, (self.input_size, self.input_size))\n nh, nw, _ = image.shape\n return image, (nh/h, nw/w)", "def _load_img(self, img_path):\n img = 
Image.open(img_path).convert('RGB')\n\n if self.use_landmarks:\n landmarks = np.array(self.landmarks[img_path[img_path.rfind('/')+1:]]).reshape(-1)\n img = FivePointsAligner.align(np.array(img), landmarks, show=False)\n img = Image.fromarray(img)\n\n if self.transform is None:\n return img\n\n return self.transform(img)", "def imread(path):\n img = cv2.imread(path)\n return img", "def load_image(self, **kwargs):\n ...", "def read_image(path: str):\n return Image.open(path, mode=\"r\")", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def test_load_jpg():\n parameters = {'path': 'green-dot.jpg'}\n\n images.load(parameters)", "def load(image_path):\n\tpil_image = Image.open(image_path).convert(\"RGB\")\n\t# convert to BGR format\n\timage = np.array(pil_image)[:, :, [2, 1, 0]]\n\treturn image", "def load(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n return Costume(name, Image.load(path))", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def load_img(path: str) -> np.ndarray:\n \n return np.array(Image.open(path))", "def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None", "def read_img(path):\r\n if os.path.isfile(path):\r\n return cv2.imread(path)\r\n else:\r\n raise ValueError('hiiiiiiiiii')", "def load_image(image_path):\n # Case insenstive check of the image type.\n img_lower = image_path.lower()\n if (\n img_lower.endswith(\n \".jpg\",\n -4,\n )\n or img_lower.endswith(\n \".png\",\n -4,\n )\n or img_lower.endswith(\n \".jpeg\",\n -5,\n )\n ):\n try:\n image_data = cv2.imread(image_path)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n config_utils.logger.info(\"img shape: '{}'.\".format(image_data.shape))\n except Exception as e:\n config_utils.logger.error(\n \"Unable to read the image at: {}. Error: {}\".format(image_path, e)\n )\n exit(1)\n elif img_lower.endswith(\n \".npy\",\n -4,\n ):\n image_data = load(image_path)\n else:\n config_utils.logger.error(\"Images of format jpg,jpeg,png and npy are only supported.\")\n exit(1)\n return image_data", "def load_image(path: str):\n if path.endswith('.npy'):\n return np.load(path)\n if path.endswith(('.nii', '.nii.gz', '.hdr', '.img')):\n import nibabel as nib\n return nib.load(path).get_data()\n if path.endswith('.tif'):\n from PIL import Image\n with Image.open(path) as image:\n return np.asarray(image)\n\n raise ValueError(f\"Couldn't read image from path: {path}.\\n\"\n \"Unknown file extension.\")", "def load_image(data_dir, image_file):\n image_path = os.path.join(data_dir, image_file)\n image = mpimg.imread(image_path)\n return image", "def get_image_by_path(image_path, target_size=None):\n img = image.load_img(image_path, target_size=target_size)\n return img", "def load_image(self, image_path):\n # Load image\n image = cv2.imread(image_path)\n #TODO 如果是灰度图先转为RGB的\n # If grayscale. 
Convert to RGB for consistency.\n # if image.ndim != 3:\n # image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n pass", "def hload_pil(filepath):\n img = Image.open(filepath)\n return img", "def load_image(img_file_name):\n file_name = os.path.join('.', 'images', img_file_name)\n img = pygame.image.load(file_name)\n img.convert()\n return img", "def get_image(path):\n\n # Check if the picture exists or not.\n if not os.path.isfile(path):\n print('Cannot open the image. Please try again!')\n exit(1)\n\n try:\n # Open the image.\n image = Image.open(path)\n\n # If everything is okay return it.\n return image\n # If an error occurred.\n except Exception as err:\n print('Error occurred while trying to open the image:', err, 'Please try again!')\n exit(1)", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def LoadImage(self, filename, mode):\n print(\"TODO: CHECK FOR >PNG?\")\n path = \"static/CVImages/\" + filename\n print(\" path \" + path)\n img = cv2.imread(path, mode) # 0 for black, 1 for rgb\n return img", "def set_image(self, path):\r\n \r\n image = self._load_image(path)\r\n self.image_raw = image\r\n self.image = ImageTk.PhotoImage(image)\r\n self.image_panel.configure(image=self.image)", "def load_single_image(path: str) -> np.uint8:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist image {path}\")\n return None\n if path.endswith(\".npy\"):\n img = np.load(path)\n elif path.endswith(\".png\") or path.endswith(\".jpeg\") or path.endswith(\".jpg\"):\n img = plt.imread(path)\n if img.dtype != \"uint8\":\n img = (255 * img).astype(np.uint8)\n return img", "def load_image(self, image_id):\n # Load image\n path = self.image_info[image_id]['path']\n if path.endswith(\".png\" or \".jpg\"):\n image = skimage.io.imread(path)\n elif path.endswith(\".dcm\"):\n ds = pydicom.read_file(path)\n image = ds.pixel_array\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def load_image():\n return cv2.imread('test.png')\n pass", "def load_image(self, path, convert_alpha=False):\n if convert_alpha:\n return load(self.get_path(path)).convert_alpha()\n return load(self.get_path(path)).convert()", "def load(path):\n pass", "def load(self, path):\n pass", "def load(self, path):\n pass", "def loadImage(self, imagePath, customScaleFactor=None):\n\t\tif customScaleFactor: scaleFactor = customScaleFactor\n\t\telse: scaleFactor = self.IMAGESCALEUP\n\n\t\timg = pygame.image.load(imagePath)\n\t\timg = pygame.transform.scale(img, (img.get_width() * scaleFactor, img.get_height() * scaleFactor))\n\t\timg.convert_alpha()\n\t\treturn img", "def load_from_file(self, filename):\n\n loader = ImageLoader()\n loader.load(self, filename)", "def load_image(self, path, max_size=400, shape=None):\n\n if 'http' in path:\n response = requests.get(path)\n image = Image.open(BytesIO(response.content)).convert('RGB')\n else:\n image = Image.open(path).convert('RGB')\n \"\"\" Check image size \"\"\"\n if max(image.size) > max_size:\n size = max_size\n else:\n size = image.size\n\n if shape is not None:\n size = shape\n \"\"\" Transform image \"\"\"\n input_transform = transforms.Compose([transforms.Resize(size),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n self = input_transform(image)[:3, :, :].unsqueeze(0)\n return self", "def load(self, path, shape=(1024, 1024, 35), dtype='uint16'):\n valid_dtypes = ['uint8', 'uint16']\n if dtype not in valid_dtypes:\n raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes))\n\n im = io.imread(path)\n im = numpy.rollaxis(im, 0, 3)\n\n if im.shape != shape and shape is not None:\n factors = tuple(map(lambda z: int(z[0] / z[1]), zip(im.shape, shape)))\n if any([f > 1 for f in factors]):\n # im = resize(im, shape, mode='constant')\n im = downscale_local_mean(im, factors=factors).astype(im.dtype)\n # if 'conf' in path.lower():\n else:\n warnings.warn('Target shape is not a multiple below initial shape')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n if dtype == 'uint8' and im.dtype != numpy.uint8:\n im = img_as_ubyte(im)\n if dtype == 'uint16' and im.dtype != numpy.uint16:\n im = img_as_uint(im)\n\n self.image_raw = im\n self.name = path", "def loadImage(img_path):\n\n img = Image.open(img_path)\n np_img = np.array(img)\n return (np_img)", "def load_image(self, path, target_size=None):\n img = self.pil_image.open(path)\n if img.mode != 'RGB':\n img = img.convert('RGB')\n if target_size is not None:\n width_height_tuple = (target_size[1], target_size[0])\n if img.size != width_height_tuple:\n img = img.resize(width_height_tuple, self.pil_interpolation)\n return img", "def load(f, as_grey=False):\n use_plugin('pil')\n return imread(os.path.join(assets, f), as_grey=as_grey)", "def load_image(self, img_name):\n img_data = cv2.imread(img_name, 0)\n return img_data", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def get_image(image_path):\r\n\r\n return Image.open(image_path)", "def imread(img_path):\n if not os.path.exists(img_path):\n 
raise ImageNotFoundError(f\"Image {img_path} could'nt be located\")\n\n img = cv2.imread(img_path)\n\n if img is None:\n raise InvalidImageError(f\"Image {img_path} could'nt be loaded\")\n\n return img", "def load_image(cls, fullname):\n\t\ttry:\n\t\t\timage_stream = open(fullname, 'rb')\n\t\t\timage = pyglet.image.load(fullname, file=image_stream)\n\t\texcept IOError, message:\n\t\t\tprint 'Cannot load image:', fullname\n\t\t\traise ImageLoadFileIOError, message\n\t\treturn image", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image", "def _load_image(path, filename, bits, mode):\n if filename.rsplit('.')[1].lower() == 'dcm':\n ds = pydicom.dcmread(os.path.join(path, filename))\n m = ('I;16' if bits == 16 else 'L') if mode == 'L' else 'RGB'\n image = Image.frombuffer(\n m, (ds.Columns, ds.Rows), ds.PixelData, 'raw', m, 0, 1)\n else:\n image = Image.open(os.path.join(path, filename)).convert(mode)\n return image", "def load_img(fname):\n img = cv2.imread(fname)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def loadImage(j, im, opts={}):\n displayMessage(j, \"j.Load(%s, ...)\" % im)\n j.Load(im, opts)\n waitStatus(j)", "def pil_loader(path, color=True):\n imgExt = os.path.splitext(path)[1]\n if imgExt == \".npy\":\n img = np.load(path)[0]\n return np.swapaxes(np.swapaxes(img, 0, 2), 0, 1)\n\n # open path as file to avoid ResourceWarning\n # (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n if color:\n return img.convert('RGB')\n else:\n return img.convert('L')", "def load_image(file):\n\n\tfile = os.path.join(DIR_MENU_PICTURES, file)\n\ttry:\n\t\tsurface = pygame.image.load(file)\n\texcept pygame.error:\n\t\terror = \"Could not load image \\\"%s\\\" %s\"%(file, pygame.get_error())\n\t\traise SystemExit(error)\n\treturn surface.convert()", "def _load(f, as_gray=False):\n # importing io is quite slow since it scans all the backends\n # we lazy import it here\n from skimage.io import imread\n return imread(os.path.join(data_dir, f), plugin='pil', as_gray=as_gray)", "def load(self, path: str):\n pass", "def load_img(path, imsize, device):\n transform = transforms.Compose([\n transforms.Resize(imsize), # resize image\n transforms.ToTensor() # PIL image to Tensor\n ])\n img = Image.open(path)\n # fake batch dimension required to fit network's input dimensions\n img = transform(img).unsqueeze(0)\n return img.to(device, torch.float)", "def image(self, path):\n im = Image.open(path).convert(\"RGB\")\n # Convert the RGB image in printable image\n self._convert_and_print_image(im)", "def make_image(self, path):\n\t\treturn None", "def load(filepath):\n canvas = Canvas(100, 100)\n canvas.img = PIL.Image.open(filepath)\n if not canvas.img.mode in (\"RGB\",\"RGBA\"):\n canvas.img = canvas.img.convert(\"RGBA\")\n canvas.drawer = aggdraw.Draw(canvas.img)\n canvas.pixel_space()\n return canvas", "def read_image(image_path):\n if not os.path.exists(image_path):\n raise IOError('File does not exist: %s' % image_path)\n else:\n return Image.open(image_path)", "def load_image(self, image_id):\n info = self.image_info[image_id]\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n # image = image * 
bg_color.astype(np.uint8)\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n width, height = info['width'], info['height']\n\n if info['real']:\n # load image from disk\n impath = os.path.join(self.real_image_dirpath, info['real_image_path'])\n image = cv2.imread(impath,1)\n image = cv2.resize(image, (width, height), cv2.INTER_CUBIC)\n else:\n # synthesize image\n background_path = info['background_image_path']\n card_template_path = info['card_template_path']\n cornerpoints = info['cornerpoints']\n image = self.synthesize_image(card_template_path, background_path, cornerpoints, (width, height))\n return image", "def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')", "def load_single_image(image_path, dim=100):\n if not isinstance(image_path, str):\n img = Image.open(image_path)\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = preprocess_data(img, dim)\n else:\n img = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = preprocess_data(img, dim)\n\n img = np.array([img])\n\n return img", "def load_image(image_path):\n img_transforms = get_standard_img_transforms()\n image = Image.open(image_path)\n images = img_transforms(image).unsqueeze(0)\n return images", "def _load_img(self, name):\n try:\n img_path = os.path.join(global_var.PATH, \"maps\", name + \".png\")\n env_img = pygame.image.load(img_path)\n except Exception as e:\n print(e)\n print(\"Environment\", name, \"does not exist. Make sure that a PNG image exists\",\n \"under that name in the \\\"maps\\\" folder.\")\n sys.exit()\n\n return env_img", "def test_load_fail():\n parameters = {'path': 'foo.bar'}\n\n images.load(parameters)", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def load_image(fname):\n return load_tiff(fname)", "def load_image(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n # height = labelmeJson['imageHeight']\n # width = labelmeJson['imageWidth']\n # shape_list = labelmeJson['shapes']\n image = self.img_b64_to_arr(labelmeJson['imageData'])\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([labelmeJson['height'], labelmeJson['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n #\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n return image", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def load_image_file(filename, mode='RGB'):\n return imread(filename, mode=mode)", "def loadImage(image_path, dir):\n # Load in memory using cv2\n image_rgb = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)\n image_grayscale = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)\n image_shape = image_rgb.shape\n\n # Write to memmaps\n _, rgb_name = tempfile.mkstemp(suffix=\".rgb_source\", dir=dir)\n rgb_memmap = np.memmap(\n rgb_name,\n mode=\"w+\",\n shape=image_shape,\n dtype=image_rgb.dtype,\n )\n rgb_memmap[:] = image_rgb\n\n _, grayscale_name = tempfile.mkstemp(suffix=\".grayscale_source\", dir=dir)\n grayscale_memmap = np.memmap(\n grayscale_name,\n mode=\"w+\",\n shape=(image_shape[0], image_shape[1]),\n dtype=image_grayscale.dtype,\n )\n grayscale_memmap[:] = image_grayscale\n\n print(\"Loaded image: \" + image_path)\n\n # Return info\n return [image_path, image_shape, rgb_name, grayscale_name]", "def import_image(self, file: str) -> Any:\n pass", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def LoadPicture(filename):\n return Bitmap(filename)", "def read_im(im_path):\n im = cv2.imread(im_path)\n return im", "def load_image(path, height, width, mode='RGB'):\n image = PIL.Image.open(path)\n image = image.convert(mode)\n image = np.array(image)\n # squash\n image = scipy.misc.imresize(image, (height, width), 'bilinear')\n return image", "def image_load(path) -> numpy.ndarray:\n # file\n na = numpy.array(Image.open(path))\n # fix shape\n na = numpy.moveaxis(na, [2,0,1], [0,1,2])\n # shape is now (3,h,w), add 1\n na = na.reshape(1,3,na.shape[1],na.shape[2])\n # change type\n na = na.astype(\"float32\") / 255.0\n return na", "def _read_image(self, image_path:str, label:str):\n # Get the full path to the image\n image = \"\"\n if label == \"real\":\n image = os.path.join(self.root, \"real\", image_path)\n else:\n image = os.path.join(self.root, \"fake\", image_path)\n \n # Read the image\n image = cv2.imread(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Normalize the image\n image = image / 255.0\n\n # Convert the image to floating point to use it as\n # an input to the PyTorch model\n image = image.astype(np.float32)\n\n return image", "def LoadFile(*args, **kwargs):\n return _gdi_.Bitmap_LoadFile(*args, **kwargs)", "def load(cls, path, name, **kwargs):\n path = Path(path)\n assert path.exists() and path.is_dir(), f\"Load location {path} doesnt exist.\"\n\n pickle_path = path / 
(name + \".pkl\")\n image_path = path / (name + \"_image.npy\")\n depths_path = path / (name + \"_depths.npy\")\n\n if pickle_path.is_file():\n with open(pickle_path, 'rb') as pickle_file:\n return dill.load(pickle_file)\n\n assert image_path.is_file(), \"_image.npy file must exist if pickle doesnt.\"\n img = np.load(image_path)\n\n if depths_path.is_file():\n kwargs[\"depths\"] = np.load(depths_path)\n else:\n assert (\n \"top\" in kwargs.keys() and \"base\" in kwargs.keys()\n ), \"Depth info needed.\"\n\n return cls(img, **kwargs)", "def load_image(self):\n # Minimal progress display while image is loaded.\n group = displayio.Group()\n group.append(centered_label('LOADING...', 40, 3))\n #self.rect = Rect(-board.DISPLAY.width, 120,\n # board.DISPLAY.width, 40, fill=0x00B000)\n #group.append(self.rect)\n board.DISPLAY.show(group)\n\n # pylint: disable=eval-used\n # (It's cool, is a 'trusted string' in the code)\n duration = eval(TIMES[self.time]) # Playback time in seconds\n # The 0.9 here is an empirical guesstimate; playback is ever-so-\n # slightly slower than benchmark speed due to button testing.\n rows = int(duration * self.rows_per_second * 0.9 + 0.5)\n # Remap brightness from 0.0-1.0 to brightness_range.\n brightness = (self.brightness_range[0] + self.brightness *\n (self.brightness_range[1] - self.brightness_range[0]))\n try:\n self.num_rows = self.bmp2led.process(self.path + '/' +\n self.images[self.image_num],\n self.tempfile,\n rows, brightness,\n self.loop,\n self.load_progress)\n except (MemoryError, BMPError):\n group = displayio.Group()\n group.append(centered_label('TOO BIG', 40, 3))\n board.DISPLAY.show(group)\n sleep(4)\n\n board.DISPLAY.show(displayio.Group()) # Clear display\n self.clear_strip() # LEDs off", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def image(fname):\n return cv2.imread(fname)" ]
[ "0.8395559", "0.8235641", "0.80957776", "0.7978053", "0.79137325", "0.7833698", "0.76558286", "0.75878996", "0.7496194", "0.74551564", "0.7341567", "0.7297161", "0.7210053", "0.71392775", "0.7122944", "0.7110663", "0.70598626", "0.703482", "0.69894713", "0.6988201", "0.6976447", "0.69731987", "0.6969585", "0.6968694", "0.6960299", "0.6952217", "0.6940567", "0.6939011", "0.693592", "0.693592", "0.6913731", "0.6909449", "0.69081146", "0.6864422", "0.68374926", "0.6812509", "0.68110216", "0.67873013", "0.67180043", "0.6711848", "0.66799873", "0.6660067", "0.6659732", "0.6647545", "0.6639848", "0.6636122", "0.6624144", "0.66102093", "0.6607273", "0.6592917", "0.6592917", "0.65852684", "0.6581885", "0.6567381", "0.6566753", "0.6566211", "0.6559135", "0.655122", "0.6545694", "0.6522463", "0.6520275", "0.65096676", "0.64977086", "0.6497181", "0.6496024", "0.6478784", "0.64781123", "0.6468189", "0.6455059", "0.6453834", "0.64412594", "0.6441034", "0.6432075", "0.6429512", "0.642654", "0.6418928", "0.64092416", "0.6401459", "0.6399082", "0.63987654", "0.6389774", "0.6386879", "0.63815117", "0.63814676", "0.6380408", "0.6378863", "0.6377274", "0.6357729", "0.635712", "0.63193595", "0.6302778", "0.6301665", "0.6286893", "0.62860566", "0.6273581", "0.6273087", "0.62661356", "0.62455124", "0.6244512", "0.6243545", "0.62415177" ]
0.0
-1
Displays image(s) to robot's head
def display_image(self, image_path, display_in_loop=False, display_rate=1.0): rospy.logdebug("Display images in loop:'{0}', frequency: '{1}'".format(display_in_loop, display_rate)) image_msg = [] image_list = image_path if isinstance(image_path, list) else [image_path] for one_path in image_list: cv_img = self._setup_image(one_path) if cv_img: image_msg.append(cv_img) if not image_msg: rospy.logerr("Image message list is empty") else: r = rospy.Rate(display_rate) while not rospy.is_shutdown(): for one_msg in image_msg: self._image_pub.publish(one_msg) r.sleep() if not display_in_loop: break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self):\n display(self.image)", "def display_image(self, window_title: str = 'Drone Camera'):\n cv2.imshow(window_title, self.output)\n cv2.waitKey(1)", "def display(self, image):\n raise NotImplementedError()", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n label[\"image\"] = label.img = img\n window.mainloop()", "def take_picture(self):\n self.drone.take_picture()", "def take_picture(self):\n self.drone.take_picture()", "def show_shot(path_to_images, name_image):\n crrt_image = misc.imread(\"./{}/{}\".format(path_to_images, name_image))\n\n plt.imshow(crrt_image)\n\n plt.draw()\n plt.pause(0.5)", "def _render_static_image_annotation(self):\n cv2.rectangle(self._image,\n (0,0), (640, 40),\n (0, 0, 0),\n -1)\n \n cv2.putText(self._image,self._current_mode, (40, 25),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255, 2)\n\n cv2.putText(self._image, time.asctime(), (400, 460),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, 255, 2)", "def show(self):\n\n self.image.show()", "def paintHead(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"head\", self.avatarConfiguration[\"skin\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"head\")", "def _render_image(self):\n\n self._render_static_image_annotation()\n\n if self._goal_robot_pose is not None:\n # Render the goal pose as the robot is driving to target...\n self._goal_robot_pose.header.stamp = self._image_time # AHHHHH THIS IS NOT \n self._tf_listener.waitForTransform('/map',\n self._image_info.tf_frame, \n self._image_time,\n rospy.Duration(4))\n\n self._goal_robot_pose.pose.position.z = 1.5 # force goal point to be 1.5m\n pose = self._tf_listener.transformPose(self._image_info.tf_frame,\n self._goal_robot_pose)\n u, v = self._image_info.project3dToPixel((pose.pose.position.x,\n pose.pose.position.y,\n pose.pose.position.z))\n self._goal_robot_pose.pose.position.z=1.45 # force goal point to be 1.5m\n pose = self._tf_listener.transformPose(self._image_info.tf_frame,\n self._goal_robot_pose)\n u2, v2 = self._image_info.project3dToPixel((pose.pose.position.x,\n pose.pose.position.y,\n pose.pose.position.z))\n radius = int(math.sqrt((u2-u)**2 + (v2-v)**2))\n if radius < 100:\n cv2.putText(self._image, \"Goal Location\", (int(u+radius+1), int(v+radius+1)),\n cv2.FONT_HERSHEY_SIMPLEX, radius/10.0, 255, radius/200 * 3)\n cv2.circle(self._image, (int(u),int(v)), radius, (0,0,255,127),-1)\n\n\n\n if self._point_clouds is not None:\n # Render the bouding boxes of objects...\n # Project each response cluster into image\n box_locations = []\n print\n for i, (cloud, label) in enumerate(zip(self._point_clouds, self._labels)):\n print \"Object \",i,\"/\",len(self._point_clouds)\n location = self._project_pointcloud(cloud)\n print location\n box_locations.append(location)\n cv2.rectangle(self._image,\n location[0],location[1],\n (255, 0, 0),\n 3)\n cv2.putText(self._image, label,\n (location[0][0], location[0][1]-10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255, 2)", "def show_image(self):\n cv2.imshow(self.config.DISPLAY_NAME, self.image)", "def body(self, parent):\n img = Label(parent, image = self._photo, text=\"Unable to display image\")\n img.pack()", "def setHRLogo(self,**kwargs):\n self.baxter.display.setImage(self.baxter.datapath + \"logo1024.jpg\")", "def printImage(imageObject):\n # TODO\n pass", "def display_image(img, label):\n cv2.imshow(label,img)\n cv2.waitKey(0)\n 
cv2.destroyAllWindows()", "def showImage(self, image):\n \n self.image = img", "def show_img(self):\n if self.image is not None:\n cv2.imshow(self.image_window, self.image)\n cv2.waitKey(1)\n else:\n rospy.loginfo(\"No image to show yet\")", "def handle_gui_example_two_intent(self, message):\n self.gui.show_image(\"https://source.unsplash.com/1920x1080/?+random\")", "def show_poster(self):\n\t\twebbrowser.open(self.poster_image_url)", "def display(self):\n image_qt = ImageQt.ImageQt(self.view_state.get_image())\n self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image_qt))\n self.imageLabel.adjustSize()", "def show_image(file_location):\n img = Image.open(file_location)\n img.show()", "def logo(self):\n self.def_logo(0x21)\n self.send(\"\\x21\\x22\\x08\\x08\\x0a\\x23\\x24\")\n self.reset_codepage()", "def send_image(self, path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n # removed by alice\n #rospy.sleep(1)", "def draw_how_to(self):\n howto = pygame.image.load(res.IMG_HOW_TO)\n self.screen.blit(howto, (0, 0))", "def load_homepage() -> None:\n st.image(\"iwakka.png\",\n use_column_width=True)\n \n st.header(\"Hello! This dashboard will help you to analize data from iWakka device\")\n st.write(\"Here are some step to process data: \")\n st.header(\" II. Download data\")\n st.write(\"Here you can download data\")\n \n st.header(\" III. Statistic Data\")\n st.write(\"You can judge patient condition accroding to provided data\") \n \n st.header(\"IV. AGF Indices\")\n st.write(\"Here you can analyse each chart\") \n \n st.header(\" V. Notes\")\n st.write(\"It can be useful for you to collect notes concerning your patient\") \n\n st.header(\" VI. 
Rank of patient\")\n st.write(\"You can compare results for selected patients\" )", "def show_image(self, image_set='train', index=None, interactive_mode=True):\n if interactive_mode:\n plt.ion()\n else:\n plt.ioff()\n\n if image_set == 'train':\n target = self.train_dataset\n else:\n target = self.test_dataset\n\n if index is None:\n index = randint(0, len(target['data']))\n\n plt.figure(num=self.LABELS[target['labels'][index]])\n plt.imshow(target['data'][index])\n plt.show()", "def show(self, screen):\n x_display = self.xy_position[0] * constants.CELL_SIZE\n y_display = self.xy_position[1] * constants.CELL_SIZE\n screen.blit(self.image, (x_display, y_display))", "def show(self, image_dir_root=None):\n self.get_image(image_dir_root=image_dir_root).show()", "def display_image(self, window_name, image):\n cv2.namedWindow(window_name)\n cv2.imshow(window_name, image)\n cv2.waitKey(0)", "def render(self, window):\n body = pygame.image.load(IMAGE_SNAKE).convert_alpha() # loading image\n for block in self.body:\n window.blit(body, (block[0]*SPRITE_SIZE, block[1]*SPRITE_SIZE)) # painting a beautiful snek\n if self.neural_net: # calls for neural net rendering\n self.neural_net.render(window, self.vision)", "def image_capture_demo():\n return render_template('image_capture_demo.html')", "def show_img(graphs = False):\n while True:\n screen = (yield)\n window_title = \"logs\" if graphs else \"game_play\"\n cv2.namedWindow(window_title, cv2.WINDOW_NORMAL) \n imS = cv2.resize(screen, (800, 400)) \n cv2.imshow(window_title, screen)\n if (cv2.waitKey(1) & 0xFF == ord('q')):\n cv2.destroyAllWindows()\n break", "def main():\n images = Images()\n #print images.create_image_urls()\n print images.get_image_random()\n print images.get_image(12)", "def show(self):\n if self.video:\n self.video.write(self.img)\n cv2.imshow('Simpy', self.img)\n cv2.waitKey(1000 // self.fps)", "def send_image(path):\n img = cv.LoadImage(path)\n msg = cv_bridge.CvBridge().cv_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def img(self, name, img_, **kwargs):\n self.vis.images(img_.cpu().numpy(),\n win=name,\n opts=dict(title=name),\n **kwargs\n )", "async def _misc_IMGplumbob(self, ctx):\r\n await self.bot.say('{}, http://i.imgur.com/q8xJsJQ.gif'.format(ctx.message.author.mention))", "def make_image():\n click.echo(\"make_image\")", "def show_me():\n # Scumbag thumbnail code\n try:\n from PIL import Image\n except ImportError:\n pass\n else:\n filename = os.path.join(app.static_folder, 'img', 'badumtss.png')\n image = Image.open(filename)\n\n return render_template('show_me.html')", "async def inspire(self, ctx):\n async with aiohttp.ClientSession() as session:\n async with session.get('http://inspirobot.me/api?generate=true') as response:\n if(response.status == 200):\n imgurl = await response.text()\n embed = discord.Embed(colour=discord.Colour.dark_blue())\n embed.set_image(url=imgurl)\n embed.set_footer(text='http://inspirobot.me/')\n await ctx.bot.send_message(ctx.message.channel, embed=embed)", "def displayImage(winName, img):\n cv.imshow(winName, img)\n cv.waitKey(0)", "def show(self, name='Detections'):\n cv2.imshow(name, self.get_image())\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def display_image(self, img, img_pos):\n image = tk.Label(self.top, image=img)\n image.grid(row=img_pos[0], column=img_pos[1],\n columnspan=img_pos[2], rowspan=img_pos[3])", "def show_image(dataset, domain, 
image_class, image_name):\n\timage_file = io.imread(os.path.join(\"data\", dataset, domain, \"images\", image_class, image_name))\n\tplt.imshow(image_file)\n\tplt.pause(0.001)\n\tplt.figure()", "def _image(self):\n print(\"imaging\")\n self.images.append(self.device_control.image())\n yield", "def display(image, name=\"Image\"):\n cv2.imshow(name, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite(\"{}.png\".format(name), image)", "def display(self):\n\t\tself.imgDisplay.set_from_pixbuf(self.getVisible())\n\t\tgc.collect()", "def blit_me(self):\n self.start_button.blit_me()\n self.title.blit_me()\n self.screen.blit(self.unicorn_img, self.rect)", "def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def display_win_view(screen):\n\n pygame.time.Clock().tick(30)\n screen.blit(pygame.image.load('resources//pictures//winb.jpg').convert(), (0, 0))\n pygame.display.flip()", "def draw(self, screen):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i]\n screen.blit(self.images[i], (x, y))\n self.draw_text(screen, \"Moves : \" + str(self.nb_move), 40, 500, 10, 255, 255, 255, False)", "def display_img(title,img):\r\n cv2.namedWindow('img', cv2.WINDOW_NORMAL)\r\n cv2.setWindowTitle('img',title)\r\n cv2.resizeWindow('img',600,400)\r\n\r\n #Display Image on screen\r\n cv2.imshow('img',img)\r\n\r\n #Mantain output until user presses a key\r\n cv2.waitKey(0)\r\n\r\n #Destroy windows when user presses a key\r\n cv2.destroyAllWindows()", "def show(title: str, imagePath: str):\n image = cv2.imread (imagePath)\n cv2.imshow (title, image)", "async def image(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(f\"List of images: {str(self.image_array)}\")", "def printImage(currentImage):\n\tprint currentImage + ' is set to be printed...'", "def showimage(image):\n mplt.figure()\n mplt.imshow(image)\n mplt.show()", "def show_image(window_title = ''):\n while True:\n screen = (yield)\n window_title = window_title\n cv2.namedWindow(window_title, cv2.WINDOW_NORMAL)\n \n # stack each of the provided greyscale images horizontally.\n img = None\n for s in range(screen.shape[2]):\n if img is None:\n img = screen[:, :, s]\n else:\n img = np.hstack((img, screen[:, :, s]))\n \n cv2.imshow(window_title, img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def display_image(window_name, img):\n cv2.imshow(window_name, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "async def inspire(self, ctx):\n async with aiohttp.ClientSession() as session:\n async with session.get('http://inspirobot.me/api?generate=true') as response:\n if(response.status == 200):\n imgurl = await response.text()\n embed = discord.Embed(colour=discord.Colour.dark_blue())\n embed.set_image(url=imgurl)\n embed.set_footer(text='http://inspirobot.me/')\n await 
ctx.send(embed=embed)", "def drawLogo(self):\n print(\"__________ __________ \")\n print(\"\\______ \\_____ _______ ____\\______ \\ ____ ____ ____ ______\")\n print(\" | | _/\\__ \\\\_ __ \\_/ __ \\| | _// _ \\ / \\_/ __ \\ / ___/\")\n print(\" | | \\ / __ \\| | \\/\\ ___/| | ( <_> ) | \\ ___/ \\___ \\ \")\n print(\" |______ /(____ /__| \\___ >______ /\\____/|___| /\\___ >____ >\")\n print(\" \\/ \\/ \\/ \\/ \\/ \\/ \\/ \")", "def show_images(plate_full_name, well):\n if not IPYTHON:\n return\n\n src_dir = op.join(cp_config[\"Paths\"][\"SrcPath\"], plate_full_name)\n ctrl_images = load_control_images(src_dir)\n image_dir = op.join(src_dir, \"images\")\n templ_dict = {}\n for ch in range(1, 6):\n im = load_image(image_dir, well, ch)\n templ_dict[\"Img_{}_Cpd\".format(ch)] = img_tag(\n im, options='style=\"width: 250px;\"')\n templ_dict[\"Img_{}_Ctrl\".format(ch)] = ctrl_images[ch]\n tbody_templ = Template(cprt.IMAGES_TABLE)\n table = cprt.TABLE_INTRO + \\\n tbody_templ.substitute(templ_dict) + cprt.HTML_EXTRO\n return HTML(table)", "def show(title, img, write = False, wait = False):\n cv2.namedWindow(title, flags = cv2.WINDOW_NORMAL)\n cv2.imshow(title, img)\n cv2.resizeWindow(title, 1200, 900)\n if write:\n cv2.imwrite(title + \".png\", img)\n if wait:\n cv2.waitKey(1)", "def display_gui_window(self, window_title):\r\n cv2.imshow(window_title, self.image)", "def display_welcome_view(screen):\n pygame.time.Clock().tick(30)\n welcome_background = pygame.image.load('resources//pictures//startb.jpg').convert()\n screen.blit(welcome_background, (0, 0))\n pygame.display.flip()", "def full_photo():\n top = Toplevel()\n top.title(\"Full APOD Photo\")\n top.iconbitmap('10.APOD Viewer/rocket.ico')\n\n #Load the full image to the top image\n img_label = Label(top, image=full_img)\n img_label.pack()", "def render(self):\n self.delete()\n self.__create_background(self._imfname)\n # XXX must be last after successor implementation, but works without this line\n #self.c.event_generate(\"<Configure>\")\n #self.c.update_idletasks()", "def showBtnImg(*args, **kwargs):\n\targs[0].get_image().show()", "def display_image(window_name, image):\n cv2.namedWindow(window_name)\n cv2.imshow(window_name, image)\n cv2.waitKey(0)", "def showAssetImage(*args):\n\n selTab = cmds.tabLayout(widgets[\"shotAssRigListTLO\"], q=True, st=True)\n\n fType = \"\"\n asset = \"\"\n assetPath = \"\"\n path = \"\"\n imagePath = \"\"\n\n if selTab == \"Chars\":\n asset = cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], q=True, si=True)\n if asset:\n imagePath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", asset[0], \"icon\",\"{0}Icon.png\".format(asset[0])))\n if os.path.isfile(imagePath):\n cFuncs.assetImageUI(imagePath)\n else:\n cmds.warning(\"Can't find an image for {0}\".format(asset[0]))\n\n if selTab == \"Props\":\n asset = cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], q=True, si=True)\n if asset:\n imagePath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", asset[0], \"icon\",\"{0}Icon.png\".format(asset[0])))\n if os.path.isfile(imagePath):\n cFuncs.assetImageUI(imagePath)\n else:\n cmds.warning(\"Can't find an image for {0}\".format(asset[0]))\n \n if selTab == \"Sets\":\n asset = cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], q=True, si=True)\n if asset:\n imagePath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", asset[0], \"icon\",\"{0}Icon.png\".format(asset[0])))\n if os.path.isfile(imagePath):\n cFuncs.assetImageUI(imagePath)\n else:\n cmds.warning(\"Can't find an 
image for {0}\".format(asset[0]))\n \n # if selTab == \"Anm\":\n # #need to split this up\n # var_shot = cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], q=True, si=True)\n # if var_shot:\n # var, buf, shot = var_shot[0].partition(\".\")\n # path = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.shotsFolder, shot, \"anm\", var)))", "def showEmoticonList(self):\n print \"Guess what? No emoticons. But I'll put in a random one for you\"\n self.appendImageAtCursor(\"throbber.gif\")", "def show_file(file_location):\n img = Image.open(file_location)\n img.show()", "def show_image(img, title):\n cv2.imshow(title, img) # show pic\n k = cv2.waitKey(0)\n if k == 27: # wait until esc\n cv2.destroyAllWindows()", "def run_frame(self, ti, img):\n pass", "def on_image(self, image):", "def show_images(images):\n for name, img in images:\n cv2.imshow(name, img)\n\n cv2.waitKey(0)", "def show(type,img):\n # print(img)\n cv2.imshow(type, img)\n cv2.waitKey()", "def write_to_screen(self, text):\n\t\tself.blank_image = np.full((1280,1920, 3), 255, np.uint8)\n\t\tcv2.putText(self.blank_image, text,(40,300), font, 8,(0,0,0),3,cv2.LINE_AA)\n\t\tcv2.imshow(\"Background\", self.blank_image)\n\t\tcv2.waitKey(1)", "def fileCmd(self):\n filename = askopenfilename() \n self.cnvImgOrig.displayImage(filename)\n self.cnvImgTest.displayImage(filename)", "def show_il(self, update, context):\n\n # Send preliminary message\n msg = 'Some other message...'\n self.send_str(msg, update, context)\n\n # Send pic\n self.sendPic('il.png', update, context)", "def display_eink(image):\n if epd:\n epd.display(epd.getbuffer(image))\n else:\n image.show()", "def getimage(self):", "def car(img, x, y):\n gameDisplay.blit(img, (x, y)) # blit display the image", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def _blit_images(self):\n self.screen.blit(self.dial, (self.dialPos))\n self.screen.blit(self.rotatedImage, self.rotatedImageRectangle)", "def render(self):\n\n self.desert_image.render()\n self.cannon_image.render()\n self.play_button.render()\n self.escape_label.render()", "async def setup(bot: DreamBot) -> None:\n\n await bot.add_cog(Images(bot))\n bot_logger.info('Completed Setup for Cog: Images')", "def display_pygame():\n sprite_group.clear(screen, eraser_image)\n sprite_group.draw(screen)\n pygame.display.update()", "def showImage(self, img):\n cv2.namedWindow(self.NAME_WINDOW,cv2.WINDOW_NORMAL)\n cv2.resizeWindow(self.NAME_WINDOW, 300, 700)\n cv2.imshow(self.NAME_WINDOW , img)\n cv2.waitKey(0)", "def set_display(self, index=None):\n # pylint: disable=no-member\n if index:\n image = self.microbit.Image.STD_IMAGES[index]\n else:\n image = self.default_image\n self.microbit.display.show(image)", "def render(self, screen):\n pass", "def render(self, screen):\n pass", "def showTensorImg(ts, title):\n img = np.transpose(ts, (1, 2, 0))\n showImg(img, title)\n return", "def home():\n return render_template(\n 'index.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n image = Cam.do.GetLastImage(),\n \n )", "async def lizard(self, ctx):\n e = discord.Embed(title=\"Here is a lizard image for you {}.\".format(ctx.author.name), color=discord.Color.magenta())\n e.set_image(url=nekos.img('lizard'))\n await ctx.send(embed=e)", "def assemble_img_frame(self):\n\n self.p2_label_img = ttk.Label(self.p2_frame_img, text=self.lang.VP_IMG_LABEL,\n font=FONT_MSG)\n self.p2_label_img.grid(row=1, column=2, padx=5, pady=0)", "def setupScreenText(self) :\n\t\t# Create object to show avatar's position 
on the screen.\n\t\t# Update actual text using setText method on object.\n\t\tself.avPos = showText(0.92)\n\n \t\t# Create object to show a list of visible avatars\n \t\tself.showNumVisible = showText(0.85)\n \t\tself.visList = []\n\n\t\t# Create object for displaying keyboard shortcuts\n\t\tself.helpText = showText(0.78)\n\t\tself.helpText.setText(\"h: for help\")", "def blitme(self):\n\t\tself.screen.blit(self.image, self.rect)" ]
[ "0.71238935", "0.67987853", "0.66218305", "0.6546899", "0.6478864", "0.6478864", "0.636954", "0.634819", "0.6345504", "0.63256186", "0.62910193", "0.6277586", "0.62452334", "0.6237978", "0.620947", "0.6198384", "0.61778575", "0.61210454", "0.6117013", "0.61046207", "0.60963017", "0.6086652", "0.6079135", "0.607811", "0.60699457", "0.6063965", "0.606275", "0.6051189", "0.60506654", "0.6048609", "0.60376316", "0.60376024", "0.6028276", "0.6027031", "0.6020265", "0.6010374", "0.6004853", "0.600449", "0.60020375", "0.59864455", "0.59822315", "0.5981857", "0.5980684", "0.5979947", "0.5977138", "0.59602934", "0.5959825", "0.59550315", "0.5951675", "0.59461945", "0.59461945", "0.5944255", "0.59380126", "0.5931028", "0.5925865", "0.59191376", "0.5910717", "0.59095913", "0.5906993", "0.58965445", "0.589551", "0.5884311", "0.58835095", "0.58646023", "0.58621615", "0.5850914", "0.5843909", "0.5843237", "0.58393115", "0.58375823", "0.5834023", "0.5833275", "0.5825907", "0.5822112", "0.58205205", "0.5820445", "0.5817569", "0.5796416", "0.5796254", "0.5789914", "0.57821983", "0.5774621", "0.57729447", "0.576822", "0.5767506", "0.5766406", "0.5763712", "0.5763638", "0.5762479", "0.57624173", "0.57607657", "0.5758744", "0.5757767", "0.5757726", "0.5757726", "0.5755218", "0.57536733", "0.57486105", "0.5746949", "0.57463855", "0.57460135" ]
0.0
-1
num_rays: Number of beams for the simulated LiDAR. fov: Field of view. scan_std: The standard deviation of the scan. theta_disc: Theta Discretization.
def __init__(self, num_rays, fov, scan_std, batch_size=100): self.batch_size = batch_size # these were used in the old scan 2d self.num_rays = num_rays self.fov = fov self.scan_std = scan_std self.theta_inc = fov/num_rays # often used self.twopi = math.pi * 2 # cache vectors to send to gpu self.output_vector = np.zeros(self.num_rays, dtype=np.float32) self.noise = np.zeros(self.num_rays, dtype=np.float32) self.input_vector = np.zeros((self.num_rays, 3), dtype=np.float32)
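A minimal usage sketch for the constructor above. The class name Scan2D, the beam_angles spacing, and the range_fn(x, y, angle) ray-cast callback are all assumptions for illustration; the original caches its buffers for a GPU ray caster, so only the use of num_rays, theta_inc, and scan_std is taken from the code above.

import math
import numpy as np

class Scan2D:
    # Hypothetical wrapper that mirrors the cached fields from the constructor above.
    def __init__(self, num_rays, fov, scan_std, batch_size=100):
        self.batch_size = batch_size
        self.num_rays = num_rays
        self.fov = fov
        self.scan_std = scan_std
        self.theta_inc = fov / num_rays
        self.twopi = math.pi * 2
        # cached buffers, as in the original constructor
        self.output_vector = np.zeros(self.num_rays, dtype=np.float32)
        self.noise = np.zeros(self.num_rays, dtype=np.float32)
        self.input_vector = np.zeros((self.num_rays, 3), dtype=np.float32)

    def beam_angles(self, theta):
        # Spread num_rays beams symmetrically about the heading theta,
        # theta_inc radians apart, covering roughly the field of view.
        offsets = (np.arange(self.num_rays, dtype=np.float32)
                   - (self.num_rays - 1) / 2.0) * self.theta_inc
        return theta + offsets

    def simulate(self, pose, range_fn):
        # range_fn(x, y, angle) is an assumed ray cast against the map;
        # Gaussian noise with standard deviation scan_std is added per beam.
        x, y, theta = pose
        angles = self.beam_angles(theta)
        ranges = np.array([range_fn(x, y, a) for a in angles], dtype=np.float32)
        self.noise[:] = np.random.normal(0.0, self.scan_std, self.num_rays)
        self.output_vector[:] = ranges + self.noise
        return self.output_vector

# Example: a wall 5 m away in every direction, 61 beams over a 270-degree fan.
scan = Scan2D(num_rays=61, fov=math.radians(270), scan_std=0.02)
ranges = scan.simulate((0.0, 0.0, 0.0), lambda x, y, a: 5.0)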
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_rays(self):\n try: \n return self._n_rays\n except AttributeError:\n self._n_rays = 0\n for r in self.rays(): self._n_rays += 1\n return self._n_rays", "def set_rays(self, nrays=10000, seed=6775431):\n self.NPOINT = nrays\n if (seed % 2 == 0): seed += 1\n self.ISTAR1 = seed", "def vlass_stars(duration, n_beams):\n n_pointings = duration//4.2\n n_observed = n_pointings*n_beams\n return n_observed", "def detect_bispectra(self, sigma=5., tol=1.3, Q=0, show=0, save=0):\n\n try:\n ba = self.bispectra\n except AttributeError:\n print 'Need to make bispectra first.'\n return\n\n# ntr = lambda num: num*(num-1)*(num-2)/6 # assuming all triples are present\n ntr = lambda num: len(self.triples) # assume only good triples are present and use array size as input for noise estimate\n\n # using s=S/Q\n mu = lambda s: 1. # for bispectra formed from visibilities\n sigbQ3 = lambda s: n.sqrt((1 + 3*mu(s)**2) + 3*(1 + mu(s)**2)*s**2 + 3*s**4) # from kulkarni 1989, normalized by Q**3, also rogers et al 1995\n s = lambda basnr, nants: (2.*basnr/n.sqrt(ntr(nants)))**(1/3.) # see rogers et al. 1995 for factor of 2\n\n # measure SNR_bl==Q from sigma clipped times with normal mean and std of bispectra. put into time,dm order\n bamean = ba.real.mean(axis=2).transpose()\n bastd = ba.real.std(axis=2).transpose()\n\n bameanstd = []\n for dmind in xrange(len(self.dmarr)):\n (meanmin,meanmax) = sigma_clip(bamean[:, dmind]) # remove rfi to estimate noise-like parts\n (stdmin,stdmax) = sigma_clip(bastd[:, dmind])\n clipped = n.where((bamean[:, dmind] > meanmin) & (bamean[:, dmind] < meanmax) & (bastd[:, dmind] > stdmin) & (bastd[:, dmind] < stdmax) & (bamean[:, dmind] != 0.0))[0] # remove rfi and zeros\n bameanstd.append(ba[dmind][clipped].real.mean(axis=1).std())\n\n bameanstd = n.array(bameanstd)\n basnr = bamean/bameanstd # = S**3/(Q**3 / n.sqrt(n_tr)) = s**3 * n.sqrt(n_tr)\n if Q:\n print 'Using given Q =', Q\n else:\n Q = ((bameanstd/2.)*n.sqrt(ntr(self.nants)))**(1/3.)\n # Q = n.median( bastd[clipped]**(1/3.) ) # alternate for Q\n print 'Estimating noise per baseline from data. Q (per DM) =', Q\n self.Q = Q\n\n # detect\n cands = n.where( (bastd/Q**3 < tol*sigbQ3(s(basnr, self.nants))) & (basnr > sigma) ) # get compact sources with high snr\n\n # plot snrb lc and expected snr vs. 
sigb relation\n if show or save:\n for dmbin in xrange(len(self.dmarr)):\n cands_dm = cands[0][n.where(cands[1] == dmbin)[0]] # find candidates for this dmbin\n p.figure(range(len(self.dmarr)).index(dmbin)+1)\n ax = p.axes()\n p.subplot(211)\n p.title(str(self.nskip/self.nbl) + ' nskip, ' + str(dmbin) + ' dmbin, ' + str(len(cands_dm))+' candidates', transform = ax.transAxes)\n p.plot(basnr[:,dmbin], 'b.')\n if len(cands_dm) > 0:\n p.plot(cands_dm, basnr[cands_dm,dmbin], 'r*')\n p.ylim(-2*basnr[cands_dm,dmbin].max(),2*basnr[cands_dm,dmbin].max())\n p.xlabel('Integration',fontsize=12,fontweight=\"bold\")\n p.ylabel('SNR_b',fontsize=12,fontweight=\"bold\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n p.subplot(212)\n p.plot(bastd[:,dmbin]/Q[dmbin]**3, basnr[:,dmbin], 'b.')\n\n # plot reference theory lines\n smax = s(basnr[:,dmbin].max(), self.nants)\n sarr = smax*n.arange(0,101)/100.\n p.plot(sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k')\n p.plot(tol*sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k--')\n p.plot(bastd[cands_dm,dmbin]/Q[dmbin]**3, basnr[cands_dm,dmbin], 'r*')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n if len(cands_dm) > 0:\n p.axis([0, tol*sigbQ3(s(basnr[cands_dm,dmbin].max(), self.nants)), -0.5*basnr[cands_dm,dmbin].max(), 1.1*basnr[cands_dm,dmbin].max()])\n\n # show spectral modulation next to each point\n for candint in cands_dm:\n sm = n.single(round(self.specmod(dmbin,candint),1))\n p.text(bastd[candint,dmbin]/Q[dmbin]**3, basnr[candint,dmbin], str(sm), horizontalalignment='right', verticalalignment='bottom')\n p.xlabel('sigma_b/Q^3',fontsize=12,fontweight=\"bold\")\n p.ylabel('SNR_b',fontsize=12,fontweight=\"bold\")\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.nskip/self.nbl) + '_' + str(dmbin) + '_bisp.png')\n savename = string.join(savename,'.')\n elif isinstance(save, types.StringType):\n savename = save\n print 'Saving file as ', savename\n p.savefig(self.pathout+savename)\n\n return basnr[cands], bastd[cands], zip(cands[0],cands[1])", "def detect_bispectra(self, sigma=5., tol=1.3, Q=0, show=0, save=0):\n\n try:\n ba = self.bispectra\n except AttributeError:\n print 'Need to make bispectra first.'\n return\n\n# ntr = lambda num: num*(num-1)*(num-2)/6 # assuming all triples are present\n ntr = lambda num: len(self.triples) # consider possibility of zeros in data and take mean number of good triples over all times\n\n # using s=S/Q\n mu = lambda s: 1. # for bispectra formed from visibilities\n sigbQ3 = lambda s: n.sqrt((1 + 3*mu(s)**2) + 3*(1 + mu(s)**2)*s**2 + 3*s**4) # from kulkarni 1989, normalized by Q**3, also rogers et al 1995\n s = lambda basnr, nants: (2.*basnr/n.sqrt(ntr(nants)))**(1/3.) # see rogers et al. 1995 for factor of 2\n\n # measure SNR_bl==Q from sigma clipped times with normal mean and std of bispectra. 
put into time,dm order\n bamean = ba.real.mean(axis=2).transpose()\n bastd = ba.real.std(axis=2).transpose()\n\n bameanstd = []\n for dmind in xrange(len(self.dmarr)):\n (meanmin,meanmax) = sigma_clip(bamean[:, dmind]) # remove rfi to estimate noise-like parts\n (stdmin,stdmax) = sigma_clip(bastd[:, dmind])\n clipped = n.where((bamean[:, dmind] > meanmin) & (bamean[:, dmind] < meanmax) & (bastd[:, dmind] > stdmin) & (bastd[:, dmind] < stdmax) & (bamean[:, dmind] != 0.0))[0] # remove rfi and zeros\n bameanstd.append(ba[dmind][clipped].real.mean(axis=1).std())\n\n bameanstd = n.array(bameanstd)\n basnr = bamean/bameanstd # = S**3/(Q**3 / n.sqrt(n_tr)) = s**3 * n.sqrt(n_tr)\n if Q:\n print 'Using given Q =', Q\n else:\n Q = ((bameanstd/2.)*n.sqrt(ntr(self.nants)))**(1/3.)\n # Q = n.median( bastd[clipped]**(1/3.) ) # alternate for Q\n print 'Estimating noise per baseline from data. Q (per DM) =', Q\n self.Q = Q\n\n # detect\n cands = n.where( (bastd/Q**3 < tol*sigbQ3(s(basnr, self.nants))) & (basnr > sigma) ) # get compact sources with high snr\n\n # plot snrb lc and expected snr vs. sigb relation\n if show or save:\n for dmbin in xrange(len(self.dmarr)):\n cands_dm = cands[0][n.where(cands[1] == dmbin)[0]] # find candidates for this dmbin\n p.figure(range(len(self.dmarr)).index(dmbin)+1)\n ax = p.axes()\n p.subplot(211)\n p.title(str(self.nskip/self.nbl) + ' nskip, ' + str(dmbin) + ' dmbin, ' + str(len(cands_dm))+' candidates', transform = ax.transAxes)\n p.plot(basnr[:,dmbin], 'b.')\n if len(cands_dm) > 0:\n p.plot(cands_dm, basnr[cands_dm,dmbin], 'r*')\n p.ylim(-2*basnr[cands_dm,dmbin].max(),2*basnr[cands_dm,dmbin].max())\n p.xlabel('Integration',fontsize=12,fontweight=\"bold\")\n p.ylabel('SNR_b',fontsize=12,fontweight=\"bold\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n p.subplot(212)\n p.plot(bastd[:,dmbin]/Q[dmbin]**3, basnr[:,dmbin], 'b.')\n\n # plot reference theory lines\n smax = s(basnr[:,dmbin].max(), self.nants)\n sarr = smax*n.arange(0,101)/100.\n p.plot(sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k')\n p.plot(tol*sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k--')\n p.plot(bastd[cands_dm,dmbin]/Q[dmbin]**3, basnr[cands_dm,dmbin], 'r*')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n if len(cands_dm) > 0:\n p.axis([0, tol*sigbQ3(s(basnr[cands_dm,dmbin].max(), self.nants)), -0.5*basnr[cands_dm,dmbin].max(), 1.1*basnr[cands_dm,dmbin].max()])\n\n # show spectral modulation next to each point\n for candint in cands_dm:\n sm = n.single(round(self.specmod(dmbin,candint),1))\n p.text(bastd[candint,dmbin]/Q[dmbin]**3, basnr[candint,dmbin], str(sm), horizontalalignment='right', verticalalignment='bottom')\n p.xlabel('sigma_b/Q^3',fontsize=12,fontweight=\"bold\")\n p.ylabel('SNR_b',fontsize=12,fontweight=\"bold\")\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.nskip/self.nbl) + '_' + str(dmbin) + '_bisp.png')\n savename = string.join(savename,'.')\n elif isinstance(save, string):\n savename = save\n print 'Saving file as ', savename\n p.savefig(self.pathout+savename)\n\n return basnr[cands], bastd[cands], 
zip(cands[0],cands[1])", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def cosmic_ray_detect(image,box_radius=3,n_sigma=7,silent=False):\n\n box_radius = np.int(box_radius)\n box_size = np.int(2*box_radius + 1) #make sure width is odd so the target pixel is centred\n\n # Get the median value in a box around each pixel (to use to calculate the MAD)\n median_vals = ndimage.median_filter(image,size=box_size)\n\n # Rather than loop through pixels, loop through the shifts and calculate \n # the deviation for all pixels in the image at the same time\n n_stdev_vals = box_size**2 -1 # We will ignore the centre pixel\n stdev_array = np.zeros((image.shape[0],image.shape[1],n_stdev_vals))\n shift_index = 0\n for yshift in np.arange(-box_radius,box_radius+1):\n for xshift in np.arange(-box_radius,box_radius+1):\n\n # Don't include the pixel in the MAD calculation\n if xshift ==0 and yshift == 0:\n continue\n\n shifted_image = np.roll(image,(yshift,xshift),axis=(0,1))\n stdev_array[:,:,shift_index] = (shifted_image - median_vals)\n\n shift_index += 1\n\n med_abs_dev = np.nanmedian(np.abs(stdev_array),axis=2)\n n_sig_array = (image-median_vals) / (med_abs_dev*1.4826) # this number is to convert MAD to std. deviation\n\n bad_array = np.abs(n_sig_array) > n_sigma \n\n # In case we want to check the bad pixels that we detected:\n # pyfits.writeto('cosmic_ray_array.fits',np.abs(n_sig_array),overwrite=True)\n\n# cosmic_rays = np.where(bad_array)\n n_bad = np.sum(bad_array)\n if not silent:\n print(' '+str(n_bad)+' cosmic rays detected using n_sigma='+str(n_sigma))\n\n return bad_array", "def spectra(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 1 # Spare\n + 1 # Comp Schema spectra S\n + 3 # Comp Schema spectra k\n + 3 # Comp Schema spectra M\n + 1 # Spare\n + 1 # Comp Schema trigger S\n + 3 # Comp Schema trigger S\n + 3 # Comp Schema trigger S\n + 4 # Spare\n + 12 # Pixel mask\n + 2*8 # Number of data samples\n )\n\n variable = (\n num_samples * (\n 1*8 # Detector index\n + 32*8 # Spectrum x 32\n + 1*8 # Trigger\n + 1*8 # Number of integrations\n )\n )\n\n return fixed_header, variable", "def detect_bispectra(self, sigma=5., tol=1.3, Q=0, show=0, save=0):\n try:\n ba = self.bispectra\n except AttributeError:\n print 'Need to make bispectra first.'\n return\n\n# ntr = lambda num: num*(num-1)*(num-2)/6 # theoretical number of triples\n ntr = lambda num: len(self.triples) # consider possibility of zeros in data and take mean number of good triples over all times\n\n # using s=S/Q\n# mu = lambda s: s/(1+s) # for independent bispectra, as in kulkarni 1989\n mu = lambda s: 1. # for bispectra at high S/N from visibilities?\n sigbQ3 = lambda s: n.sqrt((1 + 3*mu(s)**2) + 3*(1 + mu(s)**2)*s**2 + 3*s**4) # from kulkarni 1989, normalized by Q**3, also rogers et al 1995\n s = lambda basnr, nants: (2.*basnr/n.sqrt(ntr(nants)))**(1/3.)\n\n # measure SNR_bl==Q from sigma clipped times with normal mean and std of bispectra. 
put into time,dm order\n bamean = ba.real.mean(axis=1)\n bastd = ba.real.std(axis=1)\n\n (meanmin,meanmax) = sigma_clip(bamean) # remove rfi\n (stdmin,stdmax) = sigma_clip(bastd) # remove rfi\n clipped = n.where((bamean > meanmin) & (bamean < meanmax) & (bastd > stdmin) & (bastd < stdmax) & (bamean != 0.0))[0] # remove rf\n\n bameanstd = ba[clipped].real.mean(axis=1).std()\n basnr = bamean/bameanstd\n if Q:\n print 'Using given Q =', Q\n else:\n Q = ((bameanstd/2.)*n.sqrt(ntr(self.nants)))**(1/3.)\n # Q = n.median( bastd[clipped]**(1/3.) ) # alternate for Q\n print 'Estimating noise per baseline from data. Q =', Q\n self.Q = Q\n\n # detect\n cands = n.where( (bastd/Q**3 < tol*sigbQ3(s(basnr, self.nants))) & (basnr > sigma) )[0] # define compact sources with good snr\n print cands\n\n # plot snrb lc and expected snr vs. sigb relation\n if show or save:\n p.figure()\n ax = p.axes()\n p.subplot(211)\n p.title(str(self.nskip/self.nbl)+' nskip, ' + str(len(cands))+' candidates', transform = ax.transAxes)\n p.plot(basnr, 'b.')\n if len(cands) > 0:\n p.plot(cands, basnr[cands], 'r*')\n p.ylim(-2*basnr[cands].max(),2*basnr[cands].max())\n p.xlabel('Integration')\n p.ylabel('SNR$_{bisp}$')\n p.subplot(212)\n p.plot(bastd/Q**3, basnr, 'b.')\n\n # plot reference theory lines\n smax = s(basnr.max(), self.nants)\n sarr = smax*n.arange(0,51)/50.\n p.plot(sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k')\n p.plot(tol*sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k--')\n p.plot(bastd[cands]/Q**3, basnr[cands], 'r*')\n\n if len(cands) > 0:\n p.axis([0, tol*sigbQ3(s(basnr[cands].max(), self.nants)), -0.5*basnr[cands].max(), 1.1*basnr[cands].max()])\n\n # show spectral modulation next to each point\n for candint in cands:\n sm = n.single(round(self.specmod(candint),1))\n p.text(bastd[candint]/Q**3, basnr[candint], str(sm), horizontalalignment='right', verticalalignment='bottom')\n p.xlabel('$\\sigma_b/Q^3$')\n p.ylabel('SNR$_{bisp}$')\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.nskip/self.nbl) + '_bisp.png')\n savename = string.join(savename,'.')\n elif isinstance(save, string):\n savename = save\n print 'Saving file as ', savename\n p.savefig(self.pathout+savename)\n\n return basnr[cands], bastd[cands], cands", "def test_ncols_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[2], 235)", "def run(self, nrays=None, seed=None):\n #reset nrays/seed if given\n if (nrays is not None) and (seed is not None): self.src.set_rays(nrays=nrays, seed=seed)\n if (nrays is not None): self.src.set_rays(nrays)\n #generate source\n if self.iwrite: self.src.write(\"start.00\")\n self_repair_src(self.src)\n self.beam.genSource(self.src)\n if self.iwrite:\n self.src.write(\"end.00\")\n self.beam.write(\"begin.dat\")\n #trace oe1\n if self.iwrite: self.oe1.write(\"start.01\")\n self_repair_oe(self.oe1)\n self.beam.traceOE(self.oe1, 1)\n if self.iwrite:\n self.oe1.write(\"end.01\")\n self.beam.write(\"star.01\")\n #trace detector (not required yet)\n # if self.iwrite: self.det.write(\"start.02\")\n # self.beam.traceOE(self.det, 2)\n # if self.iwrite:\n # self.det.write(\"end.02\")\n # self.beam.write(\"star.02\")", "def asamplestdev (inarray, dimension=None, keepdims=0):\r\n return N.sqrt(asamplevar(inarray,dimension,keepdims))", "def cal_samples(self):\n max_omega = max(\n abs(2 * np.pi * self.u.fundamental),\n abs(2 * np.pi * self.v.fundamental),\n abs(2 * np.pi * self.w.fundamental),\n )\n max_freq = max_omega / (2 * np.pi)\n 
self.fake_samples_number = (\n (max_freq ** 2) * 6 * self.u.data.shape[0] / self.u.sampling_rate\n )", "def run_std(self):\n print \"Initialising grid\"\n self.initialise_grid(50, 100, 3)\n \n self.initialise_shadow_map()\n \n self.num_iterations = 500\n self.jump_length = 1\n \n self.pd_s = 0.6\n self.pd_ns = 0.4\n \n self.avcount = np.zeros(self.num_iterations + 1)\n \n \n before = time.time()\n self.main_loop()\n after = time.time()\n \n time_taken = after - before\n \n print \"Took %f seconds\", time_taken", "def variance(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 1*8 # Samples per variance\n + 4*8 # Detector mask\n + 4*8 # Energy mask\n + 4 # Spare\n + 12 # Pixel mask\n + 1 # Spare\n + 1 # Comp Schema variance S\n + 3 # Comp Schema variance K\n + 3 # Comp Schema variance M\n + 2*8 # Number of data points\n )\n\n variable = (\n num_samples*1*8. # Number data points\n )\n\n return fixed_header, variable", "def rayShooting():\r\n \r\n \r\n if nbRay==1:\r\n maxi=1\r\n mini=1\r\n peaceofAngle=angleMax\r\n #to trace one ray at angleMax\r\n else:\r\n maxi=(nbRay-1)/2\r\n mini=-maxi\r\n peaceofAngle=2*angleMax/(nbRay-1)\r\n #to trace rays at regular intervals between [-angleMax;angleMax] \r\n\r\n tot=0 #to count the number of peace of ray\r\n indice=0 #to browse raysIndex\r\n\r\n raysMatrix=np.empty(shape=(0,5),dtype=np.float64)#will contain all the rays in a row\r\n raysIndex=np.empty(shape=(nbRay,),dtype=np.int16)#indexation of the rays in raysMatrix\r\n \r\n for i in np.arange(mini,maxi+1,1):#put maxi+1 to include maxi in the loop\r\n \r\n rayon=Rayon(source.position,angleToVector(peaceofAngle*i))#rayon is\r\n #the ray we will trace\r\n ray,compt=traceRay(rayon)\r\n tot+=(compt+1)\r\n\r\n \r\n raysIndex[indice]=tot #the rays index contains the indice just above\r\n #of the end of the i th ray\r\n\r\n raysMatrix=np.vstack((raysMatrix,ray))\r\n #the form of the ray matrix is a stack of peace of rays describe by\r\n #a,b,c,x1,reflexion. the polynome of the peace of ray being ax^2+bx+c and the\r\n #abscisses of the limiting point being x1, reflexion indicating if a reflexion happened\r\n #when we meet a 5-uple with a coefficient b or c infinite it means\r\n #a new ray begin\r\n \r\n indice+=1\r\n print(\"ray at indice\",i,\"and at angle\",peaceofAngle*i/np.pi*180,'degree(s)')\r\n \r\n print(\"the total number of peaces of ray is :\", tot)\r\n\r\n return(raysMatrix,raysIndex)", "def analysis_function_raytrace(system, wavelength_idx, config, spaxels_per_slice, surface, ignore_vignetting):\n start0 = time()\n\n # Set Current Configuration\n system.MCE.SetCurrentConfiguration(config)\n\n # Get the Field Points for that configuration\n sysField = system.SystemData.Fields\n N_fields = sysField.NumberOfFields\n N_waves = len(wavelength_idx)\n N_rays = N_waves * spaxels_per_slice\n\n fx_min, fy_min = sysField.GetField(1).X, sysField.GetField(1).Y\n fx_max, fy_max = sysField.GetField(N_fields).X, sysField.GetField(N_fields).Y\n\n # Watch Out! 
here we are assuming Rectangular Normalization\n X_MAX = np.max([np.abs(sysField.GetField(i + 1).X) for i in range(N_fields)])\n Y_MAX = np.max([np.abs(sysField.GetField(i + 1).Y) for i in range(N_fields)])\n\n # Normalized field coordinates (hx, hy)\n hx_min, hx_max = fx_min / X_MAX, fx_max / X_MAX\n hy_min, hy_max = fy_min / Y_MAX, fy_max / Y_MAX\n\n hx = np.linspace(hx_min, hx_max, spaxels_per_slice)\n hy = np.linspace(hy_min, hy_max, spaxels_per_slice)\n\n # The Field coordinates for the Object\n obj_xy = np.array([X_MAX * hx, Y_MAX * hy]).T\n foc_xy = np.empty((N_waves, spaxels_per_slice, 2))\n\n raytrace = system.Tools.OpenBatchRayTrace()\n normUnPolData = raytrace.CreateNormUnpol(N_rays, constants.RaysType_Real, surface)\n\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n # Add the ray to the RayTrace\n normUnPolData.AddRay(wave_idx, h_x, h_y, 0, 0, constants.OPDMode_None)\n\n # Run the RayTrace for the whole Slice\n CastTo(raytrace, 'ISystemTool').RunAndWaitForCompletion()\n # time_ray = time() - start\n # print(\"Time spent running Raytrace: %.3f sec\" % time_ray)\n\n # start = time()\n normUnPolData.StartReadingResults()\n\n # Retrieve the results for the operands and raytrace\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n output = normUnPolData.ReadNextResult()\n if ignore_vignetting == False:\n # We do care about vignetting\n if output[2] == 0 and output[3] == 0:\n x, y = output[4], output[5]\n foc_xy[i_wave, j_field, 0] = x\n foc_xy[i_wave, j_field, 1] = y\n\n elif output[2] == 0 and output[3] != 0:\n vignet_code = output[3]\n vignetting_surface = system.LDE.GetSurfaceAt(vignet_code).Comment\n print(\"\\nConfig #%d | Wavelength idx #%d\" % (config, wave_idx))\n fx, fy = h_x * X_MAX, h_y * Y_MAX\n print(\"Field point #%d : hx=%.4f hy=%.4f | fx=%.4f, fy=%.4f\" % (j_field + 1, h_x, h_y, fx, fy))\n print(\"Vignetting at surface #%d: %s\" % (vignet_code, vignetting_surface))\n else:\n # If we don't care about vignetting (rays falling outside the active area of the detector, for example)\n # we add the Raytrace results to the focal coordinates array no matter what\n if output[2] == 0:\n x, y = output[4], output[5]\n foc_xy[i_wave, j_field, 0] = x\n foc_xy[i_wave, j_field, 1] = y\n\n normUnPolData.ClearData()\n CastTo(raytrace, 'ISystemTool').Close()\n # time_res = time() - start\n # print(\"Time spent reading results: %.3f sec\" % time_res)\n\n return [obj_xy, foc_xy]", "def _one_octave(self, shrink=True, refine=True, n_5=False):\n x = []\n y = []\n dx = []\n dy = []\n if not self.sigmas:\n self._calc_sigma()\n if self.do_mask and (self.cur_mask is None):\n self._init_mask()\n if self.do_mask and (numpy.logical_not(self.cur_mask).sum(dtype=int) == 0):\n return\n\n previous = self.data\n dog_shape = (len(self.sigmas) - 1,) + self.data.shape\n self.dogs = numpy.zeros(dog_shape, dtype=numpy.float32)\n\n idx = 0\n i = 0\n for sigma_abs, sigma_rel in self.sigmas:\n# if self.already_blurred != [] and i < 3:\n# sigma_rel = 0\n# if i > 0 : previous = self.already_blurred[i-1]\n if sigma_rel == 0:\n self.blurs.append(previous)\n else:\n new_blur = gaussian_filter(previous, sigma_rel)\n self.blurs.append(new_blur)\n self.dogs[idx] = previous - new_blur\n previous = new_blur\n idx += 1\n i += 1\n\n\n if self.dogs[0].shape == 
self.raw.shape:\n self.dogs_init = self.dogs\n\n if _blob:\n valid_points = _blob.local_max(self.dogs, self.cur_mask, n_5)\n else:\n valid_points = local_max(self.dogs, self.cur_mask, n_5)\n kps, kpy, kpx = numpy.where(valid_points)\n self.raw_kp.append((kps, kpy, kpx))\n\n if refine:\n if \"startswith\" in dir(refine) and refine.startswith(\"SG\"):\n kpx, kpy, kps, delta_s = self.refine_Hessian_SG(kpx, kpy, kps)\n l = kpx.size\n peak_val = self.dogs[(numpy.around(kps).astype(int),\n numpy.around(kpy).astype(int),\n numpy.around(kpx).astype(int))]\n valid = numpy.ones(l, dtype=bool)\n else:\n kpx, kpy, kps, peak_val, valid = self.refine_Hessian(kpx, kpy, kps)\n l = valid.sum()\n self.ref_kp.append((kps, kpy, kpx))\n print('After refinement : %i keypoints' % l)\n else:\n peak_val = self.dogs[kps, kpy, kpx]\n l = kpx.size\n valid = numpy.ones(l, bool)\n\n keypoints = numpy.recarray((l,), dtype=self.dtype)\n\n\n if l != 0:\n keypoints[:].x = (kpx[valid] + 0.5) * self.curr_reduction - 0.5 # Place ourselves at the center of the pixel, and back\n keypoints[:].y = (kpy[valid] + 0.5) * self.curr_reduction - 0.5 # Place ourselves at the center of the pixel, and back\n sigmas = self.init_sigma * (self.dest_sigma / self.init_sigma) ** ((kps[valid]) / (self.scale_per_octave))\n keypoints[:].sigma = (self.curr_reduction * sigmas)\n keypoints[:].I = peak_val[valid]\n\n\n if shrink:\n #shrink data so that they can be treated by next octave\n logger.debug(\"In shrink\")\n last = self.blurs[self.scale_per_octave]\n ty, tx = last.shape\n if ty % 2 != 0 or tx % 2 != 0:\n new_tx = 2 * ((tx + 1) // 2)\n new_ty = 2 * ((ty + 1) // 2)\n new_last = numpy.zeros((new_ty, new_tx), last.dtype)\n new_last[:ty, :tx] = last\n last = new_last\n if self.do_mask:\n new_msk = numpy.ones((new_ty, new_tx), numpy.int8)\n new_msk[:ty, :tx] = self.cur_mask\n self.cur_mask = new_msk\n self.data = binning(last, 2) / 4.0\n self.curr_reduction *= 2.0\n self.octave += 1\n self.blurs = []\n if self.do_mask:\n self.cur_mask = (binning(self.cur_mask, 2) > 0).astype(numpy.int8)\n self.cur_mask = morphology.binary_dilation(self.cur_mask, self.grow)\n\n\n if len(self.keypoints) == 0 :\n self.keypoints = keypoints\n else:\n old_size = self.keypoints.size\n new_size = old_size + l\n new_keypoints = numpy.recarray(new_size, dtype=self.dtype)\n new_keypoints[:old_size] = self.keypoints\n new_keypoints[old_size:] = keypoints\n self.keypoints = new_keypoints", "def n_spectra(self):\n return np.product(self.image_shape)", "def noise_list_all_lambda_max(lambda_max_list, n, m, function_type):\r\n noise_sd = np.zeros((len(lambda_max_list), 6))\r\n index = 0\r\n for lambda_max in lambda_max_list:\r\n noise_sd[index] = (np.round(np.genfromtxt(\r\n 'noise_list_n=%s_m=%s_lambda_max=%s_%s.csv' %\r\n (n, m, lambda_max, function_type),\r\n delimiter=','), 2))\r\n index += 1\r\n return noise_sd", "def run_many_fits(spectrum,rms,guesses,nruns):\n tk_fit = []\n tex_fit = []\n ntot_fit = []\n width_fit = []\n for i in range(nruns):\n noisy_spectrum = add_noise(spectrum,rms)\n noisy_spectrum.specfit(fittype='cold_ammonia',guesses=guesses,fixed=[F,F,F,F,F,T])\n parcopy = copy.deepcopy(noisy_spectrum.specfit.parinfo)\n tk_fit = np.append(tk_fit,parcopy[0].value)\n tex_fit = np.append(tex_fit,parcopy[1].value)\n ntot_fit = np.append(ntot_fit,parcopy[2].value)\n width_fit = np.append(width_fit,parcopy[3].value)\n return tk_fit,tex_fit,ntot_fit,width_fit", "def read_raytomo_dbase(self, inh5fname, runid, dtype='ph', wtype='ray', create_header=True, Tmin=-999, 
Tmax=999, verbose=False):\n if dtype is not 'ph' and dtype is not 'gr':\n raise ValueError('data type can only be ph or gr!')\n if wtype is not 'ray' and wtype is not 'lov':\n raise ValueError('wave type can only be ray or lov!')\n stalst = self.waveforms.list()\n if len(stalst) == 0:\n print 'Inversion with surface wave datasets only, not added yet!'\n return\n indset = h5py.File(inh5fname)\n #--------------------------------------------\n # header information from input hdf5 file\n #--------------------------------------------\n dataid = 'reshaped_qc_run_'+str(runid)\n pers = indset.attrs['period_array']\n grp = indset[dataid]\n isotropic = grp.attrs['isotropic']\n org_grp = indset['qc_run_'+str(runid)]\n minlon = indset.attrs['minlon']\n maxlon = indset.attrs['maxlon']\n minlat = indset.attrs['minlat']\n maxlat = indset.attrs['maxlat']\n if isotropic:\n print 'isotropic inversion results do not output gaussian std!'\n return\n dlon_HD = org_grp.attrs['dlon_HD']\n dlat_HD = org_grp.attrs['dlat_HD']\n dlon = org_grp.attrs['dlon']\n dlat = org_grp.attrs['dlat']\n if create_header:\n inv_header = {'minlon': minlon, 'maxlon': maxlon, 'minlat': minlat, 'maxlat': maxlat,\n 'dlon': dlon, 'dlat': dlat, 'dlon_HD': dlon_HD, 'dlat_HD': dlat_HD}\n self.add_auxiliary_data(data=np.array([]), data_type='Header', path='raytomo', parameters=inv_header)\n self._get_lon_lat_arr(path='raytomo', hd=True)\n for staid in stalst:\n netcode, stacode = staid.split('.')\n staid_aux = netcode+'_'+stacode\n stla, elev, stlo = self.waveforms[staid].coordinates.values()\n if stlo < 0.:\n stlo += 360.\n if stla > maxlat or stla < minlat or stlo > maxlon or stlo < minlon:\n print 'WARNING: station: '+ staid+', lat = '+str(stla)+' lon = '+str(stlo)+', out of the range of tomograpic maps!'\n continue\n disp_v = np.array([])\n disp_un = np.array([])\n T = np.array([])\n #-----------------------------\n # determine the indices\n #-----------------------------\n ind_lon = np.where(stlo<=self.lons)[0][0]\n find_lon = ind_lon \n ind_lat = np.where(stla<=self.lats)[0][0]\n find_lat = ind_lat\n # point 1\n distmin, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat], self.lons[ind_lon]) # distance is in m\n # point 2\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat], self.lons[ind_lon-1]) # distance is in m\n if dist < distmin:\n find_lon = ind_lon-1\n distmin = dist\n # point 3\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat-1], self.lons[ind_lon]) # distance is in m\n if dist < distmin:\n find_lat = ind_lat-1\n distmin = dist\n # point 4\n dist, az, baz = obspy.geodetics.gps2dist_azimuth(stla, stlo, self.lats[ind_lat-1], self.lons[ind_lon-1]) # distance is in m\n if dist < distmin:\n find_lat = ind_lat-1\n find_lon = ind_lon-1\n distmin = dist\n for per in pers:\n if per < Tmin or per > Tmax:\n continue\n try:\n pergrp = grp['%g_sec'%( per )]\n vel = pergrp['vel_iso_HD'].value\n vel_sem = pergrp['vel_sem_HD'].value\n except KeyError:\n if verbose:\n print 'No data for T = '+str(per)+' sec'\n continue\n T = np.append(T, per)\n disp_v = np.append(disp_v, vel[find_lat, find_lon])\n disp_un = np.append(disp_un, vel_sem[find_lat, find_lon])\n data = np.zeros((3, T.size))\n data[0, :] = T[:]\n data[1, :] = disp_v[:]\n data[2, :] = disp_un[:]\n disp_header = {'Np': T.size}\n self.add_auxiliary_data(data=data, data_type='RayDISPcurve', path=wtype+'/'+dtype+'/'+staid_aux, parameters=disp_header)\n indset.close()\n return", "def 
remove_cosmic_ray(self, n_std=5):\n idx, = np.where(abs(self.y - np.median(self.y)) > n_std * np.std(self.y))\n\n if idx.size != 0:\n idx_prev = idx[0] - 1\n idx_next = idx[-1] + 1\n slope = (self.y[idx_next] - self.y[idx_prev]) / (self.x[idx_next] - self.x[idx_prev])\n origin_crossing = self.y[idx_prev] - slope * self.x[idx_prev]\n cts_replace = slope * self.x[idx] + origin_crossing\n y_noise = 0.1 * np.std(self.y) * np.random.normal(size=cts_replace.size)\n self.y[idx] = cts_replace + y_noise", "def wood_drum_env(N, sr):\n ## TODO: Fill this in\n return np.zeros(N)", "def gen_draw_rays_from_film(self):\r\n r = self.aperture_radius[self._elem_count - 1]\r\n step = 0.01\r\n count = ti.cast(r / step,ti.i32)\r\n for j in range(1):\r\n for i in range(count):\r\n y = r - i * step\r\n ori, dir = ti.Vector([0.0, 0.0, 0.0]), ti.Vector([y, 0.0, self.rear_z()])\r\n ok, a, b = self.gen_ray_from_film(ori, dir)\r\n if ok:\r\n self.draw_ray_from_film(ori, dir, 0)\r\n break", "def noise_generator(n, mean, std, fractindex):\n if fractindex not in VALID_FRACT:\n raise ValueError(\"results: status must be one of %r.\" % VALID_FRACT)\n \n stdev = std\n \n b = 2*fractindex-1\n print('beta: ', b)\n \n bdis = np.zeros(n)\n\n bdis[0] = 1\n for i in range(1,n):\n bdis[i] = bdis[i-1] * (0.5 * b + (i-1))/i # note that b is the shape parementer (b)\n\n plt.plot(bdis)\n plt.show\n\n wnt = np.random.normal(mean, stdev, size = n)\n print('WhiteNoise Stdev: ', np.std(wnt))\n plt.plot(wnt)\n plt.show()\n\n bdis_freq = np.fft.fft(bdis)\n wnt_freq = np.fft.fft(wnt)\n\n bdis_freq = bdis_freq[1:n+1]\n wnt_freq = wnt_freq[1:n+1]\n\n freq_total = bdis_freq * wnt_freq\n \n NumUniquePts = n/2 + 1\n NumUniquePts = int(NumUniquePts)\n j = np.arange(1, NumUniquePts)\n \n if fractindex > 1.0:\n j = j\n elif fractindex <= 1.0:\n j = j**0.5\n \n ft_half1 = freq_total[1:NumUniquePts]/j\n\n real = np.real(freq_total[1:NumUniquePts+1])\n real = np.flip(real, axis=0)\n\n imaginary = np.imag(freq_total[1:NumUniquePts+1])\n imaginary = np.flip(imaginary, axis=0)\n imaginary = 1j * imaginary\n\n ft_half2 = real - imaginary\n\n ft = np.hstack((ft_half1, ft_half2))\n \n x = np.fft.ifft(ft)\n x = np.real(x[:n])\n\n mean_diff = mean - np.mean(x)\n x = mean_diff + x\n print(np.mean(x))\n print(np.std(x))\n plt.plot(x)\n plt.show()\n \n return x", "def sense_landmarks(state, field_map, max_observations):\n\n assert isinstance(state, np.ndarray)\n assert isinstance(field_map, Landscape)\n\n assert state.shape == (3,)\n\n M = field_map.num_landmarks\n #print(M, field_map.landmarks.shape)\n noise_free_observations_list = list()\n for k in range(M):\n noise_free_observations_list.append(get_observation(state, field_map, k))\n noise_free_observation_tuples = [(x[0], np.abs(x[1]), int(x[2])) for x in noise_free_observations_list]\n\n dtype = [('range', float), ('bearing', float), ('lm_id', int)]\n noise_free_observations = np.array(noise_free_observations_list)\n noise_free_observation_tuples = np.array(noise_free_observation_tuples, dtype=dtype)\n\n ii = np.argsort(noise_free_observation_tuples, order='bearing')\n noise_free_observations = noise_free_observations[ii]\n noise_free_observations[:, 2] = noise_free_observations[:, 2].astype(int)\n\n c1 = noise_free_observations[:, 1] > -np.pi / 2.\n c2 = noise_free_observations[:, 1] < np.pi / 2.\n ii = np.nonzero((c1 & c2))[0]\n\n if ii.size <= max_observations:\n return noise_free_observations[ii]\n else:\n return noise_free_observations[:max_observations]", "def main(idrun):\n int_type = numpy.int32\n 
double_type = numpy.float64\n float_type = numpy.float32\n complex_type = numpy.complex64\n\n ns = 7\n iudm = 19; iuv = 12\n dname = numpy.array([\"LONGITUDINAL EFIELD \",\"ELEC CURRENT DENSITY\",\n \"VECTOR POTENTIAL \",\"TRANSVERSE EFIELD \",\n \"MAGNETIC FIELD \",\"RADIATIVE VPOTENTIAL\",\n \"ION CURRENT DENSITY \"],dtype=str)\n\n# create string from idrun\n if (idrun < 0):\n cdrun = \"Unknown\"\n while (cdrun.isdigit() == False):\n cdrun = input(\"enter integer idrun: \")\n idrun = int(cdrun)\n cdrun = str(idrun)\n fname = \"diag3.\" + cdrun\n cmfield3.ffopen3(iudm,fname)\n\n# nscalars = table of available diagnostics\n nscalars = numpy.zeros((ns),int_type,'F')\n\n# determine which vector diagnostics are available\n cmfield3.readvdiags3(iudm,nscalars)\n\n nts = numpy.zeros((1),int_type,'F')\n modesx = numpy.zeros((1),int_type,'F')\n modesy = numpy.zeros((1),int_type,'F')\n modesz = numpy.zeros((1),int_type,'F')\n mrec = numpy.zeros((1),int_type,'F')\n fname = numpy.array([\"\"],'S32')\n\n# select diagnostic\n m = numpy.sum(nscalars)\n while True:\n if (m > 0):\n n = -1\n while True:\n if (n < 0):\n for i in range(0,ns):\n if (nscalars[i]==1):\n print (\"enter \", i+1,\" for\", \n numpy.str.rstrip(dname[i]))\n print (\"enter \", 0,\" for EXIT\")\n c = input(\"\")\n if (c.isdigit()):\n n = int(c)\n if (n==0):\n break\n if ((n >= 1) and (n <= ns)):\n if (nscalars[n-1]==0):\n n = -1\n else:\n n = -1\n if (n > 0):\n break\n print (\"invalid entry, try again or enter 0 to quit\")\n else:\n print (\"no vector diagnostic files found\")\n n = 0\n# exit procedure\n if (n==0):\n if (\"vfield\" in globals()):\n vfield = None\n cmfield3.closeff3(iudm)\n return\n\n print (numpy.str.rstrip(dname[n-1]), \" diagnostic selected\")\n\n# return parameters for selected vector diagnostic:\n# nts, modesx, modesy, modesz, nrec, fname\n cmfield3.vdiagparams3(iudm,n,nts,modesx,modesy,modesz,mrec,fname)\n nrec = mrec[0]\n\n# nx/ny/nz = number of global grid points in x/y/z direction\n nx = int(math.pow(2,in3.indx)); ny = int(math.pow(2,in3.indy))\n nz = int(math.pow(2,in3.indz))\n# kyp/kzp = number of real grids in each field partition in y/z\n kyp = int((ny - 1)/in3.nvpy) + 1; kzp = int((nz - 1)/in3.nvpz) + 1\n# kyb/kzb = minimum number of processors in distributed array in y/z\n kyb = int((ny - 1)/kyp) + 1; kzb = int((nz - 1)/kzp) + 1\n# nyv = second dimension of scalar field array, >= ny\n# nzv = third dimension of scalar field array, >= nz\n nyv = kyp*kyb; nzv = kzp*kzb\n\n# allocate vector array\n if (\"vfield\" not in globals()):\n vfield = numpy.empty((in3.ndim,nx,nyv,nzv),float_type,'F')\n dt = in3.dt*float(nts[0])\n\n# open stream file for vector field\n cmfield3.fsopen3(iuv,fname)\n\n# nrec = number of complete records\n nrec = int(nrec/(kyb*kzb))\n print (\"records found: nrec = \", nrec)\n\n# read and transpose vector data\n for ii in range(0,nrec):\n# read real vector field\n cmfield3.freadv3(iuv,vfield,in3.ndim,nx,kyp,kyb,kzp,kzb)\n it = nts[0]*ii\n time = dt*float(ii)\n# show time\n print (\"it,time=\",it,time)\n cmfield3.closeff3(iuv)\n print()", "def folding(eventfile,Porb,nbins):\n times = fits.open(eventfile)[1].data['TIME'] #getting array of times\n gtis_data = fits.open(eventfile)[2].data #getting GTIs\n T = sum([ gtis_data[i]['STOP']-gtis_data[i]['START'] for i in range(len(gtis_data)) ]) #exposure time\n\n gtis_conform = []\n for i in range(len(gtis_data)):\n gtis_conform.append([gtis_data[i][0],gtis_data[i][1]]) #conform to the input that Stingray uses\n\n phase_sr,prof_sr,err_sr = 
fold_events(times,1/Porb,gtis=np.array(gtis_conform),ref_time=times[0],nbin=nbins)\n phase_sr_expo,prof_sr_expo,err_sr_expo = fold_events(times,1/Porb,gtis=np.array(gtis_conform),ref_time=times[0],expocorr=True,nbin=nbins)\n\n total_phase_sr = list(phase_sr) + list(phase_sr+1)\n total_prof_sr = list(prof_sr)*2\n total_err_sr = list(err_sr)*2\n\n total_phase_sr_expo = list(phase_sr_expo) + list(phase_sr_expo+1)\n total_prof_sr_expo = list(prof_sr_expo)*2\n total_err_sr_expo = list(err_sr_expo)*2\n\n plt.figure()\n plt.errorbar(x=total_phase_sr,y=total_prof_sr/T,yerr=total_err_sr/T,color='r',drawstyle='steps-mid')\n plt.errorbar(x=total_phase_sr_expo,y=total_prof_sr_expo/T,yerr=total_err_sr_expo/T,color='b',drawstyle='steps-mid')\n plt.legend(('Folded profile','Exposure-corrected'),loc='best',fontsize=12)\n plt.title(str(pathlib.Path(eventfile).name) +', exposure-corrected (using Stingray fold_events)',fontsize=12)\n plt.xlabel('Phase',fontsize=12)\n plt.ylabel('Counts/s',fontsize=12)\n\n return total_phase_sr_expo,total_prof_sr_expo/T,total_err_sr_expo/T", "def numberOfCamera():\n return numCams", "def sampleStandardDeviation(numlist):\n\tv = sampleVariance(numlist)\n\t#print v\n\treturn math.sqrt(v)", "def vwraysParameters(self):\n return self.__vwraysParameters", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += 
ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def get_IS_variance(lamda, num_samples):\n A = lamda\n int_max = 5\n\n # get sum of squares\n running_total = 0\n for i in range(num_samples):\n x = get_random_number(0, int_max)\n running_total += (f_of_x(x) / g_of_x(x, A, lamda)) ** 2\n\n sum_of_sqs = running_total / num_samples\n\n # get squared average\n running_total = 0\n for i in range(num_samples):\n x = get_random_number(0, int_max)\n running_total += f_of_x(x) / g_of_x(x, A, lamda)\n sq_ave = (running_total / num_samples) ** 2\n\n return sum_of_sqs - sq_ave", "def main(n = 150000, quiet = False):\n t0 = time() #timing possibility\n\n if quiet:\n output_stream = StringIO()\n else:\n output_stream = sys.stdout\n\n\n print(t0,file=output_stream)\n \n\n sfr = .01\n # star mass function\n kroupa = np.vectorize(functions.kroupa)\n mf = dist.Distribution(kroupa, .1, 50.)\n\n #star formation history\n constant_sfr = np.vectorize(functions.constant_sfr)\n \n ages = np.logspace(5,7,7)\n sf = [dist.Distribution(constant_sfr, 1000., ages[i]) for i in range(len(ages))]\n #sfr = [150000*mf.mean()/(ages[i]-1000.) 
for i in range(len(ages))]\n\n t1 = time() # finished reading the distributions\n print(t1,file=output_stream)\n\n\n # setting up model data\n aperas = np.logspace(2, 5, 4)\n avs = np.linspace(10.0, 50.0, 5)\n l = 1\n mpold, tmpnew = 0., time()\n parameters = []\n for i in range(len(avs)):\n for j in range(len(aperas)):\n for k in range(len(ages)):\n tmpold, tmpnew = tmpnew, time()\n starformation.main(massfunction = mf, starformationhistory = sf[k], \\\n A_v = avs[i], sfr = n, apera = aperas[j], maxage = ages[k], \\\n appendix = \"%s_%03d_%06d_%09d\" % ('sim',avs[i],aperas[j],ages[k]), quiet=True, precise=False)\n print(avs[i],aperas[j],ages[k], l/len(avs)/len(aperas)/len(ages), (len(avs)*len(aperas)*len(ages)-l)*(tmpnew-tmpold),file=output_stream)\n l = l+1\n \n parameters.append([avs[i],aperas[j],ages[k]])\n\n t2 = time() # end of simulation\n print(t2, t1, t2-t1)\n \n print ('number of simulations run: %s' %l , file=output_stream) \n head = ['#','AV', 'Aperature_size', 'Age']\n f = open('out/__head', 'w')\n f.write( ','.join(head)+'\\n' )\n np.savetxt(f, parameters)\n f.close()\n\n t3 = time() # end of saving data\n\n analysis.main('out')\n print ('analysis complete' , file=output_stream) \n \n t4 = time() # end of analysing data\n\n\n\n print( 'starting script at %f' %(t0), file=output_stream)\n print( 'initializing %f' %(t1-t0), file=output_stream)\n print( \"running simulation %f\" %(t2-t1), file=output_stream)\n print( \"writing data %f\" %(t3-t2), file=output_stream)\n print( \"analysing data %f\" %(t4-t3), file=output_stream)\n print( \"________________________\", file=output_stream)\n print( \"total runtime %f\" %(t4-t0), file=output_stream)\n print( \"finishing script %f\" %t4, file=output_stream)", "def __init__(self, data, num_bfs=35, bfs_sigma=0.0286, num_centers_outside_range=2.):\n\n #list to store all demo trajectories\n self._demo_trajs = data\n \n #number of demos available\n self._num_demos = len(self._demo_trajs)\n #lenght of each demonstrations\n\n self._num_centers_out_range = num_centers_outside_range\n \n self._traj_len = len(self._demo_trajs[0])\n #time step\n self._dt = 0.005\n \n #list to store all demo traj velocities\n self._Ddemo_trajs = self.compute_velocities()\n\n #number of basis function\n self._n_bfs = num_bfs\n #variance of the basis function\n self._bfs_sigma = bfs_sigma\n #centers of the basis function\n self._bfs_centres = np.linspace(0, 1, self._n_bfs)\n\n #list that stores all the weights\n self._W = []\n\n #phase variable\n self._phase = self.compute_phase(dt=self._dt, phase_speed=1.)\n\n #mean and sigma of the weights\n self._mean_W = None\n self._sigma_W = None\n\n #compute the basis functions\n self._Phi, self._PhiD, self._PhiDD = self.generate_basis_function(phase_z=self._phase._z, phase_zd=self._phase._Dz, phase_zdd=self._phase._DDz)\n\n #via points\n self._viapoints = []", "def get_initial_rays_trig(bs,\n num_steps,\n fov,\n resolution,\n ray_start,\n ray_end,\n device, ):\n\n W, H = resolution\n # Create full screen NDC (-1 to +1) coords [x, y, 0, 1].\n # Y is flipped to follow image memory layouts.\n x, y = torch.meshgrid(torch.linspace(-1, 1, W, device=device),\n torch.linspace(1, -1, H, device=device))\n x = x.T.flatten() # (HxW, ) [[-1, ..., 1], ...]\n y = y.T.flatten() # (HxW, ) [[1, ..., -1]^T, ...]\n z = -torch.ones_like(x, device=device) / np.tan((2 * math.pi * fov / 360) / 2) # (HxW, )\n\n rays_d_cam = normalize_vecs(torch.stack([x, y, z], -1)) # (HxW, 3)\n\n z_vals = torch.linspace(ray_start,\n ray_end,\n num_steps,\n 
device=device) \\\n .reshape(1, num_steps, 1) \\\n .repeat(W * H, 1, 1) # (HxW, n, 1)\n points = rays_d_cam.unsqueeze(1).repeat(1, num_steps, 1) * z_vals # (HxW, n_samples, 3)\n\n points = torch.stack(bs * [points]) # (b, HxW, n_samples, 3)\n z_vals = torch.stack(bs * [z_vals]) # (b, HxW, n_samples, 1)\n rays_d_cam = torch.stack(bs * [rays_d_cam]).to(device) # (b, HxW, 3)\n\n return points, z_vals, rays_d_cam", "def nvar(self):\n return self.h.shape[0]", "def test_nrows_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[1], 224)", "def num_wet(self):\n return np.sum(self.array == 5)", "def find_backstats(f_arr, sigma, niter):\n ave = f_arr.mean()\n std = f_arr.std()\n for i in range(niter):\n mask = (abs(f_arr - ave) < sigma * std)\n ave = f_arr[mask].mean()\n std = f_arr[mask].std()\n return ave, std", "def example1(N):\n\tX = np.random.rand(N)\n\tI_estm = np.mean(X**3)\n\ts_square = np.var(X**3) *N / (N-1)\n\tstd_error = np.sqrt(s_square/N) \n\tprint(\"simulation estimate:\", I_estm)\n\tprint(\"std error of estimate:\", std_error)", "def NBIAS(self):\n return len(self.STARS[\"dist\"])", "def test_nbands_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[0], 2)", "def render_dof(scene, camera, HEIGHT=100, WIDTH=100, V_SAMPLES=6, H_SAMPLES=6):\n output = np.zeros((HEIGHT, WIDTH, RGB_CHANNELS), dtype=np.uint8)\n if not scene or scene.is_empty() or not camera or camera.inside(\n scene.objects\n ):\n print(\"Cannot generate an image\")\n return output\n total_samples = H_SAMPLES * V_SAMPLES\n # This is for showing progress %\n iterations = HEIGHT * WIDTH * total_samples\n step_size = np.ceil((iterations * PERCENTAGE_STEP) / 100).astype('int')\n counter = 0\n bar = Bar('Raytracing', max=100 / PERCENTAGE_STEP)\n # This is needed to use it in Git Bash\n bar.check_tty = False\n for j in range(HEIGHT):\n for i in range(WIDTH):\n color = np.array([0, 0, 0], dtype=float)\n lens_sample_offsets = []\n n0 = camera.n0\n n1 = camera.n1\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n ap_sx = camera.lens_params.ap_sx\n ap_sy = camera.lens_params.ap_sy\n x_offset = ((r0 - 0.5) * m) / H_SAMPLES * ap_sx\n y_offset = ((r1 - 0.5) * n) / V_SAMPLES * ap_sy\n lens_sample_offsets.append((x_offset, y_offset))\n random_start = np.random.random_integers(0, total_samples - 1)\n for n in range(V_SAMPLES):\n for m in range(H_SAMPLES):\n r0, r1 = np.random.random_sample(2)\n x = i + ((float(m) + r0) / H_SAMPLES)\n y = HEIGHT - 1 - j + ((float(n) + r1) / V_SAMPLES)\n # Get x projected in view coord\n xp = (x / float(WIDTH)) * camera.scale_x\n # Get y projected in view coord\n yp = (y / float(HEIGHT)) * camera.scale_y\n pp = camera.p00 + xp * camera.n0 + yp * camera.n1\n npe = utils.normalize(pp - camera.position)\n sample_idx = n + m * H_SAMPLES - random_start\n x_offset, y_offset = lens_sample_offsets[sample_idx]\n ps = pp + x_offset * n0 + y_offset * n1\n fp = pp + npe * camera.lens_params.f\n director = utils.normalize(fp - ps)\n ray = Ray(ps, director)\n\n color += raytrace(ray, scene) / float(total_samples)\n counter += 1\n if counter % step_size == 0:\n bar.next()\n output[j][i] = color.round().astype(np.uint8)\n bar.finish()\n return output", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n 
self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def rvs(self, num_samples):\n raise NotImplementedError()", "def kernel_sigmas(n_kernels):\n bin_size = 2.0 / (n_kernels - 1)\n l_sigma = [0.001] # for exact match. small variance -> exact match\n if n_kernels == 1:\n return l_sigma\n\n l_sigma += [0.1] * (n_kernels - 1)\n print(l_sigma)\n return l_sigma", "def kernel_sigma(n_kernels):\n sigmas = [0.001] # exact match small variance means exact match ?\n if n_kernels == 1:\n return sigmas\n return sigmas + [0.1] * (n_kernels - 1)", "def main_gamma_ray_loop(\n num_decays,\n model,\n plasma,\n time_steps=10,\n time_end=80.0,\n grey_opacity=-1,\n spectrum_bins=500,\n time_space=\"log\",\n photoabsorption_opacity=\"tardis\",\n pair_creation_opacity=\"tardis\",\n seed=1,\n path_to_decay_data=\"~/Downloads/tardisnuclear/decay_radiation.h5\",\n positronium_fraction=0.0,\n):\n # Note: not best numpy practice, but works better in numba than the alternatives\n np.random.seed(seed)\n\n # Enforce cgs\n outer_velocities = model.v_outer.to(\"cm/s\").value\n inner_velocities = model.v_inner.to(\"cm/s\").value\n ejecta_density = model.density.to(\"g/cm^3\").value\n ejecta_volume = model.volume.to(\"cm^3\").value\n ejecta_velocity_volume = (\n 4 * np.pi / 3 * (outer_velocities**3.0 - inner_velocities**3.0)\n )\n time_explosion = model.time_explosion.to(\"s\").value\n number_of_shells = model.no_of_shells\n raw_isotope_abundance = model.raw_isotope_abundance.sort_values(\n by=[\"atomic_number\", \"mass_number\"], ascending=False\n )\n\n shell_masses = ejecta_volume * ejecta_density\n\n time_start = time_explosion\n time_end *= u.d.to(u.s)\n\n assert (\n time_start < time_end\n ), \"Error, simulation start time greater than end time!\"\n\n if time_space == \"log\":\n times = np.zeros(time_steps + 1)\n\n # log time steps\n for i in range(time_steps + 1):\n times[i] = (\n np.log(time_start)\n + (np.log(time_end) - np.log(time_start)) / time_steps * i\n )\n times[i] = np.exp(times[i])\n else:\n times = np.linspace(time_start, time_end, time_steps + 1)\n\n dt_array = np.diff(times)\n effective_time_array = np.array(\n [np.sqrt(times[i] * times[i + 1]) for i in range(time_steps)]\n )\n\n # Use isotopic number density\n for atom_number in plasma.isotope_number_density.index.get_level_values(0):\n values = plasma.isotope_number_density.loc[atom_number].values\n if values.shape[1] > 1:\n plasma.number_density.loc[atom_number] = np.sum(values, axis=0)\n else:\n plasma.number_density.loc[atom_number] = values\n\n # Calculate electron number density\n electron_number_density = (\n plasma.number_density.mul(plasma.number_density.index, axis=0)\n ).sum()\n\n electron_number_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n mass_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n electron_number = (electron_number_density * ejecta_volume).to_numpy()\n\n inv_volume_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n # Pre-calculate quantities as they change with time\n for i, t in enumerate(effective_time_array):\n inv_volume_time[:, i] = (1.0 / ejecta_velocity_volume) / (t**3.0)\n mass_density_time[:, i] = shell_masses * inv_volume_time[:, i]\n electron_number_density_time[:, i] = (\n electron_number * inv_volume_time[:, i]\n )\n\n energy_df_rows = np.zeros((number_of_shells, time_steps))\n\n # Calculate number of packets per shell 
based on the mass of isotopes\n number_of_isotopes = plasma.isotope_number_density * ejecta_volume\n total_number_isotopes = number_of_isotopes.sum(axis=1)\n\n inventories = raw_isotope_abundance.to_inventories()\n all_isotope_names = get_all_isotopes(raw_isotope_abundance)\n all_isotope_names.sort()\n\n gamma_ray_lines = get_nuclear_lines_database(path_to_decay_data)\n\n taus = {}\n parents = {}\n gamma_ray_line_array_list = []\n average_energies_list = []\n average_positron_energies_list = []\n\n for i, isotope in enumerate(all_isotope_names):\n nuclide = rd.Nuclide(isotope)\n taus[isotope] = nuclide.half_life() / np.log(2)\n child = nuclide.progeny()\n if child is not None:\n for c in child:\n if rd.Nuclide(c).half_life(\"readable\") != \"stable\":\n parents[c] = isotope\n\n energy, intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"g\",\n )\n gamma_ray_line_array_list.append(np.stack([energy, intensity]))\n average_energies_list.append(np.sum(energy * intensity))\n positron_energy, positron_intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"bp\",\n )\n average_positron_energies_list.append(\n np.sum(positron_energy * positron_intensity)\n )\n\n # Construct Numba typed dicts\n gamma_ray_line_arrays = {}\n average_energies = {}\n average_positron_energies = {}\n\n for iso, lines in zip(all_isotope_names, gamma_ray_line_array_list):\n gamma_ray_line_arrays[iso] = lines\n\n for iso, energy, positron_energy in zip(\n all_isotope_names, average_energies_list, average_positron_energies_list\n ):\n average_energies[iso] = energy\n average_positron_energies[iso] = positron_energy\n\n # urilight chooses to have 0 as the baseline for this calculation\n # but time_start may also be valid in which case decay time is time_end - time_start\n total_energy_list = []\n\n for shell, inv in enumerate(inventories):\n decayed_energy = {}\n total_decays = inv.cumulative_decays(time_end)\n for nuclide in total_decays:\n if nuclide in parents and nuclide != \"Co-56\" and nuclide != \"Co-57\":\n parent = parents[nuclide]\n if parent in parents:\n parent = parents[parent]\n decayed_energy[parent] += (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n else:\n decayed_energy[nuclide] = (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n\n total_energy_list.append(decayed_energy)\n\n total_energy = pd.DataFrame(total_energy_list)\n\n total_energy_columns = total_energy.columns.to_list()\n\n total_energy = total_energy[\n sorted(\n total_energy_columns, key=get_nuclide_atomic_number, reverse=True\n )\n ]\n\n energy_per_mass = total_energy.divide(\n (raw_isotope_abundance * shell_masses).T.to_numpy(),\n axis=0,\n )\n\n # Time averaged energy per mass for constant packet count\n average_power_per_mass = energy_per_mass / (time_end - time_start)\n\n energy_per_mass_norm = energy_per_mass.divide(\n energy_per_mass.sum(axis=1), axis=0\n ) # .cumsum(axis=1)\n\n decayed_packet_count = num_decays * number_of_isotopes.divide(\n total_number_isotopes, axis=0\n )\n\n packets_per_isotope = (\n (energy_per_mass_norm * decayed_packet_count.T.values)\n .round()\n .fillna(0)\n .astype(int)\n )\n\n print(\"Total gamma-ray energy\")\n print(total_energy.sum().sum() * u.keV.to(\"erg\"))\n\n print(\"Total positron energy\")\n print(total_energy[\"Co-56\"].sum(axis=0) * 0.0337 * u.keV.to(\"erg\"))\n\n # Taking iron group to be elements 
21-30\n # Used as part of the approximations for photoabsorption and pair creation\n # Dependent on atomic data\n iron_group_fraction_per_shell = model.abundance.loc[(21):(30)].sum(axis=0)\n\n number_of_packets = packets_per_isotope.sum().sum()\n print(\"Total packets:\", number_of_packets)\n\n packet_energy = total_energy.sum().sum() / number_of_packets\n\n print(\"Energy per packet\", packet_energy)\n\n # Need to update volume for positron deposition to be time-dependent\n print(\"Initializing packets\")\n (\n packets,\n energy_df_rows,\n energy_plot_df_rows,\n energy_plot_positron_rows,\n ) = initialize_packets(\n packets_per_isotope,\n packet_energy,\n gamma_ray_line_arrays,\n positronium_fraction,\n inner_velocities,\n outer_velocities,\n inv_volume_time,\n times,\n energy_df_rows,\n effective_time_array,\n taus,\n parents,\n average_positron_energies,\n inventories,\n average_power_per_mass,\n )\n\n print(\"Total positron energy from packets\")\n print((energy_df_rows).sum().sum() * u.eV.to(\"erg\"))\n\n total_cmf_energy = 0\n total_rf_energy = 0\n\n for p in packets:\n total_cmf_energy += p.energy_cmf\n total_rf_energy += p.energy_rf\n\n print(\"Total CMF energy\")\n print(total_cmf_energy)\n\n # Below is the Artis compensation for their method of packet rejection\n \"\"\"\n energy_ratio = total_energy.sum().sum() / total_cmf_energy\n\n print(\"Energy ratio\")\n print(energy_ratio)\n \n for p in packets:\n p.energy_cmf *= energy_ratio\n p.energy_rf *= energy_ratio\n\n for e in energy_df_rows:\n e *= energy_ratio\n \n for row in energy_plot_df_rows:\n row[1] *= energy_ratio\n \"\"\"\n print(\"Total RF energy\")\n print(total_rf_energy)\n\n energy_bins = np.logspace(2, 3.8, spectrum_bins)\n energy_out = np.zeros((len(energy_bins - 1), time_steps))\n\n # Process packets\n (\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n deposition_estimator,\n ) = gamma_packet_loop(\n packets,\n grey_opacity,\n photoabsorption_opacity,\n pair_creation_opacity,\n electron_number_density_time,\n mass_density_time,\n inv_volume_time,\n iron_group_fraction_per_shell.to_numpy(),\n inner_velocities,\n outer_velocities,\n times,\n dt_array,\n effective_time_array,\n energy_bins,\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n )\n\n # DataFrame of energy information\n energy_plot_df = pd.DataFrame(\n data=energy_plot_df_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n \"energy_input_type\",\n \"compton_opacity\",\n \"photoabsorption_opacity\",\n \"total_opacity\",\n ],\n )\n\n # DataFrame of positron energies\n energy_plot_positrons = pd.DataFrame(\n data=energy_plot_positron_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n ],\n )\n\n # DataFrame of estimated deposition\n # Multiply dataframes by inv_volume_time array\n # if per unit volume is needed\n energy_estimated_deposition = (\n pd.DataFrame(data=deposition_estimator, columns=times[:-1])\n ) / dt_array\n\n # Energy is eV/s\n energy_df = pd.DataFrame(data=energy_df_rows, columns=times[:-1]) / dt_array\n\n final_energy = 0\n for p in packets:\n final_energy += p.energy_rf\n\n print(\"Final energy to test for conservation\")\n print(final_energy)\n\n escape_energy = pd.DataFrame(\n data=energy_out, columns=times[:-1], index=energy_bins\n )\n\n return (\n energy_df,\n energy_plot_df,\n escape_energy,\n decayed_packet_count,\n energy_plot_positrons,\n energy_estimated_deposition,\n )", "def n_fft(self):\n return self._n_fft", 
"def KFilt(sample,fs=25):\n\t#kalman filter inputs\n \n # Dimensions of parameters:\n # 'transition_matrices': 2,\n # 'transition_offsets': 1,\n # 'observation_matrices': 2,\n # 'observation_offsets': 1,\n # 'transition_covariance': 2,\n # 'observation_covariance': 2,\n # 'initial_state_mean': 1,\n # 'initial_state_covariance': 2,\n \n n_timesteps = len(sample)\n trans_mat = []\n\n\t#mask missing values\n observations = np.ma.array(sample,mask=np.zeros(sample.shape))\n missing_loc = np.where(np.isnan(sample))\n observations[missing_loc[0][:],missing_loc[1][:]] = np.ma.masked\n\t\n\t#Import Kalman filter, inerpolate missing points and get 2nd, 3rd orde kinematics\n dt = 1./25\t#Length of each frame (should be iether 1/25 or 1/30)\t\n n_timesteps = len(sample)\n \n observation_matrix = np.array([[1,0,0,0],\n [0,1,0,0]])#np.eye(4) \n t = np.linspace(0,len(observations)*dt,len(observations))\n q = np.cov(observations.T[:2,:400])\n qdot = np.cov(np.diff(observations.T[:2,:400]))#np.cov(observations[:1,:400])\n\n h=(t[-1]-t[0])/t.shape[0]\n A=np.array([[1,0,h,.5*h**2], \n [0,1,0,h], \n [0,0,1,0],\n [0,0,0,1]]) \n\n init_mean = [sample[0],0,0] #initial mean should be close to the first point, esp if first point is human-picked and tracking starts at the beginning of a video\n observation_covariance = q*500 #ADJUST THIS TO CHANGE SMOOTHNESS OF FILTER\n init_cov = np.eye(4)*.001#*0.0026\n transition_matrix = A\n transition_covariance = np.array([[q[0,0],q[0,1],0,0],\n [q[1,0],q[1,1],0,0],\n [0,0,qdot[0,0],qdot[0,1]],\n [0,0,qdot[1,0],qdot[1,1]]])\n\n kf = KalmanFilter(transition_matrix, observation_matrix,transition_covariance,observation_covariance,n_dim_obs=2)\n\n kf = kf.em(observations,n_iter=1,em_vars=['transition_covariance','transition_matrix','observation_covariance'])\n\n #pdb.set_trace()\n \n global trans_mat, trans_cov, init_cond\n x_filt = kf.filter(observations[0])[0]#observations.T[0])[0]\n kf_means = kf.smooth(observations[0])[0]\n\t\n return kf_means,x_filt #np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1])),frames", "def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, 
np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')", "def getFreeSamples( self, num, dim, maxDimLens ):\n size = 0; \n while size < num:\n rnd = [0] * dim;\n for i in range( 0, dim ):\n rnd[i] = randrange( 0, maxDimLens[i] );\n pass\n angles = self.mCSpace.map2UnscaledSpace( rnd );\n if( self.mCollisionMgr.ifCollide( angles ) ):\n self.mFreeSamples.append( rnd );\n size += 1;", "def og_features(scan,filt=None,base_noise=None,thresh=-1.4781e-10,diff=1,verbose=False,scale=10):\n #get gradients of data\n der = np.array(np.gradient(scan,diff))\n \n #calculate gardient magnitudes and directions\n der_mag = np.linalg.norm(der,axis=0) \n der_uvecs = der/der_mag\n \n z_cur = np.copy(scan).ravel()\n\n #estimate noise level and set derivative filter threshold\n if filt is None:\n filt = np.mean(signaltonoise(der_mag)[-1])\n \n \n if base_noise is not None:\n filt = np.maximum(filt,base_noise)\n \n\n\n #filter directions and magnitudes\n x, y, z = der_uvecs[0].ravel(), der_uvecs[1].ravel(), der_mag.ravel()\n \n #filter using threshold and filt\n x_filt, y_filt, z_filt = x[z_cur>thresh], y[z_cur>thresh], z[z_cur>thresh]\n #x_filt, y_filt, z_filt = x, y, z\n\n \n #print(len(z_filt))\n x_filt, y_filt, z_filt = x_filt[z_filt>filt], y_filt[z_filt>filt], z_filt[z_filt>filt]\n\n \n #calculate angles\n angles_filt = np.sign(y_filt)*np.arccos(x_filt/1)\n\n \n #print(len(angles_filt))\n \n if len(angles_filt) < 2:\n return 0,0,0\n \n #fit single line\n sol1 = least_squares(ress_1line,[-np.pi/2],args=(angles_filt,),bounds=[-np.pi,0],method='dogbox',jac='2-point',max_nfev=2000)\n\n #fit two lines by grid search\n #sol_grid = grid_search(ress_2line,angles_filt,[[-np.pi,0],[-np.pi,0]])\n \n \n singleline = sol1.x[0]\n \n mx = np.minimum(np.abs(singleline-(-np.pi)),np.abs(singleline))\n \n sol_grid = grid_search(ress_2line_pm,angles_filt,[[0,mx]],umid = singleline)\n spread_lines = sol_grid[1]\n sol_grid[1] = [singleline+spread_lines,singleline-spread_lines]\n \n \n #compute average of squared residuals for both cases\n resid1 = ress_1line(sol1.x,angles_filt)\n\n grid_c11 = np.average(np.power(resid1,2))\n \n grid_c11 = np.average(np.abs(resid1))\n \n grid_c21 = sol_grid[-1]\n \n \n multip = cotunnel_score2(scan,scan>thresh,diff,scale)\n \n final_grid2 = multip*(grid_c11-grid_c21)\n \n \n \"\"\"\n plt.scatter(angles_filt,z_filt,marker='x',c='k',s=15,linewidth=0.4)\n plt.axvline(sol1.x,color='b')\n plt.axvline(sol1.x+(np.pi),color='b')\n plt.axvline(sol_grid[1][0],0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1],0,color='r', linestyle='--')\n \n plt.axvline(sol_grid[1][0]+(np.pi),0,color='r', linestyle='--')\n plt.axvline(sol_grid[1][1]+(np.pi),0,color='r', linestyle='--')\n \n plt.xlabel(\"$\\\\theta_g$ / rad\")\n \n plt.xlim([-np.pi,np.pi])\n plt.ylim([0,z.max()])\n \n \n plt.ylabel(\"$|g|$\")\n \n plt.xticks([-np.pi,0,np.pi])\n \n plt.locator_params(axis='y', nbins=2)\n \n plt.savefig(\"og_fig.svg\")\n \n plt.show()\n \"\"\"\n return final_grid2,multip,(grid_c11-grid_c21)", "def lsamplestdev (inlist):\r\n return math.sqrt(samplevar(inlist))", "def SFSchmidt10(jd,mag,errmag,nbin=0.1,bmin=5,bmax=2000):\n\n dtarray, dmagarray, sigmaarray = SFarray(jd,mag,errmag)\n ndt=np.where((dtarray<=365))\n dtarray=dtarray[ndt]\n dmagarray=dmagarray[ndt]\n sigmaarray=sigmaarray[ndt]\n\n 
bins=bincalc(nbin,bmin,bmax)\n #print(len(bins))\n\n\n sf_list=[]\n tau_list=[]\n numobj_list=[]\n\n for i in range(0,len(bins)-1):\n n=np.where((dtarray>=bins[i]) & (dtarray<bins[i+1]))\n nobjbin=len(n[0])\n if nobjbin>=6:\n dmag1=np.abs(dmagarray[n])\n derr1=np.sqrt(sigmaarray[n])\n sf=(np.sqrt(np.pi/2.0)*dmag1-derr1)\n sff=np.mean(sf)\n sf_list.append(sff)\n numobj_list.append(nobjbin)\n #central tau for the bin\n tau_list.append((bins[i]+bins[i+1])*0.5)\n\n\n SF=np.array(sf_list)\n nob=np.array(numobj_list)\n tau=np.array(tau_list)\n nn=np.where(nob>6)\n tau=tau[nn]\n SF=SF[nn]\n\n\n return (tau/365.,SF)", "def infer(self, evidences, num_psi_samples=100, beam_width=25):\n psis = []\n for i in range(num_psi_samples):\n psis.append(self.psi_from_evidence(evidences))\n psi = np.mean(psis, axis=0)\n return self.generate_asts_beam_search(psi, beam_width)", "def diagnosevibrationfft(Y, df, X, bearing, radial=True):\n score = np.zeros(3)\n f_c = np.zeros(3)\n f_sb = np.zeros(3)\n for i in range(0, 3):\n if i == 0: #Inner\n f_c[i] = bearing[0]\n if radial:\n f_sb[i] = 1.0\n else:\n f_sb[i] = 0.0\n elif i == 1: #Roller\n f_c[i] = bearing[1]\n if radial:\n f_sb[i] = bearing[2]\n else:\n f_sb[i] = 0.0\n # Cage\n\n elif i == 2: #Outer ring\n f_c[i] = bearing[3]\n f_sb[i] = 0.0\n\n for j in range(0, 3):\n tempScore = diagnosefft(Y, df, f_c[j], X, f_sb[j])\n score[j] += tempScore\n\n return score", "def spectral_kurtosis(sign, fs):\n f, ff = plotfft(sign, fs)\n if not spectral_spread(sign, fs):\n return 0\n else:\n spect_kurt = ((f - spectral_centroid(sign, fs)) ** 4) * (ff / np.sum(ff))\n return np.sum(spect_kurt) / (spectral_spread(sign, fs)**2)", "def stdev(headers, data):\n\tcolumn_matrix=data.get_data(headers)\n\tmean_values=column_matrix.std(0)\n\tstd_values=mean_values.tolist()\n\treturn std_values", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]", "def generateSDFitsFromHipsr(filename_in, path_in, filename_out, path_out, write_stokes=0, cal=None):\n \n # Open h5 file\n print \"\\nOpening files\"\n print \"-------------\"\n h5file = os.path.join(path_in, filename_in)\n out_file = os.path.join(path_out, filename_out)\n h6 = Hipsr6(h5file)\n pointing = h6.tb_pointing.cols\n obs = h6.tb_observation.cols\n obs_mode = obs.obs_mode[0].strip()\n ref_beams= obs.ref_beam[:]\n\n freqs = h6.freqs\n freqs_cal = h6.freqs_cal\n \n firmware = h6.tb_firmware_config.cols.firmware[0]\n \n print \"Input file: %s\"%h6.h5.filename\n print h6\n\n if cal == None:\n abspath = os.path.abspath( __file__ ).replace('sdfits.pyc', '').replace('sdfits.py', '')\n #diode_cal_file_x = \"%s/diode_jy_x.cal\"%abspath\n #diode_cal_file_y = \"%s/diode_jy_y.cal\"%abspath\n diode_cal_file = \"%s/diode_jy.cal\"%abspath\n else:\n diode_cal_file = cal\n\n print \"Using calibration %s\"%cal\n diode_temps_x, diode_temps_y, rx_temps_x, rx_temps_y = loadDiodeTemp(h6, diode_cal_file)\n\n scan_pointing_len = h6.tb_scan_pointing.shape[0]\n \n tb_lengths = []\n for beam in h6.h5.root.raw_data:\n if beam.shape[0] != scan_pointing_len:\n beam_id = int(beam.name.lstrip('beam_'))\n print \"WARNING: beam %i len: %i, scan_pointing len: %i\"%(beam_id, beam.shape[0], scan_pointing_len)\n tb_lengths.append(np.min([beam.shape[0], scan_pointing_len]))\n \n \n num_acc = np.max(tb_lengths) \n num_rows = num_acc * 13\n\n if num_acc == 0:\n print \"No data in %s. 
Skipping.\"%h5file\n return -1\n \n print \"No accumulations: %s, no rows: %s\"%(num_acc, num_rows)\n\n # We now need to generate a blank SD-FITS file, with the same number of rows\n print \"\\nGenerating blank SD-FITS file with %i rows...\"%num_rows\n\n path = findLibraryPath()\n if obs_mode == 'MXCAL':\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_mxcal.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_mxcal.txt')\n elif write_stokes == 2:\n print \"Stokes flag found - writing I,Q,U,V\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_stokes.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_stokes.txt')\n elif write_stokes == 0:\n print \"Writing XX, YY\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU.txt')\n else:\n print \"Writing XX, YY, XY, YX\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_xpol.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_xpol.txt')\n \n if '200_16384' in firmware:\n coldef_file = os.path.join(path, 'coldefs_dataHDU_200_16384.txt')\n \n hdulist = generateBlankSDFits(num_rows, header_primary, header_tbl, coldef_file)\n print hdulist.info()\n \n # Next, we copy over observation data \n print \"Filling new SD-FITS with HIPSR data...\"\n sdtab = hdulist[1].data\n sdhead = hdulist[1].header\n\n # Fill in header values\n sdhead[\"OBSERVER\"] = obs.observer[0]\n sdhead[\"PROJID\"] = obs.project_id[0]\n \n # Fill in common values\n # NEW METHOD OF TIMESTAMPING - AUG 27 2013\n ref_time = int(h6.h5.root.raw_data.beam_01.cols.timestamp[0])\n ref_id = int(h6.h5.root.raw_data.beam_01.cols.id[0])\n ref_clk = np.abs(h6.h5.root.observation.cols.bandwidth[0]) * 1e6\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n acc_len = h6.h5.root.firmware_config.cols.acc_len[0]\n # OLD - BEFORE MAR 2018 ref_delta = num_chans * acc_len * 2 / ref_clk\n # NEW - post MAR 2018\n fs = 800e6\n ref_delta = 4 * num_chans * acc_len / fs\n \n f = h6.freqs\n\n print \"Filling in common values... \",\n sdtab[\"SCAN\"][:] = 1\n sdtab[\"EXPOSURE\"][:] = ref_delta\n sdtab[\"OBJECT\"][:] = pointing.source[0]\n sdtab[\"OBJ-RA\"][:] = pointing.ra[0]\n sdtab[\"OBJ-DEC\"][:] = pointing.dec[0]\n sdtab[\"RESTFRQ\"][:] = obs.frequency[0] * 1e6\n sdtab[\"FREQRES\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"BANDWID\"][:] = np.abs(obs.bandwidth[0]) * 1e6\n sdtab[\"CRPIX1\"][:] = num_chans/2 + 1\n sdtab[\"CRVAL1\"][:] = obs.frequency[0] * 1e6\n sdtab[\"CDELT1\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"FLAGGED\"][:] = 0\n sdtab[\"SCANRATE\"][:] = obs.scan_rate[0] / 60 # Deg/min to deg/s\n\n\n # TCS INFO\n sdtab[\"OBSMODE\"][:] = obs.obs_mode[0] \n sdtab[\"IF\"][:] = 1\n print \"OK.\"\n \n row_sd = 0\n cycle_id = 0\n \n flipped = False\n if obs.bandwidth[0] < 0:\n flipped = True\n \n print \"Filling in unique values... 
\"\n num_cycles = np.min([scan_pointing_len, num_acc])\n for row_h5 in range(num_acc):\n cycle_id += 1 # Starts at 1 in SD-FITS file\n\n for beam in h6.h5.root.raw_data:\n beam_id = int(beam.name.lstrip('beam_'))\n LinePrint(\"%i of %i\"%(row_sd, num_rows))\n \n if cycle_id <= num_cycles:\n raj_id = \"mb%s_raj\"%beam.name.lstrip('beam_')\n dcj_id = \"mb%s_dcj\"%beam.name.lstrip('beam_')\n \n sdtab[\"CYCLE\"][row_sd] = cycle_id\n\n # Fix beam mapping (remove after fixing mapping)\n sdtab[\"BEAM\"][row_sd] = beam_id\n \n sdtab[\"CRVAL3\"][row_sd] = h6.tb_scan_pointing.col(raj_id)[cycle_id-1]\n sdtab[\"CRVAL4\"][row_sd] = h6.tb_scan_pointing.col(dcj_id)[cycle_id-1]\n\n # AZ, EL and PARANGLE should be stored for beam 1 only\n if beam_id == 1:\n sdtab[\"AZIMUTH\"][row_sd] = h6.tb_scan_pointing.col(\"azimuth\")[cycle_id-1]\n sdtab[\"ELEVATIO\"][row_sd] = h6.tb_scan_pointing.col(\"elevation\")[cycle_id-1]\n sdtab[\"PARANGLE\"][row_sd] = h6.tb_scan_pointing.col(\"par_angle\")[cycle_id-1]\n\n #sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_scan_pointing.col(\"focus_axi\")[cycle_id-1]\n sdtab[\"FOCUSTAN\"][row_sd] = h6.tb_scan_pointing.col(\"focus_tan\")[cycle_id-1]\n\n # This is confusing - but it looks like FOCUSROT should be 15.0, which is sent as feed_angle\n # Likewise, focusaxi is probably supposed to be what we receive as focus_rot\n focus_rot = h6.tb_scan_pointing.col(\"focus_rot\")[cycle_id-1]\n sdtab[\"FOCUSROT\"][row_sd] = focus_rot\n sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_observation.col(\"feed_angle\")[0]\n\n try:\n\n # OLD - 27 Aug 2013\n #timestamp = beam.cols.timestamp[row_h5]\n # New - based off integration length\n if beam_id == 1:\n new_id = beam.cols.id[row_h5]\n timestamp = (new_id - ref_id) * ref_delta + ref_time\n date_obs, time = timestamp2dt(timestamp)\n\n sdtab[\"DATE-OBS\"][row_sd] = date_obs\n sdtab[\"TIME\"][row_sd] = time\n\n ref_beam = ref_beams[np.argmin(np.abs(timestamp - obs.date[:]))]\n \n # Compute T_sys for each beam\n T_d_x = diode_temps_x[beam_id-1]\n T_d_y = diode_temps_y[beam_id-1]\n\n T_sys_x, T_sys_y = computeTsys(beam, row_h5, T_d_x, T_d_y)\n S_sys_x, S_sys_y = computeTsysSpec(h6, beam, row_h5, T_d_x, T_d_y)\n\n\n #print T_sys_x, T_sys_y\n sdtab[\"TSYS\"][row_sd] = (T_sys_x, T_sys_y)\n sdtab[\"TCAL\"][row_sd] = (np.average(extractMid(T_d_x)), np.average(extractMid(T_d_y)))\n #sdtab[\"CALFCTR\"][row_sd] = (1, 1)\n\n xx = beam.cols.xx[row_h5].astype('float32')\n yy = beam.cols.yy[row_h5].astype('float32')\n xx[0], yy[0] = 0, 0\n \n # See if there is cross corr \n if write_stokes in (1, 2):\n re_xy = beam.cols.re_xy[row_h5].astype('float32')\n im_xy = beam.cols.im_xy[row_h5].astype('float32')\n re_xy[0], im_xy[0] = 0, 0\n \n if flipped:\n xx, yy = xx[::-1], yy[::-1]\n if write_stokes in (1, 2):\n re_xy, im_xy = re_xy[::-1], im_xy[::-1]\n\n # DCP 2019.01 - Adding refbeam to all file types\n sdtab[\"REFBEAM\"][row_sd] = ref_beam\n #if obs_mode == 'MXCAL':\n # sdtab[\"REFBEAM\"][row_sd] = ref_beam\n\n if write_stokes == 2:\n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n re_xy = re_xy / fitLine(f, re_xy, num_chans)* np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, im_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n \n # Ettore tells me Parkes uses this definition\n # i.e. 
that I is the average of xx + yy\n ii = (xx + yy) / 2\n qq = (xx - yy) / 2\n uu = re_xy\n vv = im_xy\n \n # Form one data vector\n data1 = np.append(ii, qq)\n data2 = np.append(uu, vv)\n data = np.append(data1, data2)\n data = data.reshape([1,1,4,num_chans])\n else:\n\n if write_stokes == 1:\n re_xy = re_xy / fitLine(f, re_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, re_im, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n re_xy[0], im_xy[0] = 0, 0\n\n #print \"cal factor: %2.3f\"%cf\n #print \"Diode temp: %s\"%T_d\n #xx, yy = applyCal(beam, row_h5, freqs, freqs_cal, cf, T_d_x, T_d_y)\n \n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n # Multibeam stats screws up if it encounters division by 1\n xx[xx <= 1 ] = 1\n yy[yy <= 1 ] = 1\n \n do_flagger = True\n if do_flagger:\n flags = np.zeros(len(xx))\n flags[xx > 1000] = 1\n flags[yy > 1000] = 1\n flags[xx==1] = 1\n flags[yy==1] = 1\n flags = np.append(flags, flags)\n flags = flags.reshape([1,1,2,num_chans])\n \n sdtab[\"FLAGGED\"][row_sd] = flags\n \n data = np.append(xx, yy)\n data = data.reshape([1,1,2,num_chans])\n \n sdtab[\"DATA\"][row_sd] = data\n\n if write_stokes == 1:\n sdtab[\"XPOLDATA\"][row_sd] = np.row_stack((re_xy, im_xy)).flatten()\n \n except:\n if beam.name != 'beam_02':\n print \"\\nWARNING: missing row in %s\"%beam.name\n print \"Current index: %i\"%row_h5\n print \"Row length: %i\"%beam.shape[0]\n raise\n try:\n sdtab[\"FLAGGED\"][row_sd] = np.ones_like([1,1,2,num_chans])\n except ValueError:\n pass\n row_sd += 1\n else:\n print \"WARNING: scan_pointing table is not complete.\"\n print \"%s table length: %i\"%(beam.name, beam.shape[0])\n print \"scan_pointing table length: %i\"%scan_pointing_len\n\n \n h6.h5.close()\n \n if os.path.exists(out_file):\n print \"\\nInfo: File exists, deleting...\"\n os.remove(out_file)\n\n print \"\\nInfo: Saving to file\"\n hdulist.writeto(out_file)\n hdulist.close()", "def test_ncols_gtiff_array(self):\n self.assertEqual(_test_array(landsat_gtiff).shape[2], 235)", "def get_eye_specs(self, tbit, tsample, thres=0.0, nlev=2):\n\n tstart, tend = self.get_xrange()\n toff_vec = np.arange(0, tbit, tsample)\n best_idx = 0\n best_gap = 0.0\n best_values = None\n mid_lev = nlev // 2\n for idx, t_off in enumerate(toff_vec):\n # noinspection PyTypeChecker\n values = self(np.arange(tstart + t_off, tend, tbit))\n values.sort()\n\n up_idx = np.searchsorted(values, [thres])[0]\n if up_idx == 0 or up_idx == len(values):\n continue\n cur_gap = values[up_idx] - values[up_idx - 1]\n if cur_gap > best_gap:\n best_idx = idx\n best_gap = cur_gap\n best_values = values\n\n if best_values is None:\n raise ValueError(\"waveform never cross threshold=%.4g\" % thres)\n\n vstd = np.std(best_values)\n vtemp = best_values / vstd\n tmp_arr = np.linspace(vtemp[0], vtemp[-1], nlev) # type: np.ndarray\n clusters = svq.kmeans(vtemp, tmp_arr)[0]\n # clusters = svq.kmeans(vtemp, 4, iter=50)[0]\n clusters *= vstd\n clusters.sort()\n vcenter = (clusters[mid_lev] + clusters[mid_lev - 1]) / 2.0\n\n # compute eye opening/margin\n openings = []\n tr_widths = []\n last_val = best_values[0]\n bot_val = last_val\n cur_cidx = 0\n for cur_val in best_values:\n cur_cluster = clusters[cur_cidx]\n next_cluster = clusters[cur_cidx + 1]\n if abs(cur_val - cur_cluster) > abs(cur_val - next_cluster):\n openings.append(cur_val - last_val)\n tr_widths.append(last_val - bot_val)\n cur_cidx += 1\n if cur_cidx == len(clusters) - 1:\n tr_widths.append(best_values[-1] - cur_val)\n 
break\n bot_val = cur_val\n last_val = cur_val\n\n return {'center': (float(toff_vec[best_idx]), vcenter),\n 'levels': clusters,\n 'heights': clusters[1:] - clusters[:-1],\n 'openings': np.array(openings),\n 'trace_widths': np.array(tr_widths)\n }", "def STEM(self, N=256):\r\n import andrew.STEM as st\r\n # make a STEM probe that takes up about half the array\r\n self.stem = st.STEMprobe()\r\n self.stem.N = N\r\n self.stem.energy = 300.0e3\r\n self.stem.ampF = circle(N,N)\r\n self.stem.Q = 8.0e0\r\n self.stem.aberrations['C1'] = 150.0e0\r\n self.stem.makeParams()\r\n self.stem.makePhase()\r\n self.Illum = self.stem.probeR\r\n\r\n # make brog and twain \r\n M = int(0.23 * float(N)/ 2.0)\r\n amp = scale(brog(M) , np.min(np.abs(self.Illum)), np.max(np.abs(self.Illum)))\r\n phase = scale(twain(M), -np.pi , np.pi)\r\n sample = amp * np.exp(1.0J * phase)\r\n sample_area = np.ones((M,M),np.float64)\r\n self.Sample = np.zeros((N,N),dtype=np.complex128)\r\n self.sample_area = np.zeros((N,N),dtype=np.float64)\r\n self.Sample[N/2+1:N/2+1+M,N/2+1:N/2+1+M] = sample\r\n self.sample_area[N/2+1:N/2+1+M,N/2+1:N/2+1+M] = sample_area\r\n \r\n self.Exit = self.Sample + self.Illum \r\n self.image = np.square(np.abs(fft2(self.Exit)))\r\n self.N = N\r\n print \"STEM done\"", "def n_thres(self):\n return np.size(self.thres)", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)", "def test_gaussian():\n generator = SignalGenerator()\n data = generator.random_gaussian(means=[1, 0, -1], stds=[0.1, 0.1, 0.1])\n freq_features = FrequencyFeature(data, sr=50)\n freq_features.fft().peaks()\n top_n = range(1, 11)\n top_n_dominant_frequencies = np.concatenate(\n list(map(freq_features.dominant_frequency_power, top_n)), axis=0)\n std_top_n_dfs = np.std(top_n_dominant_frequencies, axis=0)\n assert np.all(std_top_n_dfs < 0.001)", "def transform(self, resampled_xray, n_burn_in, n_lookahead, skf_is): \n # Set all temps on world map as features\n #valid_range = range(n_burn_in, temperatures_xray['time'].shape[0] - n_lookahead)\n #time_steps, lats, lons = temperatures_xray['tas'].values.shape\n #X = temperatures_xray['tas'].values.reshape((time_steps,lats*lons))\n #X = X[valid_range,:]\n\n tas = select_box(resampled_xray['tas']) \n\n valid_range = range(n_burn_in, resampled_xray['time'].shape[0] - n_lookahead)\n #enso = get_enso_mean(temperatures_xray['tas'])\n # reshape the vector into a table years as rows, months as columns\n #enso_matrix = enso.values.reshape((-1,12))\n\n theShape = tas.shape\n n_time,n_lat,n_long = theShape[0],theShape[1],theShape[2] \n #print n_time,n_lat,n_long \n enso_matrix = tas.values.reshape(-1,12,n_lat,n_long)\n\n count_matrix = np.ones(enso_matrix.shape)\n # compute cumulative means of columns (remember that you can only use\n # the past at each time point) and reshape it into a vector\n enso_monthly_mean = (enso_matrix.cumsum(axis=0) / count_matrix.cumsum(axis=0)).reshape(-1,n_lat,n_long)#.ravel()\n # roll it backwards (6 months) so it corresponds to the month of the target\n\n enso_anomaly = tas - enso_monthly_mean\n\n enso_anomaly_rolled = np.roll(enso_anomaly, n_lookahead - 12,axis = 0)\n # select valid range\n enso_anomaly_rolled_valid = enso_anomaly_rolled[valid_range,:,:]\n 
# reshape it into a matrix of a single column\n X = enso_anomaly_rolled_valid.reshape(-1,n_lat*n_long)\n\n return X", "def get_mean_var(base_path, list_name, dst_name, fb_file = 'data/fb.npy'):\n \n fb = np.load(fb_file)\n with open(base_path + list_name, 'r') as f:\n list_lines = f.readlines()\n\n\n n_sum = np.zeros((3,40), dtype='float32')\n n_square_sum = np.zeros((3,40), dtype='float32')\n\n n_file = len(list_lines)\n n_chunk = 5000.0\n n_chunk_square = 20000.0\n\n print('total file : ',n_file)\n n_frame = 0\n\n for i in range(n_file):\n #for i in range(15000,16000):\n #print(i,'th')\n l = list_lines[i]\n wav_path = base_path + l[:-1]\n _, sig = wavfile.read(wav_path)\n feature, _ = extract_log_filter_bank(sig, fb)\n feature_delta = get_delta(feature, 2)\n feature_delta_delta = get_delta(feature_delta, 2)\n data = np.asarray([feature, feature_delta, feature_delta_delta], dtype = 'float32')\n\n n_sum += np.sum(data, axis=1) / n_chunk\n n_square_sum += np.sum(np.multiply(data, data), axis=1) / n_chunk_square\n n_frame+=data.shape[1]\n #print(data.shape)\n #print(np.sum(data, axis=1))\n\n if(i % 1000 == 0):\n print('---------',i,'th----------- ')\n print('sum')\n print('min : ', np.min(n_sum))\n print('max : ', np.max(n_sum))\n print('square sum')\n print('min : ', np.min(n_square_sum))\n print('max : ', np.max(n_square_sum))\n\n n_denom = n_frame / n_chunk\n n_denom_square = n_frame / n_chunk_square\n n_mean = n_sum / n_denom\n n_var = (n_square_sum / n_denom_square) - np.multiply(n_mean, n_mean)\n\n print('----------final mean----------')\n print(n_mean)\n print('----------final var-----------')\n print(n_var)\n\n np.save(dst_name+'_mean.npy', n_mean)\n np.save(dst_name+'_var.npy', n_var)\n print('final result saved at ',dst_name)", "def test_stdstar(self):\n from ..io.fluxcalibration import read_stdstar_models, write_stdstar_models\n nstd = 5\n nwave = 10\n flux = np.random.uniform(size=(nstd, nwave))\n wave = np.arange(nwave)\n fibers = np.arange(nstd)*2\n data = Table()\n data['BESTMODEL'] = np.arange(nstd)\n data['TEMPLATEID'] = np.arange(nstd)\n data['CHI2DOF'] = np.ones(nstd)\n data['REDSHIFT'] = np.zeros(nstd)\n\n fibermap = Table()\n fibermap['TARGETID'] = np.arange(nstd)\n\n input_frames = Table()\n input_frames['NIGHT'] = np.ones(nstd)*20201220\n input_frames['EXPID'] = np.arange(nstd)\n input_frames['CAMERA'] = 'b0'\n\n #- Write with data as Table, array, and dict\n write_stdstar_models(self.testfile, flux, wave, fibers,\n data, fibermap, input_frames)\n write_stdstar_models(self.testfile, flux, wave, fibers,\n np.asarray(data), fibermap, input_frames)\n\n datadict = dict()\n for colname in data.colnames:\n datadict[colname] = data[colname]\n\n write_stdstar_models(self.testfile, flux, wave, fibers, datadict,\n fibermap, input_frames)\n\n #- Now write with coefficients too\n datadict['COEFF'] = np.zeros((nstd, 3))\n write_stdstar_models(self.testfile, flux, wave, fibers, datadict,\n fibermap, input_frames)\n\n fx, wx, fibx, metadata = read_stdstar_models(self.testfile)\n self.assertTrue(np.all(fx == flux.astype('f4').astype('f8')))\n self.assertTrue(np.all(wx == wave.astype('f4').astype('f8')))\n self.assertTrue(np.all(fibx == fibers))", "def calibration_spectra(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 4*8 # Duration\n + 2*8 # Quiet time\n + 4*4 # Live time\n + 2*8 # Avg Temperature\n + 1 # Spare\n + 1 # Comp Schema accum S\n + 3 # Comp Schema accum K\n + 3 # Comp Schema accum M\n + 4*8 # Detector mask\n + 4 # Spare\n + 12 # 
Pixel mask\n + 1*8 # Sub spectrum mask\n + 2 # Spare\n + 8*( # 8 x \n 2 # Spare\n + 10 # Number of spectral points\n + 10 # Number of summed channels in spectral point\n + 10 # Lowest channel in sub spectrum \n )\n + 2*8 # Number of structure in packet\n )\n\n variable = (\n num_samples * (\n 4 # Spare\n + 5 # Detector ID\n + 4 # Pixel ID\n + 3 # Sub spec ID\n + 16 # Number of compressed spectral points\n + num_energies*1*8 # Compressed spectral point\n\n )\n )\n\n return fixed_header, variable", "def generate_spectra(freqs, util_RM, n_spectra=100, min_phi=-1000, max_phi=1000, phi_sampling=300, max_noise=0.333, phi_padding=0):\n # Compute the RMSFs.\n lsq = (3e8 / freqs) ** 2\n phis = numpy.linspace(min_phi, max_phi, phi_sampling)\n\n # Generate some Faraday spectra.\n\n # True parameters: peak positions, amplitudes, and phases.\n depths = numpy.random.uniform(\n min_phi + phi_padding, max_phi - phi_padding, size=(n_spectra, 2))\n amps = numpy.random.uniform(0, 1, size=(n_spectra, 2))\n amps[:, 0] = 1 # Normalise first amplitude to 1.\n # Set simple sources to have 0 for the second peak.\n simple = numpy.random.binomial(1, 0.5, size=(n_spectra,)).astype(bool)\n amps[simple, 1] = 0\n phases = numpy.random.uniform(-numpy.pi / 2, numpy.pi / 2, size=(n_spectra, 2))\n # spectra stores the complex spectrum.\n spectra = numpy.zeros((n_spectra, len(lsq)), dtype='complex')\n fdf_gt = numpy.zeros((n_spectra, phi_sampling), dtype='complex')\n\n for i in tqdm(range(n_spectra)):\n for p in range(2):\n if p == 1:\n if simple[i] and amps[i, p]:\n print(simple[i], amps[i, :])\n raise RuntimeError()\n spectra[i] += amps[i, p] * numpy.exp(2 * 1j * (phases[i, p] + depths[i, p] * lsq))\n idx = phis.searchsorted(depths[i, p])\n fdf_gt[i, idx] += amps[i, p] * numpy.cos(phases[i, p])\n fdf_gt[i, idx] += 1j * amps[i, p] * numpy.sin(phases[i, p])\n\n # Add Gaussian noise.\n sigmas = numpy.random.uniform(0, max_noise, size=(n_spectra, 1))\n noise = numpy.random.normal(loc=0, scale=sigmas, size=(n_spectra, len(lsq)))\n spectra_noisy = spectra + noise\n\n # RM synthesis on the spectra.\n sim_fdf, fwhm = util_RM.do_rmsynth_planes(spectra_noisy.real.T, spectra_noisy.imag.T, lsq, phis)\n\n sim_fdf = sim_fdf.T\n\n # Blur the true FDF to get the targets.\n targets_real = scipy.ndimage.gaussian_filter1d(fdf_gt.real, sigma=3, axis=-1)\n targets_imag = scipy.ndimage.gaussian_filter1d(fdf_gt.imag, sigma=3, axis=-1)\n targets = targets_real + 1j * targets_imag\n\n return {\n 'depths': depths,\n 'amps': amps,\n 'simple': simple,\n 'spectra': spectra,\n 'spectra_noisy': spectra_noisy,\n 'fdf_gt': fdf_gt,\n 'sim_fdf': sim_fdf,\n 'targets': targets,\n 'noise': sigmas,\n }", "def compute_SNR(x, fs):\n segments, cough_mask = segment_cough(x,fs)\n RMS_signal = 0 if len(x[cough_mask])==0 else np.sqrt(np.mean(np.square(x[cough_mask])))\n RMS_noise = np.sqrt(np.mean(np.square(x[~cough_mask])))\n SNR = 0 if (RMS_signal==0 or np.isnan(RMS_noise)) else 20*np.log10(RMS_signal/RMS_noise)\n return SNR", "def estimate_width(self, n, neighborhood=32):\n res = self.results\n time_index = res['time_index'][n]\n v_peak = res['velocities'][n]\n v_peak_index = self.spectrogram._velocity_to_index(v_peak)\n\n hoods, means, stdevs = [], [], []\n hood = neighborhood\n while hood > 1:\n n_low = max(0, v_peak_index - hood)\n n_high = min(v_peak_index + hood + 1, len(self.velocity))\n # fetch the true power values for this column of the spectrogram\n power = self.spectrogram.power(\n self.intensity[n_low:n_high, time_index])\n velocities = 
self.velocity[n_low:n_high]\n if hood == neighborhood:\n res = self.fit_gaussian(\n self.velocity[n_low:n_high], power, v_peak)\n res['power'] = power\n res['indices'] = (n_low, n_high)\n mean, stdev = self.moments(velocities, power)\n hoods.append(hood)\n means.append(mean)\n stdevs.append(stdev)\n hood = hood // 2\n print(stdevs)\n #\n\n return res", "def test(self, n_test_runs: int = 10) -> None:\n steps: np.ndarray = np.zeros(n_test_runs)\n rewards: np.ndarray = np.zeros(n_test_runs)\n for t in range(n_test_runs):\n steps[t], rewards[t] = self.step(collect=False)\n\n self.get_logger().warn('---------- TEST RUN RESULTS ----------')\n self.get_logger().warn(f'Average: {steps.mean()}')\n self.get_logger().warn(f'STD: {steps.std()}')\n self.get_logger().warn(f'Median: {np.median(steps)}')\n self.get_logger().warn(f'Average Reward: {rewards.mean()}')", "def calc_flux_array(self):\n \n # First determine the associated spectrum\n self.compute_template_spectrum()\n\n # Calculate baseline counts to normalise fluxes we scan over\n # Go from 10**(bin_min)*mean up to 10**(bin_max)*mean in nbins steps\n b = self.setup_b_instance(0,add_ps_mask=True)\n mean = np.sum(b.CTB_masked_compressed[0])/len(b.CTB_masked_compressed[0])\n A_array = mean*10**np.linspace(self.bin_min,self.bin_max,self.nbins)\n\n # Array to get LLs when no profile likelihood run\n norun = np.array([1.0, 1.0, 1.0, 1.0])\n\n # Now setup and compute the arrays\n LL_array = np.array([]) \n A_array_short = np.array([])\n spect_array = np.array([])\n\n for i in range(len(A_array)):\n print \"on i =\",i\n # Calculate LL\n if i == 0:\n b1 = self.setup_b_instance(A_array[i],add_ps_mask=True)\n else:\n for key in b1.fixed_template_dict_nested.keys():\n b1.fixed_template_dict_nested[key] = b1.fixed_template_dict_nested[key]*A_array[i]/A_array[i-1]\n ll_val = b1.ll(norun,4,4)\n # Make triangle\n\n # Append to arrays\n LL_array = np.append(LL_array,ll_val)\n A_array_short = np.append(A_array_short,A_array[i])\n spect_array = self.spectrum*np.array(A_array_short)\n\n # Save output\n np.save(work_dir+'ScanOutput/'+self.tag+'/En_array-'+str(self.flux_array_ebin)+'.npy',self.En_center)\n np.save(work_dir+'ScanOutput/'+self.tag+'/LL_array-'+str(self.flux_array_ebin)+'.npy',LL_array)\n np.save(work_dir+'ScanOutput/'+self.tag+'/Flux_array-'+str(self.flux_array_ebin)+'.npy',spect_array)", "def std_dev(L, is_sample=0):\n\treturn math.sqrt(variance(L, is_sample))", "def movie(l, zlim=0.7, zstep=0.01, nside=64, dump_plot_dir=None, nsn_func=None,\n bands=['g', 'r', 'i', 'z'],\n exclude_bands=['u', 'y'],\n vmax_nsn=None,\n min_cadence=0.5,\n lc_template=None,\n salt2=None):\n \n m = Metrics(l, model_filename=salt2, lc_template=lc_template, nside=nside)\n nsn_tot = np.zeros(m.npix)\n nsn_inst = np.zeros(m.npix)\n cadence_tot = np.zeros(m.npix)\n cadence_nhits = np.zeros(m.npix) \n zmax_tot = np.zeros(m.npix)\n zmax_nhits = np.zeros(m.npix)\n tmp_map = np.zeros(m.npix)\n\n # median values \n nsn_tot_history = []\n nsn_inst_history = []\n median_cadence_inst_history = []\n zmax_inst_history = []\n \n # p = Plotter()\n # for block in pxlog:\n # for m,acc in metrics:\n # r = m(block)\n # a.accumulate(r)\n # if a.do_plot:\n # p.plot_maps(a)\n \n \n \n # loop on the survey mjd -- by steps of 1 day\n for mjd in np.arange(m.mjd.min(), m.mjd.max()+1):\n zmax = np.zeros(m.npix)\n nsn = np.zeros(m.npix)\n \n # check that the sampling is ok at z=0\n s,u = m.select_window(mjd, z=0., bands=bands, exclude_bands=exclude_bands)\n c = m.cadence(u, z=0.)\n first, last = 
m.first_last_visits(mjd, u, z=0.)\n c[c<min_cadence] = 0.\n c[first==0.] = 0.\n c[last==0.] = 0.\n c0_ok = c>0.\n \n # loop over the redshift range, and check the resolution in\n # color as a function of redshift. Store the highest redshift\n # that passes the cuts \n for z in np.arange(0.1, zlim+zstep, zstep)[::-1]:\n # select the window \n s,u = m.select_window(mjd, z=z, exclude_bands=exclude_bands)\n \n # average cadence\n # note: explore median dt\n cz = m.cadence(u, z=z)\n \n # observations before -15 and after +30 ? \n firstz, lastz = m.first_last_visits(mjd, u, z=z)\n # cut in cadence \n cz[(cz<min_cadence)] = 0.\n \n # cut on the last visit\n cz[(firstz==0.)] = 0.\n cz[(lastz==0)] = 0.\n cz *= c0_ok\n\n # cut on sigma amplitude\n if np.abs(z-0.3) <= 0.01:\n snr_g = m.amplitude_snr(mjd, instrument_name + '::g', z, s)\n snr_r = m.amplitude_snr(mjd, instrument_name + '::r', z, s)\n snr_i = m.amplitude_snr(mjd, instrument_name + '::i', z, s)\n snr_z = m.amplitude_snr(mjd, instrument_name + '::z', z, s) \n if z <= 0.3:\n snr_ok = m.cut_on_amplitude_snr(mjd, z, s, \n snr_cuts = {instrument_name + '::g': 30., \n instrument_name + '::r': 40., \n instrument_name + '::i': 30., \n instrument_name + '::z': 20.})\n else:\n snr_ok = m.cut_on_amplitude_snr(mjd, z, s, \n snr_cuts = {instrument_name + '::r': 40., \n instrument_name + '::i': 30., \n instrument_name + '::z': 20.})\n \n # update max-z map \n zmax[(cz>0) & (snr_ok>0.) & (zmax==0.)] = z\n c[c==0] = cz[c==0]\n # update the number of supernovae for that day \n # we update (1) a map that contains the total\n # number of SNe and (2) a NTuple that contains\n # mjd, nsn, zmax\n if nsn_func is not None:\n nsn_inst[:] = 0.\n nsn_inst[zmax>0.] = nsn_func(zmax[zmax>0])\n nsn_tot[zmax>0.] += nsn_inst[zmax>0.]\n else:\n logging.warning('no function to compute number of SNe')\n\n # update the cumulative maps\n cadence_tot += c\n cadence_nhits[c>0] += 1\n zmax_tot += zmax\n zmax_nhits[zmax>0] += 1\n \n # m.plot_map(first, fig=1, vmin=0., vmax=1.25, sub=221, cbar=False)\n # m.plot_map(last, fig=1, vmin=0., vmax=1.25, sub=222, cbar=False)\n fig = plt.figure(1, figsize=(15.,7.5))\n human_date = DateTimeFromMJD(mjd).strftime('%Y-%m-%d')\n fig.suptitle('[%s mjd=%6.0f]' % (human_date, mjd))\n m.plot_map(nsn_tot, fig=1, sub=231, vmin=0., vmax=vmax_nsn, cbar=True, title='$N_{SNe}: %6.0f$ (tot)' % nsn_tot.sum())\n nsn_tot_history.append((mjd,nsn_tot.sum()))\n tmp_map[:] = hp.UNSEEN ; idx = zmax_nhits>0\n tmp_map[idx] = zmax_tot[idx] / zmax_nhits[idx]\n med = np.median(tmp_map[tmp_map>0])\n m.plot_map(tmp_map, fig=1, sub=232, vmin=0., vmax=0.5, cbar=True, title='$z_{max}$ (avg) [%4.2f]' % (med if ~np.isnan(med) else 0))\n tmp_map[:] = hp.UNSEEN ; idx = cadence_nhits>0\n tmp_map[idx] = cadence_tot[idx] / cadence_nhits[idx]\n med = np.median(tmp_map[tmp_map>0])\n m.plot_map(tmp_map, fig=1, sub=233, vmin=0., vmax=1., cbar=True, title='cadence [day$^{-1}$] (avg) [%4.2f]' % (med if ~np.isnan(med) else 0))\n \n m.plot_map(nsn_inst, fig=1, sub=234, vmin=0., vmax=0.015, cbar=True, title='$N_{SNe}: %4.0f$' % nsn_inst.sum())\n nsn_inst_history.append((mjd,nsn_inst.sum()))\n med = np.median(zmax[zmax>0])\n m.plot_map(zmax, fig=1, vmin=0., vmax=0.5, sub=235, cbar=True, title='$z_{max}$ [%4.2f]' % (med if ~np.isnan(med) else 0))\n zmax_inst_history.append((mjd,(med if ~np.isnan(med) else 0)))\n med = np.median(c[c>0])\n m.plot_cadence(c, fig=1, dump_plot_dir=dump_plot_dir, \n vmin=0.,\n vmax=1.,\n min_cadence=min_cadence,\n sub=236,\n title='cadence [day$^{-1}$] [%4.2f]' % 
(med if ~np.isnan(med) else 0.),\n cbar=True)\n median_cadence_inst_history.append((mjd,(med if ~np.isnan(med) else 0.)))\n\n # SNR debug plots \n fig = plt.figure(2)\n fig.suptitle('[%s mjd=%6.0f]' % (human_date, mjd))\n m.plot_map(snr_g, fig=2, sub=221, vmin=0., vmax=30., cbar=True, title='SNR[g]')\n m.plot_map(snr_r, fig=2, sub=222, vmin=0., vmax=40., cbar=True, title='SNR[r]')\n m.plot_map(snr_i, fig=2, sub=223, vmin=0., vmax=30., cbar=True, title='SNR[i]') \n m.plot_map(snr_z, fig=2, sub=224, vmin=0., vmax=20., cbar=True, title='SNR[z]', dump_plot_dir=dump_plot_dir, prefix='snr')\n\n # cadence debug plots\n\n m.fig_odometer += 1\n\n\n # dump history\n nsn_tot_history = np.rec.fromrecords(nsn_tot_history, names=['mjd', 'val'])\n nsn_inst_history = np.rec.fromrecords(nsn_inst_history, names=['mjd', 'val'])\n zmax_inst_history = np.rec.fromrecords(zmax_inst_history, names=['mjd', 'val'])\n median_cadence_inst_history = np.rec.fromrecords(median_cadence_inst_history, names=['mjd', 'val'])\n np.save(dump_plot_dir + os.sep + 'nsn_tot_history.npy', nsn_tot_history)\n np.save(dump_plot_dir + os.sep + 'nsn_inst_history.npy', nsn_inst_history)\n np.save(dump_plot_dir + os.sep + 'zmax_inst_history.npy', zmax_inst_history)\n np.save(dump_plot_dir + os.sep + 'median_cadence_inst_history.npy', median_cadence_inst_history)", "def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)", "def test_svd_smoothing(self):\n\t\t\n\t\t# 819 =~ 4096*0.2\n\t\tself.watcher.SVDSmoothing(layers=[self.fc2_layer])\n\t\tesd = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\tnum_comps = len(esd[esd > 10**-10])\n\t\tself.assertEqual(num_comps, 819)", "def detect_phasedbeam(self, sig=5., show=1, save=0, clipplot=1):\n\n try:\n arr = self.phasedbeam\n except AttributeError:\n print 'Need to make phasedbeam first.'\n return\n\n reltime = self.reltime\n\n # single iteration of sigma clip to find mean and std, skipping zeros\n mean = arr.mean()\n std = arr.std()\n print 'initial mean, std: ', mean, std\n amin,amax = sigma_clip(arr.flatten())\n clipped = arr[n.where((arr < amax) & (arr > amin) & (arr != 0.))]\n mean = clipped.mean()\n std = clipped.std()\n print 'final mean, sig, std: ', mean, sig, std\n\n # Recast arr as significance array\n arr_snr = (arr-mean)/std # for real valued trial output, gaussian dis'n, zero mean\n\n # Detect peaks\n peaks = n.where(arr_snr > sig)\n peakmax = n.where(arr_snr == arr_snr.max())\n print 'peaks: ', peaks\n\n # Plot\n if show:\n p.clf()\n ax = p.axes()\n ax.set_position([0.2,0.2,0.7,0.7])\n if clipplot:\n im = p.imshow(arr, aspect='auto', origin='lower', interpolation='nearest', extent=(min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)), vmin=amin, vmax=amax)\n else:\n im = p.imshow(arr, aspect='auto', origin='lower', interpolation='nearest', extent=(min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)))\n cb = p.colorbar(im)\n cb.set_label('Flux Density (Jy)',fontsize=12,fontweight=\"bold\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n if 
len(peaks[0]) > 0:\n print 'Peak of %f at DM=%f, t0=%f' % (arr.max(), self.dmarr[peakmax[0][0]], reltime[peakmax[1][0]])\n\n for i in xrange(len(peaks[1])):\n ax = p.imshow(arr, aspect='auto', origin='lower', interpolation='nearest', extent=(min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)))\n p.axis((min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)))\n p.plot([reltime[peaks[1][i]]], [self.dmarr[peaks[0][i]]], 'o', markersize=2*arr_snr[peaks[0][i],peaks[1][i]], markerfacecolor='white', markeredgecolor='blue', alpha=0.5)\n\n p.xlabel('Time (s)', fontsize=12, fontweight='bold')\n p.ylabel('DM (pc/cm3)', fontsize=12, fontweight='bold')\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.scan) + '_' + str(self.nskip/self.nbl) + '_disp.png')\n savename = string.join(savename,'.')\n elif isinstance(save, types.StringType):\n savename = save\n print 'Saving file as ', savename\n p.savefig(self.pathout+savename)\n\n return peaks,arr[peaks],arr_snr[peaks]", "def detect_phasedbeam(self, sig=5., show=1, save=0, clipplot=1):\n\n try:\n arr = self.phasedbeam\n except AttributeError:\n print 'Need to make phasedbeam first.'\n return\n\n reltime = self.reltime\n\n # single iteration of sigma clip to find mean and std, skipping zeros\n mean = arr.mean()\n std = arr.std()\n print 'initial mean, std: ', mean, std\n amin,amax = sigma_clip(arr.flatten())\n clipped = arr[n.where((arr < amax) & (arr > amin) & (arr != 0.))]\n mean = clipped.mean()\n std = clipped.std()\n print 'final mean, sig, std: ', mean, sig, std\n\n # Recast arr as significance array\n arr_snr = (arr-mean)/std # for real valued trial output, gaussian dis'n, zero mean\n\n # Detect peaks\n peaks = n.where(arr_snr > sig)\n peakmax = n.where(arr_snr == arr_snr.max())\n print 'peaks: ', peaks\n\n # Plot\n if show:\n p.clf()\n ax = p.axes()\n ax.set_position([0.2,0.2,0.7,0.7])\n if clipplot:\n im = p.imshow(arr, aspect='auto', origin='lower', interpolation='nearest', extent=(min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)), vmin=amin, vmax=amax)\n else:\n im = p.imshow(arr, aspect='auto', origin='lower', interpolation='nearest', extent=(min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)))\n cb = p.colorbar(im)\n cb.set_label('Flux Density (Jy)',fontsize=12,fontweight=\"bold\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n if len(peaks[0]) > 0:\n print 'Peak of %f at DM=%f, t0=%f' % (arr.max(), self.dmarr[peakmax[0][0]], reltime[peakmax[1][0]])\n\n for i in xrange(len(peaks[1])):\n ax = p.imshow(arr, aspect='auto', origin='lower', interpolation='nearest', extent=(min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)))\n p.axis((min(reltime),max(reltime),min(self.dmarr),max(self.dmarr)))\n p.plot([reltime[peaks[1][i]]], [self.dmarr[peaks[0][i]]], 'o', markersize=2*arr_snr[peaks[0][i],peaks[1][i]], markerfacecolor='white', markeredgecolor='blue', alpha=0.5)\n\n p.xlabel('Time (s)', fontsize=12, fontweight='bold')\n p.ylabel('DM (pc/cm3)', fontsize=12, fontweight='bold')\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.scan) + '_' + str(self.nskip/self.nbl) + '_disp.png')\n savename = string.join(savename,'.')\n elif isinstance(save, types.StringType):\n savename = save\n print 'Saving file as ', savename\n 
p.savefig(self.pathout+savename)\n\n return peaks,arr[peaks],arr_snr[peaks]", "def test_generate_2d_fractal_noise(self) -> None:\n octaves = 1\n while octaves <= 6:\n fractal_noise = NoiseGenerator.generate2DFractalNoise(self.ROWS, self.COLS, periods=8, octaves=octaves)\n visualize(fractal_noise, title=\"Fractal Noise Octaves={}\".format(octaves))\n octaves += 1", "def view_surface_rec(self, x, n_max=1000, random_state=42, title=None, dataset_name=None):\n if self.comet_exp is not None:\n # If comet_exp is set, use different backend to avoid display errors on clusters\n matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\n import matplotlib.pyplot as plt\n from grae.data.manifolds import set_axes_equal\n\n np.random.seed(random_state)\n\n x_hat = self.reconstruct(x)\n x, y = x.numpy()\n\n if x.shape[0] > n_max:\n sample_mask = np.random.choice(x.shape[0], size=n_max, replace=False)\n x_hat = x_hat[sample_mask]\n x = x[sample_mask]\n y = y[sample_mask]\n\n scene_dict = dict(SwissRoll=(0, 0), Mammoth=(-15, 90), ToroidalHelices=(30, 0))\n if dataset_name in scene_dict:\n tilt, rotation = scene_dict[dataset_name]\n else:\n tilt, rotation = 0, 0\n\n # set up a figure twice as wide as it is tall\n fig = plt.figure(figsize=plt.figaspect(0.5))\n\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.view_init(tilt, rotation)\n ax.set_title('Input')\n ax.scatter(*x.T, c=y, cmap='jet', edgecolor='k')\n set_axes_equal(ax)\n\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n\n ax.view_init(tilt, rotation)\n ax.set_title('Reconstruction')\n ax.scatter(*x_hat.T, c=y, cmap='jet', edgecolor='k')\n set_axes_equal(ax)\n\n\n if title is not None:\n fig.suptitle(title, fontsize=20)\n\n if self.comet_exp is not None:\n self.comet_exp.log_figure(figure=plt, figure_name=title)\n plt.clf()\n else:\n plt.show()", "def simulate(initstate, t, timestep=forward, drive=donothing, bounds = [0.97, 0.97, 0.97, 0.97], saveinterval=10, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x = dudt, dvdt_x = dvdt, dndt_x = dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # gives surface height array of the system after evert dt\n bounds = np.asarray(bounds, dtype=np.float32)\n h, n, u, v, f, dx, dy, dt = [initstate[k] for k in ('h', 'n', 'u', 'v', 'lat', 'dx', 'dy', 'dt')]\n \n f = np.float32(((2*2*np.pi*np.sin(f*np.pi/180))/(24*3600))[:,np.newaxis])\n \n \n du0 = np.zeros_like(u)\n dv0 = np.zeros_like(v)\n dn0 = np.zeros_like(n)\n \n \n dndt_x(h, n, u, v, dx, dy, dn0)\n dn = (dn0, np.copy(dn0), np.copy(dn0))\n \n dudt_x(h, n, f, u, v, dx, dy, du0)\n du = (du0, np.copy(du0), np.copy(du0), np.copy(du0))\n \n dvdt_x(h, n, f, u, v, dx, dy, dv0)\n dv = (dv0, np.copy(dv0), np.copy(dv0), np.copy(dv0))\n \n nu = (dx+dy)/1000\n \n mmax = np.max(np.abs(n))\n landthresh = 1.5*np.max(n) # threshhold for when sea ends and land begins\n itrs = int(np.ceil(t/dt))\n saveinterval = np.int(saveinterval//dt)\n assert (dt >= 0), 'negative dt!' 
# dont try if timstep is zero or negative\n \n ntt = np.zeros((np.int(np.ceil(itrs/saveinterval)),)+n.shape, dtype=np.float32)\n maxn = np.zeros(n.shape, dtype=n.dtype) # max height in that area\n \n coastx = np.less(h, landthresh) # where the reflective condition is enforced on the coast\n \n print('simulating...')\n try:\n for itr in range(itrs):# iterate for the given number of iterations\n if itr%saveinterval == 0:\n ntt[np.int(itr/saveinterval),:,:] = n\n print(np.argmax( ntt[np.int(itr/saveinterval),:,:],axis=0)[5])\n \n \n maxn = np.max((n, maxn), axis=0) # record new maxes if they are greater than previous records \n \n # pushes n, u, v one step into the future\n n,u,v, du, dv, dn = timestep(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=beta, eps=eps, gamma=gamma, mu=mu, nu=nu, dudt_x=dudt_x, dvdt_x=dvdt_x, dndt_x=dndt_x, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn)\n\n land(h, n, u, v, coastx) # how to handle land/coast\n border(n, u, v, 15, bounds) \n drive(h, n, u, v, f, dt, dx, dy, nu, coastx, bounds, mu, itr)\n print('simulation complete')\n except Exception as e:\n print('timestep: ', itr)\n raise e\n return ntt, maxn#, minn, timemax # return surface height through time and maximum heights", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def test_reviewData():\n starttime = UTCDateTime('2018-06-18T02:34:20')\n endtime = UTCDateTime('2018-06-18T02:37:20')\n st = rd.getdata('IU', 'TEIG,PAYG', '00', 'BHZ', starttime, endtime, savedat=True,\n filenamepref='Test1_', loadfromfile=True, reloadfile=False)\n\n event_lat = 14.178\n event_lon = -90.670\n\n rd.attach_coords_IRIS(st)\n rd.attach_distaz_IRIS(st, event_lat, event_lon)\n\n fig = rd.recsec(st)\n\n freqs, amps, fig2 = rd.make_multitaper(st, render=False)\n\n fig3 = rd.make_spectrogram(st)\n\n rd.nextpow2(7)\n\n stacc, stvel = rd.getpeaks(st)\n\n rd.fourier_spectra(st)", "def farid_filters(n=3):\n if n == 3:\n return [0.229879, 0.540242, 0.229879], [0.425287, 0.0, -0.425287]\n elif n == 5:\n return [0.037659, 0.249153, 0.426375, 0.249153, 0.037659], [0.109604, 0.276691, 0.0, -0.276691, -0.109604]", "def test_svd_smoothing_no_model(self):\n\t\t\n\t\t# 819 =~ 4096*0.2\n\t\tself.watcher.SVDSmoothing(model=self.model, layers=[self.fc2_layer])\n\t\tesd = self.watcher.get_ESD(layer=self.fc2_layer) \n\t\tnum_comps = len(esd[esd>10**-10])\n\t\tself.assertEqual(num_comps, 819)", "def nSlices(self):\n return self._c_param.lee_richards_n_slices", "def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")", "def sslm_counts_init(self, obs_variance, chain_variance, sstats):\n W = self.vocab_len\n T = self.num_time_slices\n\n log_norm_counts = np.copy(sstats)\n log_norm_counts /= sum(log_norm_counts)\n log_norm_counts += 1.0 / W\n 
log_norm_counts /= sum(log_norm_counts)\n log_norm_counts = np.log(log_norm_counts)\n\n # setting variational observations to transformed counts\n self.obs = (np.repeat(log_norm_counts, T, axis=0)).reshape(W, T)\n # set variational parameters\n self.obs_variance = obs_variance\n self.chain_variance = chain_variance\n\n # compute post variance, mean\n for w in range(W):\n self.variance[w], self.fwd_variance[w] = self.compute_post_variance(w, self.chain_variance)\n self.mean[w], self.fwd_mean[w] = self.compute_post_mean(w, self.chain_variance)\n\n self.zeta = self.update_zeta()\n self.e_log_prob = self.compute_expected_log_prob()", "def num_samples(self):\n return self._ll_tree_sequence.get_num_samples()", "def n_samples(self) -> int: # pragma: no cover\n return self.samples.shape[0]" ]
[ "0.61132187", "0.5876119", "0.56016153", "0.53787774", "0.531972", "0.52205455", "0.52205455", "0.52205455", "0.52205455", "0.5152057", "0.51412386", "0.50477684", "0.5036971", "0.5029574", "0.4998146", "0.4961353", "0.49178076", "0.49124226", "0.48896435", "0.48728356", "0.48703465", "0.4857832", "0.48429883", "0.48241234", "0.48220745", "0.48045316", "0.47867805", "0.47866383", "0.47838426", "0.4781028", "0.47486424", "0.47246197", "0.47110558", "0.46976247", "0.46920684", "0.4684321", "0.4672675", "0.4669439", "0.4668743", "0.46559933", "0.46555266", "0.46504635", "0.46361634", "0.4620818", "0.46157137", "0.46132642", "0.46110246", "0.46068555", "0.46030715", "0.46001902", "0.4598159", "0.45965928", "0.4590531", "0.45895466", "0.4573567", "0.45703146", "0.45681694", "0.4566461", "0.45596048", "0.45593262", "0.45497072", "0.45453942", "0.45445314", "0.4536242", "0.45271933", "0.4522181", "0.45216405", "0.4514809", "0.4509487", "0.4507908", "0.45057598", "0.4505404", "0.45034093", "0.45031905", "0.44981453", "0.44970447", "0.4495811", "0.4490638", "0.44865307", "0.44836703", "0.4479038", "0.44735506", "0.4472836", "0.4471791", "0.44688073", "0.4465724", "0.446521", "0.446521", "0.4459456", "0.44593236", "0.44591624", "0.44532758", "0.44521886", "0.445104", "0.44500044", "0.44490156", "0.44460753", "0.4439233", "0.4437274", "0.44310796" ]
0.5748073
2
x Position x on the map y Position y on the map theta Direction on the map
def scan(self, x, y, theta): # create ray list max_theta = theta + self.fov/2.0 min_theta = theta - self.fov/2.0 thetas = np.arange(min_theta, max_theta, self.theta_inc, dtype=np.float32) self.input_vector[:, 0] = x self.input_vector[:, 1] = y self.input_vector[:, 2] = thetas # run ray marching self.scan_method.calc_range_many(self.input_vector, self.output_vector) return self.output_vector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def theta(self):\n return atan2(self.y, self.x)", "def theta(self):\n return float(np.arctan2(self.y, self.x))", "def xy(self,theta,phi):\n dist=great_circle_distance(self.theta0,theta,self.phi0,phi)\n [yt,xt]=np.unravel_index(np.argmin(dist),dist.shape)\n return xt,yt", "def _rotate_coords(self, x, y, theta, ox, oy):\n s, c = self._pkgs['numpy'].sin(theta), self._pkgs['numpy'].cos(theta)\n x, y = self._pkgs['numpy'].asarray(x) - ox, self._pkgs['numpy'].asarray(y) - oy\n return x * c - y * s + ox, x * s + y * c + oy", "def wind_xy_to_theta(x, y):\n return np.angle( x + 1j*y, deg=False )/np.pi # note div by np.pi!", "def __call__( self , theta ):\r\n offset = np.dot( z_rot( theta ) , [ self.radius , 0 , 0 ] )\r\n # print \"Offset:\" , offset\r\n return np.add( self.center , offset )", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def map(self, lat, long):\r\n rxy = self._r*lat/(np.pi/2)\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)", "def polar_to_xy(r, theta):\r\n x = r*np.cos(theta)\r\n y = r*np.sin(theta)\r\n return x, y", "def latToTheta(lat):\n return (90.0 - lat) * (np.pi/180.0)", "def convert_pose_to_xy_and_theta(self, passed_stamped_pose):\n # Convert to map coordinate frame from odom\n pose = self.transform(passed_stamped_pose).pose # Apply current transform to given pose\n\n orientation_tuple = (pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w)\n angles = t.euler_from_quaternion(orientation_tuple)\n\n return (pose.position.x, pose.position.y, angles[2])", "def sat_2d_pos(theta):\n r_sat = a * (1 - e**2) / (1 + e * np.cos(theta))\n return r_sat, theta", "def ang2xy(self, theta, phi=None, lonlat=False, direct=False):\n pass", "def tanp_to_world(self, x, y):\n ra, dec = x, y\n return ra, dec", "def coordinates(self):", "def map(self, lat, long):\r\n rxy = self._r*np.sqrt(1-np.cos(lat))\r\n x = rxy*np.cos(long)\r\n y = rxy*np.sin(long)\r\n return (x, y)", "def get_theta(self):\n return self.theta", "def xyz(phi, theta):\n x = cos(theta) * cos(phi)\n y = cos(theta) * sin(phi)\n z = sin(theta)\n loc = asarray([x,y,z])\n return(loc)", "def position(t):\n return c + tangent_vec * 7 * t ** 2", "def theta():\n pass", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def coords(self, p=None, rotationDegreesCCW=0.0):\n Th, R = self.ThRcoords()\n\n x = R * math.cos(Th)\n y = R * math.sin(Th)\n if rotationDegreesCCW:\n x, y = utils.rotateXY(x, y, rotationDegreesCCW)\n return x, y", "def undo_mercator_project(x,y):\n lon = y*np.pi\n ex = np.exp(4*np.pi*x)\n lat = np.arcsin((ex - 1)/(ex +1 ))\n lon = lon*360/2/np.pi\n lat = lat*360 /2/np.pi\n return lon, lat", "def coords(self, p=None, rotationDegreesCCW=0.0):\n if p is None:\n p = 1.0 # useful for doing relative distance comparisons.\n\n i, j = self.indices()\n x = p * (COS30 * i)\n y = p * (SIN30 * i + j)\n\n if rotationDegreesCCW:\n x, y = utils.rotateXY(x, y, rotationDegreesCCW)\n\n return x, y", "def update(self, x, y, theta):\n self.x = x\n self.y = y\n self.theta = theta\n self.theta = wrap_angles(self.theta)", "def coordX(r, theta, useradians = True):\r\n if not useradians :\r\n #convert theta to radians\r\n theta = theta / 180 * math.pi\r\n x = r * math.cos(theta)\r\n return x", "def initialCoordinates():\r\n return (-250,-250)", "def _get_rotated_coords(x, y, PA):\n x_rot = y * 
np.cos(np.radians(PA)) + x * np.sin(np.radians(PA))\n y_rot = x * np.cos(np.radians(PA)) - y * np.sin(np.radians(PA))\n return x_rot, y_rot", "def _to_world_coord(self, x, y):\n maze = self._get_maze()\n y = maze.shape[1] - y - 1\n return (float(x) + .5) * _MAZE_CELL_SIZE, (float(y) + .5) * _MAZE_CELL_SIZE", "def set(self, x: float, y: float, theta: float):\n self.x = float(x)\n self.y = float(y)\n self.theta = float(theta)", "def forward(self, x):\r\n self.x = (self.x+(x*(math.cos(self.dir))))\r\n self.y = (self.y+(x*(math.sin(self.dir))))\r\n return (self.x, self.y)", "def get_location(self):\r\n return self.__x, self.__y", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def _get_cart_sky_coords(self, x0, y0):\n return self.x_sky - x0, self.y_sky - y0", "def theta(self):\n return self._theta", "def theta(self):\n return self._theta", "def _get_polar_sky_coords(self, x0, y0):\n x_sky, y_sky = self._get_cart_sky_coords(x0, y0)\n return np.hypot(y_sky, x_sky), np.arctan2(x_sky, y_sky)", "def __init__(self):\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n self.total_distance_covered = 0.0", "def pa(x):\t\t\n if(float(x)>=0 and float(x)<=180.0): \n pos_ang = float(x) - 90.0 #position angle\n if(float(x)<0 and float(x)>=-180.0):\n pos_ang = 90.0 - abs(float(x)) #position angle\n if(float(x)>180 and float(x)<=360.0):\n pos_ang = float(x) - 360.0 + 90.0 #position angle\n if(float(x)>=-360 and float(x)<-180.0):\n pos_ang = float(x) + 360.0 - 90.0 #position angle\t\n return pos_ang", "def theta_deg(self):\n return self.theta * 180 / np.pi", "def tanp_to_world(self, x, y):\n crpix1, crpix2 = self._wcs.wcs.crpix\n x = x + crpix1\n y = y + crpix2\n ra, dec = self._wcslin.all_pix2world(x, y, 1)\n return ra, dec", "def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = (rxy/r)*(np.pi/2)\r\n long = np.arctan2(y, x)\r\n\r\n return (lat, long)", "def position(t, x, y):\n return x * exp(-t * y) * sin(2 * pi * t)", "def xy2ang(self, x, y=None, lonlat=False, direct=False):\n pass", "def attacker_position(inputs):\n rho, theta, _, _, _ = inputs\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n return x, y", "def state_to_coords(self, state):\n x, _, theta, _ = state\n cart_coords = (x, self.y)\n pole_coords = ([x, x + 2*self.L*math.sin(theta)],\n [self.y, self.y + 2*self.L*math.cos(theta)])\n return cart_coords, pole_coords", "def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90", "def theta(self):\n return self.kernel.theta", "def update_position_direction(self, l):\n\n x = self.x + self.mu * l\n mu = self.mu\n\n return x, mu", "def new_robot_coordinates(old_x, old_y):\n d = random.normalvariate(5,1)\n theta = random.uniform(math.pi/5 - math.pi/36, math.pi/5 + math.pi/36)\n new_x = old_x + d * math.cos(theta)\n new_y = old_y + d * math.sin(theta)\n return new_x, new_y", "def Pixel2World(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xdist = geoMatrix[1]\r\n ydist = geoMatrix[5]\r\n coorX = (ulX + (x * xdist))\r\n coorY = (ulY + (y * ydist))\r\n return (coorX, coorY)", "def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * 
math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)", "def log_robot_location(self):\n trans, rot = self.get_robot_location()\n if trans != None and rot != None:\n degrees = (rot.yaw * 180./math.pi)\n message = {\n 'x':'{0:.3f}'.format(trans.x),\n 'y':'{0:.3f}'.format(trans.y),\n 'rotation':'{0:.3f}'.format(degrees)}\n\n self.logger.log('ROS_XYR', 'ROS_XYR', message, 'state')", "def to_world(self, x, y, **kwargs):", "def convert_pose_to_xy_and_theta(pose):\n orientation_tuple = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n angles = euler_from_quaternion(orientation_tuple)\n return pose.position.x, pose.position.y, angles[2]", "def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = np.arccos(1-(rxy/r)**2)\r\n long = np.arctan2(y, x)\r\n\r\n try:\r\n long[np.isnan(lat)] = np.nan\r\n except TypeError: # Thrown if long is scalar\r\n if np.isnan(lat): long = np.nan\r\n return (lat, long)", "def _rotate_coordinate(self, x, y, angle):\n\n sin = math.sin(angle)\n cos = math.cos(angle)\n\n x_ = x * cos - y * sin\n y_ = x * sin + y * cos\n\n return (x_, y_)", "def mapr(r):\n return np.rad2deg(np.arctan(r)*2)", "def rotate((x, y), theta):\n\n return math.cos(theta) * x + math.sin(theta) * y, -math.sin(theta) * x + math.cos(theta) * y", "def coordnav(self,dx,dy,dth): \n self.cg_ant = np.array(self.cg)\n self.coord[0] = self.coord[0] - self.cg[0]\n self.coord[1] = self.coord[1] - self.cg[1] \n self.Rz = rotation.matrix([0,0,1],dth)\n self.coord = np.dot(self.Rz,self.coord)\n \n self.coord[0] = self.coord[0] + self.cg[0] + dx\n self.coord[1] = self.coord[1] + self.cg[1] + dy \n \n self.px = self.coord[:,self.px_index]\n self.Bx = self.px-self.cg \n self.basex = self.Bx/math.sqrt(np.dot(self.Bx,self.Bx))", "def homog_ang_axs( theta , k , pos ):\r\n return np.vstack( ( np.hstack( ( rot_matx_ang_axs( theta , k ) , [ [ pos[0] ] , [ pos[1] ] , [ pos[2] ] ] ) ) ,\r\n np.hstack( ( [ 0 , 0 , 0 ] , [ 1 ] ) ) ) )", "def theta(lam, gam, p):\n #lam = lam - 1e-15\n return np.pi - np.arccos(np.divide(-1 + lam*np.cos(2*np.pi*p ), w(lam, gam, p) ) )", "def _rotate_points(x, y, ang):\n theta = np.radians(ang - 90.)\n xNew = x*np.cos(theta) - y*np.sin(theta)\n yNew = x*np.sin(theta) + y*np.cos(theta)\n return xNew, yNew", "def pixel2coord(tf, x, y):\n lat = tf[0] + x*tf[1] + y*tf[2]\n lon = tf[3] + x*tf[4] + y*tf[5]\n\n return lat, lon", "def rotate_points(x, y, ang):\n theta = np.radians(ang)\n xNew = x*np.cos(theta) - y*np.sin(theta)\n yNew = x*np.sin(theta) + y*np.cos(theta)\n\n return xNew, yNew", "def rect(r, theta):\n x = r * math.cos(theta)\n y = r * math.sin(theta)\n return x,y", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def points(self, ntheta, nlayer):\n exec self.x\n exec self.y\n return [x, y]", "def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y", "def 
GPSlatlon2XY_time(lat_u, lon_u, theta):\n\n\trho_u = np.sqrt(np.power(lon_u, 2) + np.power(lat_u, 2))\n\ttheta_new_u = np.arctan2(lat_u, lon_u) - theta\n\n\tUx, Uy = rho_u * np.cos(theta_new_u), rho_u * np.sin(theta_new_u)\n\n\treturn Ux, Uy", "def predict_coords(self):\r\n\r\n if self.direction == 1:\r\n return [self.coords[0] + 1, self.coords[1]]\r\n if self.direction == 2:\r\n return [self.coords[0] - 1, self.coords[1]]\r\n if self.direction == 3:\r\n return [self.coords[0], self.coords[1] + 1]\r\n if self.direction == 4:\r\n return [self.coords[0], self.coords[1] - 1]", "def getA(self):\n return self.theta", "def point_angle(cx, cy, px, py):\n return atan2(py - cy, px - cx)", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def rotate_translate(pixel_pos_x, pixel_pos_y, x_trans, y_trans, phi):\n\n pixel_pos_trans_x = (pixel_pos_x - x_trans) * \\\n np.cos(phi) - (pixel_pos_y - y_trans) * np.sin(phi)\n pixel_pos_trans_y = (pixel_pos_x - x_trans) * \\\n np.sin(phi) + (pixel_pos_y - y_trans) * np.cos(phi)\n return pixel_pos_trans_x, pixel_pos_trans_y", "def GetParametricCoords(self):\n ...", "def coord_polar(mat):\n x = mat[:, 0].copy()\n y = mat[:, 1].copy()\n\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y, x)\n\n return r, theta", "def northing(self):\r\n x, y = self.lonlat2xy(self.longitude, self.latitude)\r\n return y", "def lat_lons(self):", "def world_to_tanp(self, ra, dec):\n x, y = ra, dec\n return x, y", "def xy(self):\n return self.coords.xy", "def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]", "def teleport(self, x, y, reset_rotation=False):\n self.center[0] = x\n self.center[1] = y\n self.rect.center = tuple(self.center) # update pygame sprite placement\n if reset_rotation:\n self.rotate(-self.rotation)", "def theta_phi(Collimator_square, sample_point):\n p1,p2,p3,p4=Collimator_square\n\n points = np.array([sample_point-p1, sample_point-p2, sample_point-p3, sample_point-p4])\n points=points.transpose(1,0,2) #shape: (pointsNum,4,3)\n\n theta = np.arctan2(points[:, :, 0],points[:, :, 1] )\n\n norm_x_y=np.sqrt(points[:, :, 0]**2+points[:, :, 1]**2)\n phi = np.arctan2(norm_x_y, points[:, :, 2])\n\n return theta, phi", "def GPSlatlon2XY(data_sheet, origin, theta):\n\n\tlon = np.array([[data_sheet.cell(row = i, column = 1).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat = np.array([[data_sheet.cell(row = i, column = 2).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_u = np.array([[data_sheet.cell(row = i, column = 5).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat_u = np.array([[data_sheet.cell(row = i, column = 6).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tUz = np.array([[data_sheet.cell(row = i, column = 4).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_in_km = (lon - origin[0])*111*np.cos(lat*np.pi/180)\n\tlat_in_km = (lat - origin[1])*111\n\t\n\trho_u = np.sqrt(np.power(lon_u,2) + np.power(lat_u,2))\n\ttheta_new_u = np.arctan2(lat_u,lon_u) - theta\n\n\trho = np.sqrt(np.power(lon_in_km,2) + np.power(lat_in_km,2))\n\ttheta_new = 
np.arctan2(lat_in_km,lon_in_km) - theta\n\n\tX, Y = rho*np.cos(theta_new), rho*np.sin(theta_new)\n\tUx, Uy = rho_u*np.cos(theta_new_u), rho_u*np.sin(theta_new_u)\n\n\treturn 1e3*X, 1e3*Y, 1e-3*Ux, 1e-3*Uy, 1e-3*Uz", "def compute_theta_phi(self, x, y, phi_0, theta_0=np.pi/2):\n angles=np.zeros(shape=[x.shape[0], y.shape[0],2])\n xx, yy=np.meshgrid(x,y)\n rho=np.sqrt(xx**2+yy**2)\n c=2.0*np.arctan(rho/2.0)\n theta=theta_0-np.arcsin(np.cos(c)*np.cos(theta_0)+yy*np.sin(c)*np.sin(theta_0)/rho)\n phi=phi_0+np.arctan(xx*np.sin(c)/(rho*np.sin(theta_0)*np.cos(c)-yy*np.cos(theta_0)*np.sin(c)))\n angles[:,:,0]=theta\n angles[:,:,1]=phi\n return angles", "def get_coord(self):\n return self.coord", "def getXYpos(relativeNullPoint, p):\n deltaLatitude = p.latitude - relativeNullPoint.latitude\n deltaLongitude = p.longitude - relativeNullPoint.longitude\n latitudeCircumference = 40075160 * math.cos(asRadians(relativeNullPoint.latitude))\n resultX = deltaLongitude * latitudeCircumference / 360\n resultY = deltaLatitude * 40008000 / 360\n return resultX, resultY", "def translateTrame(self,inTrame):\n rawConvertedY=int((inTrame.data1+inTrame.data0),16)\n rawConvertedX=int((inTrame.data3+inTrame.data2),16)\n absX=int(round(rawConvertedX/(16**4-1.0)*self.maxX))\n absY=int(round(rawConvertedY/(16**4-1.0)*self.maxY))\n LOGGER.info(\"Position sensor {} with new coordonate {} -- {}\".format(self.physic_id,absX,absY))\n return {\"coordX\":absX,\"coordY\":absY}", "def rotxaxis(ya, za, angle):\n\n y = ya * math.cos(angle) - za * math.sin(angle) \n z = ya * math.sin(angle) + za * math.cos(angle)\n \n return y, z", "def tick_odom(old_xR,old_yR,old_thetaR,dx,dy,dTheta):\n xR = old_xR+dx\n yR = old_yR+dy\n thetaR = old_thetaR+dTheta\n print (\"Nouvelle coordonnee dans le monde: x = %.3f, y = %.3f, theta = %.3f\" %(xR, yR, thetaR))\n return [xR, yR, thetaR]", "def coords(self) -> Tuple[float, float]:\n return self.lat, self.lng", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def origin(self):\r\n\r\n return self.ox, self.oy, self.oz", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def position(self):\n return self.x, self.y", "def coordsxy(self, convert_to=False):\n if convert_to == 'rad':\n return (self.x*3.14159/180., self.y*3.14159/180.)\n elif convert_to == 'deg':\n return (self.x/3.14159*180., self.y/3.14159*180.)\n else:\n return (self.x, self.y)", "def phi(cylindrical_x: sc.Variable, cylindrical_y: sc.Variable) -> sc.Variable:\n return sc.atan2(y=cylindrical_y, x=cylindrical_x)", "def cartesian_To_Center(self, x, y, z):\n\n if x > 0.0 and -self.L_cap <= y <= 0.0:\n s = self.L_cap + y\n xc = x - self.rb\n yc = z\n else:\n theta = full_arctan2(y, x)\n if theta <= self.ang:\n s = theta * self.rb + self.L_cap\n xc = np.sqrt(x ** 2 + y ** 2) - self.rb\n yc = z\n elif self.ang < theta <= 2 * np.pi: # i'm being lazy here and not limiting the real end\n x0, y0 = np.cos(self.ang) * self.rb, np.sin(self.ang) * self.rb\n thetaEndPerp = np.pi - np.arctan(-1 / np.tan(self.ang))\n x, y = x - x0, y - y0\n deltaS, xc = np.cos(thetaEndPerp) * x + np.sin(-thetaEndPerp) * y, np.sin(thetaEndPerp) * x + np.cos(\n thetaEndPerp) * y\n yc = z\n xc = -xc\n s = (self.ang * self.rb + self.L_cap) + deltaS\n else:\n raise ValueError\n return s, xc, yc", "def transform_scan(self, particle, distance, theta):\n return (particle.x + distance * 
math.cos(math.radians(particle.theta + theta)),\n particle.y + distance * math.sin(math.radians(particle.theta + theta)))" ]
[ "0.691251", "0.683143", "0.6619195", "0.6595212", "0.63898385", "0.628581", "0.6238489", "0.61594254", "0.61381114", "0.61379653", "0.61337817", "0.6127042", "0.6115421", "0.6079736", "0.60695887", "0.6060793", "0.6034483", "0.60037607", "0.60031086", "0.59396416", "0.59340966", "0.59020454", "0.5887503", "0.58826196", "0.58797944", "0.5873579", "0.58651507", "0.58620924", "0.5856651", "0.5853568", "0.5849215", "0.5830519", "0.5816828", "0.5792183", "0.57867724", "0.57867724", "0.5761576", "0.57567585", "0.57360375", "0.5727307", "0.5708575", "0.5705571", "0.56994927", "0.56962115", "0.56868905", "0.5686017", "0.5683486", "0.56777525", "0.56714684", "0.5669854", "0.5669079", "0.5668052", "0.5658303", "0.56525147", "0.5648867", "0.56476957", "0.5644555", "0.5643301", "0.56432855", "0.5626319", "0.561571", "0.5586837", "0.5575657", "0.5575096", "0.55582416", "0.5555281", "0.55547166", "0.55497324", "0.5549298", "0.5545948", "0.5545661", "0.5537768", "0.55332243", "0.55227536", "0.5522358", "0.55182105", "0.5516888", "0.55126685", "0.5512271", "0.5510842", "0.5509168", "0.5507259", "0.55053765", "0.55031776", "0.54990715", "0.5498769", "0.5496335", "0.54856163", "0.5484048", "0.5481562", "0.5478411", "0.54768336", "0.5471334", "0.5469245", "0.5464347", "0.5463896", "0.5463705", "0.54628325", "0.54623467", "0.54591584", "0.545635" ]
0.0
-1
For C,D and W unlabelled.
def get_separated_sequence(): return [("ABCDEFG", ("ABEFG", "CD")), ("ABCDEFGCDCDDC", ("ABEFG", "CDCDCDDC")), ("", ("", "")), ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label(d, X, ind_class0, ind_class1, N, V, binary):\n if binary == True:\n K = 1\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = 0.0\n C[ind_class1, :] = 1.0\n else:\n K = 2\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = torch.tensor([1.0, 0.0])\n C[ind_class1, :] = torch.tensor([0.0, 1.0])\n\n X_train = X[:N, :]\n X_val = X[N:, :]\n C_train = C[:N, :]\n C_val = C[N:, :]\n\n return [X_train, C_train, X_val, C_val, d, K]", "def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]", "def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]", "def get_target_labels(Z, w_f):\n return np.sign(np.dot(Z, w_f))", "def _get_labels(self, ind):\n pass", "def categorical2mask(X, labels):\n X_shape = X.shape[0:2]\n if type(X_shape) == tuple:\n X_shape = list(X_shape)\n Y = np.zeros(X_shape + [3], dtype=\"uint8\")\n for i, key in enumerate(labels):\n print(X.shape,Y.shape)\n Y[...,0] = np.where(X==i, labels[key][0], Y[...,0])\n Y[...,1] = np.where(X==i, labels[key][1], Y[...,1])\n Y[...,2] = np.where(X==i, labels[key][2], Y[...,2])\n return Y", "def labelComponents26(cube):\n x,y,z = np.where(cube);\n label = np.zeros(cube.shape, dtype = 'uint8');\n ncomp = 0;\n for xp,yp,zp in zip(x,y,z):\n if label[xp,yp,zp] == 0:\n ncomp += 1;\n label = labelNeighbours26(cube, label, xp,yp,zp, ncomp);\n return ncomp, label", "def make_coco_labels(real_c):\n y = np.eye(real_c.size(1))\n\n fixed_c_list = []\n\n # single object addition and removal\n for i in range(2*real_c.size(1)):\n fixed_c = real_c.clone()\n for c in fixed_c:\n if i%2:\n c[i//2] = 0.\n else:\n c[i//2] = 1.\n fixed_c_list.append(Variable(fixed_c, volatile=True).cuda())\n\n # multi-attribute transfer (H+G, H+A, G+A, H+G+A)\n #if self.dataset == 'CelebA':\n # for i in range(4):\n # fixed_c = real_c.clone()\n # for c in fixed_c:\n # if i in [0, 1, 3]: # Hair color to brown\n # c[:3] = y[2]\n # if i in [0, 2, 3]: # Gender\n # c[3] = 0 if c[3] == 1 else 1\n # if i in [1, 2, 3]: # Aged\n # c[4] = 0 if c[4] == 1 else 1\n # fixed_c_list.append(self.to_var(fixed_c, volatile=True))\n return fixed_c_list", "def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)", "def test_basic_labeling(self):\n # data with only 1 feature\n data = array([[-1], [1], [0.5], [0.25], [-0.33], [0]])\n # give 1 if feature value >= 0; otherwise 0\n labels = array([0, 1, 1, 1, 0, 1])\n cdata = LabeledCData(data, labels)\n\n # ensure that labelling is correct\n assert array_equal(cdata.labels, labels)", "def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n 
print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]", "def _setup_labels(self):\n self._labels = self.get_labels()\n self._labels = self.get_predefined_labels() + list(self._labels)\n self._labels = sorted(self._labels)\n\n self._labels_2_index = {label.lower(): i for i, label in enumerate([self._unknown_label] + self._labels)}\n self._index_2_labels = {i: label for label, i in self._labels_2_index.items()}\n\n self._labels_dim = len(self._labels_2_index)\n return None", "def vectorize_labels(self):\n label_counter = Counter(self.raw_labels)\n if 'oos' in label_counter:\n label_counter.pop('oos')\n unique_labels, label_cnts = zip(*sorted(label_counter.items()))\n unique_labels, label_cnts = list(unique_labels), list(label_cnts)\n label_vocab = {label: index for index, label in enumerate(unique_labels)}\n vectorized_labels = [label_vocab.get(label, -1) for label in self.raw_labels]\n return label_vocab, vectorized_labels, label_cnts", "def encode_weak(self, labels):\n # useful for tensor empty labels\n if type(labels) is str:\n if labels == \"empty\":\n y = np.zeros(len(self.labels)) - 1\n return y\n else:\n labels = labels.split(\",\")\n if type(labels) is pd.DataFrame:\n if labels.empty:\n labels = []\n elif \"event_label\" in labels.columns:\n labels = labels[\"event_label\"]\n y = np.zeros(len(self.labels))\n for label in labels:\n if not pd.isna(label):\n i = self.labels.index(label)\n y[i] = 1\n return y", "def labels_b(self):\n return self._labels_b", "def original_clean():\n # load the data\n dataset = np.genfromtxt(\"wdbc.data\", dtype=np.float, delimiter=',', usecols=(2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27,\n 28, 29, 30, 31), encoding=None)\n labels = np.genfromtxt(\"wdbc.data\", dtype=None, delimiter=',', usecols=(1), encoding=None)\n temp_labels = np.zeros(len(labels))\n for i in range(len(labels)):\n if labels[i] == 'B':\n temp_labels[i] = 0\n else:\n temp_labels[i] = 1\n # normalize\n temp_data = normalize(dataset)\n return temp_data, temp_labels", "def get_weak_target(labels, lb_to_idx):\n classes_num = len(lb_to_idx)\n target = np.zeros(classes_num, dtype=np.bool)\n \n for label in labels: \n target[lb_to_idx[label]] = True\n \n return target", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def __label__(self, sdfg, state):\n if self.data is None:\n return self._label(None)\n return self._label(sdfg.arrays[self.data].shape)", "def _binary_3d_label_to_sparse_value(labels):\n indices = []\n values = []\n for d0, labels_d0 in enumerate(labels):\n for d1, labels_d1 in enumerate(labels_d0):\n d2 = 0\n for class_id, label in enumerate(labels_d1):\n if label == 1:\n values.append(class_id)\n indices.append([d0, d1, d2])\n d2 += 1\n else:\n assert label == 0\n shape = [len(labels), len(labels[0]), len(labels[0][0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))", "def labelUnknown(self):\n self.satisfiability = Satisfiability.UNKNOWN\n self.model = None\n self.unsatCore = []", "def lblencoder(self):\n for i in self.data.columns:\n if self.data[i].dtype=='object':\n lbl = preprocessing.LabelEncoder()\n 
lbl.fit(list(self.data[i].values))\n self.data[i] = lbl.transform(list(self.data[i].values))\n \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]", "def compute_gradient_saliency_maps(samples: torch.tensor,\n true_labels: torch.tensor,\n model: nn.Module):\n \"\"\"INSERT YOUR CODE HERE, overrun return.\"\"\"\n return torch.rand(6, 256, 256)", "def Ncen(self, m):\n pass", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def forward(self, input):\n label=np.dot(input,self.w)+self.b\n return label", "def combine_labels(labels):\n whole_tumor = labels[:, :3, :, :, :].sum(1) # could have 2 or 3\n tumor_core = labels[:, 1:3, :, :, :].sum(1)\n enhanced_tumor = labels[:, 2:3, :, :, :].sum(1)\n whole_tumor[whole_tumor != 0] = 1\n tumor_core[tumor_core != 0] = 1\n enhanced_tumor[enhanced_tumor != 0] = 1\n return whole_tumor, tumor_core, enhanced_tumor # (bs, ?, ?, ?)", "def all_conv_ops(self):\n pass", "def test_basic_labeled_cdata(self):\n data = array([[1, 0, 0], [0, 1, 0]])\n labels = array([1, 0])\n lcdata = LabeledCData(data, labels)\n self.assertEqual(lcdata.num_features, 3)\n self.assertEqual(lcdata.num_samples, 2)", "def __le__(self, *args):\n return _ida_hexrays.cswitch_t___le__(self, *args)", "def test_func_labeling(self):\n # Define a labeling function\n def label(x):\n return 1 if x >= 0 else 0\n\n # Create (arbitrary) data\n data = array([[500], [-17], [12], [0], [-.002], [.001]])\n\n # Manually create the labels\n labels = array([label(x) for x in data])\n\n # Create a labelled cdata object by passing in the labeling function\n cdata = LabeledCData(data, label)\n\n # Make sure the data is labelled correctly\n self.assertTrue(array_equal(labels, cdata.labels))", "def _instantiate_vars(labels: np.ndarray):\n n = len(labels)\n if len(set(labels)) == n:\n index_seed = np.arange(n)\n index_remain = np.arange(n)\n else:\n index_seed = np.argwhere(labels >= 0).ravel()\n index_remain = np.argwhere(labels < 0).ravel()\n labels = labels[index_seed]\n return index_seed.astype(np.int32), index_remain.astype(np.int32), labels.astype(np.int32)", "def canonical_coloring_label_1(G,c):\n \n H = G.copy()\n #H.allow_loops( true )\n\n for i in c:\n print( i )\n H.add_edges([(i,j) for j in c[i]])\n\n P = [G.vertices(), c.keys()]\n return H.canonical_label(partition=P)", "def circuitSat(C):", "def decode_weak(self, labels):\n result_labels = []\n for i, value in enumerate(labels):\n if value == 1:\n result_labels.append(self.labels[i])\n return result_labels", "def transform_multilabel_as_multihot(label_list,label_size):\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result", "def transform_multilabel_as_multihot(label_list,label_size):\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result", "def c(self):\n pass", "def c(self):\n pass", "def make_celeb_labels(real_c, c_dim=5, dataset='CelebA'):\n y = [torch.FloatTensor([1, 0, 0]), # black hair\n torch.FloatTensor([0, 1, 0]), # blond hair\n torch.FloatTensor([0, 0, 1])] # brown hair\n\n fixed_c_list = []\n\n # single attribute transfer\n for i in range(c_dim):\n fixed_c = real_c.clone()\n for c in fixed_c:\n if i < 3:\n c[:3] = y[i]\n else:\n c[i] = 0 if c[i] == 1 else 1 # opposite value\n fixed_c_list.append(Variable(fixed_c, volatile=True).cuda())\n\n # multi-attribute 
transfer (H+G, H+A, G+A, H+G+A)\n if dataset == 'CelebA':\n for i in range(4):\n fixed_c = real_c.clone()\n for c in fixed_c:\n if i in [0, 1, 3]: # Hair color to brown\n c[:3] = y[2]\n if i in [0, 2, 3]: # Gender\n c[3] = 0 if c[3] == 1 else 1\n if i in [1, 2, 3]: # Aged\n c[4] = 0 if c[4] == 1 else 1\n fixed_c_list.append(Variable(fixed_c, volatile=True).cuda())\n return fixed_c_list", "def cudnn_lstm_state(lstm_cell_state):\n h = tf.stack([s.h for s in lstm_cell_state])\n c = tf.stack([s.c for s in lstm_cell_state])\n return (h, c)", "def CL(self):", "def make_fixed_labels(self):\n fixed_labels = []\n for dim in range(self.opt.c_dim):\n t = [0] * self.opt.c_dim\n t[dim] = 1\n t = torch.FloatTensor(t).expand([self.opt.batch_size, self.opt.c_dim])\n fixed_labels.append(t)\n return fixed_labels", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def get_labels_comp(F, is_p, is_m):\n labels = [\"C\"+str(idx+1)+\"|P\" if is_p[idx]\n else \"C\"+str(idx+1)+\"|M\" if is_m[idx]\n else \"C\"+str(idx+1) for idx in range(F.shape[0])]\n return labels", "def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}", "def _erosion(image, label, struct_elem):\n if struct_elem is not None:\n return binary_erosion(image == label, struct_elem).astype(np.uint16)\n return (image == label).astype(np.uint16)", "def data_column_conversion(data:pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n data = data.assign(W = (data.label == 'W') + 0,D = (data.label == 'D') + 0,L = (data.label == 'L') + 0)\n data = data.drop(\"label\",axis=1)\n return data", "def change_class_labels(classes):\n u,indices=np.unique(classes,return_inverse=True)\n return u,indices", "def C(self, u, v):\n pass", "def normalize_labels(labels):\n new_labels = np.array([-1] * len(labels))\n labels = np.array(labels)\n label_dict = dict()\n for i, label in enumerate(set(labels)):\n new_labels[np.where(labels == label)] = i\n label_dict[i] = label\n return label_dict, new_labels", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def convert_tcia_labels(mask, keep_all_label=False):\n \n mask[np.isin(mask, [14])] = 0 # Remove duodenum\n label = [1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1] # no right kidney\n\n if keep_all_label:\n label += [0,0]\n\n return mask, label", "def labeling_func(df_clus):\n\n df_all_labeled = df_all_columns.copy()\n df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()\n df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)\n for i in range(0, clus_params['n_components']):\n df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()\n\n return df_all_labeled", "def __invert__(self):\n return self.wc", "def __invert__(self):\n return self.wc", "def __init__(self, w0=6):\n self.w0 = w0\n if w0 == 6:\n # value of C_d from TC98\n self.C_d = 0.776", "def __init__(self, colinds):\n self._colinds = colinds\n self._les = {\n i: sklearn.preprocessing.LabelEncoder() for i in self._colinds\n }", "def label_to_one_hot_closure(labels):\n\n def label_to_one_hot_fun(x):\n return 
label_to_one_hot(x, labels=labels)\n\n return label_to_one_hot_fun", "def labels_b_v_all(self):\n return self._labels_b_v_all", "def c2dw(c):\r\n return (ord(c[3])<<24)+(ord(c[2])<<16)+(ord(c[1])<<8)+(ord(c[0]))", "def lindblad(C):\n if is_scalar(C):\n return ZeroSuperOperator\n return SPre(C) * SPost(C.adjoint()) - (sympyOne / 2) * anti_commutator(\n C.adjoint() * C\n )", "def _add_warp_ctc_loss(pred, seq_len, num_label, label):\n label = mx.sym.Reshape(data=label, shape=(-1,))\n label = mx.sym.Cast(data=label, dtype='int32')\n return mx.sym.WarpCTC(data=pred, label=label, label_length=num_label, input_length=seq_len)", "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000):\n super(RelabelPooledCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0", "def label(f, Bc=None):\n from numpy import allclose, ravel, nonzero, array\n if Bc is None: Bc = secross()\n assert isbinary(f),'Can only label binary image'\n zero = subm(f,f) # zero image\n faux=f\n r = array(zero)\n label = 1\n y = gray( f,'uint16',0) # zero image (output)\n while not allclose(faux,0):\n x=nonzero(ravel(faux))[0] # get first unlabeled pixel\n fmark = array(zero)\n fmark.flat[x] = 1 # get the first unlabeled pixel\n r = infrec( fmark, faux, Bc) # detects all pixels connected to it\n faux = subm( faux, r) # remove them from faux\n r = gray( r,'uint16',label) # label them with the value label\n y = union( y, r) # merge them with the labeled image\n label = label + 1\n return y", "def labels_b_v(self):\n return self._labels_b_v", "def lca(self, v, w):", "def normalisesym(self, label):\n return label", "def test_custom_wire_labels(self, tol):\n shapes = expected_shapes(1, 3)\n weights = [np.random.random(shape) for shape in shapes]\n\n dev = DummyDevice(wires=3)\n dev2 = DummyDevice(wires=[\"z\", \"a\", \"k\"])\n\n @qml.qnode(dev)\n def circuit():\n qml.CVNeuralNetLayers(*weights, wires=range(3))\n return qml.expval(qml.Identity(0))\n\n @qml.qnode(dev2)\n def circuit2():\n qml.CVNeuralNetLayers(*weights, wires=[\"z\", \"a\", \"k\"])\n return qml.expval(qml.Identity(\"z\"))\n\n circuit()\n circuit2()\n\n assert np.allclose(dev._state[0], dev2._state[0], atol=tol, rtol=0)\n assert np.allclose(dev._state[1], dev2._state[1], atol=tol, rtol=0)", "def __le__(self, *args):\n return _ida_hexrays.cif_t___le__(self, *args)", "def unkid(self):\r\n return self.word2idx.get(UNK, 0)", "def appearance_space(state_data, C):\n return np.dot(C, state_data)", "def _all_labels_to_bitmasks(all_labels):\n l_dict = {}\n for i, label in enumerate(all_labels):\n l_dict[label.name] = 1<<i\n return l_dict", "def nnObjFunction(params, *args):\r\n\r\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\r\n \r\n \"\"\"translates label vector of digits 0-9 into 1-K form\"\"\"\r\n count=0\r\n label10=np.zeros((training_label.shape[0],10))\r\n for x in training_label:\r\n if(x==0):\r\n label10[count]=[1,0,0,0,0,0,0,0,0,0]\r\n elif(x==1):\r\n 
label10[count]=[0,1,0,0,0,0,0,0,0,0]\r\n elif(x==2):\r\n label10[count]=[0,0,1,0,0,0,0,0,0,0]\r\n elif(x==3):\r\n label10[count]=[0,0,0,1,0,0,0,0,0,0]\r\n elif(x==4):\r\n label10[count]=[0,0,0,0,1,0,0,0,0,0]\r\n elif(x==5):\r\n label10[count]=[0,0,0,0,0,1,0,0,0,0]\r\n elif(x==6):\r\n label10[count]=[0,0,0,0,0,0,1,0,0,0]\r\n elif(x==7):\r\n label10[count]=[0,0,0,0,0,0,0,1,0,0]\r\n elif(x==8):\r\n label10[count]=[0,0,0,0,0,0,0,0,1,0]\r\n else:\r\n label10[count]=[0,0,0,0,0,0,0,0,0,1]\r\n count+=1\r\n \r\n w1 = params[:n_hidden * (n_input+1)].reshape((n_hidden, (n_input+1)))\r\n w2 = params[(n_hidden * (n_input+1)):].reshape((n_class, (n_hidden+1)))\r\n obj_val = 0 \r\n \r\n print('in nnobj')\r\n\r\n #Get bias dimension\r\n bias_dimension = training_data.shape[0]\r\n\r\n #Fill it all with ones\r\n bias = np.ones((bias_dimension,1))\r\n\r\n #Add bias to weights \r\n training_data_with_bias = np.concatenate((training_data,bias),1)\r\n\r\n #Feed Foward Start By Multiplying Training data by weights of w1\r\n z2 = np.dot(training_data_with_bias,np.transpose(w1))\r\n\r\n #Apply Sigmoid function\r\n a2= sigmoid(z2)\r\n #Apply Another Bias Dimension to the new matrix\r\n\r\n #bias_dimension = a2.shape[0]\r\n bias = np.ones((bias_dimension,1))\r\n a2_bias= np.concatenate((a2,bias),1)\r\n\r\n #Multiply new matrix by the weights of w2\r\n z3 = np.dot(a2_bias,np.transpose(w2))\r\n \r\n #Apply Sigmoid Function to the new data\r\n y= sigmoid(z3)\r\n\r\n #yl-ol (element of equation (9))\r\n dif= label10-y\r\n \r\n #1-ol (element of equation (9))\r\n dif2= 1-y\r\n\r\n # Finish Forward Propagation\r\n \r\n #equation (15)\r\n obj_val = ((lambdaval/(2*y.shape[0]))*(np.sum(np.square(w1))+np.sum(np.square(w2))))+(np.sum(.5*np.sum(np.square(dif),axis=1))/y.shape[0])\r\n \r\n #column vector, equation (9)\r\n elem1=np.transpose(np.array(-1*dif*dif2*y,ndmin=2))\r\n\r\n #w2 matrix with bias cut out\r\n w2trim= np.delete(w2,w2.shape[1]-1,1)\r\n\r\n #equation (12) without multiplying the xi term yet\r\n elem2=(-1*(1-a2)*(a2))*(np.dot((dif*dif2*y),w2trim))\r\n\r\n#summing up the inner part of equation (17)\r\n total=np.zeros_like(w1)\r\n for x in range(0,y.shape[0]):\r\n total+=np.dot(np.transpose(np.array(elem2[x],ndmin=2)),np.array(training_data_with_bias[x],ndmin=2))\r\n\r\n #equation (17)\r\n grad_w1 = (total+(lambdaval*w1))/y.shape[0]\r\n\r\n #equation (16)\r\n grad_w2 = (np.dot(elem1,a2_bias)+(lambdaval*w2))/y.shape[0]\r\n\r\n \r\n \r\n \r\n #Make sure you reshape the gradient matrices to a 1D array. 
for instance if your gradient matrices are grad_w1 and grad_w2\r\n #you would use code similar to the one below to create a flat array\r\n obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)\r\n print (obj_val)\r\n return (obj_val,obj_grad)", "def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000, n_comn=2):\n super(RelabelCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.n_comn = n_comn\n self.dis_fn = nn.CosineSimilarity(dim=1)\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0", "def __call__(self, X, W):\n\t\treturn", "def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]", "def label_data(data):\n if data == 'cat': return [1, 0]\n elif data == 'dog': return [0, 1]", "def _unused_label(self, label):\n original = label\n existing = self.column_labels\n i = 2\n while label in existing:\n label = '{}_{}'.format(original, i)\n i += 1\n return label", "def D_or_L(self) -> str:\n CO = np.array([self['C'].xyz.x, self['C'].xyz.y, self['C'].xyz.z])\n CA = np.array([self['CA'].xyz.x, self['CA'].xyz.y, self['CA'].xyz.z])\n CB = np.array([self['CB'].xyz.x, self['CB'].xyz.y, self['CB'].xyz.z])\n N = np.array([self['N'].xyz.x, self['N'].xyz.y, self['N'].xyz.z])\n\n v1 = N - CO\n v2 = CA - CO\n cp = np.cross(v1, v2)\n CB_infront = cp.dot(CB-CA) > 0\n print(CB_infront)\n return 'D' if CB_infront else 'L'", "def edge_model(label1, label2):\n if label1 == label2:\n return ALPHA\n else:\n return 1-ALPHA", "def unknown(self, w):\n # WORK HERE!!", "def remap_context_labels(self):\n c_contexts = list(self.context[self.iter])\n unique_contexts = uniqify(c_contexts)\n remap_dict = dict(zip(unique_contexts,\n range(1, len(unique_contexts) + 1)))\n\n remapped = copy.deepcopy(self.context[self.iter])\n for old, new in remap_dict.iteritems():\n self.context[self.iter][remapped==old] = new", "def __encode_one_hot_util(self):\n for col in self.cat_cols:\n if (\n col in self.train_df\n and col + str(\"Encoded\") not in self.ord_cols\n ):\n if self.test_df is not None:\n self.test_df = pd.concat(\n [\n self.test_df,\n pd.get_dummies(\n self.test_df[col], prefix=col\n ).astype(\"category\"),\n ],\n axis=1,\n )\n self.train_df = pd.concat(\n [\n self.train_df,\n pd.get_dummies(self.train_df[col], prefix=col).astype(\n \"category\"\n ),\n ],\n axis=1,\n )", "def unkW(self):\n if self._unkW is None:\n self._unkW = self.W.mean(0)\n return self._unkW", "def _binary_3d_label_to_sparse(labels):\n return sparse_tensor.SparseTensor.from_value(\n _binary_3d_label_to_sparse_value(labels))", "def dense_to_one_hot(labels_dense, num_classes):\r\n print ('in onehot', labels_dense, num_classes)\r\n num_labels = labels_dense.shape[0]\r\n index_offset = numpy.arange(num_labels) * num_classes\r\n labels_one_hot = numpy.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot", "def __call__(self, in_state, labels, label_mask, predict=False):\n t_ = tf.matmul(in_state, self._W_sftm) + self._B_sftm # t_: [batch_size * class_num]\n #t_ = tf.expand_dims(label_mask, 1) * t_\n t_sftm_ = self._activation(t_)\n if not predict:\n #labels_1hot = tf.one_hot(labels, self._class_num, 1.0, 0.0)\n loss = self._loss_f(t_, labels)\n loss = loss * 
label_mask\n return tf.argmax(t_sftm_, 1), t_sftm_, loss\n else:\n return tf.argmax(t_sftm_, 1), t_sftm_", "def get_labels(self):\r\n return None", "def ctc_loss(data=None, label=None, data_lengths=None, label_lengths=None, use_data_lengths=_Null, use_label_lengths=_Null, blank_label=_Null, out=None, name=None, **kwargs):\n return (0,)", "def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;", "def compute_labels(pos, neg):\n labels = np.zeros(len(pos) + len(neg), dtype=np.int8)\n labels[:len(pos)] = 1\n labels[len(pos):] = 0\n return labels", "def get_ind(labels, k):\n return (np.array(labels) == k).astype('float64')", "def empty_labels(self):\n return SemanticSegmentationLabels()", "def LDLT(self):\n\t\tpass", "def onehot_encoding(labels, dim, device):\n out = th.zeros(list(labels.size()) + [dim]).to(device)\n out.scatter_(len(out.size()) - 1, labels.unsqueeze(-1), 1.0)\n return out", "def __init__(self, E, U, height, width, filter_hs, conv_non_linear,\n hidden_units, batch_size, non_static, dropout_rates,subspace_size=None,\n activations=[Iden]):\n rng = np.random.RandomState(3435)\n feature_maps = hidden_units[0]\n self.batch_size = batch_size\n\n # define model architecture\n self.index = T.lscalar()\n self.x = T.matrix('x') \n self.y = T.ivector('y') \n self.Words = theano.shared(value=E, name=\"Words\") \n self.Users = None \n self.u = None\n self.subspace_size = subspace_size\n zero_vec_tensor = T.vector()\n self.zero_vec = np.zeros(width)\n # reset Words to 0?\n self.set_zero = theano.function([zero_vec_tensor],\n updates=[(self.Words, T.set_subtensor(self.Words[0,:],zero_vec_tensor))],\n allow_input_downcast=True)\n # inputs to the ConvNet go to all convolutional filters:\n layer0_input = self.Words[T.cast(self.x.flatten(), dtype=\"int32\")].reshape(\n (self.x.shape[0], 1, self.x.shape[1], self.Words.shape[1]))\n self.conv_layers = [] \n \n # outputs of convolutional filters\n layer1_inputs = []\n image_shape = (batch_size, 1, height, width)\n filter_w = width \n for filter_h in filter_hs: \n filter_shape = (feature_maps, 1, filter_h, filter_w)\n pool_size = (height-filter_h+1, width-filter_w+1)\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,\n image_shape=image_shape,\n filter_shape=filter_shape,\n poolsize=pool_size,\n non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n self.conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n # inputs to the MLP\n layer1_input = T.concatenate(layer1_inputs, 1)\n if U is not None:\n print \"Will use user embeddings\"\n self.u = T.ivector('u')\n self.Users = theano.shared(value=U, name=\"Users\")\n them_users = self.Users[self.u]\n if self.subspace_size:\n print \"and subspace\"\n # set_trace()\n self.subspace = HiddenLayer(rng, them_users, U.shape[1], subspace_size, Sigmoid)\n self.peep = theano.function([self.x, self.u],[self.subspace.output,layer1_input],allow_input_downcast=True)\n\n layer1_input = T.concatenate((layer1_input,T.nnet.sigmoid(self.subspace.output)),1)\n layer_sizes = [feature_maps*len(filter_hs)+subspace_size] \n # layer1_input = T.concatenate((layer1_input,them_users),1)\n # layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n 
layer1_input = T.concatenate((layer1_input,them_users),1)\n layer_sizes = [feature_maps*len(filter_hs)+U.shape[1]]\n\n else:\n print \"NO user embeddings\"\n layer_sizes = [feature_maps*len(filter_hs)]\n layer_sizes += hidden_units[1:]\n \n super(ConvNet, self).__init__(rng, input=layer1_input,\n layer_sizes=layer_sizes,\n activations=activations,\n dropout_rates=dropout_rates)\n\n # add parameters from convolutional layers\n for conv_layer in self.conv_layers:\n self.params += conv_layer.params\n if non_static:\n # if word vectors are allowed to change, add them as model parameters\n self.params += [self.Words]\n if U is not None:\n # if self.subspace_size is None:\n self.params += [self.Users]", "def _binary_2d_label_to_sparse_value(labels):\n indices = []\n values = []\n batch = 0\n for row in labels:\n label = 0\n xi = 0\n for x in row:\n if x == 1:\n indices.append([batch, xi])\n values.append(label)\n xi += 1\n else:\n assert x == 0\n label += 1\n batch += 1\n shape = [len(labels), len(labels[0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))", "def c_not_align_small_fp16(self, tik_instance):\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n hw_d = h_d * w_d\n hwnoni = hw_d * self.n_o * self.n_i\n dhw_d = d_d * h_d * w_d\n\n ub_ori = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n\n all_ele = d_d * self.c_1 * hwnoni * self.c_0\n burst_len = _ceil_div(all_ele, self.cp_align_len)\n tik_instance.data_move(ub_ori,\n self.src_gm,\n 0, 1, burst_len, 0, 0)\n\n with tik_instance.for_range(0, d_d) as num_d:\n with tik_instance.for_range(0, hw_d) as num_hw:\n with tik_instance.for_range(0, n_d) as num_n:\n ori_begin = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_hw * self.n_o * self.n_i * self.c_0\\\n + num_n * self.c_0\n trans_begin = num_d * hw_d * n_d \\\n * c_d * self.cp_align_len \\\n + num_hw * n_d * c_d * self.cp_align_len \\\n + num_n * c_d * self.cp_align_len\n src_list = [ub_ori[ori_begin + 16 * i]\n for i in range(16)]\n dst_list = [ub_trans[trans_begin + 16 * i]\n for i in range(16)]\n repeat = self.c_1\n\n if repeat == 1:\n tik_instance.vnchwconv(False, False, dst_list,\n src_list, repeat, 0, 0)\n else:\n src_rep_stride = hwnoni * self.c_0 // self.cp_align_len\n dst_rep_stride = 16\n tik_instance.vnchwconv(False, False, dst_list,\n src_list,\n repeat,\n dst_rep_stride,\n src_rep_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_offset = num_dhw * n_d * c_d * self.cp_align_len\n dst_offset = num_dhw * c_d * self.cp_align_len\n n_burst = n_d\n burst_len = c_d\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d\n tik_instance.data_move(ub_ori[dst_offset],\n ub_trans[src_offset],\n 0, n_burst, burst_len,\n src_stride, dst_stride)\n\n ori_begin = 0\n trans_begin = 0\n src_list = [ub_ori[ori_begin + 16 * i]\n for i in range(16)]\n dst_list = [ub_trans[trans_begin + 16 * i]\n for i in range(16)]\n out_ele = n_d * d_d * h_d * w_d * c_d\n ele_zu = _ceil_div(out_ele, 16)\n repeat = ele_zu\n\n if repeat == 1:\n tik_instance.vnchwconv(False, False, dst_list,\n src_list, repeat, 0, 0)\n else:\n src_rep_stride = 16\n dst_rep_stride = 1\n tik_instance.vnchwconv(False, False, dst_list,\n src_list,\n repeat,\n dst_rep_stride,\n src_rep_stride)\n\n burst_len = _ceil_div(out_ele, self.cp_align_len)\n tik_instance.data_move(self.dst_gm,\n ub_trans,\n 
0, 1, burst_len, 0, 0)\n\n return tik_instance", "def propdown(self, hid):\n batch_size = hid.data.shape[0]\n if self.real == 0:\n W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)\n pre_sigmoid_activation = F.convolution_2d(hid, W_flipped, self.conv.a, pad=self.ksize-1)\n # F.matmul(hid, self.l.W) + F.broadcast_to(self.l.a, (batch_size, self.n_visible))\n v_mean = F.sigmoid(pre_sigmoid_activation)\n #print('W info ', self.conv.W.data.shape, 'W_flipped info ', W_flipped.data.shape)\n #print('W info ', self.conv.W.data[3, 0, 2, 3], 'W_flipped info ', W_flipped.data[0, 3, 8, 7])\n #print('W info ', self.conv.W.data[3, 0, 8, 7], 'W_flipped info ', W_flipped.data[0, 3, 2, 3])\n #print('W info ', self.conv.W.data[19, 0, 4, 0], 'W_flipped info ', W_flipped.data[0, 19, 6, 10])\n #print('pre_sigmoidactivation', F.sum(pre_sigmoid_activation).data)\n #print('v_mean', v_mean.data.shape)\n #print('v_mean sum', F.sum(v_mean).data)\n #print('hid', hid.data.shape)\n\n else:\n # TODO: check\n W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)\n v_mean = F.convolution_2d(hid, W_flipped, self.conv.a, pad=self.ksize-1)\n return v_mean" ]
[ "0.5536485", "0.54773134", "0.52980894", "0.52534986", "0.52387166", "0.5181855", "0.5111285", "0.508858", "0.5086352", "0.5073076", "0.5041172", "0.5002552", "0.49880075", "0.49566722", "0.49318683", "0.49224657", "0.49038255", "0.49009168", "0.48969057", "0.4893255", "0.48901746", "0.48869246", "0.487406", "0.4872469", "0.48682332", "0.48319244", "0.48298147", "0.48277596", "0.4816696", "0.48155004", "0.48154113", "0.481528", "0.48125672", "0.48103327", "0.4806803", "0.47967842", "0.47967842", "0.47965908", "0.47965908", "0.47913316", "0.47900057", "0.47834364", "0.4775688", "0.4775047", "0.4767798", "0.4758327", "0.4754654", "0.4753092", "0.4735042", "0.47265318", "0.472353", "0.47202098", "0.47145844", "0.47096333", "0.47063744", "0.47063744", "0.47059342", "0.47015718", "0.4698865", "0.4691657", "0.4689617", "0.4662362", "0.46558228", "0.46534094", "0.4653377", "0.4651158", "0.4650972", "0.46440458", "0.46422464", "0.46414188", "0.46362028", "0.4634749", "0.46291038", "0.46233863", "0.4621785", "0.46210045", "0.46180207", "0.46153975", "0.46136087", "0.46125704", "0.460293", "0.4599992", "0.45965567", "0.4595112", "0.45891783", "0.45791057", "0.4576326", "0.45749837", "0.45702058", "0.4569287", "0.45615262", "0.45585623", "0.4558146", "0.4556483", "0.45481327", "0.45458877", "0.45456374", "0.45447215", "0.4543219", "0.45409375", "0.45373154" ]
0.0
-1
Build the actual URL to use.
def _full_url(url, _params={}): # Support for unicode domain names and paths. scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) if not scheme: raise ValueError("Invalid URL %r: No schema supplied" % url) netloc = netloc.encode('idna') if isinstance(path, unicode): path = path.encode('utf-8') path = requote_path(path) url = str(urlparse.urlunparse([scheme, netloc, path, params, query, fragment])) if _params: if urlparse.urlparse(url).query: return '%s&%s' % (url, _params) else: return '%s?%s' % (url, _params) else: return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_url(self):\n url = BASE_URL.format(self._host, self._port)\n _LOGGER.debug(\"TOON fetch URL: %s\", url)\n return url", "def _make_url(self):\n ...", "def create_url(self):\n self.base_url = self.base + self.strs[jpn.path_latest]", "def build_url(self, endpoint):\n if hasattr(self, \"port\"):\n return \"{}://{}:{}/{}\".format(\n self.scheme, self.root_url, self.port, endpoint)\n else:\n return \"{}://{}/{}\".format(\n self.scheme, self.root_url, endpoint)", "def build_url(self, endpoint_url: str) -> str:\n return self.base_url + endpoint_url % self.instance_id", "def __http_build_url(self, url_path):\n\n return '{}://{}{}'.format(_GOVEE_API_PROTOCOL, _GOVEE_API_HOST, url_path)", "def build_url(self, host, target, params=None):\n return \"https://%s%s\" % (host, self.build_path(target, params))", "def _build_url(self, host, handler):\n scheme = 'https' if self.use_https else 'http'\n return '%s://%s/%s' % (scheme, host, handler)", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' + urlencode(query)\n\n return url", "def build_url(self, request, action, **query):\n base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)\n return appendArgs(base, query)", "def _generate_url(self, endpoint:str, params:Dict[str, str]=None) -> str:\n if params:\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}?{urlencode(params)}\"\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}\"", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')", "def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. 
Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))", "def _build_url(self):\n u = urlparse.urljoin(settings.SITE_URL, '/#/')\n\n m = self.object.__class__.__name__\n\n if m == 'Workspace':\n return urlparse.urljoin(\n u, 'workspaces/w/{}'.format(self.object.slug)\n )\n elif m == 'Vault':\n return urlparse.urljoin(\n u, 'workspaces/w/{}/vaults/v/{}'.format(\n self.object.workspace.slug, self.object.slug))\n elif m == 'Card':\n return urlparse.urljoin(\n u, '/workspaces/w/{}/vaults/v/{}/cards/c/{}'.format(\n self.object.vault.workspace.slug, self.object.vault.slug,\n self.object.slug))\n\n return None", "def _generate_url(self, **kwargs):\n path = self.url_path.format(**kwargs)\n return self.poolbot.generate_url(path)", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def _build_url(self, tail_end):\n url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end\n return url", "def _build_url(self, tail_end):\n url = self._doc_class.urlobject.format(self._cb.credentials.org_key) + tail_end\n return url", "def build_url(main_url, url_params):\n return main_url + \"/\" + \"/\".join(url_params)", "def get_url(self):\n return self.url.format(\n base_url=self.base_url,\n description=urllib.quote_plus(self.description),\n location=urllib.quote_plus(self.location),\n )", "def build_url(self, config, query):\n if(not os.environ['FLICKR_API_KEY']):\n raise ValueError('Environement variable \"FLICKR_API_KEY\" is empty')\n \n current_provider = [provider for provider in config['providers'] if provider['name'] == self.provider_name][0]\n current_provider['query']['text'] = str(query)\n current_provider['query']['api_key'] = os.environ['FLICKR_API_KEY']\n\n query_strings = helper.build_query_strings(current_provider['query'])\n\n return current_provider['base_url'] + query_strings", "def build_url(self):\n return self.data_url.format(latitude=self.latitude, longitude=self.longitude)", "def _build_url(self, service, resource_type, parameters={}):\n # authenticated dataselect queries have different target URL\n if self.user is not None:\n if service == \"dataselect\" and resource_type == \"query\":\n resource_type = \"queryauth\"\n return build_url(self.base_url, service, self.major_versions[service],\n resource_type, parameters,\n service_mappings=self._service_mappings,\n subpath=self.url_subpath)", "def __build_url(self, api_call, **kwargs):\n kwargs['key'] = self.api_key\n if 'language' not in kwargs:\n kwargs['language'] = self.language\n if 'format' not in kwargs:\n kwargs['format'] = self.__format\n api_query = urlencode(kwargs)\n\n return \"{0}{1}?{2}\".format(urls.BASE_URL,\n api_call,\n api_query)", "def build_url(self, dict_args_in_out=None):\n if dict_args_in_out is None:\n dict_args_in_out = {}\n\n url = dict_args_in_out.pop('base_url', None) or ''\n url += '/%s' % self.collection_key\n\n # do we have a specific entity?\n entity_id = dict_args_in_out.pop('%s_id' % self.key, None)\n if entity_id is not None:\n url += '/%s' % entity_id\n\n return url", "def _url_builder(url_root,api_key,path,params):\n params['api_key'] = api_key\n url_end = urlencode(params)\n url = \"%s%s%s\" % (url_root,path,url_end)\n return url", "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += 
'://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' + '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "def _get_url(self, *args):\n if self._baseUrl not in args:\n args.insert(0, self._baseUrl)\n args = filter(lambda item: item is not None, args)\n return \"/\".join(args)", "def create_query_url(self):\n self.__log('Starting to create the query URL.')\n query_url = self.config['API_URI']\n for key, value in self.options.items():\n if value:\n if query_url == self.config['API_URI']:\n query_url = query_url + str(key) + \"=\" + str(value)\n else:\n query_url = query_url + \"&\" + str(key) + \"=\" + str(value)\n query_url = query_url.replace(' ', '%20')\n self.__log(f'Done creating query url. URL to query: \"{query_url}\"')\n return query_url", "def get_url(self, **kwargs):\n\n return build(\n self._request.path,\n self._request.GET,\n self._meta.prefix,\n **kwargs )", "def build_api_url(project, method, base_url):\n return API_URL_TEMPLATE.format(\n api_base=base_url, api_version=API_VERSION, project=project, method=method\n )", "def build_url(base_url, path):\n if absolute_http_url_regexp.match(path):\n return path\n elif base_url:\n return \"{}/{}\".format(base_url.rstrip(\"/\"), path.lstrip(\"/\"))\n else:\n raise exceptions.ParamsError(\"base url missed!\")", "def build_target_uri(self, **kwargs):\n return self._build_uri(**kwargs)", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def build_url(self, query):\n\n parts = list(urlparse.urlparse(self.addon_url))\n parts[4] = urllib.urlencode(query)\n\n return urlparse.urlunparse(parts)", "def _build_uri(self, uri_base, params):\n if not params:\n return uri_base\n else:\n uri_extension = \"?\"\n for param in params:\n uri_extension = uri_extension + param + \"&\"\n uri_extension = uri_extension[:-1] # clip off the final & \n uri = uri_base + uri_extension\n return uri", "def _generate_url(action, query_params=None):\r\n if query_params:\r\n query_params = urllib.parse.urlencode(query_params)\r\n action = f\"{action}?{query_params}\"\r\n \r\n\r\n url = urllib.parse.urljoin(api_url, action)\r\n\r\n return url", "def base_url(self) -> URL:\n return (\n URL(self.url)\n if self.url is not None\n else URL.build(\n scheme=f\"http{'s' if self.ssl else ''}\",\n host=self.hostname or self.ipaddress,\n port=str(self.port) if self.port else None,\n path=self.base_api_path or \"\",\n )\n )", "def generate_url(self):\n if self.has_marker:\n marker_param = f'mlat={self.mlat}&mlng={self.mlng}&'\n else:\n marker_param = ''\n if self.start:\n start_param = 'start=true&'\n else:\n start_param = ''\n url = f'{MapController.MAP_URL}?{start_param}clat={self.clat}&clng={self.clng}&{marker_param}zoom={self.zoom}'\n return url", "def get_url(self) -> str:\n return 
urljoin(self._base_url, self.url)", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s%s' % (self.endpoint, self.methodname)\n else:\n return '%s%s?%s' % (self.endpoint, self.methodname, kwargs)", "def build_url(host, port, api_version=None, path=None,\n params=None, use_ssl=False):\n\n pattern = 'v\\d\\.\\d'\n if re.match(pattern, path):\n message = 'Version should not be included in path.'\n raise exceptions.InvalidConfiguration(message=message)\n\n if use_ssl:\n url = \"https://\" + host\n else:\n url = \"http://\" + host\n\n if port is not None:\n url += \":\" + port\n url += \"/\"\n\n if api_version is not None:\n url += api_version + \"/\"\n\n if path is not None:\n url += path\n\n if params is not None:\n url += \"?\"\n url += urllib.urlencode(params)\n\n return url", "def get_url(self, *args):\n try:\n url = '/'.join((self.base_url, ) + args)\n except TypeError:\n url = '/'.join((self.base_url, ) + args[0])\n return url.rstrip('/')", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def make_url(realm_url, endpoint):\n return \"{}/protocol/openid-connect/{}\".format(realm_url, endpoint)", "def __get_url(cls, url):\n url = url + AdvertCoordinationAdaptor.BASE_URL_QUERY_STRING\n return url", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def Url(self) -> str:", "def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"", "def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url", "def get_url(self, params):\n base_url = self._get_base_url()\n return str(base_url + '/'.join([str(p) for p in params])).rstrip('/')", "def _url(self, path):\n \n return self.url + path", "def getFullURL(self, date):\n\n base = self.getBaseURL()\n path = self.getPath( date )\n return f'{base}/{path}'", "def _base_url(self):\n # URL Protocol\n proto = 'https' if self._ssl else 'http'\n\n # Device port number\n if self._port is None:\n port = 8080 if self._ssl else 8008\n else:\n port = self._port\n \n return f'{proto}://{self._address}:{port}/api/v1'", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def create_url(self):\n\n # Format the template strings with the user credentials and host\n # information provided upon instantiation.\n url = self.sql_url_template\n url = url.format(\n username=self.sql_username,\n password=self.sql_password,\n host=self.sql_host,\n port=self.sql_port,\n 
db=self.sql_db\n )\n\n return url", "def get_full_url(self, url):\n param_str = self.request.GET.urlencode()\n request_url = u'%s%s' % (self.base_url, url)\n request_url += '?%s' % param_str if param_str else ''\n return request_url", "def __build_url(path, api_site_parameter, **params):\n \n query = [\"%s=%s\" % (key, params[key]) for key in params if (params[key] or key == 'pagesize') ]\n query_string = \"&\".join(query)\n url = \"%s/%s/%s?\" % (__api_endpoint, __api_version, path)\n url += query_string\n return url", "def construct_url(self, local_json: Dict) -> str:\n url_str = \"\"\n\n for arg in self.get_url_args():\n if arg == \"merchantId\":\n url_str = url_str + str(self.merchant_id) + \"/\"\n elif arg == \"signature\":\n url_str = url_str + str(self.get_url_signature(local_json)) + \"/\"\n else:\n url_str = url_str + str(local_json[arg]) + \"/\"\n\n return urljoin(self.get_url(), url_str[:-1])", "def get_short_url_base():", "def generate_url(self, campaign_id):\n pass", "def create_url(artist, song, language):\n url = __BASE_URL__ + '/wiki/{artist}:{song}'.format(artist=urlize(artist), song=urlize(song))\n if language:\n url += '/{language}'.format(language=urlize(language).lower())\n return url", "def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)", "def _build_uri(self, **kwargs):\n target_uri, version = str(), None\n\n if kwargs.get('category') not in ['performance', 'common']:\n version = self._build_uri_get_version(kwargs.get('version'),\n kwargs.get('no_version'))\n if version:\n target_uri += '/{version}'.format(version=version)\n\n target_uri += '/{category}'.format(\n category=kwargs.get('category'))\n\n if kwargs.get('resource_level'):\n target_uri += '/{resource_level}'.format(\n resource_level=kwargs.get('resource_level'))\n\n if kwargs.get('resource_level_id'):\n target_uri += '/{resource_level_id}'.format(\n resource_level_id=kwargs.get('resource_level_id'))\n\n if kwargs.get('resource_type'):\n target_uri += '/{resource_type}'.format(\n resource_type=kwargs.get('resource_type'))\n if kwargs.get('resource_type_id'):\n target_uri += '/{resource_type_id}'.format(\n resource_type_id=kwargs.get('resource_type_id'))\n\n if kwargs.get('resource'):\n target_uri += '/{resource}'.format(\n resource=kwargs.get('resource'))\n if kwargs.get('resource_id'):\n target_uri += '/{resource_id}'.format(\n resource_id=kwargs.get('resource_id'))\n\n if kwargs.get('object_type'):\n target_uri += '/{object_type}'.format(\n object_type=kwargs.get('object_type'))\n if kwargs.get('object_type_id'):\n target_uri += '/{object_type_id}'.format(\n object_type_id=kwargs.get('object_type_id'))\n\n return target_uri", "def url(self):\r\n course_key = \"slashes:{course_org}+{course_num}+{course_run}\".format(**self.course_info)\r\n return \"/\".join([BASE_URL, self.url_path, course_key])", "def generate_url(self, param: dict) -> str:\n\n if not param:\n return self.base_url\n\n query_params = [] # making it as list as string is immutable, may cause performance degradation\n for key in param:\n if not param[key]:\n continue\n words = str(param[key]).split()\n query_params.append(f\"{key}={'+'.join(words)}\")\n return self.base_url + \"?\" + \"&\".join(query_params)", "def _get_url(self, absolute):", "def get_api_url(self, query_, api):\n api_url = \"%s%s%s\" % (api, query_, self.api_key)\n\n return api_url", "def getBuildbotURL():", "def url_for(self, *args, **kwargs):\n return yarl.URL(self.url(parts=kwargs))", 
"def full_url(self):\n return \"%s://%s%s\" % (self.protocol, self.host, self.uri)", "def build_url(base_url, service, major_version, resource_type,\n parameters=None, service_mappings=None, subpath='fdsnws'):\n # Avoid mutable kwargs.\n if parameters is None:\n parameters = {}\n if service_mappings is None:\n service_mappings = {}\n\n # Only allow certain resource types.\n if service not in [\"dataselect\", \"station\"]:\n msg = \"Resource type '%s' not allowed. Allowed types: \\n%s\" % \\\n (service, \",\".join((\"dataselect\", \"station\")))\n raise ValueError(msg)\n\n # Special location handling.\n if \"location\" in parameters:\n loc = parameters[\"location\"].replace(\" \", \"\")\n # Empty location.\n if not loc:\n loc = \"--\"\n # Empty location at start of list.\n if loc.startswith(','):\n loc = \"--\" + loc\n # Empty location at end of list.\n if loc.endswith(','):\n loc += \"--\"\n # Empty location in middle of list.\n loc = loc.replace(\",,\", \",--,\")\n parameters[\"location\"] = loc\n\n # Apply per-service mappings if any.\n if service in service_mappings:\n url = \"/\".join((service_mappings[service], resource_type))\n else:\n if subpath is None:\n parts = (base_url, service, str(major_version),\n resource_type)\n else:\n parts = (base_url, subpath.lstrip('/'), service,\n str(major_version), resource_type)\n url = \"/\".join(parts)\n\n if parameters:\n # Strip parameters.\n for key, value in parameters.items():\n try:\n parameters[key] = value.strip()\n except Exception:\n pass\n url = \"?\".join((url, urlencode(parameters, safe=':,*')))\n \n return url", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "def get_randori_base_url(self) -> str:\n return urljoin(self.endpoint_url, self.organization_name)", "def build_request_url(symbol, start_date, end_date):\n pass", "def url (self):\n return Links.createURL('/')", "def url(self) -> str:\n return f\"{self._get_base_url()}{self.path_extension}\"", "def __get_full_url(self, operation, slug_params):\n return (self.base_url + operation[1]) % slug_params", "def get_url():\n if os.environ['SERVER_PORT'] == '80':\n scheme = 'http://'\n else:\n scheme = 'https://'\n host = os.environ['SERVER_NAME']\n script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))\n path_info = urllib.quote(os.environ.get('PATH_INFO', ''))\n qs = os.environ.get('QUERY_STRING', '')\n if qs:\n qs = '?' 
+ qs\n return scheme + host + script_name + path_info + qs", "def GenerateUrl():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['redirect_uri'] = Constants.AUTH['REDIRECT']\n params['scope'] = Constants.AUTH['SCOPE']\n params['response_type'] = 'code'\n return '%s?%s' % (Constants.OAUTH, FormatUrl(params))", "def makeAccessURL(cls, baseURL):\n\t\treturn \"%s/%s\"%(baseURL, cls.name)", "def get_url():\n key = _get_key()\n return key.generate_url(300)", "def _build_url(self, story):\n return u'/api/items/{}/schedule/'.format(story)", "def url_base(self):\n return 'http://%s:%d/' % (self.host, self.port)", "def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))", "def build_path(self, target, params=None):\n if sys.version_info < (3,) and type(target) == unicode:\n target = target.encode(\"utf8\")\n\n target_path = urllib.quote(target)\n\n params = params or {}\n params = params.copy()\n\n if self.locale:\n params['locale'] = self.locale\n\n if params:\n return \"/%s%s?%s\" % (BaseSession.API_VERSION, target_path, urllib.urlencode(params))\n else:\n return \"/%s%s\" % (BaseSession.API_VERSION, target_path)", "def _build_url(self, path):\n return \"{0}/blazing-jdbc/{1}\".format(self.baseurl, path)", "def __url(self, *els):\n\n urls = [str(el) for el in els]\n urls.insert(0, self.BASE_URL)\n\n return '/'.join(s.strip('/') for s in urls)", "def test_client_build_url():\n eq_(\"{0}/{1}\".format(client.BASE_URL, \"v1/charges/\"), client.build_url(\"v1/charges/\"))", "def url(self, **kwargs):\n return self._location.url(**kwargs)", "def make_url(self, artist, song):\n url = \"http://www.azlyrics.com/lyrics/{}/{}.html\".format(artist, song)\n return url", "def TOURL(self) -> str:\n\t\treturn \"%s://%s:%d/\" % (\"https\" if self.useSSL else \"https\", self.toHost, self.toPort)", "def url(self) -> str:\n return self.HTTP.url if self.HTTP else self._url", "def build_url(ori_lon, ori_lat, des_lon, des_lat, year, month, day, hour, minute, args={}):\n options = dict()\n with open(option_file, 'r', newline='') as file:\n # Read the options file\n for line in file:\n if line[0] == '#': # if the first character of a line is '#' skip it\n continue\n splited_line = line.rstrip().split(':')\n if len(splited_line) < 2: # if it is a line with no ':'\n continue\n options[splited_line[0]] = splited_line[1]\n base_URL = 'localhost:' + port + '/otp/routers/default/plan'\n fromPlace = ori_lon + ',' + ori_lat\n toPlace = des_lon + ',' + des_lat\n date = year + '/' + month + '/' + day\n time = hour + ':' + minute + ':00'\n\n url = 'http://' + base_URL + '?fromPlace=' + fromPlace + '&toPlace=' + toPlace + '&date=' + date + '&time=' + time\n for option_name in options.keys():\n option = options[option_name]\n url += '&' + option_name + '=' + option\n if not 'mode' in url:\n url += '&mode=TRANSIT,WALK'\n for key in args.keys():\n url+= '&' + key + '=' + args[key]\n\n return url", "def MakeUrl(host, port=80, location=''):\n return \"http://{shost}{sdelimiter}{sport}/{slocation}\".format(\\\n shost=host,\n sdelimiter=':' if port != 80 else '',\n sport=port if port != 80 else '',\n slocation=location )", "def build_url(handle_or_url):\n if handle_or_url.startswith('https://') or handle_or_url.startswith('http://'):\n return handle_or_url\n else:\n return 'https://cdn.filestackcontent.com/{0}'.format(handle_or_url)" ]
[ "0.82525396", "0.80543995", "0.7988607", "0.7964267", "0.79619455", "0.7841471", "0.77490044", "0.77251446", "0.77089393", "0.76246357", "0.75926405", "0.75856274", "0.7532603", "0.75000435", "0.74994606", "0.74262804", "0.742292", "0.74154943", "0.74154943", "0.73670965", "0.73670965", "0.7319349", "0.7310445", "0.7282703", "0.7267873", "0.726725", "0.7264372", "0.725883", "0.72579086", "0.72534823", "0.72210425", "0.7203703", "0.72008663", "0.71928215", "0.7153339", "0.7146332", "0.71412426", "0.71236944", "0.708277", "0.70683235", "0.70597804", "0.7056306", "0.70375776", "0.70224595", "0.7013349", "0.70017684", "0.6993044", "0.6991119", "0.6991119", "0.6969213", "0.6965804", "0.6945444", "0.6935624", "0.6926093", "0.6923504", "0.69032705", "0.68561", "0.6851904", "0.6839692", "0.68282735", "0.68145096", "0.6797853", "0.67917734", "0.67895365", "0.67895156", "0.6789338", "0.6785182", "0.6784002", "0.67804193", "0.67740405", "0.6763475", "0.6754389", "0.67543334", "0.6745437", "0.67188114", "0.67124224", "0.6711685", "0.67050606", "0.6695607", "0.66914344", "0.6690974", "0.6688931", "0.6685789", "0.6685011", "0.66836727", "0.667769", "0.66667753", "0.666052", "0.66601485", "0.66591954", "0.6658447", "0.664566", "0.6644422", "0.6636488", "0.6632262", "0.6624612", "0.662082", "0.6617404", "0.6612717", "0.6612419", "0.6612274" ]
0.0
-1
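
As a rough illustration of the URL-building pattern in the record above, here is a minimal sketch using Python 3's urllib instead of the record's Python 2 urlparse; the function name and the example input are hypothetical and not taken from the dataset.

# Minimal sketch: rebuild a URL with an IDNA-encoded host, a percent-quoted
# path, and optional extra query parameters (assumes Python 3 stdlib only).
from urllib.parse import urlsplit, urlunsplit, quote, urlencode

def build_full_url(url, params=None):
    scheme, netloc, path, query, fragment = urlsplit(url)
    if not scheme:
        raise ValueError("Invalid URL %r: no scheme supplied" % url)
    # Encode a (port-less) unicode host name with IDNA so it is ASCII-safe.
    netloc = netloc.encode("idna").decode("ascii")
    # Re-quote the path, leaving existing slashes and percent-escapes alone.
    path = quote(path, safe="/%")
    rebuilt = urlunsplit((scheme, netloc, path, query, fragment))
    if params:
        # Append extra parameters, using '&' if a query string already exists.
        sep = "&" if query else "?"
        return rebuilt + sep + urlencode(params)
    return rebuilt

# Example (hypothetical input): build_full_url("http://exämple.com/a path", {"q": "1"})
# returns an ASCII-only URL with the host IDNA-encoded and the path percent-quoted.
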
Sets up the connection. Will optionally accept a size or else will use a chunked TransferEncoding.
def setup(self, size=None): if size: self.size = size if not self.size: self.size = UNKNOWN_LENGTH self.body.length = self.size req = self.conn.make_request('PUT', self.url, headers=self.headers, data=self.body) self.req = req print "ChunkedTwistedConnection: STARTED REQUEST"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _send_connection_init(self, request: Request) -> None:\n # Need to set these manually here instead of manipulating via\n # __setitem__() otherwise the H2Connection will emit SettingsUpdate\n # frames in addition to sending the undesired defaults.\n self._h2_state.local_settings = h2.settings.Settings(\n client=True,\n initial_values={\n # Disable PUSH_PROMISE frames from the server since we don't do anything\n # with them for now. Maybe when we support caching?\n h2.settings.SettingCodes.ENABLE_PUSH: 0,\n # These two are taken from h2 for safe defaults\n h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,\n h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,\n },\n )\n\n # Some websites (*cough* Yahoo *cough*) balk at this setting being\n # present in the initial handshake since it's not defined in the original\n # RFC despite the RFC mandating ignoring settings you don't know about.\n del self._h2_state.local_settings[\n h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL\n ]\n\n self._h2_state.initiate_connection()\n self._h2_state.increment_flow_control_window(2**24)\n await self._write_outgoing_data(request)", "def __init__(self, host, port, compress=True, chunk_size=1300, **kwargs):\n\n DatagramHandler.__init__(self, host, port)\n BaseHandler.__init__(self, **kwargs)\n\n self.compress = compress\n self.chunk_size = chunk_size", "def __init__(self, bitstream, chunk_size):\n self.stream_chunker = self.chunker(bitstream, chunk_size)", "def __init__(self, resp, chunk_size, conn_to_close):\n self.resp = resp\n self.chunk_size = chunk_size\n self.conn_to_close = conn_to_close", "def __init__(self, host='localhost', port=8125, max_buffer_size=50):\n self.max_buffer_size = max_buffer_size\n self._send = self._send_to_server\n self.connect(host, port)\n self.encoding = 'utf-8'", "def __init__(self, port=8080, bufferSize=1024, encoding=\"utf-8\"):\n self.BUFFER_SIZE = bufferSize\n self.PORT_NUM = port\n self.ENCODING = encoding\n self.RECV_LIMIT = 5 #DO NOT CHANGE!!\n self.socket = socket(AF_INET,SOCK_STREAM)\n self.socket.settimeout(0.5)#intentionally very low. 
DO NOT CHANGE!!", "def __init__(self, size, connection):\n pycastle_log.debug(str(self)+\" start\")\n try:\n assert isinstance(connection, CastleConnection), \"wtf\"\n self.buf = castle_shared_buffer_create(connection.conn, size)\n self.size = size\n self.connection = connection\n pycastle_log.info(\"Made buffer {0} of size {1} with connection {2}\".format(self.buf, self.size, self.connection.conn))\n except Exception, e:\n pycastle_log.error(str(self)+\" got exception {0}:{1}\".format(type(e), e))\n raise\n finally:\n pycastle_log.debug(str(self)+\" stop\")", "def prepare(self):\n if self.request.method.upper() == 'POST':\n if 'expected_size' in self.request.arguments:\n self.request.connection.set_max_body_size(\n int(self.get_argument('expected_size')))\n try:\n total = int(self.request.headers.get(\"Content-Length\", \"0\"))\n except KeyError:\n total = 0\n self.multipart_streamer = MultiPartStreamer(total)", "def open(self):\n streaming_specs = self.get_streaming_specs()\n self._stream = chunked_requests.Stream(**streaming_specs)", "def __init__(self, transport, buff_size=16384, socket_timeout=5.0,\n progress=None, sanitize=_sh_quote):\n self.transport = transport\n self.buff_size = buff_size\n self.socket_timeout = socket_timeout\n self.channel = None\n self.preserve_times = False\n self._progress = progress\n self._recv_dir = b''\n self._rename = False\n self._utime = None\n self.sanitize = sanitize\n self._dirtimes = {}", "def __init__(self, data_size):\n try:\n self.data_size = int(data_size)\n except ValueError as exc:\n raise ValueError(\"Exepected arg 'size' to be int: \" + str(exc))\n self.packet = bytearray()\n self.in_data = False\n self.header_pos = 0\n self.transport = None", "def est_connection(self):\n try:\n file_size = math.ceil(self.get_file_size())\n with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:\n print(f\"[+]connecting to {self.HOSTNAME}:{self.PORT}\")\n s.connect((self.HOSTNAME,self.PORT))\n print(f\"[+]Connected\")\n # prime the server with file meta data\n s.send(f\"{self.FILENAME} {file_size}\".encode())\n print(f\"[+]Sending file info from: {self.get_full_path()}\")\n self.stream_files(s)\n return \n\n except socket.error as msg:\n print(f\"Caught exception: {msg}\")", "def __init__(self, size):\n self.handle_errors(size)\n self.__size = size", "def init_connection(self, connection):", "def _setup(self, addr, size):\n # No-op base implementation", "def __init__(self, stream, progress_callback, progress_chunk_size):\n self._stream = stream\n self._progress_callback = progress_callback\n self._progress_chunk_size = progress_chunk_size\n self._bytes_transferred = 0\n self._progress_chunk = 0", "def done (self):\r\n\r\n # ----------------------------------------\r\n # persistent connection management\r\n # ----------------------------------------\r\n\r\n # --- BUCKLE UP! 
----\r\n\r\n connection = get_header(CONNECTION, self.header).lower()\r\n\r\n close_it = 0\r\n wrap_in_chunking = 0\r\n\r\n if self.version == '1.0':\r\n if connection == 'keep-alive':\r\n if 'Content-Length' not in self:\r\n close_it = 1\r\n else:\r\n self['Connection'] = 'Keep-Alive'\r\n else:\r\n close_it = 1\r\n elif self.version == '1.1':\r\n if connection == 'close':\r\n close_it = 1\r\n elif 'Content-Length' not in self:\r\n if 'Transfer-Encoding' in self:\r\n if not self['Transfer-Encoding'] == 'chunked':\r\n close_it = 1\r\n elif self.use_chunked:\r\n self['Transfer-Encoding'] = 'chunked'\r\n wrap_in_chunking = 1\r\n else:\r\n close_it = 1\r\n elif self.version is None:\r\n # Although we don't *really* support http/0.9 (because we'd have to\r\n # use \\r\\n as a terminator, and it would just yuck up a lot of stuff)\r\n # it's very common for developers to not want to type a version number\r\n # when using telnet to debug a server.\r\n close_it = 1\r\n\r\n outgoing_header = producers.simple_producer(self.get_reply_header_text())\r\n\r\n if close_it:\r\n self['Connection'] = 'close'\r\n\r\n if wrap_in_chunking:\r\n outgoing_producer = producers.chunked_producer (\r\n producers.composite_producer (self.outgoing)\r\n )\r\n # prepend the header\r\n outgoing_producer = producers.composite_producer(\r\n [outgoing_header, outgoing_producer]\r\n )\r\n else:\r\n # prepend the header\r\n self.outgoing.insert(0, outgoing_header)\r\n outgoing_producer = producers.composite_producer (self.outgoing)\r\n\r\n # apply a few final transformations to the output\r\n self.channel.push_with_producer (\r\n # globbing gives us large packets\r\n producers.globbing_producer (\r\n # hooking lets us log the number of bytes sent\r\n producers.hooked_producer (\r\n outgoing_producer,\r\n self.log\r\n )\r\n )\r\n )\r\n\r\n self.channel.current_request = None\r\n\r\n if close_it:\r\n self.channel.close_when_done()", "def open(self, transport_config, options, protocol_class=None):", "def connection_made(self, transport):\n self._transport = transport\n self._when_connected = datetime.datetime.now()\n self._last_received = datetime.datetime.now()\n\n reader_factory = self._reader_factory\n writer_factory = self._writer_factory\n reader_kwds = {}\n writer_kwds = {}\n\n if self.default_encoding:\n reader_kwds[\"fn_encoding\"] = self.encoding\n writer_kwds[\"fn_encoding\"] = self.encoding\n reader_kwds[\"encoding_errors\"] = self._encoding_errors\n writer_kwds[\"encoding_errors\"] = self._encoding_errors\n reader_factory = self._reader_factory_encoding\n writer_factory = self._writer_factory_encoding\n\n if self._limit:\n reader_kwds[\"limit\"] = self._limit\n\n self.reader = reader_factory(**reader_kwds)\n\n self.writer = writer_factory(\n transport=transport,\n protocol=self,\n reader=self.reader,\n server=True,\n **writer_kwds\n )\n\n logger.info(\"Connection from %s\", self)\n\n self._waiter_connected.add_done_callback(self.begin_shell)\n asyncio.get_event_loop().call_soon(self.begin_negotiation)", "def done(self, *arg, **kw):\r\n\r\n # ----------------------------------------\r\n # persistent connection management\r\n # ----------------------------------------\r\n\r\n # --- BUCKLE UP! 
----\r\n\r\n connection = http_server.get_header(http_server.CONNECTION,self.header)\r\n connection = connection.lower()\r\n\r\n close_it = 0\r\n wrap_in_chunking = 0\r\n globbing = 1\r\n\r\n if self.version == '1.0':\r\n if connection == 'keep-alive':\r\n if not 'Content-Length' in self:\r\n close_it = 1\r\n else:\r\n self['Connection'] = 'Keep-Alive'\r\n else:\r\n close_it = 1\r\n elif self.version == '1.1':\r\n if connection == 'close':\r\n close_it = 1\r\n elif not 'Content-Length' in self:\r\n if 'Transfer-Encoding' in self:\r\n if not self['Transfer-Encoding'] == 'chunked':\r\n close_it = 1\r\n elif self.use_chunked:\r\n self['Transfer-Encoding'] = 'chunked'\r\n wrap_in_chunking = 1\r\n # globbing slows down tail -f output, so only use it if\r\n # we're not in chunked mode\r\n globbing = 0\r\n else:\r\n close_it = 1\r\n elif self.version is None:\r\n # Although we don't *really* support http/0.9 (because\r\n # we'd have to use \\r\\n as a terminator, and it would just\r\n # yuck up a lot of stuff) it's very common for developers\r\n # to not want to type a version number when using telnet\r\n # to debug a server.\r\n close_it = 1\r\n\r\n outgoing_header = producers.simple_producer(self.build_reply_header())\r\n\r\n if close_it:\r\n self['Connection'] = 'close'\r\n\r\n if wrap_in_chunking:\r\n outgoing_producer = deferring_chunked_producer(\r\n deferring_composite_producer(self.outgoing)\r\n )\r\n # prepend the header\r\n outgoing_producer = deferring_composite_producer(\r\n [outgoing_header, outgoing_producer]\r\n )\r\n else:\r\n # prepend the header\r\n self.outgoing.insert(0, outgoing_header)\r\n outgoing_producer = deferring_composite_producer(self.outgoing)\r\n\r\n # hook logging into the output\r\n outgoing_producer = deferring_hooked_producer(outgoing_producer,\r\n self.log)\r\n\r\n if globbing:\r\n outgoing_producer = deferring_globbing_producer(outgoing_producer)\r\n\r\n self.channel.push_with_producer(outgoing_producer)\r\n\r\n self.channel.current_request = None\r\n\r\n if close_it:\r\n self.channel.close_when_done()", "def Prepare(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def initialize(self):\n if not self.connection.is_closed():\n self.connection.close()\n\n self.connection.connect()", "def __init__(self, width=None, chunk_size=None):\r\n self._width = width or 50\r\n if not isinstance(self._width, Compatibility.integer):\r\n raise ValueError('The width must be an integer, given %s' % self._width)\r\n\r\n self._chunk_size = chunk_size or Amount(10, Data.KB)\r\n if not isinstance(self._chunk_size, Amount) or not isinstance(self._chunk_size.unit(), Data):\r\n raise ValueError('The chunk_size must be a Data Amount, given %s' % self._chunk_size)\r\n\r\n self._start = time.time()", "def makeConnection(self, transport):\n pass", "def __init__(self, host, port, use_socket=None, server=False, handler=False, bufsize=\"auto\", compress=False, compress_level=6):\n super(BinarySocket, self).__init__(host, port, server=server, use_socket=use_socket, use_pickle=False, bufsize=bufsize, handler=handler)\n self.__header_buffer = \"\"\n self.__binary_buffer = \"\"\n self.__meta_buffer = \"\"\n self.__header_length = 2 * 4 + 1 # 2 Unsigned Ints, 1 Bool\n self.__binary_length = None\n self.__binary_compressed = False\n self.__meta_length = None\n self.__buffer_lock = threading.Lock()\n\n self.set_compression(compress, compress_level)", "def connection_made(self, transport):\n #self._transport = transport\n\n 
self._server_ip, self._server_port = (\n transport.get_extra_info('peername')[:2])\n\n self.stream = self._stream_factory(\n transport=transport, client=True, log=self.log)\n\n# self.reader = self._factory_reader()\n# self.reader.set_transport(transport)\n self.shell = self._shell_factory(client=self, log=self.log)\n\n self.init_environment_values()\n self.set_stream_callbacks()\n self._last_received = datetime.datetime.now()\n self._connected = datetime.datetime.now()\n\n # begin connect-time negotiation\n self._loop.call_soon(self.begin_negotiation)\n\n # resolve server fqdn (and later, reverse-dns)\n self._server_host = self._loop.run_in_executor(\n None, socket.gethostbyaddr, self._server_ip)\n self._server_host.add_done_callback(self.after_server_lookup)\n\n self.log.info(self)", "async def read_chunk(self, size: int = ...) -> bytes:\n ...", "def __init__(self, ip, port, stream_id, auth_options, chunk_size=2097152):\n self.stream_id = stream_id\n # Create a rtmp url using ip, port, and stream_id\n connection_string = \"rtmp://{0}:{1}/view/{2}\".format(ip, port, stream_id)\n # Add authorization parameters to connection string\n self.connection_string = self._auth_RTMP(\n connection_string,\n auth_options[\"loginUrl\"],\n auth_options[\"rtmpRequestUrl\"],\n auth_options[\"username\"],\n auth_options[\"password\"])\n # Create a stream connection to rtmp url\n self.connection = librtmp.RTMP(self.connection_string, live=True)\n self.connection.connect()\n self.stream = self.connection.create_stream()\n self.chunk_size = chunk_size\n self.bytes_read = 0\n self.previous_read = 0\n self.data = b''\n self.is_reading = True", "def __init__(self, size):\n self.__size = size\n self.integer_validator(\"size\", size)\n super().__init__(size, size)\n self._size = size", "def connection_made(self, transport):\n self.transport = transport\n self.buf = bytes()\n self.msgs_recvd = 0\n print('Reader connection created')", "def start(self):\n self.protocol.makeConnection(self.transport)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def __init__(self, fabric, connection, executor, url, port,\n operation_timeout_seconds):\n super(StreamCollection, self).__init__(connection, executor)\n url = urlparse(url)\n self.header = connection.headers\n self.fabric = fabric\n dcl_local = self.fabric.localdc(detail=True)\n ws_url = \"wss://api-%s/_ws/ws/v2/\"\n self._ws_url = ws_url % (dcl_local[\"tags\"][\"url\"])", "def initConnBufSizeFrame(self,referenceID, bufferSize):\r\n # Strip any colons in the mac address\r\n self.referenceID = referenceID\r\n\r\n # Set the frame content, convert the bufferSize into a string\r\n self.content = str(bufferSize)\r\n\r\n # Set the content length\r\n self.contentLength = len(self.content)\r\n\r\n # Set the correct frame message type\r\n self.mesgType = MULTIPLEXER_CONN_BUF_SIZE", "def read(self, size=None, timeout_ms=None, **kwargs):\n raise NotImplementedError(\"implement in derived transport class\")", "def initialize(self, config: DataConsumerConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.INPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = int(self.batch_size / 2) + 5\n chn.sock_opts['sndhwm'] = 5", "def test_initialize_connection_with_size(self):\n snapshot = fake_snapshot.fake_snapshot_obj(\n self.ctx, **{'volume': self.volume,\n 'provider_id': self.snapshot_id,\n 'volume_size': 8})\n props = self.driver.initialize_connection_snapshot(\n snapshot,\n 
self.connector)\n # validate the volume type\n self.assertEqual(props['driver_volume_type'], 'scaleio')\n # make sure a volume name and id exist\n self.assertIsNotNone(props['data']['scaleIO_volname'])\n self.assertEqual(self.snapshot_id,\n props['data']['scaleIO_volume_id'])\n # make sure QOS properties are set\n self.assertIn('iopsLimit', props['data'])", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if len(args):\n # args[0] is a connection object\n\n try:\n self._host = args[0].get_phos_host()\n self._port = args[0].get_phos_port()\n except AttributeError:\n # Not a Phos connection object. Too bad.\n pass\n logging.getLogger(\"pyhive.hive\").setLevel(logging.WARN)\n logging.getLogger(\"requests.packages.urllib3.connectionpool\").setLevel(logging.WARN)", "def __init__(chanRequest, command, path, version, contentLength, inHeaders):", "def __init__(self, mpu, original_size, download, chunk_size, min_chunk, max_chunk):\n super(ChunkedMultipartUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._download = download\n self._chunk_size = chunk_size\n self._partial_chunks = {}\n self._min_chunk = min_chunk\n self._max_chunk = max_chunk", "def setupTcp(self):\n \tself.tcpManager = QueuedConnectionManager()\n \tself.tcpReader = QueuedConnectionReader(self.tcpManager, 0)\n \tself.tcpWriter = ConnectionWriter(self.tcpManager, 0)", "def __init__(self, *args, **kwargs):\n # _max_bytes will be updated after _CheckConnect and _PostRequest.\n self._batch_size = 1\n self._max_bytes = http_common.DEFAULT_MAX_BYTES\n self._target_url = ''\n self._gpg = None\n super(OutputHTTP, self).__init__(*args, **kwargs)", "def open_connection(self, connection):\n pass", "def set_up(self, host, port):\n self.socket.bind((host, port))\n self.socket.listen(5)\n while True:\n connectSocket , addr = self.socket.accept()\n sentence = connectSocket.recv(2048)\n sentence = sentence.decode()\n self.file(sentence)\n self.parser(connectSocket)", "def set_pool_size(self, pool_size):\n self._semantic_decoder.set_pool_size(pool_size)\n if self._instance_decoder is not None:\n self._instance_decoder.set_pool_size(pool_size)", "def __init__(self, *args, **kwargs):\n super(Client, self).__init__(role='c', *args, **kwargs)\n\n # Internal variables\n self._bulksize = None\n self._server_hostname = None\n self._port = None\n self._num_streams = None\n self._zerocopy = False", "def set_prefetch_size(size):\n if size <= 0 or size > INT32_MAX:\n raise ValueError(\"Prefetch size given is not within the required range.\")\n _config.set_op_connector_size(size)", "def __init__(self, embedding_size: int, num_heads: int):\n super().__init__()\n self.n_heads = num_heads\n self.head_size: int = embedding_size // self.n_heads\n self.embedding_size: int = self.head_size * self.n_heads", "def __init__(self, host, port, timeout=10, timeout_limit=3):\n self._buffer = b\"\"\n self._conn = socket.create_connection((host, port), timeout=timeout)\n self.timeout = timeout\n self.timeout_limit = timeout_limit", "def __init__(self, host=\"localhost\", port=1717, username=\"admin\", password=\"admin\", environment=\"\", **kwargs):\n username = username or find_in_kwargs_by_alias('username', kwargs)\n password = password or find_in_kwargs_by_alias('password', kwargs)\n prefs = find_in_kwargs_by_alias('prefs', kwargs)\n if prefs:\n # Hack to use ConfigParser with java style properties file\n with open(os.path.abspath(os.path.expanduser(prefs))) as stream:\n lines = 
itertools.chain((\"[default]\",), stream)\n prefs = ConfigParser()\n prefs.read_file(lines)\n prefs = dict(prefs._sections['default'])\n else:\n prefs = {}\n self.host = prefs.get('host', host)\n self.port = int(prefs.get('port', port))\n self.username = prefs.get('username', username)\n self.password = prefs.get('password', password)\n self.environment = prefs.get('environment', environment)\n try:\n transport = TSocket.TSocket(self.host, self.port)\n transport = TTransport.TBufferedTransport(transport)\n\n # Edit the buffer attributes in the transport to use BytesIO\n setattr(transport, '_TBufferedTransport__wbuf', BytesIO())\n setattr(transport, '_TBufferedTransport__rbuf', BytesIO(b\"\"))\n\n # Edit the write method of the transport to encode data\n def write(slf, buf):\n try:\n slf._TBufferedTransport__wbuf.write(buf)\n except TypeError:\n buf = bytes(buf, 'utf-8')\n slf.write(buf)\n except Exception as e:\n # on exception reset wbuf so it doesn't contain a partial function call\n self._TBufferedTransport__wbuf = BytesIO\n raise e\n transport.write = types.MethodType(write, transport)\n\n # Edit the flush method of the transport to use BytesIO\n def flush(slf):\n out = slf._TBufferedTransport__wbuf.getvalue()\n # reset wbuf before write/flush to preserve state on underlying failure\n slf._TBufferedTransport__wbuf = BytesIO()\n slf._TBufferedTransport__trans.write(out)\n slf._TBufferedTransport__trans.flush()\n transport.flush = types.MethodType(flush, transport)\n\n # Edit the read method of the transport to use BytesIO\n def read(slf, sz):\n ret = slf._TBufferedTransport__rbuf.read(sz)\n if len(ret) != 0:\n return ret\n\n slf._TBufferedTransport__rbuf = BytesIO(slf._TBufferedTransport__trans.read(max(sz, slf._TBufferedTransport__rbuf_size)))\n return slf._TBufferedTransport__rbuf.read(sz)\n transport.read = types.MethodType(read, transport)\n\n # Edit the readAll method of the transport to use a bytearray\n def readAll(slf, sz):\n buff = b''\n have = 0\n while have < sz:\n chunk = slf.read(sz - have)\n have += len(chunk)\n buff += chunk\n if len(chunk) == 0:\n raise EOFError()\n return buff\n transport.readAll = types.MethodType(readAll, transport)\n\n protocol = TBinaryProtocol.TBinaryProtocol(transport)\n self.client = ConcourseService.Client(protocol)\n transport.open()\n self.transport = transport\n self.__authenticate()\n self.transaction = None\n except Thrift.TException:\n raise RuntimeError(\"Could not connect to the Concourse Server at \"+self.host+\":\"+str(self.port))", "def test_blocksize_streamable_subset(self):\n self.default_kwargs['blocksize'] = 65535\n self.encoder = StreamEncoder(**self.default_kwargs)\n with self.assertRaisesRegex(EncoderInitException, 'FLAC__STREAM_ENCODER_INIT_STATUS_NOT_STREAMABLE'):\n self.encoder._init()", "def __init__(self, event_loop: asyncio.AbstractEventLoop, ssl_context: ssl.SSLContext=None,\n loglevel: int=logging.DEBUG, buffer_size: int=asyncio.streams._DEFAULT_LIMIT):\n self._event_loop = event_loop\n self._server = None\n if not ssl_context:\n # This looks very similar to the code for create_default_context\n # That's because it is the code\n # For some reason, create_default_context doesn't like me and won't work properly\n self._ssl = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23)\n # SSLv2 considered harmful.\n self._ssl.options |= ssl.OP_NO_SSLv2\n\n # SSLv3 has problematic security and is only required for really old\n # clients such as IE6 on Windows XP\n self._ssl.options |= ssl.OP_NO_SSLv3\n 
self._ssl.load_default_certs(ssl.Purpose.SERVER_AUTH)\n self._ssl.options |= getattr(_ssl, \"OP_NO_COMPRESSION\", 0)\n self._ssl.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)\n self._ssl.options |= getattr(_ssl, \"OP_CIPHER_SERVER_PREFERENCE\", 0)\n\n else:\n self._ssl = ssl_context\n\n self._bufsize = buffer_size\n self.default_butterfly = Butterfly\n self.default_net = Net\n\n self._executor = futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2 + 1)\n\n self.net = None\n self.log_level = loglevel\n self.logger = logging.getLogger(\"ButterflyNet\")\n self.logger.setLevel(loglevel)\n if self.logger.level <= logging.DEBUG:\n self._event_loop.set_debug(True)\n\n self.butterflies = {}", "def SetDataEncoding(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def test_blocksize_lax(self):\n self.default_kwargs['blocksize'] = 65535\n self.default_kwargs['streamable_subset'] = False\n self.encoder = StreamEncoder(**self.default_kwargs)\n self.encoder._init()", "def setupClass(cls):\n cls._tmp_dir = tempfile.mkdtemp()\n cls.test_filepath = os.path.join( cls._tmp_dir, \"test_data.h5\" )\n cls._generate_testdata_h5(cls.test_filepath)\n cls.server_proc, cls.shutdown_event = cls._start_mockserver( cls.test_filepath, same_process=True )\n cls.client_connection = httplib.HTTPConnection( \"localhost:8000\" )", "def test_blocksize_streamable_subset(self):\n self.default_kwargs['blocksize'] = 65535\n self.encoder = FileEncoder(**self.default_kwargs)\n with self.assertRaisesRegex(EncoderInitException, 'FLAC__STREAM_ENCODER_INIT_STATUS_NOT_STREAMABLE'):\n self.encoder._init()", "def __init__(self, serversocket, byte_count_size=BYTES_PER_SHORT):\n self.socket_, self.ip = serversocket.accept()\n self.socket_.setblocking(False)\n self.handle = ''\n self.residue_from_previous_messages = b''\n self.pending_messages = []\n self.byte_count_size = byte_count_size\n self.__dict__['initialized_'] = True", "def __init__(self, base_url=None, address=None, **kwargs):\n if not base_url and not address:\n raise Exception(\n \"You must provide either a `base_url` or `address` argument\"\n )\n self.session = requests.Session(**kwargs)\n if base_url:\n self.base_url = self._attempt_connections([base_url])\n else:\n # normalize the URL and try a number of variations until we find one that's able to connect\n logger.info(\n \"Attempting connections to variations of the URL: {}\".format(base_url)\n )\n self.base_url = self._attempt_connections(\n get_normalized_url_variations(address)\n )", "async def init(self):\n self.init_connection_params()\n self._pool = await self._create_pool()\n\n return self", "def __setup_conn__(self, **kwargs):\n self.ext_conn = setup_conn(**kwargs)", "def __init__(self, conn, addr):\n self.conn = conn\n self.addr = addr\n \n self.buff = \"\"\n self.recvsize = 1024", "def __init__(self, smartcards=None, **kwargs):\n FileSystemClient.__init_with_kwargs__(self, kwargs, port=22)\n\n self.smartcards = smartcards\n self.__port_forwarding_register = PortForwardingRegister()\n\n self._init(**kwargs)", "def __init__(self, data, chunksize, axis, **kwargs):\n\n self.data = data\n self._chunksize = int(chunksize)\n self.axis = axis\n self.kwargs = kwargs", "def _setup_connection(self, parameters):\n logger.info('Connecting to %s', parameters)\n return pika.BlockingConnection(parameters = parameters)", "def __init__(self, size=0):\n if type(size) is not int:\n raise TypeError('size must be an integer')\n if size < 0:\n raise 
ValueError('size must be >= 0')\n self.__size = size", "def __init__(self, host=None, port=22, username='who', password='who', compress=False,\n timeout=socket._GLOBAL_DEFAULT_TIMEOUT):\n self.debuglevel = DEBUGLEVEL\n self.host = host\n self.port = port\n self.username = username\n self.password = password\n self.compress = compress\n self.timeout = timeout\n self.sock = None\n self.rawq = b''\n self.cookedq = b''\n self.eof = 0\n self.ssh_client = paramiko.SSHClient()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n if self.host:\n self.open(host, port, username, password, compress, timeout)", "def __init__(self, connection):\n\n self._conn = connection", "def __init__(__self__, *,\n size: pulumi.Input[int]):\n pulumi.set(__self__, \"size\", size)", "def __init__(self, stream, address, server):\n logger.info(\"connection - address: %s\", address)\n self.stream = stream\n self.address = address\n self.server = server\n self.stream.set_close_callback(self._on_disconnect)\n self.wait()", "def __init__(self, size):\n self.__size = size", "def __init__(self, size):\n self.__size = size", "def __init__(self, size):\n self.__size = size", "def initialize_connection(self):\n # TODO how to track state of connection if this fails?\n assert(self.state == self.State.DISCONNECTED)\n\n self.socket.settimeout(self.CONNECTION_TIMEOUT_S)\n self.socket.connect((self.peer_info['ip'], self.peer_info['port']))\n\n # Get to initializing state once connection succeeds\n self.state = self.State.INIT_HANDSHAKE\n\n handshake = PeerHandshake(consts.PEER_ID, self.info_hash)\n self.socket.send(handshake.serialize())", "def __init__(self, port=8080, bufferSize=4096, encoding=\"utf-8\", \n altsocket=None, ip_whitelist=[], externalBlock=True, \n allowAll=False):\n self.BUFFER_SIZE = bufferSize\n self.PORT_NUM = port\n self.ENCODING = encoding\n self.WHITE_LIST = ip_whitelist\n self.LOCAL_ONLY = externalBlock\n self.ALLOW_ALL = allowAll\n if altsocket == None:\n self.socket = socket(AF_INET,SOCK_STREAM)\n self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n self.socket.bind((\"localhost\",self.PORT_NUM))\n self.socket.listen(3) # standard, change if you want.\n self.socket.settimeout(2) #timeout in 3 seconds...\n else: \n self.socket = altsocket", "def __init__(self, action_type=None, length=None, port=None,\n max_length=None):\n super().__init__()\n self.action_type = action_type\n self.length = length\n self.port = port\n self.max_length = max_length", "def __init__(self, quality: int = 7, bitrate: int = 64):\n self._output = mp3.Mp3(quality, bitrate)\n self._output.add_callback(self._enqueue)\n self._socket = None\n self._source = None\n self._endpoint = None\n self._password = None\n # Icecast doesn't actually support chunked encoding\n self._chunk = False", "def install_connection(connection):\n global _connection\n _connection = connection", "def initialize(self, config: DataProviderConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.OUTPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = 5\n chn.sock_opts['sndhwm'] = int(self.batch_size / 2) + 5", "def pre_init(frequency=PYGAME_MIXER_DEFAULT_FREQUENCY,\r\n size=PYGAME_MIXER_DEFAULT_SIZE,\r\n channels=PYGAME_MIXER_DEFAULT_CHANNELS,\r\n chunksize=PYGAME_MIXER_DEFAULT_CHUNKSIZE):\r\n global _request_frequency, _request_size, _request_stereo, \\\r\n _request_chunksize\r\n _request_frequency = frequency\r\n _request_size = size\r\n _request_stereo = 
channels\r\n _request_chunksize = chunksize", "def __init__(self, handle, server_hostname=None, mode='rw'):\n if not isinstance(handle, pyuv.Stream):\n raise TypeError(\"handle: expecting a 'pyuv.Stream' instance, got {!r}\"\n .format(type(handle).__name__))\n super(Transport, self).__init__(handle, mode)\n self._server_hostname = server_hostname", "def __init__(self, size: Size) -> None:\n self.size = size", "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\r\n if not _SCRIBE_PRESENT:\r\n raise self.ScribeHandlerException(\r\n \"Could not initialize ScribeHandler: Scribe modules not present.\")\r\n self._buffer_enabled = kwargs.pop(\"buffer\")\r\n self._category = kwargs.pop(\"category\")\r\n self._client = None\r\n self._host = kwargs.pop(\"host\")\r\n self._log_buffer = []\r\n self._port = kwargs.pop(\"port\")\r\n self._transport = None\r\n Handler.__init__(self, *args, **kwargs)", "def __init__(self, size=0):\n self.__size = size", "def __init__(self, size=0):\n self.__size = size", "async def _open_connection(self) -> None:\n self.logger.info(\n f\"Connecting to gpsd at {self.connection_args['host']}\" +\n (f\":{self.connection_args['port']}\"\n if self.connection_args['port'] else ''))\n self.reader, self.writer = await asyncio.wait_for(\n asyncio.open_connection(**self.connection_args),\n self.connection_timeout)\n # Set socket options\n sock = self.writer.get_extra_info('socket')\n if sock is not None:\n if 'SO_KEEPALIVE' in self.alive_opts:\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n self.alive_opts['SO_KEEPALIVE'])\n if hasattr(\n sock,\n 'TCP_KEEPIDLE') and 'TCP_KEEPIDLE' in self.alive_opts:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE, # pylint: disable=E1101\n self.alive_opts['TCP_KEEPIDLE'])\n if hasattr(\n sock,\n 'TCP_KEEPINTVL') and 'TCP_KEEPINTVL' in self.alive_opts:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL, # pylint: disable=E1101\n self.alive_opts['TCP_KEEPINTVL'])\n if hasattr(\n sock,\n 'TCP_KEEPCNT') and 'TCP_KEEPCNT' in self.alive_opts:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n self.alive_opts['TCP_KEEPCNT'])", "def __init__(self, size=0):\n self.__size = size\n if isinstance(self.__size, int):\n if size < 0:\n raise ValueError('size must be >= 0')\n else:\n self.__size = size\n else:\n raise TypeError('size must be an integer')", "def test_blocksize_lax(self):\n self.default_kwargs['blocksize'] = 65535\n self.default_kwargs['streamable_subset'] = False\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder._init()", "def __init__(self, channel_number, body_size, props):\n Frame.__init__(self, spec.FRAME_HEADER, channel_number)\n self.body_size = body_size\n self.properties = props", "def __init__(self, *args, **kwds):\n\t\tself._size_limit = kwds.pop(\"size_limit\", None)\n\t\tOrderedDict.__init__(self, *args, **kwds)\n\t\tself._check_size_limit()", "def __init__(self, connectionPool, timeout=10):\n MsgPackProtocol.__init__(self, timeout)\n self.connectionPool = connectionPool\n self.log = Logger(system=self)\n self.storage = self.connectionPool.storage\n self.peersKeyId = None", "def __init__(self, shell=TerminalShell, stream=TelnetStream,\n encoding='utf-8', log=logging, force_binary=False,\n waiter_connected=None, waiter_closed=None):\n self.log = log\n self.force_binary = force_binary\n self._shell_factory = shell\n self._stream_factory = stream\n self._default_encoding = encoding\n 
self._loop = asyncio.get_event_loop()\n\n #: session environment as S.env['key'], defaults empty string value\n self._env = collections.defaultdict(str, **self.default_env)\n\n #: toggled when transport is shutting down\n self._closing = False\n\n #: datetime of last byte received\n self._last_received = None\n\n #: datetime of connection made\n self._connected = None\n\n #: future result stores value of gethostbyaddr(sever_ip)\n self._server_host = asyncio.Future()\n\n #: server_fqdn is result of socket.getfqdn() of server_host\n self._server_fqdn = asyncio.Future()\n\n #: values for properties ``server_ip`` and ``server_port``\n self._server_ip = None\n self._server_port = None\n\n #: waiter is a Future that completes when connection is closed.\n if waiter_closed is None:\n waiter_closed = asyncio.Future()\n self.waiter_closed = waiter_closed\n\n if waiter_connected is None:\n waiter_connected = asyncio.Future()\n self.waiter_connected = waiter_connected", "def connection_made(self, transport: BaseTransport) -> None:\n self.transport = transport", "def init_stream_handler(\n self, \n logger, \n loop, \n netconf_ip, \n netconf_port,\n statistics,\n xml_to_json_translator):\n self._logger = logger\n self._asyncio_loop = loop\n self._encoding = \"xml\"\n self._netconf_ip = netconf_ip\n self._netconf_port = netconf_port\n self._stat = statistics\n self._xml_to_json_translator = xml_to_json_translator", "def connect(self):\n if isinstance(self._sock, socket.socket):\n return\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, self.port))\n except socket.error, e:\n raise ConnectionError(\"Error %s connecting to %s:%s. %s.\" % (e.args[0], self.host, self.port, e.args[1]))\n else:\n self._sock = sock\n self._fp = self._sock.makefile('r')", "def __init__(self, connection):\n super().__init__(connection)", "def setsize(self, size):\n self.__size = size", "def setUp(self) -> None:\n local_sock, remote_sock = socketpair() # We apparently can't use family=AF_INET on Linux\n local_sock.settimeout(1.0)\n remote_sock.settimeout(1.0)\n self.inverter = Inverter(local_sock, None)\n # This sock mimics the actual inverter, i.e. the remote side of the\n # connection. Send messages on it to mimic the actual inverter sending\n # messages to the Inverter class.\n self.sock = remote_sock", "def setUp(self):\n self.test_max_size = 10", "def __init__(self, size):\n self.size = size\n self.buffer = [None]*size\n self.start = 0\n self.end = 0" ]
[ "0.5874525", "0.58596337", "0.57424444", "0.57155627", "0.571499", "0.5675915", "0.566236", "0.5662341", "0.56123924", "0.5471748", "0.5447842", "0.5433867", "0.5358979", "0.53248644", "0.53098845", "0.52834433", "0.5279242", "0.5220655", "0.5175288", "0.51495194", "0.5126971", "0.5102948", "0.5066454", "0.5006225", "0.49974203", "0.49876526", "0.49875414", "0.49744007", "0.49731275", "0.49513358", "0.492831", "0.492831", "0.49208462", "0.4918306", "0.49103725", "0.48908457", "0.48863202", "0.4886068", "0.48759487", "0.48696867", "0.48670936", "0.48628843", "0.484963", "0.4848903", "0.48420322", "0.4819153", "0.48187572", "0.48089457", "0.47990537", "0.47957104", "0.47882786", "0.4780114", "0.47681126", "0.4767458", "0.47589967", "0.47575375", "0.4743393", "0.4739416", "0.47147077", "0.4709405", "0.47070664", "0.47053397", "0.46914852", "0.4691095", "0.4689122", "0.4689056", "0.46839908", "0.46833408", "0.46771178", "0.46766734", "0.46766734", "0.46766734", "0.4676553", "0.46757862", "0.46747947", "0.46747038", "0.4674699", "0.46730125", "0.46688175", "0.4666739", "0.46604672", "0.4658389", "0.4648628", "0.46447623", "0.46447623", "0.4643824", "0.4643506", "0.46368054", "0.46367493", "0.4636692", "0.46345577", "0.46314707", "0.46298674", "0.46244735", "0.46148473", "0.46122172", "0.46113893", "0.46077633", "0.46049064", "0.46045044" ]
0.7370442
0
Sends a chunk of data.
def send_chunk(self, chunk): print "ChunkedTwistedConnection: send chunk" return self.body.send(chunk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_chunk(chunk, send_socket):\n length = len(chunk)\n data = str(length).zfill(MAX_CHUNK_SIZE).encode() + chunk\n send_socket.send(data)", "def send_chunk(chnk, sock):\n length = len(chnk)\n data = str(length).zfill(MAX_CHUNK_SIZE).encode() + chnk\n sock.send(data)", "def inject_send(data):\n tsent = 0\n bytes = len(data)\n chunksize = filesize / 100\n if chunksize < 4096:\n chunksize = 4096\n while bytes > 0:\n sent = imap.sslobj.write(data[:chunksize])\n if sent == bytes:\n common.progress(filesize, bytes)\n break # avoid copy\n tsent += sent\n common.progress(filesize, tsent)\n data = data[sent:]\n bytes = bytes - sent", "def send(self, bytestream):\n total_sent = 0\n length_data = len(bytestream)\n try:\n while total_sent < length_data:\n # Returns the number of bytes sent\n nr_sent = self.socket.send(bytestream[total_sent:])\n total_sent += nr_sent\n except (socket.error, socket.timeout):\n # Evt17: Transport connection closed\n self.event_queue.put('Evt17')", "def send(self, data, is_chunked=False):\r\n if self.sock is None:\r\n if self.auto_open:\r\n self.connect()\r\n else:\r\n raise NotConnected()\r\n\r\n if self.debuglevel > 0:\r\n print \"send:\", repr(data)\r\n blocksize = 8192\r\n if hasattr(data, 'read') and not isinstance(data, array):\r\n if self.debuglevel > 0:\r\n print \"sendIng a read()able\"\r\n datablock = data.read(blocksize)\r\n while datablock:\r\n if self.debuglevel > 0:\r\n print 'chunked:', is_chunked\r\n if is_chunked:\r\n if self.debuglevel > 0:\r\n print 'send: with trunked data'\r\n lenstr = string.upper(hex(len(datablock))[2:])\r\n self.sock.sendall('%s\\r\\n%s\\r\\n' % (lenstr, datablock))\r\n else:\r\n self.sock.sendall(datablock)\r\n datablock = data.read(blocksize)\r\n if is_chunked:\r\n self.sock.sendall('0\\r\\n\\r\\n')\r\n else:\r\n self.sock.sendall(data)", "def send(self, data: bytes):\n\n self.client.sendall(data)\n\n return len(data)", "def send(self, data: bytes):", "def recv_chunk(self, data):", "def send(self, data: bytes) -> int:\n ...", "def send_bytes(self, data: bytes) -> None:", "async def write(self, data: bytes):\n while data:\n await self.wait_for_write()\n try:\n sent = self.socket.send(data)\n except OSError as e:\n self.logger.debug(\"Failed to write: %s\", e)\n raise asyncio.TimeoutError()\n data = data[sent:]", "def send_data(self, data):\n self._transport.write(data)", "def send_chunked(self, chunks, payload, trailers):\r\n\r\n chunk_list = chunks.split(',')\r\n pointer = 0\r\n for cwidth in chunk_list:\r\n cwidth = int(cwidth)\r\n # send chunk length indicator\r\n self.wfile.write(format(cwidth, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:pointer + cwidth] + \"\\r\\n\")\r\n pointer += cwidth\r\n\r\n # is there another chunk that has not been configured? Send it anyway for the sake of completeness..\r\n if len(payload) > pointer:\r\n # send chunk length indicator\r\n self.wfile.write(format(len(payload) - pointer, 'x').upper() + \"\\r\\n\")\r\n # send chunk payload\r\n self.wfile.write(payload[pointer:] + \"\\r\\n\")\r\n\r\n # we're done with the payload. 
Send a zero chunk as EOF indicator\r\n self.wfile.write('0'+\"\\r\\n\")\r\n\r\n # if there are trailing headers :-) we send them now..\r\n for trailer in trailers:\r\n self.wfile.write(\"%s: %s\\r\\n\" % (trailer[0], trailer[1]))\r\n\r\n # and finally, the closing ceremony...\r\n self.wfile.write(\"\\r\\n\")", "def send(self, data):\n self.socket.sendall(data)", "def sendData(self, data):\n self.tx.sendBuffer(data)", "def save_send(socket, data):\r\n\r\n # We have no control about how much data the clients accepts,\r\n # thus we send in chunks until done\r\n while len(data) > 0:\r\n try:\r\n send_data_size = socket.send(data)\r\n # remove sent portion form data\r\n data = data[send_data_size:]\r\n except error as msg:\r\n # most likely socket busy, buffer full or not yet ready\r\n sleep(0.01)", "def send(self, data):\n pass", "def _send_data(self, data, time):\n pass", "def send(self, data):", "def _send_from_buffer(cls, buf, stream):\n remaining_bytes = len(buf)\n while remaining_bytes > 0:\n next_chunk_bytes = min( remaining_bytes, VoxelsNddataCodec.STREAM_CHUNK_SIZE )\n chunk_start = len(buf)-remaining_bytes\n chunk_stop = len(buf)-(remaining_bytes-next_chunk_bytes)\n stream.write( buf[chunk_start:chunk_stop] )\n remaining_bytes -= next_chunk_bytes", "def send(self, data):\n self.sent.put(data)", "async def _send_stream_data(\n self, request: Request, stream_id: int, data: bytes\n ) -> None:\n while data:\n max_flow = await self._wait_for_outgoing_flow(request, stream_id)\n chunk_size = min(len(data), max_flow)\n chunk, data = data[:chunk_size], data[chunk_size:]\n self._h2_state.send_data(stream_id, chunk)\n await self._write_outgoing_data(request)", "def send(self, buf, offset=0, size=None):\n raise NotImplementedError", "def send(self, data):\n self._send(data)", "def send(self, data):\n self.sock.send(data)", "def send(self, data):\n self.sock.send(data)", "def send_to_data_channel(self, sock, data):\n resp = sock.send(data)\n print_debug(resp)\n self.logger.log(\"Sent: %s\" % data)\n return resp", "async def send_raw(self, data: bytes) -> None:\n await self.socket.sendall(data)", "def send(self, data: bytes) -> int:\n return self.connection.send(data)", "def send(self, socket, data):\n data_length = len(data)\n socket.send(self.struct.pack(data_length))\n\n total_sent = 0\n while total_sent < data_length:\n sent = socket.send(data[total_sent:])\n if not sent:\n raise RuntimeError(\"Socket connection was broken.\")\n total_sent += sent", "def send(connection, data):\n connection.send(pickle.dumps(data))", "async def send_data(self, data, stream_id):\n while data:\n while self.conn.local_flow_control_window(stream_id) < 1:\n try:\n await self.wait_for_flow_control(stream_id)\n except asyncio.CancelledError as e:\n print(e)\n return\n\n chunk_size = min(\n self.conn.local_flow_control_window(stream_id),\n len(data),\n self.conn.max_outbound_frame_size,\n )\n\n try:\n self.conn.send_data(\n stream_id,\n data[:chunk_size],\n end_stream=(chunk_size == len(data))\n )\n except (StreamClosedError, ProtocolError) as e:\n print(e)\n # The stream got closed and we didn't get told. 
We're done\n # here.\n break\n\n self.transport.write(self.conn.data_to_send())\n data = data[chunk_size:]", "def send(self,data):\r\n # Get the data length\r\n fullDataLength = len(data)\r\n \r\n # Input sanity\r\n if fullDataLength == 0:\r\n raise ValueError, \"Cannot send a null data-set!\"\r\n \r\n # Send chunks of data until it is all sent\r\n while True:\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Make sure we have available outgoing bandwidth\r\n self.socketLocks[\"outgoing\"].acquire()\r\n try:\r\n self.socketLocks[\"outgoing\"].release()\r\n except:\r\n # Some weird timing issues can cause an exception, but it is harmless\r\n pass\r\n \r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Get our own lock\r\n self.socketLocks[\"send\"].acquire()\r\n \r\n # How much outgoing traffic is available?\r\n outgoingAvailable = self.bufferInfo[\"outgoing\"]\r\n \r\n # If we can, just send it all at once\r\n if len(data) < outgoingAvailable:\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, data)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] -= len(data)\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # We need to explicitly leave the loop\r\n break\r\n \r\n # We need to send chunks, while waiting for more outgoing B/W\r\n else:\r\n # Get a chunk of data, and send it\r\n chunk = data[:outgoingAvailable]\r\n try:\r\n # Instruct the multiplexer object to send our data\r\n self.mux._send(self.id, chunk)\r\n except AttributeError:\r\n # The multiplexer may be closed\r\n # Check if the socket is closed\r\n self._handleClosed()\r\n \r\n # Reduce the size of outgoing avail\r\n self.bufferInfo[\"outgoing\"] = 0\r\n\r\n # Lock the outgoing lock, so that we block until we get a MULTIPLEXER_CONN_BUF_SIZE message\r\n self.socketLocks[\"outgoing\"].acquire()\r\n \r\n # Trim data to only what isn't sent syet\r\n data = data[outgoingAvailable:]\r\n \r\n # Release the lock\r\n self.socketLocks[\"send\"].release()\r\n \r\n # If there is no data left to send, then break\r\n if len(data) == 0:\r\n break\r\n \r\n # Return bytes sent, which is always the full message\r\n # since we will block indefinately until everything is sent.\r\n return fullDataLength", "def sendall(self, data):\n while data and self.running:\n ret = self.sendFn(data[:MAX_SEND_SIZE])\n assert ret > 0\n data = data[ret:]", "def send_data(self, msg):\n totalsent = 0\n # tt= struct.unpack('c'*len(msg), msg)\n # print(tt)\n while totalsent < len(msg):\n try:\n sent = self.sockfd.send(msg)\n except:\n print(f'{self.ip} socket failed')\n break\n if sent == 0:\n raise RuntimeError(\"Socket connection broken\")\n totalsent = totalsent + sent", "def _send(self, data, newline=\"\\r\\n\", sock=None):\n self.outbuff.append(data+newline)\n for msg in self.outbuff:\n if self.print_raw:\n logger.debug(msg.strip())\n self.sock.sendall((msg+newline).encode(\"utf-8\"))", "def send_message(self, data):\n self.transport.write(data)", "def send_message(self,data):\n num_bytes = len(data)\n message = WriteMessage()\n message.write_uint32(num_bytes)\n message.data.extend(data)\n self.socket.sendall(message.data)", "def _send_data(self):\n pass", "def write(self, data):\n self._write_lock.acquire()\n try:\n self.socket.sendall(data)\n finally:\n self._write_lock.release()", "def send(self, 
data):\n if self.print_send:\n dumpdata.dumpdata(' > Send: ', '{:02x}', data)\n try:\n self.socket.send(data)\n except ConnectionAbortedError as err:\n raise Closed(err)", "def send (self, data):\n return self.sending.send(data)", "def sendData(data):\n\n\tslen = struct.pack('<I', len(data))\n\t#connSock.sendall(slen + data)\n\tconnSock.sendall(slen)\n\tconnSock.sendall(data)\n\n\treturn 0", "def send_bytes(self, data):\n raw_data = bytes(data)\n\n attempts = 0\n while True:\n try:\n self._sock.sendall(raw_data)\n return\n except (socket.timeout, BrokenPipeError):\n print('in socket exeption....')\n if (attempts < self._retries):\n attempts += 1\n self._sock.close()\n self._sock.connect((self._ip, self._port))\n else:\n raise", "def send(self, data, flush = False):\n self._send_buffer += data\n to_send = \"\"\n\n if flush:\n # If `send_event` is set or we're being forced to flush, send everything we have.\n to_send += self._send_buffer\n self._send_buffer = \"\"\n\n if len(self._send_buffer) > self._buffer_size:\n # If the sending buffer is oversized, send all but the final few bytes and trim it.\n to_send += self._send_buffer[:-self._buffer_size]\n self._send_buffer = self._send_buffer[-self._buffer_size:]\n\n if to_send:\n # Despite it's name, the underlying class uses `sendall`.\n super(BaseConnection, self).send(to_send)", "def send(self, data):\n starttime = time.time()\n while 1:\n if self._waiting_response==1:\n if time.time() - starttime > self._maxrespdelay:\n break\n _LOGGER.debug(\"Send going to sleep\\n\")\n time.sleep(self._sleeptime)\n else:\n break\n\n currtime = time.time()\n if currtime - self._lastcall > self._maxtime:\n self.reset()\n self._lastcall = currtime\n _LOGGER.debug(\"Sending: %s\", data)\n if not testing:\n self.serial.reset_input_buffer()\n bytessent = self.serial.write(data.encode())\n return bytessent\n else:\n self._waiting_response = 1\n return len(data)", "def send_data(data):\n\n # In order for the data to be transmitted, it has to be in bytes format\n pickled_data = pickle.dumps(data)\n # Actual length of the data (for example 3) \n data_length = len(pickled_data)\n # Padded length of the data (for example '3 ')\n padded_length = pickle.dumps(data_length)\n padded_length += b' ' * (HEADER_SIZE - len(padded_length))\n\n # Send the padded length and then the data right after\n conn.send(padded_length)\n conn.send(pickled_data)", "def write(self, chunk):\r\n if not self.started_response:\r\n raise AssertionError(\"WSGI write called before start_response.\")\r\n \r\n if not self.sent_headers:\r\n self.sent_headers = True\r\n self.send_headers()\r\n \r\n if self.chunked_write and chunk:\r\n buf = [hex(len(chunk))[2:], \"\\r\\n\", chunk, \"\\r\\n\"]\r\n self.sendall(\"\".join(buf))\r\n else:\r\n self.sendall(chunk)", "def net_send(out_data: bytes, conn: socket.socket) -> None:\n print(\"Sending {} bytes\".format(len(out_data)))\n conn.send(out_data)", "async def send(self):\n message = b'foo\\nbar\\nbaz\\nqux\\n'\n for b in message:\n await asyncio.sleep(0.5)\n self.transport.serial.write(bytes([b]))\n print(f'Writer sent: {bytes([b])}')\n self.transport.close()", "def write(self, data):\n with self._write_lock:\n self.socket.send(data)", "def send(self, data):\r\n\r\n self._serial_object.write(data)", "def _send(self, data_str):\n\n self._handler.sendall(data_str)", "def send_data(no_of_packets):\n generate = data_buf_pb2.Send()\n generate.nop = no_of_packets\n data = generate.SerializeToString()\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 
create socket and send data\n s.connect((receiver_node_ip, receiver_node_port))\n s.sendall(data)\n s.close()", "def send_data(self, str_data):\n try:\n self.s.sendall(str_data.encode())\n except OSError as e:\n print(e)", "def write(self, chunk):\n return self.tornado_request.write(chunk)", "def send(self, data):\n if self._fuzz_data_logger is not None:\n self._fuzz_data_logger.log_send(data)\n\n num_sent = self._target_connection.send(data=data)\n\n if self._fuzz_data_logger is not None:\n self._fuzz_data_logger.log_info(\"{0} bytes sent\".format(num_sent))", "def send(self, msg):\n raise NotImplementedError(\"DataStream does not implement send.\")", "def nonblocking_send(self, data):\n try:\n if len(data) == 0:\n return None\n self.amount_so_far += self.socket.send(data[self.amount_so_far:])\n except Exception as exc:\n active_sockets_dict.pop(self.socket, None)\n self.socket.close()\n print(\"An error occurred: %s\\n\" % exc)\n return -1\n ret = self.is_send_done()\n return ret", "def send(self, data: bytes) -> int:\n if not data:\n raise Exception(\"Can't send empty data!\")\n\n LOG.debug(\"=> %s\", data.hex())\n\n data = int.to_bytes(len(data), 2, byteorder=\"big\") + data\n offset: int = 0\n seq_idx: int = 0\n length: int = 0\n\n while offset < len(data):\n # Header: channel (0x0101), tag (0x05), sequence index\n header: bytes = b\"\\x01\\x01\\x05\" + seq_idx.to_bytes(2, byteorder=\"big\")\n data_chunk: bytes = (header +\n data[offset:offset + 64 - len(header)])\n\n self.device.write(b\"\\x00\" + data_chunk)\n length += len(data_chunk) + 1\n offset += 64 - len(header)\n seq_idx += 1\n\n return length", "def send(self, data: bytes) -> None:\n size = len(data).to_bytes(self.SIZE_BYTES, self.BYTEORDER)\n message = size + data\n sent = self.socket.send(message)\n # For TCP we don't really need to send all the message bytes\n # in one go, BUT it becomes important if we switch to UDP\n if sent != len(message):\n raise RuntimeError(\"Unable to send all the data required\")", "def send_data(self):\n self.socket.send(\"DATA\\r\\n\")\n response = self.get_response()\n if response[0] != 354:\n print \"An error has occured try again\"\n print response[1]\n sys.exit(0)", "def send_data(self):\n data = self.datastore.use(self.data_name)\n if data is None:\n self.dbg(\"sockets_warning\", \"Data is none for {}\", [self.data_name])\n encoded_data = json.dumps(data).encode()\n self.conn.sendall(encoded_data)\n self.dbg(\"sockets_verbose\", \"Data sent\")", "def send_data(self, stream_id):\n send_stream = self.send_streams.get(stream_id)\n\n if not send_stream:\n # window updates trigger sending of data, but can happen after a stream\n # has been completely sent\n return\n\n if not send_stream.headers_sent:\n # don't attempt to send any data until the headers have been sent\n return\n\n try:\n window_size = self.conn.local_flow_control_window(stream_id=stream_id)\n max_frame_size = self.conn.max_outbound_frame_size\n\n for chunk in send_stream.read(window_size, max_frame_size):\n log.debug(\"sending data on stream %s: %s...\", stream_id, chunk[:100])\n\n self.conn.send_data(stream_id=stream_id, data=chunk)\n\n except StreamClosedError:\n return\n\n if send_stream.exhausted:\n log.debug(\"closing exhausted stream, stream %s\", stream_id)\n self.end_stream(stream_id)", "def write(self, data):\n self.buffer.write(data)\n self.offset += len(data)", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 
'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "def write(self, data):\n if self.closed:\n raise ConnectionResetError(\n 'Transport closed - cannot write on %s' % self\n )\n else:\n t = self.transport\n if self._paused or self._buffer:\n self._buffer.appendleft(data)\n self._buffer_size += len(data)\n self._write_from_buffer()\n if self._buffer_size > 2 * self._b_limit:\n if self._waiter and not self._waiter.cancelled():\n self.logger.warning(\n '%s buffer size is %d: limit is %d ',\n self._buffer_size, self._b_limit\n )\n else:\n t.pause_reading()\n self._waiter = self._loop.create_future()\n else:\n t.write(data)\n self.changed()\n return self._waiter", "def _send(self, data: bytes):\n if self._pre_send is not None:\n data = self._pre_send(data)\n if data is None:\n return\n\n self._transport.sendto(data, self._peer)", "def mySend(sock, data):\r\n\r\n # check socket object validity\r\n if sock is None:\r\n return\r\n\r\n # set a timeout\r\n sock.settimeout(TIMEOUT)\r\n\r\n # data is a string message: it needs to be converted to bytes\r\n data = str(data).encode(ENCODING_TYPE)\r\n\r\n # get size of the message\r\n size = len(data)\r\n\r\n # put size on a 16 byte string filled with 0s\r\n # e.g. size = 123\r\n # strSize = 0000000000000123\r\n strSize = str(size).zfill(SIZE_LENGTH)\r\n strSize = strSize.encode(ENCODING_TYPE)\r\n\r\n # send the size of the data\r\n totalSent = 0\r\n while totalSent < SIZE_LENGTH:\r\n try:\r\n sent = sock.send(strSize[totalSent:])\r\n except socket.timeout:\r\n raise socket.timeout\r\n if sent == 0:\r\n raise RuntimeError(\"sock connection broken\")\r\n totalSent = totalSent + sent\r\n\r\n # send data\r\n totalSent = 0\r\n while totalSent < size:\r\n try:\r\n sent = sock.send(data[totalSent:])\r\n except socket.timeout:\r\n raise socket.timeout\r\n if sent == 0:\r\n raise RuntimeError(\"sock connection broken\")\r\n totalSent = totalSent + sent", "def send_data(self, **kwargs):", "def _writeBytes(self, b):\n self.socket.send(b)", "def _send_frame(self, dest, data):\n self._log.debug(\"write {} to {}\".format(len(data), dest)) \n # send to endpoint\n self._conn.sendto(data, (dest,0))", "def send_data(self,data):\n try:\n data = self.encode(data)\n except:\n raise Exception(\"Invalid data type for encoding\")\n return False\n try:\n # Put metadata of size ahead of the real data\n # It supports upto 3.99 GB data so no worry about fragmentation\n # Provides supports for message-based protocol\n data = struct.pack('>I',len(data)) + data\n self.sockObj.send(data)\n except:\n raise Exception(\"Can't send data over conenction\")\n return False\n\n return True", "async def send(self, websocket, payload) -> None:\n if isinstance(payload, list):\n data_size: int = 0\n\n for data in payload:\n _data = pickle.dumps(data)\n await websocket.send(_data)\n data_size += sys.getsizeof(_data)\n else:\n _data = pickle.dumps(payload)\n await websocket.send(_data)\n data_size = sys.getsizeof(_data)\n\n logging.info(\"[Client #%d] Sent %s MB of payload data to the server.\",\n self.client_id, round(data_size / 1024**2, 2))", "def send_streaming_message(self, data):\n header, data = format_msg(data)\n self.server_socket_streaming.sendto(header,\n self.client_streaming_address)\n self.server_socket_streaming.sendto(data,\n self.client_streaming_address)", "def chunk(f, n, data):\n\n\t# Chunk ID\n\tf.write(number(2, n))\n\t# Chunk length\n\tf.write(number(4, len(data)))\n\t# Data\n\tf.write(data)", "def send_file(self, filename, BUFF_LENGTH):\n out_file = 
open(filename,\"rb\")\n file_bytes = out_file.read(1024) \n while file_bytes != b'':\n self.client.send(file_bytes)\n file_bytes = out_file.read(1024) # read next block from file\n self.client.send(b'')", "def send_data(self, data: dict):\n pass", "def sendto(self, data, addr):\n asyncio.ensure_future(self.__inner_protocol.send_data(data, addr))", "def sendData(self, data):\n self.transport.write(zlib.compress(rencode.dumps(data)))", "def flush(self, data):", "def send(self, msg):\n msg = stc.pack('>I', len(msg)) + msg\n self.sendall(msg)", "def _send_body(self, body, body_type):\n if body_type == BODY_FLAT:\n # Special case for files and other 'readable' objects.\n if hasattr(body, 'read'):\n while True:\n block = body.read(16*1024)\n if not block:\n break\n\n try:\n self._sock.send(block)\n except TypeError:\n raise ValueError(\n \"File objects must return bytestrings\"\n )\n\n return\n\n # Case for bytestrings.\n elif isinstance(body, bytes):\n self._sock.send(body)\n\n return\n\n # Iterables that set a specific content length.\n else:\n for item in body:\n try:\n self._sock.send(item)\n except TypeError:\n raise ValueError(\"Body must be a bytestring\")\n\n return\n\n # Chunked! For chunked bodies we don't special-case, we just iterate\n # over what we have and send stuff out.\n for chunk in body:\n length = '{0:x}'.format(len(chunk)).encode('ascii')\n\n # For now write this as four 'send' calls. That's probably\n # inefficient, let's come back to it.\n try:\n self._sock.send(length)\n self._sock.send(b'\\r\\n')\n self._sock.send(chunk)\n self._sock.send(b'\\r\\n')\n except TypeError:\n raise ValueError(\n \"Iterable bodies must always iterate in bytestrings\"\n )\n\n self._sock.send(b'0\\r\\n\\r\\n')\n return", "def sendBuffer():\n dislin.sendbf()", "def send(self,data,timeout=None):\r\n # Set the timeout if None\r\n if timeout is None:\r\n timeout = self.timeout\r\n\r\n # Get the start time\r\n starttime = getruntime()\r\n\r\n # Block until we can write\r\n rblock, wblock = self.socket.willblock()\r\n while wblock:\r\n # Check if we should break\r\n if timeout > 0:\r\n # Get the elapsed time\r\n diff = getruntime() - starttime\r\n\r\n # Raise an exception\r\n if diff > timeout:\r\n raise SocketTimeoutError,\"send() timed out!\"\r\n\r\n # Sleep\r\n # Since switching to the fibonacci backoff, the nature of \r\n # this field has changed. Rather than implement the backoff \r\n # for checking block status (seems wasteful) we'll just use \r\n # a constant value. 
Ten ms seems appropriate.\r\n sleep(0.010)\r\n\r\n # Update rblock\r\n rblock, wblock = self.socket.willblock()\r\n\r\n # Do the recv\r\n return self.socket.send(data)", "def sendData(self):\n out = ''\n for line in self.sendq:\n line = 'put ' + line + self.tagstr\n out += line + '\\n'\n LOG.debug('SENDING: %s', line)\n\n if not out:\n LOG.debug('no data in sendq?')\n return\n\n try:\n if self.dryrun:\n print out\n else:\n self.cissd.sendall(out)\n self.sendq = []\n # If an exception occurs, try sending data again next time\n except socket.error, msg:\n LOG.error('failed to send data: %s', msg)\n try:\n self.cissd.close()\n except socket.error:\n pass\n self.cissd = None", "def _send(self):\n while self.socket is not None:\n try:\n data = self._get_data_from_send_queue()\n if self.socket is not None:\n header = self._create_data_header(data)\n with self.socket_lock:\n self.socket.sendall(header + data)\n except Exception as err:\n getLogger(__name__).debug((\"Unexpected exception occurred,\"\n \" send thread may be in a\"\n \" corrupted state\\n\"\n \"Error: {}\".format(err)))", "def write_data(self, data):\n ofs = 0\n size = len(data)\n try:\n while ofs < size:\n # how many bytes should we write?\n wr_size = self.wrbuf_chunksize\n if wr_size > size - ofs:\n # reduce the write size\n wr_size = size - ofs\n # write the bytes\n n = self._write(data[ofs : ofs + wr_size])\n if n <= 0:\n raise usbdev_error(\"USB bulk write error\")\n ofs += n\n # return the number of bytes written\n return ofs\n except usb.core.USBError as e:\n raise usbdev_error(str(e))", "def _send(self):\n data = self.output_buffer.view()\n if not data:\n return\n if self.closed():\n raise self.Error(\"Failed to write to closed connection {!r}\".format(self.server.address))\n if self.defunct():\n raise self.Error(\"Failed to write to defunct connection {!r}\".format(self.server.address))\n self.socket.sendall(data)\n self.output_buffer.clear()", "def send_mes(mes, sock):\n # print(\"mes \"+mes.decode())\n # try:\n length = len(mes)\n data = str(length).zfill(MAX_CHUNK_SIZE).encode() + mes\n sock.send(data)\n # except socket.error as msg:\n # print(msg)\n # sock.close()", "def sendToClients(self, data):\n for client in self.__clients:\n result = client.write(data)\n if (result < 0):\n print \"Error writing to\", self.__clientName(client), \"-\", client.errorString()\n elif (result <> len(data)):\n print \"Only wrote\", result, \"of\", len(data), \"bytes to\", self.__clientName(client)", "def _chunk_send(self, metrics):\n messages = self._create_messages(metrics)\n request = self._create_request(messages)\n packet = self._create_packet(request)\n\n response = None\n\n for host_addr in self.zabbix_uri:\n logger.debug('Sending data to %s', host_addr)\n\n # create socket object\n connection_ = socket.socket()\n if self.socket_wrapper:\n connection = self.socket_wrapper(connection_)\n else:\n connection = connection_\n\n connection.settimeout(self.timeout)\n\n try:\n # server and port must be tuple\n connection.connect(host_addr)\n connection.sendall(packet)\n except socket.timeout:\n logger.error('Sending failed: Connection to %s timed out after %d seconds', host_addr, self.timeout)\n connection.close()\n continue\n except socket.error as err:\n # In case of error we should close connection, otherwise\n # we will close it after data will be received.\n logger.warning('Sending failed: %s', getattr(err, 'msg', str(err)))\n connection.close()\n continue\n\n try:\n response = self._get_response(connection)\n\n logger.debug('%s 
response: %s', host_addr, response)\n except socket.error as err:\n logger.error('Sending failed: %s', getattr(err, 'msg', str(err)))\n raise socket.error(response)\n\n break\n\n if response is None:\n logger.error('Sending failed: no servers available')\n raise socket.error()\n\n if response and (\"response\" not in response or response.get('response') != 'success'):\n logger.debug('Response error: %s}', response)\n raise socket.error(response)\n\n return response", "def send(self, data):\n print(\"sending: {}\".format(data))\n self.forward_in_sock.send_string(\"{}\\n\".format(data))", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def handle_write(self):\n sent = self.send(self.append_send_buffer)\n self.append_send_buffer = self.append_send_buffer[sent:]", "def send(self, data: Union[bytes, str]) -> None:\n if self.readyState != \"open\":\n raise InvalidStateError\n\n if not isinstance(data, (str, bytes)):\n raise ValueError(f\"Cannot send unsupported data type: {type(data)}\")\n\n self.transport._data_channel_send(self, data)", "def send_data(self, data):\r\n try:\r\n self.sock.sendto(data, self.addr)\r\n except Exception:\r\n print(\"Cant't send a package\")", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "def write(self, data):\n with self.writing: # state check\n self.write_buffer.enqueue(data)\n if len(self.write_buffer) > 2 * self.bufsize:\n yield self.flush()\n elif len(self.write_buffer) > self.bufsize:\n self.flush()()\n do_return(len(data))" ]
[ "0.78114295", "0.7532161", "0.7232472", "0.71928585", "0.7086782", "0.7002679", "0.6989813", "0.69814444", "0.69016576", "0.68666893", "0.68550235", "0.6853498", "0.67988867", "0.6783611", "0.67816645", "0.67800605", "0.6746048", "0.6729257", "0.6725598", "0.67044735", "0.67041624", "0.6678867", "0.6669147", "0.66383004", "0.66168433", "0.66168433", "0.6610788", "0.65852153", "0.6584088", "0.65770674", "0.6566498", "0.65456724", "0.65456176", "0.6517252", "0.64977944", "0.6494848", "0.64876217", "0.64793783", "0.6427462", "0.64235955", "0.6420426", "0.63971525", "0.63761276", "0.6349568", "0.6344828", "0.6339741", "0.6339724", "0.6336639", "0.6316015", "0.6312204", "0.63023424", "0.62921906", "0.62748736", "0.6235074", "0.62269044", "0.6213315", "0.62003976", "0.6177589", "0.6171921", "0.6160647", "0.61405915", "0.613642", "0.61348", "0.6132432", "0.6132257", "0.6130538", "0.61172855", "0.61133736", "0.6073346", "0.6065859", "0.60538304", "0.60536265", "0.6048798", "0.6039241", "0.60377526", "0.6036944", "0.6036582", "0.60271454", "0.60268974", "0.602142", "0.601666", "0.60089386", "0.60049134", "0.6004686", "0.60035557", "0.599379", "0.59899676", "0.59803045", "0.59774786", "0.5974124", "0.5972035", "0.5971899", "0.5970166", "0.59689546", "0.59689546", "0.59689546", "0.5958603", "0.59441143", "0.59395313", "0.593869" ]
0.7723083
1
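
A minimal sketch of the fixed-width length-prefix framing that the record above and several of its negative snippets use (HEADER_SIZE and the helper names here are assumptions, not taken from the source): the chunk length is zero-padded to a fixed number of ASCII digits and sent ahead of the payload, so the receiver knows exactly how many bytes to read back.

import socket

HEADER_SIZE = 16  # assumption: width of the zero-padded ASCII length prefix

def _recv_exact(sock: socket.socket, n: int) -> bytes:
    # recv() may return fewer bytes than requested, so loop until n bytes arrive.
    buf = b""
    while len(buf) < n:
        part = sock.recv(n - len(buf))
        if not part:
            raise ConnectionError("socket closed before the chunk was complete")
        buf += part
    return buf

def send_chunk(sock: socket.socket, chunk: bytes) -> None:
    # Prefix the payload with its length, zero-padded to HEADER_SIZE digits.
    sock.sendall(str(len(chunk)).zfill(HEADER_SIZE).encode("ascii") + chunk)

def recv_chunk(sock: socket.socket) -> bytes:
    # Read the fixed-width length field first, then exactly that many bytes.
    length = int(_recv_exact(sock, HEADER_SIZE))
    return _recv_exact(sock, length)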
Finishes the request and receives a response.
def finish(self): self.body.finish()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finish(self):\n return self.tornado_request.finish()", "def done(self):\n ## All done with the request object\n self.closed = True\n self.d.callback('')", "def finished(self, reply):\n pass", "def finish_successful_request(self):\n self.session_manager.finish_successful_request()", "def RequestHandler_finish(self):\n if self.request._oboe_finish_ev and self.request._oboe_ctx and self.request._oboe_ctx.is_valid():\n ev = self.request._oboe_finish_ev\n ctx = self.request._oboe_ctx\n if hasattr(self, 'get_status'): # recent Tornado\n ev.add_info(\"Status\", self.get_status())\n elif hasattr(self, '_status_code'): # older Tornado\n ev.add_info(\"Status\", self._status_code)\n\n ev.add_edge(oboe.Context.get_default())\n ctx.report(ev)\n\n # clear the stored oboe event/metadata from the request object\n self.request._oboe_ctx = None\n self.request._oboe_finish_ev = None", "def end():\n\tdata = bottle.request.json\n\t#print(\"END:\", json.dumps(data))\n\treturn HTTPResponse(status=200)", "def finish_request(self, request, client_address):\n\t\tself.RequestHandlerClass(request, client_address, self)", "def handleResponseEnd(self):\r\n try:\r\n if not self._finished:\r\n reactor.callInThread(\r\n self.resource.cacheContent,\r\n self.father,\r\n self._response,\r\n self.buffer\r\n )\r\n proxy.ProxyClient.handleResponseEnd(self)\r\n except RuntimeError:\r\n # because we don't care if the user hits\r\n # refresh before the request is done\r\n pass", "def _request_finished(self):\n\n self._next_request = self._next_request_ts()\n\n self._logger.debug(\"next call at %s\" % (time.strftime(\"%H:%M:%S\", time.localtime(self._next_request))))", "def finish(self):\n self.logger.debug(\"%s -> finish()\" % self)\n self.lines = ''.join(self.response_data).split(CRLF)\n\n if len(self.lines) < 1:\n raise nntplib.NNTPDataError(\"No data received\")\n\n self.response_code, self.response_message = self.lines[0][:3], \\\n self.lines[0][3:].strip()\n\n self.logger.debug(\"code = %s\" % self.response_code)\n self.logger.debug(\"msg = %s\" % self.response_message)", "def onfinish( request ):", "def onfinish( request ):", "def handle_finished (self):\n\n print self.in_headers\n print self.in_cookies\n print self.content_type\n print self.content_encoding\n print self.response_code\n print self.is_allowing_persistence\n print self.content", "def cb_request_done(result):\n self._current_request = None\n return result", "def send_final_request(self):\n with open(self.output_path, \"r\") as text_file:\n data = json.load(text_file)\n print self.request_handler.send(data)", "def exit(self) -> None:\n\n self.result = self.handle_success('finished-task')", "def process_request(self, request, client_address):\n self.finish_request(request, client_address)", "def finish_response(self, request, response):\n logger.debug(\"TornadoHandler::finish_response\")\n\n try:\n response = self._render_template(request, response)\n except Exception as e:\n return self._handle_response_exception(request, response, e)\n\n try:\n self._apply_response_middleware(request, response)\n except: # Any exception should be gathered and handled\n signals.got_request_exception.send(sender=self.__class__, request=request)\n response = self.handle_uncaught_exception(request, resolver, sys.exc_info())\n\n self._tornado_request_handler.django_finish_request(response)\n\n self._response_finished = True\n return response", "def end_request(self, environ):\n pass", "def onFinished( self, resultLine ):\n\t\treturn self.agi.finish()", "async def 
finalize(self):\n self._req_event_emitter.disconnect()\n await self._task", "def process_request(self, request, client_address):\n\t\tself.finish_request(request, client_address)\n\t\tself.close_request(request)", "def __finish(self):\n self.finished.emit()", "def _handle_finished(self, iq):\n log.debug('Received out-of-band data result for %s from %s:' % (\n iq['oob_transfer']['url'], iq['from']))\n found_sid = self.streamSessions[iq[\"id\"]]\n \n if found_sid is not None:\n del self.streamSessions[iq[\"id\"]]\n if iq[\"type\"].lower == \"error\":\n self.fileFinishedSending(found_sid, False)\n elif iq[\"type\"].lower == \"result\":\n self.fileFinishedSending(found_sid, True)", "def stop(self):\n self.close_conn()\n self.finished = True\n self.request_handler.stop()\n print \"Finished closing\"", "def finish(self):\r\n\r\n self._is_finished = True", "def send_finish_event(self):\n self.status['type'] = '__end__'\n self._send()", "def respond(self, request):\n self.prepare(request)\n try:\n self.process(request)\n return self.get_response(request)\n finally:\n self.finalize()", "def done(self, request):\n raise NotImplementedError(\"Your %s class has not defined a done() \" \\\n \"method, which is required.\" \\\n % self.__class__.__name__)", "def render(self, response):\n logger.debug(\"TornadoRequest::render\")\n response = self._handler.finish_response(self, response)\n logger.debug(\"response: Finished\")", "def finish():", "def finish():", "def finish():", "def finish():", "def _finished(self) -> None:", "def finish(self):\r\n self.start_finish()\r\n self.wait_finish()", "def get(self):\n self.finish(json.dumps(self.build_response_dict()))", "def finish():\n pass", "def handle_request(self, given_request: Request):\n with open(request.output, mode=\"w\", encoding='utf-8') as file:\n file.write(request.result)\n return True", "def _on_response(self):\n request = self._requests.pop(0)\n try:\n request[-1].cancel()\n left = request[-1].end - Engine.instance().time\n except Exception:\n left = request[5]\n pass\n\n response = self.current_response\n\n close_after = response.headers.get('Connection', '') == 'close'\n close_after &= self.keep_alive\n\n # Is this a 100 Continue?\n if response.status == 100:\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Did we catch a redirect?\n if response.status in (301,302) and request[9] <= self.max_redirects:\n # Generate a new request, using the new URL.\n new_url = urlparse.urljoin(response.full_url,\n response.headers['Location'])\n\n new_headers = request[3].copy()\n del new_headers['Host']\n\n new_req = self._add_request(request[0], new_url, new_headers,\n request[4], left, False)\n new_req[6] = request[6]\n new_req[7] = request[7]\n new_req[9] = request[9] + 1\n\n new_req.append(\n Engine.instance().defer(left, self._request_timeout, new_req))\n\n self._requests.insert(0, new_req)\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Try converting to unicode?\n if self.unicode:\n content_type = response.headers.get('Content-Type','')\n if 'charset=' in content_type:\n content_type, _, encoding = content_type.partition('charset=')\n try:\n response.body = response.body.decode(encoding)\n except (LookupError, UnicodeDecodeError):\n pass\n\n # Determine the handler function to use.\n 
if callable(request[6]):\n func = request[6]\n else:\n func = self.on_response\n\n # Call the handler function.\n try:\n func(0, response)\n except Exception:\n log.exception('Error in HTTP response handler.')\n\n # Process the next request.\n self.current_response = None\n\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()", "def onRequestFinish(self, api, request, error):\n logging.info('Request finished ({}); Result: {}'\n .format(request, error))\n\n request_type = request.getType()\n if request_type == MegaRequest.TYPE_LOGIN:\n api.fetchNodes()\n elif request_type == MegaRequest.TYPE_FETCH_NODES:\n self.root_node = api.getRootNode()\n elif request_type == MegaRequest.TYPE_ACCOUNT_DETAILS:\n account_details = request.getMegaAccountDetails()\n logging.info('Account details received')\n logging.info('Storage: {} of {} ({} %)'\n .format(account_details.getStorageUsed(),\n account_details.getStorageMax(),\n 100 * account_details.getStorageUsed()\n / account_details.getStorageMax()))\n logging.info('Pro level: {}'.format(account_details.getProLevel()))\n\n # Notify other thread to go on.\n if request_type not in self._NO_EVENT_ON:\n self.continue_event.set()", "def end(response):\n if isinstance(response.response, ClosingIterator):\n return response\n\n diff = time.time() - request.start\n del request.start\n\n if response.response:\n response.response[0] = response.response[0].replace('__EXECUTION_TIME__', '{:.3}'.format(diff))\n response.headers[\"content-length\"] = len(response.response[0])\n\n return response", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def finish(self):\n self.connection.reset_arguments()\n self.connection.write_ok()", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def do_QUIT(self):\r\n self.send_response(200)\r\n self.end_headers()\r\n self.server.stop = True", "def _do_done(self, event):\n self._done(event.result)", "def _do_done(self, event):\n self._done(event.result)", "def end(self):\n self.logger.info(self.result)\n return self.result", "def finish(self):", "def finish(self):", "def deferred_response(response, request):\n request.write(simplejson.dumps(response))\n request.finish()", "def finish(self):\n pass", "def finish(self):\n pass", "def _CompleteRequest(self, request_id, result):\n logging.info('Reaped %s, result = %r', request_id, result)\n completion_path = self._GetRequestPathname(request_id, self._COMPLETE)\n with open(completion_path, 'w') as f:\n pickle.dump(result, f)\n self._ClearRequest(request_id, self._RUNNING)", "def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)", "def Complete(self, request, global_params=None):\n config = self.GetMethodConfig('Complete')\n return self._RunMethod(\n config, request, global_params=global_params)", "def finish(self) -> None:", "def finish(self) -> None:", "async def _response_handler(self):", "def handle_execution_response(self, data, *, wait):\n ...", "def AsyncHTTPClient_finish(request, callback=None, headers=None):\n if hasattr(callback, '_oboe_ctx'): # wrapped callback contains md\n ev = callback._oboe_ctx.create_event('exit', 'cURL') # adds edge to md\n if hasattr(request, '_oboe_ctx'): # add edge to entry event for this async HTTP call\n ev.add_edge(request._oboe_ctx)\n mdobj = callback\n\n elif hasattr(request, '_oboe_ctx'): # callback contains no metadata, but request obj does\n ev = request._oboe_ctx.create_event('exit', 'cURL')\n mdobj = request\n\n else: # no metadata found\n return\n\n if headers and 
hasattr(headers, 'get') and headers.get('X-Trace', None):\n response_md = headers.get('X-Trace')\n ev.add_edge_str(response_md) # add response X-Trace header\n\n mdobj._oboe_ctx.report(ev) # increments metadata in mdobj", "def serve_response(self):\n try:\n print self.path\n response_info = self.responses_qeues[self.path.split(\"?\").pop(0)].pop(0)\n print response_info\n except:\n self.send_response(404)\n self.end_headers()\n return\n\n \"\"\"If response_info has also a delay set, wait the time specified.\"\"\"\n if \"delay\" in response_info:\n time.sleep(response_info[\"delay\"])\n\n \"\"\"Send the status code.\"\"\"\n status_code = response_info[\"status_code\"]\n self.send_response(status_code)\n\n \"\"\"Send specific headers, if any.\"\"\"\n if \"headers\" in response_info:\n headers = response_info[\"headers\"]\n for header_name in headers.keys():\n self.send_header(header_name, headers.get(header_name))\n self.end_headers()\n\n \"\"\"Send the body, if any.\"\"\"\n if \"body\" in response_info:\n body = response_info[\"body\"]\n self.wfile.write(json.dumps(body))", "def close_request(self, request):\n\t\tpass", "def response(self, req, request_id, body):\n session = get_session()\n asyncrequest = model_query(session, AsyncRequest, filter=AsyncRequest.request_id == request_id).one()\n if not asyncrequest.expire:\n return responeutils.agentrespone(session, request_id, body)\n else:\n return responeutils.agentrespone(get_cache(), request_id, body)", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def finished(self):\n pass", "def handle_session_end_request():\n speech_output = None\n response = response_builders.build_response(session_attributes,\n response_builders.build_speechlet_response(card_title,\n speech_output, reprompt_text, should_end_session))\n return response", "def handle_response(self, response):\n self.__log(f'Received response from server. 
The code is: \"{response}\"')\n if not response.status_code == 200:\n self.handle_api_error(response)\n self.to_output_file(response.text)", "def finalize_response(self, request, response, *args, **kwargs):\n\t\t# Make the error obvious if a proper response is not returned\n\t\tassert isinstance(response, BaseResponse), (\n\t\t\t'Expected a `Response` object '\n\t\t\t'to be returned from the view, but received a `%s`'\n\t\t\t% type(response)\n\t\t)\n\t\treturn response", "def exit(self):\n self.client.logout(self.creds, self.environment)\n self.transport.close()", "def exit(self):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"exit()\"\n\n # exit() functionality is implemented with a special dst.\n exit_msg = Msg(\n dst = DST_EXIT,\n x = randint(0, UINT32_MAX),\n y = randint(0, UINT32_MAX),\n op = randint(0, UINT8_MAX),\n result = randint(0, UINT64_MAX))\n\n # First, bury a REQUEST.\n self.read(length=SZ_MSG)\n\n # Then, write the exit packet to TAP.\n self.write(str(exit_msg))\n\n # Exit the poller.\n return -1", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def onfinish():", "def handler(self):\n\t\tself.exitClient()", "def close_request(self, request):\n\t\trequest.close()", "def process_response(self, request, response): # pylint: disable=unused-argument\r\n try:\r\n tracker.get_tracker().exit_context(self.CONTEXT_NAME)\r\n except: # pylint: disable=bare-except\r\n pass\r\n\r\n return response", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._servo.end()\n self._mtr.end()\n self._log.debug('done')", "def _response(self, *lines):\n for line in lines:\n self.client.dataReceived(line + b'\\r\\n')\n self.client.dataReceived(\n b'0001 OK [READ-ONLY] ' + self.command + b' completed\\r\\n')", "def finishThread(self):\n logging.info(\"Fin Thread\")\n self.buildCreatedDict()\n self.cleanThread()\n self.accept()", "def process_response(self, _request, response):\r\n try:\r\n tracker.get_tracker().exit_context(CONTEXT_NAME)\r\n except Exception: # pylint: disable=broad-except\r\n pass\r\n\r\n return response", "def response(self):\n try:\n (code, message) = self.route_request()\n except HTTPError as e:\n logger.exception(e.message)\n logger.error(e.message)\n code = e.code\n message = e.message\n except UserError as e:\n msg = str(e)\n logger.exception(msg)\n logger.error(msg)\n code = 500\n message = {'error': msg}\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Internal error\")\n # This is an unknown error. Just inform there is an internal error.\n code = 500\n message = {'error': \"Internal error.\"}\n\n try:\n # Try to send the response\n self.send_response(int(code))\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.wfile.write(json.dumps(message, cls=JSONEncoder)\n .encode('utf-8'))\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Could not send response\")", "def finished(self):\n\t\telog(\"finished\")", "def _callback(self, request):\r\n msgID = uuid4().hex\r\n event = Event()\r\n\r\n with self._pendingLock:\r\n self._pending[msgID] = event\r\n\r\n self._reactor.callFromThread(self.received, request._buff, msgID)\r\n\r\n # Block execution here until the event is set, i.e. 
a response has\r\n # arrived\r\n event.wait()\r\n\r\n with self._pendingLock:\r\n response = self._pending.pop(msgID, None)\r\n\r\n if not isinstance(response, Message):\r\n # TODO: Change exception?\r\n raise rospy.ROSInterruptException('Interrupted.')\r\n\r\n return response", "def finished(self):", "def handle_done_output(self, out):\n out.set_status(Constants.DONE)\n self.logger.debug(\"This output ({0}) exists, skipping the processing\".format(out))", "async def done(self, *args, **kwargs):\n raise NotImplementedError()", "def finish(self):\n with self._lock: # just to be tidy; lock not really needed to set a boolean\n self._done = True", "def process_request_thread(self, request, client_address):\n # pylint: disable=broad-except\n try:\n self.finish_request(request, client_address)\n except Exception:\n self.handle_error(request, client_address)\n self.shutdown_request(request)", "def _HandleShutdown(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'text/plain')\n self.end_headers()\n self.wfile.write('API Server Quitting')\n self.server.shutdown()", "def finished(self, result):\n raise NotImplementedError(\"Subclasses mut override finished()\")", "def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)", "def __procFinished(self, exitCode, exitStatus):\n self.__finish()", "def on_finish(self):\n pass", "def FinishSequence(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def callback(self, widget, response):\n self.response = response\n self.destroy()", "def callback(self, widget, response):\n self.response = response\n self.destroy()", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')" ]
[ "0.7359422", "0.73036295", "0.7245725", "0.712501", "0.69328076", "0.69194645", "0.6853112", "0.6752575", "0.67313087", "0.66815627", "0.66713023", "0.66713023", "0.6668481", "0.6632425", "0.65602535", "0.65523654", "0.6549984", "0.6437685", "0.6395554", "0.6376815", "0.62960666", "0.6262932", "0.62523", "0.62353384", "0.62194055", "0.62107575", "0.6207942", "0.6185713", "0.6179011", "0.6176799", "0.6174568", "0.6174568", "0.6174568", "0.6174568", "0.61406386", "0.61300415", "0.61294234", "0.6113812", "0.6066558", "0.60638654", "0.6041054", "0.6034018", "0.60108334", "0.5996921", "0.5979995", "0.59765947", "0.59623206", "0.59623206", "0.59571743", "0.59503883", "0.59503883", "0.59503746", "0.5943279", "0.5943279", "0.59384537", "0.5931601", "0.59263796", "0.59247315", "0.59247315", "0.59209895", "0.5907947", "0.5894734", "0.589403", "0.58929604", "0.58925635", "0.58894616", "0.587739", "0.58687973", "0.58500373", "0.5845556", "0.5834646", "0.58211845", "0.5812415", "0.5808706", "0.5805335", "0.5802055", "0.57977116", "0.5792389", "0.57916903", "0.57859284", "0.57622707", "0.575012", "0.5745275", "0.57440674", "0.5730471", "0.572275", "0.57221174", "0.5716345", "0.57109547", "0.5710026", "0.5703926", "0.5702069", "0.5697104", "0.5692487", "0.5690076", "0.56898725", "0.56898725", "0.56811285", "0.56811285", "0.5678335" ]
0.6356392
20
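
The record above pairs the query with a thin finish() wrapper; a hedged sketch of the same idea using only the standard-library http.client (host, path, and chunk contents are placeholders) is: send the body in chunks, finish the request with the zero-length terminating chunk, then receive and read the response.

import http.client

conn = http.client.HTTPConnection("example.com")  # placeholder host
conn.putrequest("POST", "/upload")                # placeholder path
conn.putheader("Transfer-Encoding", "chunked")
conn.endheaders()

for chunk in (b"foo", b"bar", b"baz"):
    # RFC 7230 chunked coding: hex length, CRLF, payload, CRLF.
    conn.send(b"%x\r\n%s\r\n" % (len(chunk), chunk))

# Finishing the request: the zero-length chunk terminates the body...
conn.send(b"0\r\n\r\n")
# ...after which the response can be received and read.
resp = conn.getresponse()
print(resp.status, resp.read())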
This function will generate the file names in a directory tree by walking the tree either top-down or bottom-up. For each directory in the tree rooted at directory top (including top itself), it yields a 3-tuple (dirpath, dirnames, filenames).
def get_filepaths(directory): file_paths = [] # List which will store all of the full filepaths. # Walk the tree. for root, directories, files in os.walk(directory): for filename in files: if filename.endswith('.wav'): # Join the two strings in order to form the full filepath. filepath = os.path.join(root, filename) file_paths.append(filepath) # Add it to the list. # pdb.set_trace() file_paths.sort() return file_paths
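
A minimal usage sketch of the os.walk() behaviour described in the query above (the top path is a placeholder): each iteration yields one (dirpath, dirnames, filenames) triple for a directory in the tree, top-down by default.

import os

top = "/tmp/example"  # assumption: any existing directory
for dirpath, dirnames, filenames in os.walk(top, topdown=True):
    print(dirpath, "->", len(dirnames), "subdirectories,", len(filenames), "files")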
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def walk_tree(top):\n nodes = [top]\n for dirpath, dirnames, filenames in os.walk(top):\n for dirname in dirnames:\n nodes.append(os.path.join(dirpath, dirname))\n for filename in filenames:\n nodes.append(os.path.join(dirpath, filename))\n\n return nodes", "def walktree (self, top = \".\", depthfirst = True):\n \n names = os.listdir(top)\n if not depthfirst:\n yield top, names\n for name in names:\n try:\n st = os.lstat(os.path.join(top, name))\n except os.error:\n continue\n if stat.S_ISDIR(st.st_mode):\n for (newtop, children) in self.walktree (os.path.join(top, name), depthfirst):\n #print 'Scanning ', newtop\n yield newtop, children\n if depthfirst:\n yield top, names", "def walk(top):\n yield top\n for name in os.listdir(top):\n name = os.path.join(top, name)\n if os.path.isdir(name) and not os.path.islink(name):\n for directory in walk(name):\n yield directory", "def iterfiles(top=ROOT, verbose=False):\n # NOTE: os.walk() ignores errors and this can be more efficient\n if isinstance(top, pathlib.Path):\n top = str(top)\n stack = [top]\n while stack:\n root = stack.pop()\n if verbose:\n print(root)\n direntries = scandir(root)\n dirs = []\n for d in direntries:\n if d.is_dir():\n dirs.append(d.path)\n else:\n yield d\n stack.extend(dirs[::-1])", "def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):\n walk_func = get_dir_walker(recursive, topdown, followlinks)\n for root, dir_names, file_names in walk_func(dir_pathname):\n yield (root, dir_names, file_names)", "def walk(self, mtop, topdown=True):\n dirents = self.ls(mtop)\n\n mdirs, mnondirs = [], []\n for dirent in sorted(dirents.values(), key=itemgetter(\"name\")):\n if dirent[\"type\"] == \"directory\":\n mdirs.append(dirent)\n else:\n mnondirs.append(dirent)\n\n if topdown:\n yield mtop, mdirs, mnondirs\n for mdir in mdirs:\n mpath = ujoin(mtop, mdir[\"name\"])\n for x in self.walk(mpath, topdown):\n yield x\n if not topdown:\n yield mtop, mdirs, mnondirs", "def walk(top):\r\n yield top\r\n for name in os.listdir(top):\r\n name = os.path.join(top, name)\r\n if os.path.isdir(name) and not os.path.islink(name):\r\n for dir in walk(name):\r\n yield dir", "def walk_tree(self, path, topdown=True):\n if isinstance(path, File):\n # Called with File object as an argument\n root = path\n path = root.path\n else:\n root = File(path)\n\n files, dirs = [], []\n\n try:\n for item in os.listdir(path):\n file_path = os.path.join(path, item)\n\n if self.path_ignore and self.path_ignore.match(file_path):\n # Skip excluded paths\n lg.debug(\"Ignoring path %s\" % file_path)\n continue\n\n try:\n f_object = File(file_path, seen=root.already_seen)\n except UnsupportedFileType as e:\n lg.warn('%s ..skipping' % e)\n continue\n except OSError as e:\n if e.errno == errno.ENOENT:\n # File already removed, go on\n lg.debug('File already removed: %s' % e)\n continue\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n continue\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. 
corrupted file on GlusterFS may raise IOError, but we want to continue\n lg.exception(e)\n continue\n\n if f_object.directory is True:\n dirs.append(f_object)\n else:\n files.append(f_object)\n except OSError as e:\n # Exceptions that may come from os.listdir()\n if e.errno == errno.ENOENT:\n # Directory doesn't exist, go on\n pass\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n pass\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. corrupted file on GlusterFS may raise IOError, but we want to go on\n lg.exception(e)\n pass\n\n if topdown:\n yield root, dirs, files\n\n for item in dirs:\n for x in self.walk_tree(item):\n yield x\n\n if not topdown:\n yield root, dirs, files", "def _get_all_files_names(wit_path, dir_name=None, plus_root=True):\n\n if dir_name is None:\n dir_name = wit_path\n directory = os.listdir(dir_name)\n directory.remove('.wit')\n else:\n directory = os.listdir(dir_name)\n\n for item in directory:\n if os.path.isdir(item):\n for root, _, files in os.walk(os.path.join(dir_name, item), topdown=False):\n for name in files:\n if plus_root:\n\n yield os.path.join(os.path.relpath(root, wit_path), name)\n else:\n yield name\n else:\n if plus_root:\n yield os.path.join(os.path.relpath(dir_name, wit_path), item)\n else:\n yield item", "def listdir(dir_pathname,\n recursive=True,\n topdown=True,\n followlinks=False):\n for root, dir_names, file_names in walk(dir_pathname,\n recursive, topdown, followlinks):\n for dir_name in dir_names:\n yield absolute_path(os.path.join(root, dir_name))\n for file_name in file_names:\n yield absolute_path(os.path.join(root, file_name))", "def walk(top, topdown=True, onerror=None, followlinks=False):\r\n # Determine which are files and which are directories\r\n dirs = []\r\n nondirs = []\r\n try:\r\n for entry in scandir(top):\r\n if entry.is_dir():\r\n dirs.append(entry)\r\n else:\r\n nondirs.append(entry)\r\n except OSError as error:\r\n if onerror is not None:\r\n onerror(error)\r\n return\r\n\r\n # Yield before recursion if going top down\r\n if topdown:\r\n # Need to do some fancy footwork here as caller is allowed to modify\r\n # dir_names, and we really want them to modify dirs (list of DirEntry\r\n # objects) instead. 
Keep a mapping of entries keyed by name.\r\n dir_names = []\r\n entries_by_name = {}\r\n for entry in dirs:\r\n dir_names.append(entry.name)\r\n entries_by_name[entry.name] = entry\r\n\r\n yield top, dir_names, [e.name for e in nondirs]\r\n\r\n dirs = []\r\n for dir_name in dir_names:\r\n entry = entries_by_name.get(dir_name)\r\n if entry is None:\r\n # Only happens when caller creates a new directory and adds it\r\n # to dir_names\r\n entry = GenericDirEntry(top, dir_name)\r\n dirs.append(entry)\r\n\r\n # Recurse into sub-directories, following symbolic links if \"followlinks\"\r\n for entry in dirs:\r\n if followlinks or not entry.is_symlink():\r\n new_path = join(top, entry.name)\r\n for x in walk(new_path, topdown, onerror, followlinks):\r\n yield x\r\n\r\n # Yield before recursion if going bottom up\r\n if not topdown:\r\n yield top, [e.name for e in dirs], [e.name for e in nondirs]", "def os_walk(top, topdown=True, onerror=None, followlinks=False):\r\n try:\r\n names = os_listdir(top)\r\n except OSError as err:\r\n if onerror is not None:\r\n onerror(err)\r\n return\r\n\r\n dirs, nondirs = [], []\r\n for name in names:\r\n if os.path.isdir(os.path.join(top, name)):\r\n dirs.append(name)\r\n else:\r\n nondirs.append(name)\r\n\r\n if topdown:\r\n yield top, dirs, nondirs\r\n for name in dirs:\r\n new_path = os.path.join(top, name)\r\n if followlinks or not os.path.islink(new_path):\r\n for x in os_walk(new_path, topdown, onerror, followlinks):\r\n yield x\r\n if not topdown:\r\n yield top, dirs, nondirs", "def walk_up ( dirpath, topdown=False, max_iter=None ):\n def iter_partial_paths ( _join_path=os.sep.join ):\n fspath = os.path.normpath ( dirpath ).rstrip ( os.sep )\n path_elements = fspath.split ( os.sep )\n\n if path_elements:\n p_start = 0 if path_elements[0] else 1\n high = len ( path_elements )\n\n if topdown:\n if not path_elements[0]:\n yield os.sep\n\n for k in range ( p_start+1, high+1 ):\n yield _join_path ( path_elements[:k] )\n else:\n for k in range ( high, p_start, -1 ):\n yield _join_path ( path_elements[:k] )\n\n if not path_elements[0]:\n yield os.sep\n # --- end of iter_partial_paths (...) 
---\n\n if max_iter is None:\n for path in iter_partial_paths():\n yield path\n else:\n for n, path in enumerate ( iter_partial_paths() ):\n if n < max_iter:\n yield path\n else:\n return", "def _walk_paths(self, paths):\r\n for path in sorted(paths):\r\n if os.path.isdir(path):\r\n for dir_name, _, filenames in sorted(os.walk(path)):\r\n for filename in filenames:\r\n filename = os.path.join(dir_name, filename)\r\n yield os.path.relpath(filename, path), filename\r\n else:\r\n yield os.path.basename(path), path", "def iter_dir_tree(top, nohidden=True, pattern=\".*\"):\n for root, dirs, files in os.walk(top):\n if nohidden:\n remove_hidden_files(dirs)\n remove_hidden_files(files)\n for f in files:\n if re.match(pattern, f):\n yield os.path.join(root, f)", "def get_ntuples_from_xml_files(top_directory):\n for (dirpath, dirnames, filenames) in os.walk(top_directory):\n print(\"Looking in\", dirpath)\n for filename in filenames:\n full_filename = os.path.join(dirpath, filename)\n rel_path = os.path.relpath(full_filename, top_directory)\n ntuple_iter = get_ntuple_filenames_from_xml(full_filename)\n yield rel_path, ntuple_iter", "def list_files(top_path):\n\n results = []\n\n for root, dirs, files in os.walk(top_path, topdown=True):\n\n # Exclude dot files like .git\n dirs[:] = [name for name in dirs if not name.startswith('.')]\n files[:] = [name for name in files if not name.startswith('.')]\n\n for file_name in files:\n results.append(os.path.join(root, file_name))\n\n results.sort()\n return results", "def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):\n for root, _, file_names in walk(dir_pathname,\n recursive, topdown, followlinks):\n for file_name in file_names:\n yield absolute_path(os.path.join(root, file_name))", "def find_files(top_directory, exclude=[], include_top_directory_in_name=True):\n import os\n import re\n paths_and_names = []\n exclude = [re.compile(exclusion) for exclusion in exclude]\n top_directory = os.path.abspath(os.path.expanduser(top_directory))\n parent_directory = os.path.dirname(top_directory)\n for root, dirs, files in os.walk(top_directory, topdown=True):\n dirs.sort(key=str.lower) # Go in case-insensitive alphabetical order\n files.sort(key=str.lower) # Go in case-insensitive alphabetical order\n for exclusion in exclude:\n for d in dirs:\n if exclusion.search(os.path.relpath(d, top_directory)):\n dirs.remove(d)\n for f in files:\n if exclusion.search(os.path.relpath(f, top_directory)):\n files.remove(f)\n for f in files:\n path = os.path.join(root, f)\n if include_top_directory_in_name:\n name = os.path.relpath(path, parent_directory)\n else:\n name = os.path.relpath(path, top_directory)\n paths_and_names.append([path, name])\n return paths_and_names", "def walk(top=None, excluded=('.git', '.ve', '_static', 'build', 'fixtures')):\n if not top:\n top = os.getcwd()\n\n for root, dirs, files in os.walk(top):\n for directory in excluded:\n if directory in dirs:\n dirs.remove(directory)\n for name in files:\n yield os.path.join(root, name), name", "def walktree(input):\n if os.path.isfile(input):\n return [input]\n else:\n fileNames = []\n for root, dirs, files in os.walk(input):\n fileNames += [os.path.join(root, f) for f in files]\n return fileNames", "def get_files_list(tree):\n result = list()\n for (dir_path, _, file_names) in walk(tree):\n if file_names:\n for file in file_names:\n if file.lower().endswith(('.png', '.jpg', '.jpeg')):\n result.append(path.join(dir_path, file))\n\n return result", "def list_directories(dir_pathname, 
recursive=True, topdown=True,\n followlinks=False):\n for root, dir_names, _ in walk(dir_pathname, recursive, topdown, followlinks):\n for dir_name in dir_names:\n yield absolute_path(os.path.join(root, dir_name))", "def dirwalk(self, topdown=False): # DirObj.dirwalk\n if topdown:\n yield self\n\n for name, d in self.subdirs.iteritems():\n for dirEntry in d.dirwalk():\n yield dirEntry\n\n if not topdown:\n yield self", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. This directory matches one of the prune patterns.\n continue\n # We have a keeper! 
Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)", "def listFiles(root):\n for dirpath, dirnames, filenames in os.walk(root):\n for file in filenames:\n yield os.path.join(dirpath, file)", "def iter_tree(root):\n\tfor file_rel in _iter_tree_next(os.path.abspath(root), '', {}):\n\t\tyield file_rel", "def walk2(dirname):\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))", "def walk2(dirname):\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))", "def get_all_files(directory):\r\n for dirpath, _dirnames, filenames in os.walk(directory):\r\n for filename in filenames:\r\n yield (filename, dirpath)", "def files_and_folders(self, depth):\n for directory in self.rarc._directories[self.directory_index:][:self.directory_count]:\n yield depth, directory\n if isinstance(directory, Folder):\n if directory.data_offset < len(self.rarc._nodes):\n node = self.rarc._nodes[directory.data_offset]\n if directory.name == \".\" or directory.name == \"..\":\n continue\n yield from node.files_and_folders(depth + 1)", "def printFiles2(main_dir):\n # traverse root directory, and list directories as dirs and files as files\n for root, dirs, files in os.walk(main_dir):\n print(\"DirName: \" + root)\n for file in files:\n print(\"FileName: \" + root + os.sep + file)", "def walklevel(top_dir, level=None):\n top_dir = top_dir.rstrip(os.path.sep)\n assert os.path.isdir(top_dir)\n num_sep = top_dir.count(os.path.sep)\n for root, dirs, files in os.walk(top_dir):\n yield root, dirs, files\n if level is not None:\n num_sep_this = root.count(os.path.sep)\n if num_sep + level <= num_sep_this:\n del dirs[:]", "def all_files_under(path):\r\n for cur_path, dirnames, filenames in os.walk(path):\r\n for filename in filenames:\r\n yield os.path.join(cur_path, filename)", "def getFilePaths(directory):\r\n\tfor folder, subs, files in os.walk(directory):\r\n\t\tfor filename in files:\r\n\t\t\tyield os.path.join(folder, filename)", "def walk_tree(top_most_path, callback):\n for file in os.listdir(top_most_path):\n pathname = os.path.join(top_most_path, file)\n mode = os.stat(pathname)[ST_MODE]\n if S_ISDIR(mode):\n # It's a directory, recurse into it\n walk_tree(pathname, callback)\n elif S_ISREG(mode):\n # It's a file, call the callback function\n callback(pathname)\n else:\n # Unknown file type, print a message\n print(\"Skipping %s\" % pathname)", "def _get_all_files(dir_path):\n for root, _, filenames in os.walk(dir_path):\n for name in filenames:\n target = os.path.join(root, name)\n yield target", "def iter_files(root_dir: str, sep: str = '/') -> Generator[str, None, None]:\n def f(parent_path, parent_name):\n for f_name in os.listdir(parent_path):\n f_child_path = parent_path + os.sep + f_name\n f_child_name = parent_name + sep + f_name\n if os.path.isdir(f_child_path):\n for s in f(f_child_path, f_child_name):\n yield s\n else:\n yield f_child_name\n\n for name in os.listdir(root_dir):\n child_path = root_dir + os.sep + name\n if os.path.isdir(child_path):\n for x in f(child_path, name):\n yield x\n else:\n yield name", "def walk(top):\n dirs = []\n nondirs = []\n\n # We may not have read permission for top, in which case we can't\n # get a list of the files the directory contains. 
os.walk\n # always suppressed the exception then, rather than blow up for a\n # minor reason when (say) a thousand readable directories are still\n # left to visit. That logic is copied here.\n try:\n scandir_it = os.scandir(top)\n except OSError as error:\n return\n\n with scandir_it:\n while True:\n try:\n try:\n entry = next(scandir_it)\n except StopIteration:\n break\n except OSError as error:\n return\n\n try:\n is_dir = entry.is_dir(follow_symlinks=False)\n except OSError:\n # If is_dir() raises an OSError, consider that the entry is not\n # a directory, same behaviour than os.path.isdir().\n is_dir = False\n\n if is_dir:\n dirs.append(entry)\n else:\n nondirs.append(entry)\n\n yield top, dirs, nondirs\n\n # Recurse into sub-directories\n for d in dirs:\n new_path = os.path.join(top, d.name)\n # Issue #23605: os.path.islink() is used instead of caching\n # entry.is_symlink() result during the loop on os.scandir() because\n # the caller can replace the directory entry during the \"yield\"\n # above.\n if not os.path.islink(new_path):\n yield from walk(new_path)", "def listDir(path):\n filenames = []\n for root, dirs, files in os.walk(path):\n for i in files:\n filenames.append(os.path.join(root, i))\n return filenames", "def getImmediateSubdirectories(dir):", "def list_all_files(in_dir):\n\n for dirname, dirs, files in os.walk(in_dir):\n for filename in files:\n yield op.join(dirname, filename)", "def find_all(path, filenames):\n path = os.path.abspath(path)\n try:\n for root, dirs, files in os.walk(path, topdown=True):\n dirs.sort()\n for filename in filenames:\n if filename in files:\n yield os.path.abspath(os.path.join(root, filename))\n except UnicodeDecodeError:\n # This is an error of not being able to walk the dir when there are unicode files in them :|\n pass", "def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]", "def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]", "def getFiles(dir):\n # dig looking for files\n a = os.walk(dir)\n b = True\n filenames = []\n \n while b:\n try:\n (dirpath, dirnames, files) = a.next()\n filenames.append([dirpath, tuple(files)])\n except:\n b = False\n return filenames", "def getFiles(dir):\n # dig looking for files\n a= os.walk(dir)\n b = True\n filenames = []\n \n while (b):\n try:\n (dirpath, dirnames, files) = a.next()\n filenames.append([dirpath, tuple(files)])\n except:\n b = False\n return filenames", "def iterative_tree(\n basedir: Union[str, PurePath],\n nfolders_func: Callable,\n nfiles_func: Callable,\n repeat: int = 1,\n maxdepth: Optional[int] = None,\n filename: Callable = random_string,\n payload: Optional[Callable[[Path], Generator[Path, None, None]]] = None,\n) -> Tuple[List[Path], List[Path]]:\n alldirs = []\n allfiles = []\n basedir = Path(basedir)\n basedir.mkdir(parents=True, exist_ok=True)\n for i in range(repeat):\n for root, dirs, files in os.walk(str(basedir)):\n depth = os.path.relpath(root, str(basedir)).count(os.sep)\n if maxdepth and depth >= maxdepth - 1:\n del dirs[:]\n n_folders = nfolders_func(depth)\n n_files = nfiles_func(depth)\n for _ in range(n_folders):\n p = Path(root) / random_string()\n p.mkdir(exist_ok=True)\n alldirs.append(p)\n if not payload:\n for _ in range(n_files):\n p = Path(root) / filename()\n p.touch(exist_ok=True)\n allfiles.append(p)\n else:\n payload_generator = 
payload(Path(root))\n for _ in range(n_files):\n p = next(payload_generator)\n allfiles.append(p)\n\n alldirs = list(set(alldirs))\n allfiles = list(set(allfiles))\n return alldirs, allfiles", "def directory_tree(directory: pathlib.Path) -> None:\n directory_tree_string = ''\n # Turn directory into a pathlib.Path object\n # if not already one\n if not isinstance(directory, pathlib.Path):\n directory = pathlib.Path(directory)\n #print(f'+ {directory}')\n directory_tree_string += f'\\n+ {directory}'\n for path in sorted(directory.rglob('*')):\n depth = len(path.relative_to(directory).parts)\n spacer = ' ' * depth\n #print(f'{spacer}+ {path.name}')\n directory_tree_string += f'\\n{spacer}+ {path.name}'\n return directory_tree_string", "def traverse_posts(root):\n for (dirpath, dirnames, filenames) in os.walk(root):\n for filename in filenames:\n date, file = parse_filename(filename)\n if not date or not file:\n continue\n yield tuple([dirpath, filename, file, date])", "def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths", "def get_all_files(pathdir: str) -> list:\n from os import path, walk\n\n '''\n os.walk(root_path) - directory tree generator.\n For each directory on root_path return a tuple:\n (path_for_dir, list_dirs_on_the_dir, list_files_on_the_dir)\n\n trash\n ├── dir1\n │   ├── dir2\n │   │   ├── dir3\n │   │   └── file3\n │   ├── file1\n │   └── file2\n └── dir4\n ├── dir5\n │   ├── file5\n │   └── file6\n └── file4\n\n >>> import os\n >>> list(os.walk('/home/myrequiem/trash'))\n [\n ('trash', ['dir1', 'dir4'], []),\n ('trash/dir1', ['dir2'], ['file2', 'file1']),\n ('trash/dir1/dir2', ['dir3'], ['file3']),\n ('trash/dir1/dir2/dir3', [], []),\n ('trash/dir4', ['dir5'], ['file4']),\n ('trash/dir4/dir5', [], ['file5', 'file6'])\n ]\n '''\n\n allfiles = []\n\n try:\n from tqdm import tqdm\n except ImportError:\n def tqdm(*args, **kwargs):\n if args:\n return args[0]\n return kwargs.get('iterable', None)\n\n for root, dirs, files in tqdm(walk(pathdir), leave=False,\n ncols=80, unit=''):\n del dirs\n for fls in files:\n allfiles.append(path.join(root, fls))\n\n return allfiles", "def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = 
os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)", "def get_files(dir: str) -> List[str]:\n ret = []\n for root, dirs, files in os.walk(dir):\n for name in dirs:\n ret.extend(get_files(os.path.join(root, name)))\n for name in files:\n ret.append(os.path.join(root, name))\n return ret", "def list_files(startpath):\n for root, dirs, files in os.walk(startpath):\n level = root.replace(startpath, '').count(os.sep)\n indent = ' ' * 4 * (level)\n print('{}{}/'.format(indent, os.path.basename(root)))\n subindent = ' ' * 4 * (level + 1)\n for f in files:\n print('{}{}'.format(subindent, f))", "def created_names(self, prefix):\n assert os.path.isdir(prefix)\n cwd = os.getcwd()\n os.chdir(prefix)\n names = tuple(sorted(filter(\n os.path.isdir,\n glob.glob(os.path.join(*('*' * self.depth))))))\n os.chdir(cwd)\n return names", "def dirtree(dir, index):\n filenames = os.listdir(dir)\n for filename in filenames:\n if not os.path.isdir(os.path.abspath(dir+'/'+filename)):\n if filename == filenames[-1]:\n print('| '*index+'\\--', filename)\n else:\n print('| '*index+'|--', filename)\n else:\n print('| '*index+'|--', filename)\n dir = dir + '/' + filename\n dirtree(dir, index+1)", "def process_dir(pool, topdir):\n for root, dirs, files in os.walk(topdir):\n # Not really needed, but makes things consistent.\n dirs.sort()\n files.sort()\n\n for path in files:\n process_file(pool, os.path.join(root, path))", "def directory_tree(self, root=None, print_value=None):\n files = (\n self.drive.files()\n .list(\n q=\"mimeType = 'application/vnd.google-apps.folder' and trashed = false\",\n corpora=\"drive\",\n spaces=\"drive\",\n fields=\"files(id, name, parents)\",\n includeItemsFromAllDrives=True,\n supportsAllDrives=self.shared_drive[0],\n driveId=self.shared_drive[1],\n )\n .execute()\n )\n file_dict = {}\n file_names = {}\n for file in files[\"files\"]:\n if file[\"parents\"][0] not in file_dict:\n file_dict[file[\"parents\"][0]] = {}\n file_dict[file[\"parents\"][0]][file[\"id\"]] = file[\"name\"]\n file_names[file[\"id\"]] = file[\"name\"]\n tree = {}\n results = []\n\n def recurse(parent_id, tree_pos):\n if len(file_dict) == 0:\n return\n if parent_id in file_dict:\n parent = file_dict[parent_id]\n for folder in parent.keys():\n tree_pos[folder] = {}\n results.append(folder)\n if len(tree_pos) > 0:\n for folder in tree_pos.keys():\n recurse(folder, tree_pos[folder])\n\n if root is not None:\n results.append(root[\"id\"])\n recurse(root[\"id\"], tree)\n elif self.shared_drive[0]:\n results.append(self.shared_drive[1])\n recurse(self.shared_drive[1], tree)\n else:\n results.append(\"root\")\n recurse(\"root\", tree)\n\n def tree_name(tree_pos, space):\n if len(tree_pos) == 0:\n return\n for id, folder in tree_pos.items():\n print(f\"{' '*space}{file_names[id]} [{id}]\")\n if len(folder) > 0:\n tree_name(tree_pos[id], space + 4)\n\n if print_value is not None:\n root_title = self.get(results[0])\n print(f\"{root_title['name']} [{root_title['id']}]\")\n tree_name(tree, 4)\n\n if root is not None:\n return results\n else:\n return tree", "def walk(dirname): \n for name in 
os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def walk(rootdir):\n flist = []\n for root, dirs, files in os.walk(rootdir):\n flist = flist + [os.path.join(root, x) for x in files]\n return flist", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def list_files_into_directory(directory_path: str) -> [str]:\n for root, directory_names, file_names in walk(directory_path):\n return file_names", "def recursion(directory, jpg_name=None):\r\n out = [directory, []]\r\n\r\n if jpg_name is None:\r\n jpg_name = []\r\n\r\n for file in os.listdir(directory):\r\n if os.path.isdir(os.path.join(directory, file)):\r\n logger.info(f'Going deeper - {os.path.join(directory, file)}')\r\n recursion(os.path.join(directory, file), jpg_name)\r\n\r\n elif file.endswith('.jpg'):\r\n logger.info(f'{file} found')\r\n out[1].append(file)\r\n logger.info(f'Appended {file} file to output list')\r\n\r\n if out[1]:\r\n jpg_name.extend(out)\r\n\r\n return jpg_name", "def print_dir_tree(dir_path):\n top_path = Path(dir_path)\n if Path(top_path).exists() and Path(top_path).is_dir():\n print(f'+ {top_path}')\n paths = [p for p in sorted(top_path.rglob('*')) if not path_is_hidden(p)]\n for path in paths:\n depth = len(path.relative_to(top_path).parts)\n spacer = ' ' * depth\n print(f'{spacer}+ {path.name}')\n\n else:\n print(\"The path {} is not a directory.\".format(dir_path))", "def build_file_list(path):\n dirs = []\n files = []\n for x in path.iterdir():\n try:\n if x.is_symlink():\n continue\n elif x.is_dir():\n dirs.append(x)\n new_dirs, new_files = build_file_list(x)\n dirs.extend(new_dirs)\n files.extend(new_files)\n elif x.is_file():\n files.append(x)\n except PermissionError:\n continue\n return dirs, files", "def build_directory_tree(path):\n body = \"<!DOCTYPE html><html><body>\"\n mimetype = \"text/html\"\n for dir_name, sub_dir_list, file_list in os.walk(path):\n body += \"<h3>Directory: {}</h3>\".format(dir_name.split(\"webroot\")[-1])\n body += \"<ul>\"\n for fname in file_list:\n body += \"<li>{} </li>\".format(fname)\n body += \"</ul>\"\n body += \"</body></html>\"\n return body, mimetype", "def walk_recursive(root, pattern='*.py'):\r\n for root, dirnames, filenames in os.walk(root):\r\n for filename in fnmatch.filter(filenames, pattern):\r\n yield os.path.join(root, filename)", "def get_all_files_and_nested(file_path):\n stack_dirs = list()\n all_files = list()\n first_level_files = listdir(file_path)\n for f in first_level_files:\n full_f_path = join(file_path, f)\n if isdir(full_f_path):\n stack_dirs.append(full_f_path)\n else:\n all_files.append(full_f_path)\n for d in stack_dirs:\n all_files.extend(get_all_files_and_nested(d))\n return all_files", "def walk(dirname):\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def traverse(self, path):\n\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n # print(path)\n # print('files:', self.files)\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory 
is a file\n break\n directory = directory[path_list[index]]\n index += 1\n print('info', directory, path_list[index:])\n return directory, path_list[index:]", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]", "def create_rooted_trees_from_dir(paths, fout, outgroup):\n #pdb.set_trace()\n fout = open(fout, 'w')\n for count, path in enumerate(paths):\n base_path, tree_file_name = os.path.split(path)\n #pdb.set_trace()\n fin = open(path)\n for tree in fin:\n tree = tree.strip()\n tree = Tree(tree)\n tree.set_outgroup(outgroup)\n newick = tree.write(format=5) + '\\n'\n fout.write(newick)\n print count+1\n fout.close()", "def walk( root, depth=None, include=None, exclude=None, callback=None ):\n\tfiles = []\n\tstack = [ root, ]\n\tif callback is None:\n\t\tcallback = lambda x:files.append(x)\n\twhile stack and ( not depth or len(stack) <= depth ):\n\t\td = stack.pop()\n\t\tsubs = []\n\t\tfor e in listdir( d ):\n\t\t\tc = join( d, e )\n\t\t\tif isdir( c ):\n\t\t\t\tsubs.append( c )\n\t\t\telif ((not include) or match( include, c)) \\\n\t\t\t\t and ((not exclude) or not match( exclude, c)):\n\t\t\t\tcallback( c )\n\t\tstack.extend( subs )\n\treturn files", "def walk_deep(path):\n for root, _, filenames in os.walk(path):\n for f in filenames:\n yield os.path.join(root, f).replace('\\\\', '/')", "def getLevelNames(names):\n topNames = []\n deeperNames = []\n for item in names:\n if isinstance(item, str):\n topNames.append(item)\n else:\n topNames.append(item[0])\n # Names immediately under the current level must be\n # qualified with the current level full name\n for j in item[1]:\n if isinstance(j, str):\n subname = '%s/%s' % (item[0], j)\n else: # j is a 2-tuple\n jlist = list(j)\n jlist[0] = '%s/%s' % (item[0], jlist[0])\n subname = tuple(jlist)\n deeperNames.append( subname)\n return topNames, deeperNames", "def get_files(root_dir, recursive=True):\n\n ret_files = []\n\n for root, _, files in os.walk(root_dir, topdown=True):\n\n for name in files:\n ret_files.append(os.path.join(root, name))\n\n if not recursive:\n break\n\n return ret_files", "def scan_tree(path):\n list_of_file_paths = []\n for file_obj in scandir(path):\n if file_obj.is_dir(follow_symlinks=False):\n # yield from scan_tree(file_obj.path)\n list_of_file_paths.extend(scan_tree(file_obj.path))\n else:\n # yield file_path\n if 'DS_Store' not in file_obj.path:\n list_of_file_paths.append(file_obj.path)\n return list_of_file_paths", "def _recursivelyFindFiles(self, topLevelDirectory, extension=\".py\"):\n print ('finding ' + extension + '...\\n')\n tempFilesFound = []\n tempSubDirs = {} #initialize temporary dictionary of sbudirectories\n \n for dirpath, dirnames, filenames in os.walk(topLevelDirectory):\n #print 'dirpath= ' + dirpath\n for filename in filenames:\n #check file extension and verify this is not a hidden file\n #also need to verify that the entity is a file (this avoids problems when directory names have file 
extensions)\n if filename[-len(extension):] == extension and filename[0] != '.' and os.path.isfile(dirpath+\"/\"+filename):\n #print 'filename = ' + dirpath +'/'+filename\n if dirpath == topLevelDirectory:\n tempFilesFound.append(dirpath+\"/\"+filename)\n else:\n #print '********* '\n #print dirpath\n tempSubDirs[dirpath] = True\n\n ##recursively search sub-directories\n #for dirname in dirnames:\n ##ignore directories with names that begin with a '.' or '_'\n #if dirname[0] != '.' and dirname[0] != '_':\n #self._findFiles(dirname, extension)\n \n #self.SubDirsFound=self.subdirs.keys()\n\n #in Python 3 dict.keys(), dict.values() and dict.items() will all return iterable views instead of lists \n if sys.version_info >= (3, 0):\n return (tempFilesFound, list(tempSubDirs.keys()))\n \n return (tempFilesFound, tempSubDirs.keys())", "def get_file_list_recursively(top_directory, allowed_extensions=[]):\n if not exists(top_directory):\n raise ValueError('Directory \"{}\" does NOT exist.'.format(top_directory))\n\n file_list = []\n\n for cur_dir, cur_subdirs, cur_files in os.walk(top_directory):\n\n for file in cur_files:\n\n f_name, f_ext = splitext(file)\n\n if f_ext:\n if allowed_extensions and f_ext not in allowed_extensions:\n pass # skip this file\n else:\n file_list.append(join(cur_dir, file))\n sys.stdout.write('\\r[{}] - found {:06d} files...'.format(top_directory, len(file_list)))\n sys.stdout.flush()\n else:\n pass # todo decide what to do with files without extension\n\n sys.stdout.write(' Done.\\n')\n\n return file_list", "def finddirs(root):\n retval = []\n for root, dirs, files in os.walk(root):\n for d in dirs:\n retval.append(os.path.join(root, d))\n return retval", "def traverse(self, filelist, depth=0):\n if depth > 10:\n return ['depth > 10']\n level = {}\n for entry in (path for path in self.connection.nlst() if path not in ('.', '..')):\n try:\n self.connection.cwd(entry)\n level[entry] = self.traverse(filelist, depth+1)\n self.connection.cwd('..')\n except ftplib.error_perm:\n level[entry] = None\n return level", "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def find_files(top_dir, pattern, level=None):\n fnames = []\n fpaths = []\n for root, dirs, files in walklevel(top_dir=top_dir, level=level):\n for filename in fnmatch.filter(files, pattern):\n fnames.append(filename)\n fpaths.append(os.path.join(root, filename))\n return fnames, fpaths", "def get_filenames(path):\r\n xs = []\r\n for (dirpath, dirnames, filenames) in os.walk(path):\r\n xs.extend(filenames)\r\n break\r\n xs.sort()\r\n return xs", "def walkthrough(software_map):\n\n for i in software_map:\n\n if not i[\"is_file\"]:\n\n # for each directory: make a index.md\n dname = \"./docs/\" + i[\"name\"]\n index = \"./docs/\" + i[\"name\"] + \"/index.md\"\n print(index)\n os.mkdir(dname)\n\n with open(index, \"w+\") as f:\n\n children = i[\"children\"]\n\n # list files\n f.write(\"Files:\\n\\n\")\n for i in children:\n if i[\"is_file\"]:\n\n fname = i[\"name\"]\n fext = fname.split(\".\")\n if len(fext) == 2:\n fext = fext[1]\n else:\n fext = \"none\"\n # for each file, note name and extension\n f.write(fname + \" : \" + fext + \"\\n\")\n\n # list subdirectories\n f.write(\"\\nSubdirectories:\\n\\n\")\n for i in children:\n if not i[\"is_file\"]:\n\n dirname = i[\"name\"]\n\n # note the number of files and subdirs in it\n num_files, num_dirs = 0, 0\n for 
child in i[\"children\"]:\n if child[\"is_file\"]:\n num_files += 1\n elif not child[\"is_file\"]:\n num_dirs += 1\n\n # note down name and numbers for each dir\n f.write(dirname + \" : \" + str(num_files) + \" files, \" +\n str(num_dirs) + \" directories\\n\")\n\n # goto subdir\n if len(i[\"children\"]) > 0:\n walkthrough(i[\"children\"])", "def walkdir(folder):\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n yield os.path.abspath(os.path.join(dirpath, filename))", "def get_filepaths(directory):\n file_paths = [] # List which will store all of the full filepaths.\n\n # Walk the tree.\n for root, directories, files in os.walk(directory):\n root = os.path.normpath(root)\n directories.sort()\n files.sort()\n for filename in files:\n # Join the two strings in order to form the full filepath.\n if not root.startswith('_') and not root.startswith('.') and filename.endswith(\".md\"):\n filepath = os.path.normpath(os.path.join(root, filename))\n file_paths.append(filepath) # Add it to the list.\n\n return file_paths # Self-explanatory.", "def get_files_in_directory(dir_name: str):\n filenames = []\n\n for entry in walk(dir_name):\n root_dir = entry[0]\n dir_files = entry[2]\n filenames.extend([Path(root_dir).joinpath(file) for file in dir_files])\n\n return filenames", "def walk(self, top, followlinks=False):\r\n try:\r\n names = self.listdir(top)\r\n except os.error:\r\n return\r\n\r\n items = []\r\n for name in names:\r\n items.append(name)\r\n\r\n yield top, items\r\n\r\n for name in items:\r\n new_path = os.path.join(top, name)\r\n if followlinks or not self.islink(new_path):\r\n for x in self.walk(new_path, followlinks):\r\n yield x", "def list_folders_into_directory(directory_path: str) -> [str]:\n for root, directory_names, file_names in walk(directory_path):\n return directory_names", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def walkFolder(self, folder, topdown=True):\n if isinstance(folder, basestring):\n folderObject = self.getFolder(folder)\n else:\n folderObject = folder\n dirs = folderObject.childFolders\n containedObjects = folderObject.containedObjects\n if dirs is None:\n dirs = []\n if containedObjects is None:\n containedObjects = []\n if topdown:\n yield folderObject, dirs, containedObjects\n\n for nextDir in dirs:\n for x in self.walkFolder(nextDir.selfUrl):\n yield x\n if not topdown:\n yield folderObject, dirs, containedObjects", "def walk_package(pkgname, root):\n dirs = []\n files = []\n for name in pkg_resources.resource_listdir(pkgname, str(root)):\n fullname = root / name\n if pkg_resources.resource_isdir(pkgname, str(fullname)):\n dirs.append(fullname)\n else:\n files.append(Path(name))\n for new_path in dirs:\n yield from walk_package(pkgname, new_path)\n yield root, dirs, files", "def findTrees( Dir, treeList ):\n \t_dirSave = Dir\n\t#Storing the father->child info\n\ttry:\n\t\tlistOfKeys = Dir.GetListOfKeys()\n\t##-- Checking that is a Tag and Probe fit \n\t## file. 
IF it is not, then we find\n\t## some not expected TKeys diferent from TTree\n\texcept AttributeError:\n\t\tmessage = \"\"\"\\033[1;31mError: The root file is not an standard T&P NTuple file\\033[1;m\"\"\" \n\t\traise AttributeError, message\n \tfor key in Dir.GetListOfKeys():\n \t\tclassName = key.GetClassName()\n \t\tif key.IsFolder() and className != 'TTree': #Watch TTree is a folder\n \t\t\t##-- Extracting the Folder from Dir\n\t\t\t_subdir = Dir.Get(key.GetName())\n \t\t\t##-- And browsing inside recursively\n \t\t\ttreeList = findTrees(_subdir,treeList)\n \t\t\t##-- To avoid segmentation faults, we need to return\n\t\t\t## at the original directory in order to continue\n\t\t\t## extracting subdirectories (or elements of the directory)\n \t\t\t_dirSave.cd()\n \t\t##-- Storing in the dictionary the interesting objects\n \t\telif className == 'TTree':\n\t\t\t#Asuming unique id object-->complet path\n\t\t\ttreeList.append( Dir.GetPath().split(':/')[1]+'/'+key.GetName() )\n \n \treturn treeList", "def getDirListing (dirPath, revert):\n dirList = []\n fileList = []\n for root, dirs, files in os.walk (dirPath, False):\n for name in files:\n fileList.append (os.path.join (root, name))\n for name in dirs:\n dirList.append (os.path.join (root, name))\n\n if revert == True:\n return fileList + dirList\n else:\n dirList.reverse ()\n return dirList + fileList", "def _subdirectories(self):\n for o in os.listdir(self.directory):\n if os.path.isdir(os.path.join(self.directory, o)):\n yield os.path.join(self.directory, o)", "def root_sort(root_dir, exclude=[]):\n print(\" \")\n print(\"<-------------->\")\n print(\"ROOT DIRECTORY \" + \" : \" + root_dir)\n print(\"<-------------->\")\n print(\" \")\n print(\"SORTING ROOT DIRECTORY FILES\")\n root_dir_list = []\n\n for root, dirs, files in os.walk(root_dir):\n if (root.split(\"/\")[-1] in exclude and \n root.split(\"/\")[-1] != ''):\n\n print(\"EXCLUDING: \" + root)\n # Skip the direcories that are listed in exclude_dir\n dirs[:] = [d for d in dirs if d not in exclude]\n files[:] = [] # Remove all misc files\n current_folder = root\n # We don't want the root directory!!\n if (current_folder != root_dir):\n # Cycles subfolders and files in the current sub-folder\n for sub_root, sub_dirs, sub_files in os.walk(root):\n # Sorts the files in the subfolder to have the file \n # Pass to yt in position [0]\n sub_files.sort()\n # Appends path of the enzo target file to root_dir_list \n root_dir_list.append(os.path.join(root, sub_files[0]))\n \n root_dir_list.sort()\n \n return root_dir_list", "def walk_directory(self, path):\n files = []\n for dirpath, dirnames, filenames in os.walk(path):\n for filename in filenames:\n files.append(os.path.join(dirpath, filename))\n return files", "def eachfilename(dir2list, printfname=0):\n if printfname: print('eachfilename is matching for \\n' + dir2list);\n if isinstance(dir2list,str):\n if not os.path.exists(dir2list): # if not a valid (single) filename\n dir2list=[dir2list] # try it as a list\n if isinstance(dir2list,list) or isinstance(dir2list,tuple):\n for line in dir2list:\n for fname in glob.iglob(line):\n fname = fname.replace('\\\\','/')\n if printfname: print(fname)\n yield fname\n elif isinstance(dir2list,str):\n pp, ff = os.path.split(dir2list); pp+='/';\n for line in open(dir2list):\n line = line.strip()\n if line.startswith('##') : continue ## skip those lines\n for fname in glob.iglob( pp + line ):\n fname=fname.replace('\\\\','/')\n if printfname: print(fname)\n yield fname" ]
[ "0.74382895", "0.73284924", "0.6859594", "0.6823699", "0.6807483", "0.67260796", "0.6721552", "0.6637687", "0.6594345", "0.6485284", "0.6484495", "0.6478459", "0.64450955", "0.6372701", "0.63717425", "0.63453346", "0.6339544", "0.6330374", "0.6294579", "0.62940586", "0.6279649", "0.6260713", "0.62574005", "0.6235525", "0.6234086", "0.6231545", "0.62096214", "0.61957854", "0.6192225", "0.6192225", "0.6184156", "0.6169862", "0.6154047", "0.6120484", "0.6063081", "0.6062711", "0.60552025", "0.59951216", "0.5993814", "0.5973551", "0.59629905", "0.596145", "0.593226", "0.5928437", "0.5915306", "0.5915306", "0.5911737", "0.5904492", "0.58960056", "0.5886942", "0.5869757", "0.58649606", "0.58605134", "0.5857068", "0.58494085", "0.5848429", "0.5835022", "0.5801156", "0.57992595", "0.57749265", "0.5774115", "0.57692224", "0.5756722", "0.57453", "0.5737123", "0.57348883", "0.5732801", "0.57287335", "0.5716659", "0.57077533", "0.5704953", "0.57006055", "0.56947714", "0.5692991", "0.56896174", "0.5673912", "0.56661534", "0.5654317", "0.56529576", "0.5645489", "0.56434405", "0.56433547", "0.5640592", "0.5629572", "0.56237173", "0.56226534", "0.5620901", "0.56162393", "0.56103283", "0.55967826", "0.5586501", "0.5580347", "0.5571464", "0.5565672", "0.5561741", "0.556138", "0.5549781", "0.5549298", "0.5544126", "0.55418587", "0.55372554" ]
0.0
-1
Calculate the distance and rotation to the edge of the desk
def getDistanceAndRotationToEdge(l, f, r):
    if DEBUG:
        print "lfr:", l, ",", f, ",", r

    # Maths help from: http://xaktly.com/MathNonRightTrig.html
    # - Specfically the law of cosines, but at least one of their
    #   examples is wrong, but methods are correct... sigh.
    #
    # For triangle with forward length, shortest of
    # left and right length, and desk edge as sides...
    #
    # f = forward distance length
    # l = left distance length
    # r = right distance length
    # e = length of desk edge between left and right views
    # s = shortest of left and right distance length
    # v = "view" angle of how much robot looks left or right
    # g = angle between f and e
    # d = distance between robot and edge of desk
    # a = angle between the way the robot is facing and edge of desk
    #     (i.e. if the robot is facing directly towards edge it's 0)
    #     (in radians or degrees?..)
    #
    # e² = f² + s² - 2 * f * s * cos(v)
    # g = sin⁻¹ * (s * sin(v) / e)
    # d = f * sin(g)
    # a = 180 - 90 - g (minus or positive depending on if s is left or right)

    # Figure out if the edge of the desk is more to the right or left
    # s = min(l, r) <-- Used to use this, but need additional things.
    # r | l | s
    # x | x | ?
    # 1 | 1 | ?    Logic table for _r_ight, _l_eft, and output
    # 0 | 0 | ?    _s_hortest distances from robot to desk edge
    # x | 0 | l
    # 1 | x | r    x = None
    # 0 | 1 | r    1 = arbitrary high-ish value
    # x | 1 | l    0 = arbitrary low-ish value
    # 1 | 0 | l
    # 0 | x | r

    # Distance to right and left are missing?
    if r is None and l is None:
        if DEBUG:
            print "INFO: Skipping edge calcs because of missing distances."
        return int(round(f)), 0

    # Distance to right and left identical?
    elif r == l:
        if DEBUG:
            print "INFO: Skipping edge calcs because of identical distances."
        # This is unlikely-ish because l, f, r are floats...
        #
        #  r < f         r > f
        #    ◆     | or    ◼
        # ____➘|        __🠛__
        #
        return int(round(min(r, f))), 0

    # Figure out if _l_eft or _r_ight is the shorter distance
    else:
        if r is None:
            s = l
            direction = -1
        elif l is None:
            s = r
            direction = 1
        elif l < r:
            s = l
            direction = -1
        elif r < l:
            s = r
            direction = 1

    cosV = math.cos(math.radians(45))
    sinV = math.sin(math.radians(45))

    e = f**2 + s**2 - 2 * f * s * cosV
    e = math.sqrt(e)
    g = math.degrees(math.asin(s * sinV / e))
    d = f * math.sin(math.radians(g))  # Switching degrees/radians f'debugging
    a = (90 - g) * direction

    '''
    # Debug stuff
    print "f =", f
    print "l =", l
    print "r =", r
    print "e =", e
    print "s =", s
    print "v =", 45
    print "g =", g
    print "d =", d
    print "a =", a
    '''

    distance = int(round(d))
    rotation = int(round(a))

    if DEBUG:
        print "Distance to edge:", str(distance) + "cm"
        print "Rotation to edge:", str(rotation) + "°"

    return distance, rotation
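A minimal standalone sketch (Python 3, not part of the record above) of the same triangle maths the function applies; the readings f = 40, s = 30 (shorter side on the left) and the 45° view angle are assumed sample values:

import math

# Assumed sample sensor readings, for illustration only.
f = 40.0           # forward distance to the desk edge
s = 30.0           # shorter of the left/right distances (left here, so direction = -1)
direction = -1
v = math.radians(45)                                  # the "view" angle the robot pans by

e = math.sqrt(f**2 + s**2 - 2 * f * s * math.cos(v))  # law of cosines: length of the desk-edge side
g = math.asin(s * math.sin(v) / e)                    # law of sines: angle between f and the edge
d = f * math.sin(g)                                   # perpendicular distance to the edge
a = (90 - math.degrees(g)) * direction                # rotation needed to face the edge

print(int(round(d)), int(round(a)))                   # -> 30 -42 for these readings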
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEdgeAngle():\n '''\n returns angle a\n a\n ◿\n b c\n '''\n ANGLE_OFFSET = 8 # How far off the angle measurements are in degrees.\n THRESHOLD = 220 # How much light must be reflected to 'notice' the desk.\n angle = 0\n while angle < panTilt.TLT_RANGE:\n angle += 1\n panTilt.tilt(int(angle))\n deskDetected = ir.readWithDelay()\n # print \"Angle:\", angle + ANGLE_OFFSET, \", ir reading:\", deskDetected\n if deskDetected > THRESHOLD or angle == panTilt.TLT_RANGE:\n # print \"-----------------------\"\n break # Break out of looking downwards loop\n panTilt.up() # Look up again\n return 90 - angle - ANGLE_OFFSET", "def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu", "def getEdgeDistance():\n '''\n a\n ◿\n b c\n\n hypotenuse\n ◿ adjacent\n opposite\n\n tan(a) = opposite/adjacent\n adjacent * tan(a) = opposite\n '''\n\n # An estimated multiplier to take into account the larger infrared dot\n # observed when further away from as surface - think torch beam onto a\n # wall getting larger as it gets further away, but only the radius\n # (center downwards) being relevant.\n # TODO: Maybe move into infrared sensor code?\n MULTI = 1.2\n\n edgeDistance = BOT_HEIGHT * math.tan(math.radians(getEdgeAngle()))\n edgeDistance *= MULTI\n\n if DEBUG:\n print \"Distance to edge: \", int(round(edgeDistance))\n\n return edgeDistance", "def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge", "def getDistances():\n\n # If there's a wall in the way then there's no edge that way (probably)\n\n wallL, edgeL = getDistance(-45) # Left\n wallF, edgeF = getDistance( 0) # Forward\n wallR, edgeR = getDistance( 45) # Right\n\n panTilt.pan() # Recenter\n\n return wallL, edgeL, wallF, edgeF, wallR, edgeR", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def fangle_degr(self):\r\n\r\n return self._versor_1.angle_degr(self._versor_2)", "def calculate_clockwise_angle_and_distance(self, center_node, spoke_node): # pylint: disable=R0201\n if not spoke_node['id'] in center_node['relations']:\n raise Exception('spoke_node_id must be related to center node')\n\n refvec = [0, 1]\n point = spoke_node['coords']\n origin = center_node['coords']\n\n # Vector between point and the origin: v = p - o\n vector = [point[0] - origin[0], point[1] - origin[1]]\n # Length of vector: ||v||\n lenvector = math.hypot(vector[0], vector[1])\n # If length is zero there is no angle\n if lenvector == 0:\n return -math.pi, 0\n\n # Normalize vector: v/||v||\n normalized = [vector[0]/lenvector, vector[1]/lenvector]\n dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2\n diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2\n angle = math.atan2(diffprod, dotprod)\n\n # Negative angles represent counter-clockwise angles so we need to subtract them\n # from 2*pi (360 degrees)\n if angle < 0:\n return 2 * math.pi + angle, lenvector\n\n # I return first the 
angle because that's the primary sorting criterium\n # but if two vectors have the same angle then the shorter distance should come first.\n # (lenvector should never really be needed, however, since that would mean edges overlap)\n return angle, lenvector", "def distance_between_wheels():", "def faceDiagonal(self):\n faceDiagonal = (2**(1/2)) * self.sideLength\n return faceDiagonal", "def edge_dxy(self):\r\n loc = self.loc\r\n rect = loc.coord\r\n p1 = rect[0]\r\n p2 = rect[1]\r\n edx = p2[0] - p1[0] # Find edge direction\r\n edy = p2[1] - p1[1]\r\n return edx, edy", "def edgeCurl(self):\n if getattr(self, '_edgeCurl', None) is None:\n assert self.dim > 1, \"Edge Curl only programed for 2 or 3D.\"\n\n n = self.vnC # The number of cell centers in each direction\n L = self.edge # Compute lengths of cell edges\n S = self.area # Compute areas of cell faces\n\n # Compute divergence operator on faces\n if self.dim == 2:\n\n D21 = sp.kron(ddx(n[1]), speye(n[0]))\n D12 = sp.kron(speye(n[1]), ddx(n[0]))\n C = sp.hstack((-D21, D12), format=\"csr\")\n self._edgeCurl = C*sdiag(1/S)\n\n elif self.dim == 3:\n\n D32 = kron3(ddx(n[2]), speye(n[1]), speye(n[0]+1))\n D23 = kron3(speye(n[2]), ddx(n[1]), speye(n[0]+1))\n D31 = kron3(ddx(n[2]), speye(n[1]+1), speye(n[0]))\n D13 = kron3(speye(n[2]), speye(n[1]+1), ddx(n[0]))\n D21 = kron3(speye(n[2]+1), ddx(n[1]), speye(n[0]))\n D12 = kron3(speye(n[2]+1), speye(n[1]), ddx(n[0]))\n\n O1 = spzeros(np.shape(D32)[0], np.shape(D31)[1])\n O2 = spzeros(np.shape(D31)[0], np.shape(D32)[1])\n O3 = spzeros(np.shape(D21)[0], np.shape(D13)[1])\n\n C = sp.vstack((sp.hstack((O1, -D32, D23)),\n sp.hstack((D31, O2, -D13)),\n sp.hstack((-D21, D12, O3))), format=\"csr\")\n\n self._edgeCurl = sdiag(1/S)*(C*sdiag(L))\n return self._edgeCurl", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def virtual_distance(self):\n conflict_zone_radio = 384.0\n path_width = 172.0\n right_turn_radio = path_width / 4.0\n left_turn_radio = 3 * path_width / 4.0\n initial_straight_section = conflict_zone_radio - path_width / 2.0\n if self.get_intention() == \"s\":\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_intention() == \"r\":\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() > -right_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (right_turn_radio + self.get_virtual_y_position())\n ) * right_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * right_turn_radio / 2.0 -\n self.get_virtual_y_position() - right_turn_radio\n )\n\n a = path_width / 2.0\n b = right_turn_radio + path_width / 4.0\n c = pi * right_turn_radio / 2.0\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n else:\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() < left_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (\n left_turn_radio -\n 
self.get_virtual_y_position()\n )\n ) * left_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * left_turn_radio / 2 +\n self.get_virtual_y_position() - left_turn_radio\n )\n\n a = path_width / 2\n b = right_turn_radio + path_width / 4\n c = pi * left_turn_radio / 2\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n return virtual_distance_value", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def doMath(self, distance):\n\t\trotational_corr = -.1*(self.front_point-self.back_point)\n\t\tdistance_corr = -.2*(self.middle - distance)\n\t\treturn rotational_corr + distance_corr", "def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance", "def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))", "def distancia(self, other):\n return ((self.x-other.x)**2 + (self.y-other.y)**2 + (self.z-other.z)**2) ** (1 / 2)", "def total_edge_angle(e1, e2):\n e1_source = section.index(e1[0])\n e2_target = section.index(e2[1])\n\n \"\"\" Given a pair of vertices, call angle_delta between them. \"\"\"\n f = lambda pair: utils.angle_delta(self.node_heading[pair[0]], self.node_heading[pair[1]])\n\n \"\"\" Map f onto each pair of adjacent vertices, and return the abs of the summed result. 
\"\"\"\n return abs(sum(map(f, zip(section[e1_source + 1:e2_target], section[e1_source + 2:e2_target + 1]))))", "def _calc_side(self):\n\n # Calculation of the side of the car with respect to the trajectory\n next_index = self.index + 1\n\n if next_index == len(self.x_trajectory):\n next_index = self.index\n\n trajectory_vector = ((self.x_trajectory[next_index]\n - self.x_trajectory[self.index]),\n (self.y_trajectory[next_index]\n - self.y_trajectory[self.index]))\n\n x_diff = self.x - self.x_trajectory[self.index]\n y_diff = self.y - self.y_trajectory[self.index]\n\n ugv_vector = (x_diff, y_diff)\n\n vector_z = ugv_vector[0] * trajectory_vector[1] \\\n - ugv_vector[1] * trajectory_vector[0]\n\n if vector_z >= 0:\n\n # It is in the right side\n self.sign = 1\n\n else:\n\n # It is in the left side\n self.sign = -1\n\n return self.sign", "def _geodesic_distance(mesh, face1, face2, edge):\n edge_center = (mesh.vertices[edge[0]] + mesh.vertices[edge[1]]) / 2\n return _list_length(_list_minus(edge_center, _face_center(mesh, face1))) + \\\n _list_length(_list_minus(edge_center, _face_center(mesh, face2)))", "def comp_angle_magnet(self):\n Rbo = self.get_Rbo()\n W0 = self.comp_W0m()\n Harc = self.comp_H_arc()\n if self.is_outwards():\n return float(2 * arctan(W0 / (2 * (Rbo + self.H1 - Harc))))\n else:\n return float(2 * arctan(W0 / (2 * (Rbo - self.H1 - Harc))))\n\n # if self.W0_is_rad:\n # return self.W0\n # else: # Convert W0 from m to rad\n # Rbo = self.get_Rbo()\n # return float(2 * arcsin(self.W0 / (2 * Rbo)))", "def angle(self) -> int:", "def degrees(self):\n A = self.adjacency()\n A.data = np.ones(A.nnz)\n right = np.array(A.sum(1)).ravel()\n left = np.array(A.sum(0)).ravel()\n return right, left", "def get_diameter(self) -> float:\r\n \r\n return (self.box[3] - self.box[1] + self.box[2] - self.box[0]) / 2", "def _get_angle_and_dist_to_avoid(self, detection, direction='left'):\n OVERSHOOT_DIST = 0.20 # meters, distance to overshoot target by\n base_link_pose = self._transform_to_base_link(detection)\n radius = math.sqrt(base_link_pose.pose.position.x ** 2\n + base_link_pose.pose.position.y ** 2)\n tag_point = Point(x=base_link_pose.pose.position.x,\n y=base_link_pose.pose.position.y)\n\n path_edge_point = Point()\n # solve for x given the radius and y-coord of a point on a circle\n # Just set x to zero if radius is too small (if tag is too close to\n # the rover. 
Protects math.sqrt() from evaluating a negative number.\n if radius > Planner.PATHWAY_EDGE_DIST:\n path_edge_point.x = math.sqrt(radius ** 2\n - Planner.PATHWAY_EDGE_DIST ** 2)\n else:\n path_edge_point.x = 0\n path_edge_point.y = Planner.PATHWAY_EDGE_DIST\n if direction == 'left':\n path_edge_point.y *= -1\n\n return (-self._angle_between(tag_point, path_edge_point),\n path_edge_point.x + OVERSHOOT_DIST)", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def mrr_diagonal(geom: base.BaseGeometry) -> float:\n if len(geom) <= 1:\n return 0\n if len(geom) == 2:\n return geo.distance( # type: ignore\n lat1=geom[0].y, lon1=geom[0].x, lat2=geom[1].y, lon2=geom[1].x\n )\n mrr = LineString(geom).minimum_rotated_rectangle\n if isinstance(mrr, Point):\n return 0\n try: # in most cases, mrr is a Polygon\n x, y = mrr.exterior.coords.xy\n except AttributeError: # then it should be a LineString\n p0, p1 = mrr.coords[0], mrr.coords[-1]\n return geo.distance(p0[1], p0[0], p1[1], p1[0]) # type: ignore\n return geo.distance(y[0], x[0], y[2], x[2]) # type: ignore", "def cable_length(self):\n skel = self.physical_space(copy=False)\n\n v1 = skel.vertices[skel.edges[:,0]]\n v2 = skel.vertices[skel.edges[:,1]]\n\n delta = (v2 - v1)\n delta *= delta\n dist = np.sum(delta, axis=1)\n dist = np.sqrt(dist)\n\n return np.sum(dist)", "def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )", "def get_distance(edge, center, r, candidate):\n p1, p2, p0 = edge[0], edge[1], center\n edge_len = norm([p2[0] - p1[0], p2[1] - p1[1]])\n sq = abs((p2[1]-p1[1])*p0[0] - (p2[0]-p1[0])*p0[1] + p2[0]*p1[1] - p2[1]*p1[0])\n dist = sq / edge_len\n if same_side(edge, center, candidate):\n return r + dist\n else:\n return r - dist", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5", "def manhatam_distance(self) -> int:\n return abs(self.north) + abs(self.east)", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def getDistance(angle):\n\n panTilt.pan(angle)\n time.sleep(DELAY)\n wallDistance = getWallDistance()\n edgeDistance = getEdgeDistance() if wallDistance is None else None\n\n return wallDistance, edgeDistance", "def _angle_of_attack(self, rel_wind, blade_chord):\n # blade_chord_vector - (relative_wind + pi)\n # rel_oposite = rel_wind.rotated(math.pi)\n aoa_rad = rel_wind.theta - blade_chord.theta\n aoa_rad = vec.normalize_angle(aoa_rad)\n aoa_360 = aoa_rad * 360 / math.tau\n return aoa_rad, aoa_360", "def create_receiver_viewing_field(self):\n height = self.receiver_height\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, 
self.receiver_azimuth)\n\n if angle < 90:\n # from rec (at idx) to TOA (len(h.a.))\n height_at_rad_field = np.arange(\n self.height_array[-1],\n height - self.swiping_height,\n -self.swiping_height,\n )\n\n elif angle > 90:\n # from ground (at 0) to rec (at idx)\n height_at_rad_field = np.arange(\n 0, height + self.swiping_height, self.swiping_height\n )\n\n else:\n height_at_rad_field = np.NAN\n\n return height_at_rad_field", "def row_to_edge(row):\r\n return float(row[\"Dem\"]) - float(row[\"Rep\"])", "def compute_steering_angle(self, frame):\n preprocessed = img_preprocess(frame)\n X = np.asarray([preprocessed])\n #steering_angle = self.model.predict(X)[0]\n steering_angle = self.model(X, training=False)[0]\n\n logging.debug('new steering angle: %s' % steering_angle)\n return int(steering_angle + 0.5) # round the nearest integer", "def direction(self):\n return atan2d(self.y, self.x)", "def edge_tangent(edge):\n tan = None\n for l in edge.link_loops:\n t = edge.calc_tangent(l)\n if not round(t.z):\n tan = t\n return tan", "def distancia_entre_pontos(self,alvo2):\r\n dx=(self.x-alvo2.x)\r\n dy=(self.y-alvo2.y)\r\n d= (dx**2+dy**2)**(0.5)\r\n return (d)", "def get_direction_matrix(self) -> int:", "def get_elevation_along_edge(self, from_, to):\n pass", "def __bcc_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def distances(self):", "def rsdl(self):\n\n if self.opt['Monotone'] and self.k > 0:\n return np.linalg.norm((self.X - self.Y).ravel())\n return np.linalg.norm((self.X - self.Yprv).ravel())", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n 
\"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def row_to_edge(row):\n return float(row[\"Dem\"]) - float(row[\"Rep\"])", "def edge_length(self):\n if self.edge_length_l is not None:\n return self.edge_length_l\n else:\n self.edge_length_l = (2 * self.radius * math.sin(math.pi / self.vert_count))\n return self.edge_length_l", "def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5", "def vert_angle(time, data, height, distance):\n\n altitude = float(data[time]['altitude'])\n\n return round((degrees(atan2(height, distance)) - altitude) / 2, 4)", "def diagonal(self):\r\n return math.sqrt((self.width ** 2) + (self.height ** 2))", "def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)", "def ddm(self):\n return gon2ddm(self.gon_angle)", "def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans", "def get_road_rotation(self):\r\n if self.container is not None:\r\n rot = self.container.get_road_rotation()\r\n rot2 = self.rotation\r\n if rot2 is None:\r\n rot2 = rot\r\n return rot2\r\n \r\n rot = self.track.get_road_rotation()\r\n rot2 = self.rotation\r\n if rot2 is None:\r\n rot2 = rot\r\n return rot2", "def dist4(a,b): # compute distance between two points a & b\n return mag4(sub4(a,b))", "def dist(x, y):\n dx = x[0] - y[0]\n dy = x[1] - y[1]\n ans = dx**2 + dy**2\n ans = ans**(0.5)\n return ans", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def angle2D(self) -> float:\n\n return self.v2ddict.angle2d()", "def darcy_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n visc_i = visc_mix_ph(i, T0=self.inl[0].T.val_SI)\n visc_o = visc_mix_ph(o, T0=self.outl[0].T.val_SI)\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n\n re = 4 * abs(i[0]) / (np.pi * self.D.val * (visc_i + visc_o) / 2)\n\n return ((i[1] - o[1]) - 8 * abs(i[0]) * i[0] * (v_i + v_o) / 2 *\n self.L.val * lamb(re, self.ks.val, self.D.val) /\n (np.pi ** 2 * self.D.val ** 5))", "def 
angle(self) -> float:\n ...", "def steps_to_angle():\n pass", "def Agl2ArcLen(self,agl):\r\n\r\n return (self.distance_between_wheels/2)*agl", "def calculate_distance(self, decrease_factor):\n return self.caltab_height * self.focal_length / (self.sensor_height * decrease_factor)", "def create_left_right_tangent(self):\n self.arc_incident_tan = Arc(\n start_angle = PI/2 + self.incident_angle,\n angle = PI/2 - self.incident_angle,\n radius = self.arc_incident_tan_radius,\n color = self.arc_incident_color,\n arc_center = self.mirror_origin\n )\n\n theta_in_tan_pos_offset = -2.0 * RIGHT + 0.8 * UP\n self.tex_theta_in_tan = TexMobject(r\"90^{\\circ}\",\n r\"-\",\n r\"\\theta_{i}\",\n color=self.tex_theta_in_color).\\\n move_to(self.mirror_origin + theta_in_tan_pos_offset)\n\n self.arc_reflected_tan = Arc(\n start_angle = 0,\n angle = PI/2 - self.reflected_angle,\n radius = self.arc_reflected_tan_radius,\n color = self.arc_reflected_color,\n arc_center = self.mirror_origin\n )\n\n theta_out_tan_pos_offset = 2.0 * RIGHT + 0.8 * UP\n self.tex_theta_ref_tan = TexMobject(r\"90^{\\circ}\",\n r\"-\",\n r\"\\theta_{r}\",\n color=self.tex_theta_ref_color).\\\n move_to(self.mirror_origin + theta_out_tan_pos_offset)", "def ortho_line_cut(self):\n x_mid_left, y_mid_left = self.midpoint(0,1) # Computes the mid point of the LHS face of the edm cut\n x_mid_right, y_mid_right = self.midpoint(2,3) # Computes the mid point of the RHS face of the edm cut\n\n ave_grad = self.average_grad()\n m_horizontal = -1/ave_grad #90 degrees rotation of the vertical line average gradient\n\n horizontal_eq_c = y_mid_right - m_horizontal*x_mid_right # y offset of horizontal line\n vertical_eq_left_c = y_mid_left - ave_grad * x_mid_left # y offset of vertical line on left side\n\n x_intersect, y_intersect = self.intersect_point(m_horizontal, horizontal_eq_c, ave_grad,vertical_eq_left_c)\n\n\n coordleft = [x_intersect, y_intersect]\n coordright =[x_mid_right, y_mid_right]\n\n dist = self.distance(coordleft, coordright)\n\n return coordleft, coordright, dist", "def right_edge(f: SwimmingFish) -> float:\n if f.dx > 0:\n return f.posn.x + f.fish.size\n else:\n return f.posn.x", "def _rad_center(self):\n return ((self.rad_hi + self.rad_lo) / 2).to(\"deg\")", "def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def __rotation(self, node, right_rotation=False):\n # left rotation\n if not right_rotation:\n # update parent\n parent = node.parent\n neighbor = node.right_child\n if node.key <= parent.key:\n parent.left_child = neighbor\n else:\n parent.right_child = node.right_child\n\n # update node\n node.parent = neighbor\n node.right_child = neighbor.left_child\n\n # update neighbor\n neighbor.parent = parent\n neighbor.left_child = node\n\n # print(\"--> before left rotation:\")\n # print(\"size tree of node {}: {}\".format(node.key, node.size_tree))\n # print(\"size tree of node {}: {}\".format(neighbor.key, neighbor.size_tree))\n\n # update size of tree\n node.size_tree -= neighbor.size_tree\n node.size_tree += node.right_child.size_tree\n neighbor.size_tree -= node.right_child.size_tree\n neighbor.size_tree += node.size_tree\n\n # print(\"--> after left rotation:\")\n # print(\"size tree of node {}: {}\".format(node.key, node.size_tree))\n # print(\"size tree of node {}: {}\".format(neighbor.key, neighbor.size_tree))\n\n # right rotation\n else:\n # update parent\n parent = node.parent\n neighbor = node.left_child\n if node.key <= parent.key:\n parent.left_child = neighbor\n else:\n parent.right_child 
= node.right_child\n\n # update node\n node.parent = neighbor\n node.left_child = neighbor.right_child\n\n # update neighbor\n neighbor.parent = parent\n neighbor.right_child = node\n\n # print(\"--> before right rotation:\")\n # print(\"size tree of node {}: {}\".format(node.key, node.size_tree))\n # print(\"size tree of node {}: {}\".format(neighbor.key, neighbor.size_tree))\n\n # update size of tree\n node.size_tree -= neighbor.size_tree\n node.size_tree += node.left_child.size_tree\n neighbor.size_tree -= node.left_child.size_tree\n neighbor.size_tree += node.size_tree\n\n # print(\"--> after right rotation:\")\n # print(\"size tree of node {}: {}\".format(node.key, node.size_tree))\n # print(\"size tree of node {}: {}\".format(neighbor.key, neighbor.size_tree))", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def max_front_wheel_angle():", "def test_revolute_from_dh(self):\n x_offset = 1\n z_offset = 2\n # Rotate around the z axis\n r = Joint.revolute_from_dh(0, 0, x_offset, z_offset)\n t_mat = r(np.array([np.pi / 2]))\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], z_offset))\n # x was rotated 90 degrees, and is now y\n self.assertTrue(np.allclose(t_mat[1, 3], x_offset))", "def ForsterOrientationFactor(d1, d2, r):\n rn = r / norm(r) ##Normalized distance vector\n d1n = d1/ norm(d1)\n d2n = d2/ norm(d2)\n Factor = 3 * dot(d1n, rn) * dot(d2n, rn) - dot(d1n, d2n)\n return Factor", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def right_rotation(self, ang_vel):\n vel = self.om_left_max * self.R + ang_vel * self.L\n om_right = (vel + ang_vel * self.L) / self.R \n return vel, om_right", "def get_distance(self):\n print(\"voici la distance à l'obstacle\")", "def es_dist(day):\n return 1.00011 + \\\n 0.034221*cos(day_angle(day)) + \\\n 0.00128*sin(day_angle(day)) + \\\n 0.000719*cos(2*day_angle(day)) + \\\n 0.000077*sin(2*day_angle(day))", "def dec(self):\n return self.dec_angle", "def DIRECTION(x,y,x2=0,y2=0):\n\tif x!=x2 and y!=y2:\n\t\tif x < 
x2:\n\t\t\tdirection = acos((x2-x)/DISTANCE(x,y,x2,y2))*180/pi\n\t\t\tif y<y2 : direction = 360 - direction\n\t\telse:\n\t\t\tdirection = acos((x-x2)/DISTANCE(x,y,x2,y2))*180/pi\n\t\t\tif y < y2 : direction += 180\n\t\t\telse : direction = 180 - direction\n\telif x < x2 : direction = 0\n\telif x > x2 : direction = 180\n\telif y < y2 : direction = 270\n\telif y > y2 : direction = 90\n\telse : direction = 0\n\treturn direction", "def turn_to_endpoint(previous_direction, w_real, d, d_traveled):\n theta = math.atan((D_ENDPOINT-d_traveled)/w_real)\n phi = np.pi/2 - theta\n\n if previous_direction==0:\n # drone yaws to the right to the direction of the endpoint\n yaw(phi)\n else:\n # drone yaws to the left to the direction of the endpoint\n yaw(-phi)\n phi = -phi\n\n return phi", "def get_direction(self):\n return self.actual_coordinates[2]", "def GetLoCorner(self):\n ...", "def _calculate_gravity(self, object_2, object_1):\n\n def _calculate_angle(x0, y0, x1, y1):\n \"\"\"Counts angle in radians between vector (x0, y0)(x1, y1) and horizontal axis (CW) in canvas\n coordinate system\n :returns 0 if x0 == y0 == x1 == y1 == 0\n [0.. +3.14] if vector points down\n (-3.14.. 0] if vector points up\n \"\"\"\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle\n\n m1, x1, y1 = self._get_object_params(object_1)\n m2, x2, y2 = self._get_object_params(object_2)\n R = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n F = G * m1 * m2 / R ** 2\n angle = _calculate_angle(x1, y1, x2, y2)\n Fx1 = F * cos(angle)\n Fy1 = F * sin(angle)\n Fy2, Fx2 = -Fy1, -Fx1 # vectors are exactly opposite\n return Fx2, Fy2, Fx1, Fy1", "def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full", "def _handleEdge(self):\r\n if self._aliensdown == False:\r\n for row in self.getAliens():\r\n for alien in row:\r\n if not alien is None:\r\n alien.y -= ALIEN_V_WALK\r\n self._direction = (-1)*self._direction\r\n self._aliensdown = True\r\n else:\r\n for row in self.getAliens():\r\n for alien in row:\r\n if not alien is None:\r\n alien.x += self._direction*ALIEN_H_WALK\r\n self._aliensdown = False", "def euler(faces, edges, verticies):\n\n # Return the calculated value\n return verticies + edges - faces", "def cantor() -> bigger.MCG[Edge]: # 
pylint: disable=too-many-statements\n\n POS, EQ, NEG = +1, 0, -1\n\n def edges() -> Iterable[Edge]:\n for x in naturals():\n for y in [POS, EQ, NEG]:\n yield x, y\n\n def negate(X: Edge) -> Edge:\n return X[0], -X[1]\n\n def invert(sign: int, X: tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n return X if sign == POS else (negate(X[6]), not X[7], negate(X[4]), not X[5], negate(X[2]), not X[3], negate(X[0]), not X[1])\n\n def link(edge: Edge) -> tuple[Edge, bool, Edge, bool, Edge, bool, Edge, bool]:\n n, k = edge\n if k == EQ: # Equator\n if n == 0:\n return ((0, NEG), False, (1, NEG), True, (1, POS), False, (0, POS), True)\n elif n == 1:\n return ((2, POS), False, (0, POS), False, (0, NEG), True, (2, NEG), True)\n else: # n > 1\n return ((3 * n - 3, NEG), False, (3 * n - 1, NEG), True, (3 * n - 1, POS), False, (3 * n - 3, POS), True)\n\n # Northern / Southern hemisphere.\n if n == 0:\n return invert(k, ((0, EQ), False, (1, POS), False, (1, EQ), True, (2, POS), False))\n elif n == 1:\n return invert(k, ((4, POS), False, (3, POS), False, (0, POS), True, (0, EQ), False))\n elif n == 2:\n return invert(k, ((7, POS), False, (6, POS), False, (0, POS), False, (1, EQ), True))\n N, r = n // 3 + 1, n % 3\n incoming = 3 * (N // 2) - (1 if N % 2 else 2)\n if r == 0:\n return invert(k, ((N, EQ), False, (n + 2, POS), False, (incoming, POS), True, (n + 1, POS), False))\n elif r == 1:\n return invert(k, ((6 * N - 2, POS), False, (6 * N - 3, POS), False, (n - 1, POS), False, (incoming, POS), True))\n else: # r == 2:\n return invert(k, ((6 * N + 1, POS), False, (6 * N + 0, POS), False, (n - 2, POS), True, (N, EQ), False))\n\n T = bigger.Triangulation.from_pos(edges, link)\n\n def generator(name: str) -> bigger.Encoding[Edge]: # pylint: disable=too-many-branches\n twist_match = re.match(r\"(?P<curve>[ab])_(?P<n>-?\\d+)$\", name)\n rotate_match = re.match(r\"r$\", name)\n\n if twist_match is not None:\n parameters = twist_match.groupdict()\n curve_name = parameters[\"curve\"]\n N = int(parameters[\"n\"])\n if curve_name == \"a\":\n if N == 1:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n cut_sequence = [(0, EQ), (N, EQ), (3 * N - 3, POS)]\n while N > 1:\n low_N = N // 2\n cut_sequence.append((3 * low_N - (1 if N % 2 else 2), POS))\n if N % 2:\n cut_sequence.append((3 * low_N - 3, POS))\n N = low_N\n elif curve_name == \"b\":\n if N <= 3:\n cut_sequence = [(0, EQ), (0, POS), (1, EQ)]\n else:\n extend_left = N % 2\n N = N // 2\n cut_sequence = [(N, EQ), (3 * N - 3, POS)]\n while N > 1:\n N_low = N // 2\n cut_sequence.append((3 * N_low - (1 if N % 2 else 2), POS))\n if extend_left:\n cut_sequence.append((3 * N_low - 3, POS))\n if N % 2 != extend_left:\n cut_sequence.append((N_low, EQ))\n break\n N = N_low\n else:\n cut_sequence.append((0, EQ))\n\n curve = T(dict(((x, y * s), 1) for x, y in cut_sequence for s in [+1, -1]))\n return curve.twist()\n elif rotate_match is not None:\n\n def isom(edge: Edge) -> Edge:\n n, k = edge\n if k == EQ:\n if n == 0:\n return (1, EQ)\n elif n == 1:\n return (0, EQ)\n return (n ^ (1 << n.bit_length() - 2), k)\n\n if n == 0:\n return (0, k)\n elif n == 1:\n return (2, k)\n elif n == 2:\n return (1, k)\n N, r = n // 3 + 1, n % 3\n return (3 * (N ^ (1 << N.bit_length() - 2)) - 3 + r, k)\n\n return T.encode([(-1, isom, isom)])\n\n raise ValueError(f\"Unknown mapping class {name}\")\n\n return bigger.MCG(T, generator)", "def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset", "def 
get_ending_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[-1].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[-1].y\n return delta_y, delta_x" ]
[ "0.6792521", "0.67125314", "0.6442544", "0.6154413", "0.5941092", "0.5854384", "0.58526313", "0.5838844", "0.58166885", "0.5809942", "0.57968384", "0.5790198", "0.5733752", "0.57309914", "0.5662172", "0.5659722", "0.5644403", "0.5641197", "0.5620599", "0.55770284", "0.5569615", "0.555166", "0.5506792", "0.5505262", "0.54924667", "0.5491714", "0.54822713", "0.54792947", "0.5438697", "0.53948087", "0.5394365", "0.5388989", "0.538544", "0.538544", "0.538544", "0.538544", "0.538544", "0.538544", "0.538544", "0.537994", "0.53759396", "0.5375823", "0.53725946", "0.5363301", "0.5360302", "0.53398246", "0.5338359", "0.5334056", "0.5333644", "0.530821", "0.5305982", "0.5297892", "0.52859044", "0.5279731", "0.5264844", "0.52633286", "0.5259626", "0.52575487", "0.52472055", "0.52438164", "0.5237474", "0.52348965", "0.52328956", "0.5231054", "0.5230423", "0.5218978", "0.5215876", "0.521553", "0.5213665", "0.5203134", "0.51902926", "0.5189639", "0.5186415", "0.51821804", "0.5179732", "0.51782984", "0.5176331", "0.5175269", "0.51714694", "0.5168484", "0.51601905", "0.5156266", "0.51544243", "0.5150193", "0.51459444", "0.5144606", "0.51444334", "0.51429605", "0.51409304", "0.5139937", "0.5138675", "0.5135052", "0.5131295", "0.5130923", "0.5130884", "0.5127669", "0.5127358", "0.51222175", "0.51197934", "0.51111823" ]
0.6808179
0
Given a value, break it down by the ilk of node (usb or pci), the vendor, and the device or product.
def parse_value(value: str) -> Tuple[str, str, str]: value_pattern = r'^(usb|pci)\(([^:]{4}):([^:]{4})\)$' matches = re.match(value_pattern, value) assert matches, value ilk, vendor, device = matches.group(1), matches.group(2), matches.group(3) return ilk, vendor, device
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def get_vendor(mac):\r\n return p.get_manuf(mac) or 'None'", "def bios_vendor(self):\n\t\treturn self.__info_dict['info']['bios_vendor']['value']", "def device_catalog_path_value_converter(value):\n paths = []\n for path in value:\n pt = tuple(path.split(\"/\"))\n if pt and pt[-2]==\"devices\":\n pt = pt[:-2] + pt[-1:]\n paths.append(pt)\n return paths", "def device(self, value):\n try:\n if isinstance(value, str):\n self._device_serial = value\n self._check_requirements()\n except ValueError:\n self._device_serial = None", "def __init__(self, device=None, value=None):\n self.device = device\n self.value = value", "def __init__(self, device=None, value=None):\n self.device = device\n self.value = value", "def __init__(self, device=None, value=None):\n self.device = device\n self.value = value", "def __init__(self, device=None, value=None):\n self.device = device\n self.value = value", "def __init__(self, device=None, value=None):\n self.device = device\n self.value = value", "def __init__(self, device=None, value=None):\n self.device = device\n self.value = value", "def _get_vendor_product_id(device_dict):\n return f'{_get_vendor_id(device_dict)}/{_get_product_id(device_dict)}'", "def get_vendor(self, result, host, mac):\n if \"vendor\" in result['scan'][host] and mac in result['scan'][host]['vendor']:\n return result['scan'][host]['vendor'][mac]\n else:\n return \"\"", "def vendor(self) -> str:\n return self.properties[DBUS_ATTR_VENDOR]", "def get_vendor(disk):\n\n if DISKINFO[\"/dev/\"+disk][\"Type\"] == \"Partition\":\n #We need to use the info from the host disk, which will be whatever came before.\n return DISKINFO[DISKINFO[\"/dev/\"+disk][\"HostDevice\"]][\"Vendor\"]\n\n else:\n try:\n vendor = PLIST[\"MediaName\"].split()[0]\n\n except KeyError:\n vendor = \"Unknown\"\n\n return vendor", "def set_value_to_device(self, dev_name, value):\n dev = self.devices[dev_name]\n # If it is an analog channel\n if 'model' in dev.properties:\n if dev.properties['model'] == 'ni':\n daq = self.devices[dev.properties['connection']['device']]\n conditions = {\n 'dev': dev,\n 'value': value\n }\n daq.driver.analog_output_dc(conditions)\n else:\n dev.apply_values(value)", "def _manufacturer(self, mac_address):\n # Initialize key variables\n manufacturer = ''\n\n # Process data\n mac_oui = mac_address[0:6]\n if mac_oui in self.oui:\n manufacturer = self.oui[mac_oui]\n\n # Return\n return manufacturer", "def _get_vendor_id(device_dict):\n return device_dict['vendor_id'].split()[0].split('x')[-1]", "def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK 
drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }", "def device(self):\n if (self.symbol.type == self.scanner.NAME):\n device_name = self.names.get_name_string(self.symbol.id)\n device_id = self.names.query(device_name)\n self.old_symbol = self.symbol # for reporting duplicate devices\n self.symbol = self.scanner.get_symbol()\n if (self.symbol.type == self.scanner.COLON):\n self.symbol = self.scanner.get_symbol()\n device_kind = self.logictype()\n\n if(self.symbol.type == self.scanner.COMMA):\n self.symbol = self.scanner.get_symbol()\n if(self.symbol.type == self.scanner.KEYWORD):\n if(self.symbol.id in [self.scanner.initial_ID,\n self.scanner.inputs_ID,\n self.scanner.period_ID, self.scanner.sequence_ID]):\n\n self.symbol = self.scanner.get_symbol()\n\n # initialise list to hold device property numbers\n device_property_list = []\n\n if(self.symbol.type == self.scanner.NUMBER):\n number_val = int(\n self.names.get_name_string(self.symbol.id))\n if device_kind == self.names.query(\"SIGGEN\"):\n if (number_val == 0 or number_val == 1):\n device_property_list.append(number_val)\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Siggen signal value has\n # to be '0' or '1'.\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END.\n self.error(\n self.SIGGEN_QUALIFIER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Not a SIGGEN\n device_property_list.append(number_val)\n self.symbol = self.scanner.get_symbol()\n\n # Extract sequence of numbers for SIGGEN\n while (self.symbol.type == self.scanner.COMMA):\n if device_kind == self.names.query(\n \"SIGGEN\"):\n self.symbol = self.scanner.get_symbol()\n if(self.symbol.type == (\n self.scanner.NUMBER)):\n number_val = int(\n self.names.get_name_string(\n self.symbol.id))\n if (number_val == 0 or (\n number_val == 1)):\n device_property_list.append(\n number_val)\n self.symbol = (\n self.scanner.get_symbol())\n else:\n # Error: Signal value has\n # to be '0' or '1'.\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END.\n list1 = [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY\n ]\n list2 
= [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID\n ]\n self.error(\n self.SIGGEN_QUALIFIER,\n list1,\n list2)\n else:\n # Error: Needs to be an integer\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END\n list1 = [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY\n ]\n list2 = [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID\n ]\n self.error(\n self.INTEGER, list1, list2)\n else:\n # Error: Excess qualifiers\n # for non-SIGGEN\n # Stop symbs:';','}','CONNECT',\n # 'MONITOR', END\n self.error(\n self.devices.EXCESS_QUALIFIER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Needs to be an integer\n # Stop symbs:';','}','CONNECT','MONITOR', END\n self.error(\n self.INTEGER, [\n self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Parameter: 'initial',\n # inputs, period, sequence.\n # Stopping symbols: ';' , '}','CONNECT', 'MONITOR'\n # or 'END' KEYWORD '\n self.error(self.NEED_QUALIFIER,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Comma has to be followed by parameter\n # speficification\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR'\n # or 'END' KEYWORD\n self.error(self.NEED_QUALIFIER,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # There is no device property\n device_property_list = None\n\n if (self.symbol.type == self.scanner.SEMICOLON):\n self.symbol = self.scanner.get_symbol()\n else:\n # Error: Device definition needs to end in ';'\n # Stopping symbols: NAME, ';' , '}', 'CONNECT', 'MONITOR'\n # or 'END' KEYWORD\n self.error(self.NO_DEVICE_SEMICOLON,\n [self.scanner.KEYWORD,\n self.scanner.SEMICOLON,\n self.scanner.NAME,\n self.scanner.RIGHT_CURLY],\n [self.scanner.CONNECT_ID,\n self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Device name has to be followed by ':'\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.NO_DEVICE_COLON, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n else:\n # Error: Valid Device name required\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n self.DEVICE_NAME, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Check for device semantic errors\n if self.error_count == 0:\n # Only check for semantic errors if no errors so far\n err = self.devices.make_device(\n device_id, device_kind, device_property_list)\n if err != self.devices.NO_ERROR:\n # Stopping symbols: ';' , '}', 'CONNECT', 'MONITOR' or 'END'\n # KEYWORD\n self.error(\n err, [\n self.scanner.KEYWORD, self.scanner.SEMICOLON,\n self.scanner.RIGHT_CURLY], [\n self.scanner.CONNECT_ID, self.scanner.MONITOR_ID,\n self.scanner.END_ID])\n\n # Increment input pin counter by number of pins on new device\n if self.error_count == 0:\n device_name_string = 
self.names.get_name_string(device_kind)\n if device_name_string == \"DTYPE\":\n self.num_input_pin += 4\n elif device_name_string in [\"AND\", \"OR\", \"NAND\", \"NOR\"]:\n self.num_input_pin += device_property_list[0]\n elif device_name_string == \"XOR\":\n self.num_input_pin += 2", "def getNodeUSB(self,node):\n data = self.connect('get','nodes/%s/scan/usb' % (node),None)\n return data", "def vendor_name(self):\n return self._device.vendor", "def parse_url(cls, urlstr: str, scheme: str,\n vdict: Dict[str, int],\n pdict: Dict[int, Dict[str, int]],\n default_vendor: int) -> Tuple[UsbDeviceDescriptor, int]:\n urlparts = urlsplit(urlstr)\n if scheme != urlparts.scheme:\n raise UsbToolsError(\"Invalid URL: %s\" % urlstr)\n try:\n if not urlparts.path:\n raise UsbToolsError('URL string is missing device port')\n path = urlparts.path.strip('/')\n if path == '?' or (not path and urlstr.endswith('?')):\n report_devices = True\n else:\n interface = to_int(path)\n report_devices = False\n except (IndexError, ValueError) as exc:\n raise UsbToolsError('Invalid device URL: %s' % urlstr) from exc\n candidates, idx = cls.enumerate_candidates(urlparts, vdict, pdict,\n default_vendor)\n if report_devices:\n UsbTools.show_devices(scheme, vdict, pdict, candidates)\n raise SystemExit(candidates and\n 'Please specify the USB device' or\n 'No USB-Serial device has been detected')\n if idx is None:\n if len(candidates) > 1:\n raise UsbToolsError(\"%d USB devices match URL '%s'\" %\n (len(candidates), urlstr))\n idx = 0\n try:\n desc, _ = candidates[idx]\n vendor, product = desc[:2]\n except IndexError:\n raise UsbToolsError('No USB device matches URL %s' %\n urlstr) from None\n if not vendor:\n cvendors = {candidate[0] for candidate in candidates}\n if len(cvendors) == 1:\n vendor = cvendors.pop()\n if vendor not in pdict:\n raise UsbToolsError('Vendor ID %s not supported' %\n (vendor and '0x%04x' % vendor))\n if not product:\n cproducts = {candidate[1] for candidate in candidates\n if candidate[0] == vendor}\n if len(cproducts) == 1:\n product = cproducts.pop()\n if product not in pdict[vendor].values():\n raise UsbToolsError('Product ID %s not supported' %\n (product and '0x%04x' % product))\n devdesc = UsbDeviceDescriptor(vendor, product, desc.bus, desc.address,\n desc.sn, idx, desc.description)\n return devdesc, interface", "def device_info(devid: int = 0) -> str: # pragma: no cover\n numdev = jax.device_count()\n if devid >= numdev:\n raise RuntimeError(f\"Requested information for device {devid} but only {numdev} present.\")\n dev = jax.devices()[devid]\n if dev.platform == \"cpu\":\n info = \"CPU\"\n else:\n info = f\"{dev.platform.upper()} ({dev.device_kind})\"\n return info", "def enumerate_candidates(cls, urlparts: SplitResult,\n vdict: Dict[str, int],\n pdict: Dict[int, Dict[str, int]],\n default_vendor: int) -> \\\n Tuple[List[Tuple[UsbDeviceDescriptor, int]], Optional[int]]:\n specifiers = urlparts.netloc.split(':')\n plcomps = specifiers + [''] * 2\n try:\n plcomps[0] = vdict.get(plcomps[0], plcomps[0])\n if plcomps[0]:\n vendor = to_int(plcomps[0])\n else:\n vendor = None\n product_ids = pdict.get(vendor, None)\n if not product_ids:\n product_ids = pdict[default_vendor]\n plcomps[1] = product_ids.get(plcomps[1], plcomps[1])\n if plcomps[1]:\n try:\n product = to_int(plcomps[1])\n except ValueError as exc:\n raise UsbToolsError('Product %s is not referenced' %\n plcomps[1]) from exc\n else:\n product = None\n except (IndexError, ValueError) as exc:\n raise UsbToolsError('Invalid device URL: %s' %\n 
urlunsplit(urlparts)) from exc\n sernum = None\n idx = None\n bus = None\n address = None\n locators = specifiers[2:]\n if len(locators) > 1:\n try:\n bus = int(locators[0], 16)\n address = int(locators[1], 16)\n except ValueError as exc:\n raise UsbToolsError('Invalid bus/address: %s' %\n ':'.join(locators)) from exc\n else:\n if locators and locators[0]:\n try:\n devidx = to_int(locators[0])\n if devidx > 255:\n raise ValueError()\n idx = devidx\n if idx:\n idx = devidx-1\n except ValueError:\n sernum = locators[0]\n candidates = []\n vendors = [vendor] if vendor else set(vdict.values())\n vps = set()\n for vid in vendors:\n products = pdict.get(vid, [])\n for pid in products:\n vps.add((vid, products[pid]))\n devices = cls.find_all(vps)\n if sernum:\n if sernum not in [dev.sn for dev, _ in devices]:\n raise UsbToolsError(\"No USB device with S/N %s\" % sernum)\n for desc, ifcount in devices:\n if vendor and vendor != desc.vid:\n continue\n if product and product != desc.pid:\n continue\n if sernum and sernum != desc.sn:\n continue\n if bus is not None:\n if bus != desc.bus or address != desc.address:\n continue\n candidates.append((desc, ifcount))\n return candidates, idx", "def set_value(self, device_name, val):\n epics.caput(device_name, val)\n\n\t\t#mu = mu\n\t\t#sig = math.sqrt(abs(mu))\n\t\t#y = (float(x)-mu)/(sig)", "def _vendor_request(self, direction, request, length_or_data=0, value=0, index=0, timeout=1000):\n return self.device.ctrl_transfer(\n direction | usb.TYPE_VENDOR | usb.RECIP_DEVICE,\n request, value, index, length_or_data, timeout)", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def test_get_pci_device_by_moid(self):\n pass", "def device_info(self) -> Dict[str, Any]:\n return {\n 'name': 'Boiler Module',\n 'manufacturer': 'Eneco',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'boiler_module'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id),\n }", "def vendor(n, vendors_from_inn):\n inns = list(map(str, vendors_from_inn.keys()))\n\n for i in inns:\n if str(n).startswith(i):\n return vendors_from_inn[int(i)]", "def convert_density(self,event):\n try:\n #Compare other unit to one unit(kilograms/liter)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"grains/gallon(UK)\": 0.0000143, \"grains/gallon(US)\": 0.000017, \"grams/cubic centimeters\": 1.0, \"grams/liter\": 0.001, \"grams/millimeters\": 1.0, \"kilograms/cubic meters\": 0.001, \"kilograms/liter\": 1.0, \"megagrams/cubic meter\": 1.0, \"milligrams/millimeters\": 0.001, \"milligrams/liter\": 0.000001, \"ounces/cubic inch\": 1.729994, \"ounces/gallon(UK)\": 0.006236, \"ounces/gallon(US)\": 0.007489, \"pounds/cubic inch\": 27.679904, \"pounds/cubic foot\": 0.016018, \"pounds/gallon(UK)\": 0.099776, \"pounds/gallon(US)\": 0.119826, \"slugs/cubic foot\": 0.515318, \"tonnes/cubic meter\": 1.0, \"tons(UK)/cubic yard\": 1.328939, \"tons(US)/cubic yard\": 1.186553}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n 
self.print_text(printer)", "def _manufacturer_from_status(status: dict[str, str]) -> str | None:\n return (\n status.get(\"device.mfr\")\n or status.get(\"ups.mfr\")\n or status.get(\"ups.vendorid\")\n or status.get(\"driver.version.data\")\n )", "def InventoryDevices(self):\n self.logger.debug(\"Start Inventory...\")\n \n # Find our desired usb devices. These should be present in /dev somewhere.\n osDevices = os.listdir(\"/dev\")\n osDevices.sort()\n\n # Loop through all devices in /dev asking them what they are.\n for anOSDevice in osDevices:\n \n deviceName = \"/dev/\" + anOSDevice\n # We're making use of the unix command \"udevadm\". Read up on it!\n cmd = [\"udevadm\", \"info\", \"-q\", \"all\", \"-n\", deviceName]\n #print(cmd)\n pid=\"\"\n vid=\"\"\n uid=\"\"\n \n # Launch udevadm for the current device name.\n FNULL = open(os.devnull, 'w')\n proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=FNULL)\n while True:\n line = proc.stdout.readline()\n if len(line) != 0:\n #print(line.rstrip())\n # Parse out the pieces of the output lines looking for the relavent information.\n parts = re.split(\"[ ]\", line.__str__())\n #print(parts)\n if len(parts) > 1:\n kvParts = re.split(\"[=]\", parts[1].__str__())\n #print(kvParts)\n # We care about procuct id, vendor id and serial number.\n if (kvParts[0] == \"ID_VENDOR_ID\"):\n vid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_MODEL_ID\"):\n pid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_SERIAL\"):\n uid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_SERIAL_SHORT\"):\n uid = kvParts[1][:-1]\n else:\n break\n\n # We found a device with a Product ID and Vendor ID. Is it one were expecting?\n if len(pid) > 0 and len(vid) > 0:\n self.logger.info( \"Checking if device with ProductID: \" + pid + \" and VendorID: \" + vid + \" on \" + deviceName + \" is needed...\") \n foundItem = next((x for x in self.expectedDevices if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and \n x.pid == pid and\n x.vid == vid and\n x.uid == uid and\n x.inventoried == False), None)\n \n if foundItem is not None:\n if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:\n if anOSDevice.startswith( 'tty') == True:\n # Device is a Serial USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n else:\n #Device is a plain USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n \n FNULL.close()\n\n\n # At this point, we may still not have all the found devices. So we'll fall back to using \"lsub\" to look for devices.\n # The reason they are not found is that some devices do not add an entry to /dev. However, lsusb does not give a\n # serial number\n cmd = [\"lsusb\"]\n # print(cmd)\n pid = \"\"\n vid = \"\"\n uid = \"\"\n\n # Launch udevadm for the current device name.\n FNULL = open(os.devnull, 'w')\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=FNULL)\n while True:\n line = proc.stdout.readline()\n if len(line) != 0:\n # print(line.rstrip())\n # Parse out the pieces of the output lines looking for the relavent information.\n parts = re.split(\"[ ]\", line.__str__())\n # print(parts)\n if len(parts) > 1:\n kvParts = re.split(\"[:]\", parts[5].__str__())\n # print(kvParts)\n # We care about procuct id, vendor id.\n vid = kvParts[0]\n pid = kvParts[1]\n\n # We found a device with a Product ID and Vendor ID. 
Is it one were expecting?\n if len(pid) > 0 and len(vid) > 0:\n self.logger.info(\n \"Checking if device with ProductID: \" + pid + \" and VendorID: \" + vid + \" is needed...\")\n foundItem = next((x for x in self.expectedDevices if\n isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and\n x.pid == pid and\n x.vid == vid and\n x.uid == uid and\n x.inventoried == False), None)\n\n if foundItem is not None:\n if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:\n if anOSDevice.startswith('tty') == True:\n # Device is a Serial USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n else:\n # Device is a plain USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n\n\n else:\n break\n\n\n FNULL.close()\n\n # Here, we probe to see if any ethernet connected devices are up and listening for connections.\n while True:\n foundItem = next((x for x in self.expectedDevices if isinstance(x, (ethernet_device.EthernetDevice)) and \n x.inventoried == False and x.checked == False), None)\n if foundItem is not None:\n #socket.setdefaulttimeout(10.0)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(10.0)\n try:\n s.connect((foundItem.host, foundItem.port))\n foundItem.inventoried = True;\n except:\n foundItem.inventoried = False;\n # Okay to swallow!\n pass\n finally:\n s.close()\n foundItem.checked = True;\n else:\n break\n \n # Record what we found.\n self.logger.info(\"The following devices were inventoried:\")\n for x in self.expectedDevices:\n if x.inventoried == True:\n if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) == True:\n self.logger.info(x.name + \" Device Node: \" + x.devPath)\n else:\n self.logger.info(x.name)\n self.foundDevices.append(x)", "def get_product(disk):\n\n if DISKINFO[\"/dev/\"+disk][\"Type\"] == \"Partition\":\n #We need to use the info from the host disk, which will be whatever came before.\n return DISKINFO[DISKINFO[\"/dev/\"+disk][\"HostDevice\"]][\"Product\"]\n\n else:\n try:\n product = ' '.join(PLIST[\"MediaName\"].split()[1:])\n\n except KeyError:\n product = \"Unknown\"\n\n return product", "def get_value(self, device_name):\n return epics.caget(str(device_name))", "def _get_device_model(self):\n dev_str = str(self.ui.comboBox_device.currentText())\n dev = Device(dev_str)\n model = dev.getProductType()[0]\n ai_channels = dev.getAIChannels()\n ci_channels = dev.getCIChannels()\n if len(ai_channels) > 0:\n self.ui.tabWidget.setTabEnabled(1, True)\n else:\n self.ui.tabWidget.setTabEnabled(1, False)\n self.ui.label_device_model.setText(model)\n self.ui.comboBox_ci.clear()\n if ci_channels:\n self.ui.comboBox_ci.addItems([c.split(\"/\")[1] for c in ci_channels])\n #self._setup_table_digital()\n return dev_str, model", "def get_os_details(self, result, host):\n if \"osmatch\" in result['scan'][host] and len(result['scan'][host][\"osmatch\"]) > 0:\n name = result['scan'][host][\"osmatch\"][0][\"name\"]\n os_family = result['scan'][host][\"osmatch\"][0][\"osclass\"][0][\"osfamily\"]\n os_gen = result['scan'][host][\"osmatch\"][0][\"osclass\"][0][\"osgen\"]\n return [name, os_family, os_gen]\n elif \"osclass\" in result['scan'][host]:\n name = result['scan'][host]['osclass']['vendor']\n os_family = result['scan'][host]['osclass']['osfamily']\n os_gen = result['scan'][host]['osclass']['osgen']\n return [name, os_family, os_gen]\n else:\n return [\"\", \"\", \"\"]", "def device_info(self) -> 
Dict[str, Any]:\n return {\n 'name': 'Electricity Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'electricity'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, 'meter_adapter'),\n }", "def _get_device(node):\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n\n # Save the device information\n node[\"devices\"] = {}\n node[\"devices\"][\"dpdk_devices\"] = vpp.get_dpdk_devices()\n node[\"devices\"][\"kernel_devices\"] = vpp.get_kernel_devices()\n node[\"devices\"][\"other_devices\"] = vpp.get_other_devices()\n node[\"devices\"][\"linkup_devices\"] = vpp.get_link_up_devices()", "def _find_devices(cls, vendor: int, product: int,\n nocache: bool = False) -> Set[UsbDevice]:\n backend = cls._load_backend()\n vidpid = (vendor, product)\n if nocache or (vidpid not in cls.UsbDevices):\n # not freed until Python runtime completion\n # enumerate_devices returns a generator, so back up the\n # generated device into a list. To save memory, we only\n # back up the supported devices\n devs = set()\n vpdict = {} # Dict[int, List[int]]\n vpdict.setdefault(vendor, [])\n vpdict[vendor].append(product)\n for dev in backend.enumerate_devices():\n device = UsbDevice(dev, backend)\n if device.idVendor in vpdict:\n products = vpdict[device.idVendor]\n if products and (device.idProduct not in products):\n continue\n devs.add(device)\n if sys.platform == 'win32':\n # ugly kludge for a boring OS:\n # on Windows, the USB stack may enumerate the very same\n # devices several times: a real device with N interface\n # appears also as N device with as single interface.\n # We only keep the \"device\" that declares the most\n # interface count and discard the \"virtual\" ones.\n filtered_devs = dict()\n for dev in devs:\n vid = dev.idVendor\n pid = dev.idProduct\n ifc = max([cfg.bNumInterfaces for cfg in dev])\n k = (vid, pid, dev.bus, dev.address)\n if k not in filtered_devs:\n filtered_devs[k] = dev\n else:\n fdev = filtered_devs[k]\n fifc = max([cfg.bNumInterfaces for cfg in fdev])\n if fifc < ifc:\n filtered_devs[k] = dev\n devs = set(filtered_devs.values())\n cls.UsbDevices[vidpid] = devs\n return cls.UsbDevices[vidpid]", "def build_dev_strings(cls, scheme: str,\n vdict: Dict[str, int],\n pdict: Dict[int, Dict[str, int]],\n devdescs: Sequence[Tuple[UsbDeviceDescriptor,\n int]]) -> \\\n List[Tuple[str, str]]:\n indices = {} # Dict[Tuple[int, int], int]\n descs = []\n for desc, ifcount in sorted(devdescs):\n ikey = (desc.vid, desc.pid)\n indices[ikey] = indices.get(ikey, 0) + 1\n # try to find a matching string for the current vendor\n vendors = []\n # fallback if no matching string for the current vendor is found\n vendor = '%04x' % desc.vid\n for vidc in vdict:\n if vdict[vidc] == desc.vid:\n vendors.append(vidc)\n if vendors:\n vendors.sort(key=len)\n vendor = vendors[0]\n # try to find a matching string for the current vendor\n # fallback if no matching string for the current product is found\n product = '%04x' % desc.pid\n try:\n products = []\n productids = pdict[desc.vid]\n for prdc in productids:\n if productids[prdc] == desc.pid:\n products.append(prdc)\n if products:\n product = products[0]\n except KeyError:\n pass\n for port in range(1, ifcount+1):\n fmt = '%s://%s/%d'\n parts = [vendor, product]\n sernum = desc.sn\n if not sernum:\n sernum = ''\n if [c for c in sernum if c not in printablechars or c == '?']:\n serial = '%d' % indices[ikey]\n else:\n serial = sernum\n if serial:\n parts.append(serial)\n elif desc.bus is not None and desc.address is not None:\n parts.append('%x' % 
desc.bus)\n parts.append('%x' % desc.address)\n # the description may contain characters that cannot be\n # emitted in the output stream encoding format\n try:\n url = fmt % (scheme, ':'.join(parts), port)\n except Exception:\n url = fmt % (scheme, ':'.join([vendor, product, '???']),\n port)\n try:\n if desc.description:\n description = '(%s)' % desc.description\n else:\n description = ''\n except Exception:\n description = ''\n descs.append((url, description))\n return descs", "def get_vendor(mac_addr: str) -> str:\n\n parse_wireshark_oui_database()\n\n mac_addr = mac_addr.lower().replace(':', '').replace('-', '').replace('.', '')\n\n # Split the MAC address in different ways and check against the oui_dict\n for split_length in _oui_length_split_list:\n oui = mac_addr[:split_length]\n if oui in _oui_dict:\n return _oui_dict[oui]\n\n return ''", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }", "def device_info(self) -> Dict[str, Any]:\n agreement = self.toon.agreement\n model = agreement.display_hardware_version.rpartition('/')[0]\n sw_version = agreement.display_software_version.rpartition('/')[-1]\n return {\n 'identifiers': {\n (DOMAIN, agreement.id),\n },\n 'name': 'Toon Display',\n 'manufacturer': 'Eneco',\n 'model': model,\n 'sw_version': sw_version,\n }", "def find_device(device):\n return usb.core.find(idVendor=device['idVendor'], idProduct=device['idProduct'])", "def usb_devices():\r\n ret_out = utils.run('lsusb').stdout.strip('\\n').replace(',', ' ')\r\n return ret_out", "def test_get_pci_device_list(self):\n pass", "def describe(device):\n base = os.path.basename(device)\n # USB-Serial devices\n sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)\n if os.path.exists(sys_dev_path):\n sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))\n return usb_string(sys_usb)\n\n # Arduino wants special handling\n sys_dev_path = '/sys/class/tty/%s/device/driver/' % (base)\n for x in os.listdir(sys_dev_path):\n # Driver directory's name contains device ID in /sys/bus/usb/drivers/usb\n temp = x.split(\":\")\n if len(temp) == 2:\n # No Arduino adds, need to save space!\n return usb_string(temp[0]).replace(\"(www.arduino.cc)\", \"\").strip()\n\n # USB-CDC devices\n sys_dev_path = '/sys/class/tty/%s/device/interface' % (base,)\n if os.path.exists(sys_dev_path):\n return read_line(sys_dev_path)\n\n return base", "def vendor(self):\n return self._vendor", "def show_device_information_long(self):\n\n for device in self._devices:\n print(\"\")\n if device['Device Type'].startswith(\"enclosu\"):\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('SAS 
Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n else:\n if device.get('Device Type'):\n print(\"{0:>32}: {1}\".format(\"Device Type\", device['Device Type']))\n if device['Device Type Description']:\n print(\"{0:>32}: {1}\".format(\"Device Description\", device['Device Type Description']))\n if device.get('Linux Device Name'):\n print(\"{0:>32}: {1}\".format(\"Linux Device Name\", device['Linux Device Name']))\n if device.get('SCSI Device Name'):\n print(\"{0:>32}: {1}\".format(\"SCSI Device Name\", device['SCSI Device Name']))\n if device.get('Product Identification'):\n print(\"{0:>32}: {1}\".format(\"Product Identification\", device['Product Identification']))\n if device.get('Vendor Identification'):\n print(\"{0:>32}: {1}\".format(\"Vendor Identification\", device['Vendor Identification']))\n if device.get('Firmware Version'):\n print(\"{0:>32}: {1}\".format(\"Firmware Version\", device['Firmware Version']))\n if device.get('Serial Number'):\n print(\"{0:>32}: {1}\".format(\"Serial Number\", device['Serial Number']))\n if device.get('Drive Capacity'):\n print(\"{0:>32}: {1}\".format(\"Drive Capacity\", device['Drive Capacity']))\n if device.get('Block Length'):\n print(\"{0:>32}: {1}\".format(\"Block Length\", device['Block Length']))\n if device.get('Power On Hours'):\n print(\"{0:>32}: {1}\".format(\"Power On Hours\", device['Power On Hours']))\n if device.get('Current Temperature'):\n print(\"{0:>32}: {1}\".format(\"Current Temperature\", device['Current Temperature']))\n if device.get('SAS Address'):\n print(\"{0:>32}: {1}\".format(\"SAS Address\", device['SAS Address']))\n if device.get('Enclosure Device'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Device\", device['Enclosure Device']))\n if device.get('Enclosure Slot'):\n print(\"{0:>32}: {1}\".format(\"Enclosure Slot\", device['Enclosure Slot']))\n if device.get('Slot Description'):\n print(\"{0:>32}: {1}\".format(\"Slot Desciption\", device['Slot Description']))\n\n if len(self._devices):\n print(\"\")", "def get_mbed_devices(self):\n upper_ven = [ven.upper() for ven in self.usb_vendor_list]\n mounts_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SYSTEM\\MountedDevices')\n for point, label, _ in self.iter_vals(mounts_key):\n printable_label = label.decode('utf-16le', 'ignore')\n if ('DosDevices' in point and\n any(v in printable_label.upper() for v in upper_ven)):\n logger.debug(\"Found Mount point %s with usb ID %s\",point,\n printable_label)\n yield (point, printable_label)\n else:\n logger.debug(\"Skipping Mount point %r label %r\", point, label)", "def device_info(self) -> Dict[str, Any]:\n via_device = 'meter_adapter'\n if self.toon.gas.is_smart:\n via_device = 'electricity'\n\n return {\n 'name': 'Gas Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'gas'),\n },\n 'via_device': (DOMAIN, self.toon.agreement.id, via_device),\n }", "def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if 
DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices", "def generateDevicePortTree( value, device, port, values ):\n new_entry = False\n if device not in values:\n values[device] = {port: value}\n new_entry = True\n if port not in values[device]:\n values[device][port] = value\n new_entry = True\n return new_entry", "def find_manufacturer(products, alist):\n for manufacturer in products:\n if manufacturer in alist:\n if manufacturer in product_mappings:\n other_manufacturers = \\\n product_mappings[manufacturer]['other_manufacturers']\n if other_manufacturers:\n return manufacturer, ', '.join(other_manufacturers)\n return manufacturer, manufacturer\n for manufacturer in product_mappings:\n other_manufacturers = \\\n product_mappings[manufacturer]['other_manufacturers']\n for other_name in other_manufacturers:\n if other_name in alist:\n return manufacturer, ', '.join(other_manufacturers)\n return False, None", "def vendor_id(self):\n return self._device.vendor_id", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self.config_entry.entry_id)},\n \"name\": NAME,\n \"model\": VERSION,\n \"manufacturer\": NAME,\n }", "def _value(self):\n return self.device.value(*self._id[1:])", "def get_version(self, value):\n version = []\n for i in range(2):\n version = [(value >> (i * 16)) & 0xFFFF] + version\n return '.'.join([str(x) for x in version])", "def _lsusbv_on_device(bus_id, dev_id):\n _, raw_output = cmd_helper.GetCmdStatusAndOutputWithTimeout(\n ['lsusb', '-v', '-s', '%s:%s' % (bus_id, dev_id)], timeout=10)\n\n device = {'bus': bus_id, 'device': dev_id}\n depth_stack = [device]\n\n # TODO(jbudorick): Add documentation for parsing.\n for line in raw_output.splitlines():\n # Ignore blank lines.\n if not line:\n continue\n # Filter out error mesage about opening device.\n if _COULDNT_OPEN_ERROR_RE.match(line):\n continue\n # Find start of device information.\n m = _LSUSB_BUS_DEVICE_RE.match(line)\n if m:\n if m.group(1) != bus_id:\n logging.warning(\n 'Expected bus_id value: %r, seen %r', bus_id, m.group(1))\n if m.group(2) != dev_id:\n logging.warning(\n 'Expected dev_id value: %r, seen %r', dev_id, m.group(2))\n device['desc'] = m.group(3)\n continue\n\n indent_match = _INDENTATION_RE.match(line)\n if not indent_match:\n continue\n\n depth = 1 + len(indent_match.group(1)) / 2\n if depth > len(depth_stack):\n logging.error(\n 'lsusb parsing error: unexpected indentation: \"%s\"', line)\n continue\n\n while depth < len(depth_stack):\n depth_stack.pop()\n\n cur = depth_stack[-1]\n\n m = _LSUSB_GROUP_RE.match(line)\n if m:\n new_group = {}\n cur[m.group(1)] = new_group\n depth_stack.append(new_group)\n continue\n\n m = _LSUSB_ENTRY_RE.match(line)\n if m:\n new_entry = {\n '_value': m.group(2),\n '_desc': 
m.group(3),\n }\n cur[m.group(1)] = new_entry\n depth_stack.append(new_entry)\n continue\n\n logging.error('lsusb parsing error: unrecognized line: \"%s\"', line)\n\n return device", "def hwinfo(device):\n base = os.path.basename(device)\n if os.path.exists('/sys/class/tty/%s/device' % (base,)):\n # PCI based devices\n sys_id_path = '/sys/class/tty/%s/device/id' % (base,)\n if os.path.exists(sys_id_path):\n return read_line(sys_id_path)\n # USB-Serial devices\n sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)\n if os.path.exists(sys_dev_path):\n sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))\n return usb_sysfs_hw_string(sys_usb)\n # USB-CDC devices\n if base.startswith('ttyACM'):\n sys_dev_path = '/sys/class/tty/%s/device' % (base,)\n if os.path.exists(sys_dev_path):\n return usb_sysfs_hw_string(sys_dev_path + '/..')\n return 'n/a' # XXX directly remove these from the list?", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": \"Somfy\",\n \"name\": self.name,\n \"model\": self.tahoma_device.widget,\n \"sw_version\": self.tahoma_device.type,\n }", "def manufacturer_encode(self, value, size):\n if value is None or value == False:\n return None\n data = []\n for i in range(0, size):\n data.append((value >> (i * 8)) & 0xFF)\n return data", "def __str__(self):\n return '%s' % (self.vendor)", "def fillManufacturerFromDiscoverResponse(tv):\n if ('informations' in tv and\n 'general' in tv['informations'] and\n 'device' in tv['informations']['general'] and\n 'manufacturer' in tv['informations']['general']['device']):\n tv['computed']['manufacturer'] = tv['informations']['general']['device']['manufacturer']", "def manufacturer(self) -> int:\n return self.__manufacturer", "def device(self):\n return self._vars[0].device", "def test_device_info_guess_os(properties, expected_os):\n assert DeviceInfo(properties).operating_system == expected_os", "def get_device_value(ip, value, community_string=\"public\"):\n\n iterator = get_iterator(ip, value, community_string)\n\n error_indication, error_status, error_index, var_binds = next(iterator)\n\n if error_indication: # SNMP engine errors\n print(error_indication)\n else:\n if error_status: # SNMP agent errors\n print(\n '%s at %s' % (error_status.prettyPrint(), var_binds[int(error_index) - 1] if error_index else '?'))\n else:\n for varBind in var_binds: # SNMP response contents\n return str(varBind).split(\"=\")[1].replace(\" \", \"\")", "def get_manufacturer_bytes(self):\n manufacturer = self._manufacturer.upper()\n id = ((ord(manufacturer[0]) - 64) * 32 * 32 +\n (ord(manufacturer[1]) - 64) * 32 +\n (ord(manufacturer[2]) - 64))\n if 0x0421 <= id <= 0x6b5a:\n return self.manufacturer_encode(id, 2)\n return False", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def get_device(cls, devdesc: UsbDeviceDescriptor) -> UsbDevice:\n cls.Lock.acquire()\n try:\n if devdesc.index or devdesc.sn or devdesc.description:\n dev = None\n if not devdesc.vid:\n raise ValueError('Vendor identifier is required')\n devs = cls._find_devices(devdesc.vid, devdesc.pid)\n if devdesc.description:\n devs = [dev for dev in devs if\n UsbTools.get_string(dev, dev.iProduct) ==\n devdesc.description]\n if devdesc.sn:\n devs = [dev for dev in devs if\n UsbTools.get_string(dev, dev.iSerialNumber) ==\n devdesc.sn]\n if devdesc.bus is not None and devdesc.address is not None:\n devs = [dev for dev in devs if\n (devdesc.bus == dev.bus and\n devdesc.address == 
dev.address)]\n if isinstance(devs, set):\n # there is no guarantee the same index with lead to the\n # same device. Indexing should be reworked\n devs = list(devs)\n try:\n dev = devs[devdesc.index or 0]\n except IndexError as exc:\n raise IOError(\"No such device\") from exc\n else:\n devs = cls._find_devices(devdesc.vid, devdesc.pid)\n dev = list(devs)[0] if devs else None\n if not dev:\n raise IOError('Device not found')\n try:\n devkey = (dev.bus, dev.address, devdesc.vid, devdesc.pid)\n if None in devkey[0:2]:\n raise AttributeError('USB backend does not support bus '\n 'enumeration')\n except AttributeError:\n devkey = (devdesc.vid, devdesc.pid)\n if devkey not in cls.Devices:\n # only change the active configuration if the active one is\n # not the first. This allows other libusb sessions running\n # with the same device to run seamlessly.\n try:\n config = dev.get_active_configuration()\n setconf = config.bConfigurationValue != 1\n except USBError:\n setconf = True\n if setconf:\n try:\n dev.set_configuration()\n except USBError:\n pass\n cls.Devices[devkey] = [dev, 1]\n else:\n cls.Devices[devkey][1] += 1\n return cls.Devices[devkey][0]\n finally:\n cls.Lock.release()", "def convertHwdesc(value):\n hwtype,hwname=getHardware(value)\n d={}\n if hwname!='unknown':\n \td['hardware']=hwname\n if hwtype!='unknown':\n \td['type']=hwtype\n d['hwdesc']=value.replace(' ','_')\n return d", "def _get_device_id() -> str:\n with open(\"/proc/cpuinfo\", \"r\") as f:\n for line in f.readlines():\n if line.startswith('Serial'):\n return line.split(':')[1].strip()\n return 'N/A'", "def GetMacVendor(macAddress):\n\turlMac = \"https://macvendors.co/api/%s/pipe\" % macAddress\n\tif macAddress in [\"\",\"FF-FF-FF-FF-FF-FF\"]:\n\t\treturn None\n\n\ttry:\n\t\t#sys.stderr.write(\"urlMac=%s\\n\"%urlMac)\n\n\t\timport urllib2\n\t\treq = urllib2.Request(urlMac)\n\t\treq.add_header('User-Agent', \"API Browser\")\n\t\tresp = urllib2.urlopen(req)\n\t\tcontent = resp.readlines()[0]\n\n\t\t#sys.stderr.write(\"content=%s\\n\"%content)\n\t\t#sys.stderr.write(\"content=%s\\n\"%str(type(content)))\n\t\tsplitMac = content.split(\"|\")\n\t\t#sys.stderr.write(\"splitMac[0]=%s\\n\"%splitMac[0])\n\t\treturn splitMac[0]\n\texcept:\n\t\texc = sys.exc_info()[1]\n\t\t#sys.stderr.write(\"Caught %s\\n\"%str(exc))\n\t\t# Any error returns a none strng: Thisinformation is not that important.\n\t\treturn \"Cannot determine vendor\"", "def get_battery(self, os: str, line: List, value: str, key: str):\n\n line.append(value[value.find(key) + len(key) + 2 :])", "def __convert_to_devunit(self, value, in_devunit=False):\r\n if not in_devunit:\r\n value *= self.DEVUNIT_RATIO\r\n elif not isinstance(value, int):\r\n self.print_msg((\"Warning: non-integer value ({}) is given \"\r\n + \"in device unit.\").format(value))\r\n return int(value)", "def device_info(self):\n return {\n \"name\": get_device_name(self._data, 0),\n \"identifiers\": {(DOMAIN, get_identifier(self._data, 0))},\n \"manufacturer\": MANUFACTURER,\n \"model\": self._data.wiserhub.system.product_type,\n \"sw_version\": self._data.wiserhub.system.firmware_version,\n \"via_device\": (DOMAIN, self._data.wiserhub.system.name),\n }", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._uuid)},\n \"name\": self._device.device_data[self._uuid]['name'],\n \"manufacturer\": \"Nest Labs\",\n \"model\": self._device.device_data[self._uuid]['model'],\n }", "def fp_from_device(new_light_devices, new_medium_devices, new_heavy_devices):\n device = 
kg_to_tonnes((new_light_devices) * 75 + \\\n (new_medium_devices) * 200 + (new_heavy_devices) * 800)\n return device", "def test_lsusb_v_centos_7_7(self):\n self.assertEqual(jc.parsers.lsusb.parse(self.centos_7_7_lsusb_v, quiet=True), self.centos_7_7_lsusb_v_json)", "def usb_sysfs_hw_string(sysfs_path):\n snr = read_line(sysfs_path + '/serial')\n if snr:\n snr_txt = '%s' % (snr,)\n else:\n snr_txt = ''\n if doPrintVendorID:\n return 'USB VID:PID=%s:%s SNR=%s' % (\n read_line(sysfs_path + '/idVendor'),\n read_line(sysfs_path + '/idProduct'),\n snr_txt\n )\n else:\n return snr_txt", "def __str__(self):\n return \"Device %d\" % self.device_id", "def device_info(device_id):\n device_info_map = listall.device_raw_info()[\"devices\"]\n for operating_system in device_info_map.keys():\n devices = device_info_map[operating_system]\n for device in devices:\n if device[\"udid\"].lower() == device_id.lower():\n return device\n return None", "def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._device.unique_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Apple\",\n \"model\": self._device.device_model,\n }", "def _find_dev(vendor_id, product_id, device_id):\n for bus in usb.busses():\n for dev in bus.devices:\n if dev.idVendor == vendor_id and dev.idProduct == product_id:\n if device_id is None or dev.filename == device_id:\n loginf('Found device on USB bus=%s device=%s' % (bus.dirname, dev.filename))\n return dev\n return None", "def manufacturer(self):\n return self._device.manufacturer", "def device_id(self):\n data = fcntl.ioctl(self._fd, _EVIOCGID, '\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n idbus, idvendor, idproduct, idversion = struct.unpack(\"hhhh\", data)\n return idbus, idvendor, idproduct, idversion", "def parse_devices_spt(self, devices=None):\n\n if not devices:\n self._logger.warning(\"The devices list is empty, so no devices parsed!\")\n return\n try:\n for entry in devices['SCSI Devices']['Device List']:\n device_type = entry['Peripheral Device Type Description']\n if self._include_enclosures:\n if not device_type.startswith('Direct') and \\\n not device_type.startswith('Host Managed') and \\\n not device_type.startswith('Enclosure'):\n continue\n else:\n if not device_type.startswith('Direct') and \\\n not device_type.startswith('Host Managed'):\n continue\n\n # Parse remaining information.\n if device_type.startswith('Direct') or device_type.startswith('Host Managed'):\n device_type = 'disk'\n if self.product_name and not self.product_name in entry['Product Identification'].strip():\n continue;\n if self.vendor_name and not self.vendor_name in entry['Vendor Identification'].strip():\n continue;\n if self.serial_number and not self.serial_number in entry['Product Serial Number'].strip():\n continue;\n if self.target_port and not self.target_port in entry['Device Target Port']:\n continue;\n elif device_type.startswith('Enclosure'):\n device_type = 'enclosure'\n\n device = dict()\n device['Device Type'] = device_type\n\n device['Device Type Description'] = entry['Peripheral Device Type Description']\n device['Product Identification'] = entry['Product Identification'].strip()\n device['Vendor Identification'] = entry['Vendor Identification'].strip()\n device['Revision Level'] = entry['Firmware Revision Level'].strip()\n\n if entry.get('Full Firmware Version') is not None:\n fwver = entry['Full Firmware Version']\n if not fwver.startswith('<not available>'):\n device['Firmware Version'] = fwver\n\n serial = entry['Product Serial Number']\n 
device['Serial Number'] = serial.strip()\n\n # Note: Not currently displayed. (WWN == LUN Device Identification)\n wwn = entry['Device World Wide Name']\n if wwn.startswith('<not available>'):\n wwn = \"\"\n device['Device World Wide Name'] = wwn\n\n sas_address = entry['Device Target Port']\n if not sas_address.startswith('<not available>'):\n device['SAS Address'] = sas_address\n self._sas_addresses += 1\n else:\n device['SAS Address'] = \"\"\n\n # Note: There's probably a better Pythonic way to do this?\n device['Linux Device Name'] = \"\"\n device['SCSI Device Name'] = \"\"\n device['DMMP Device Name'] = \"\"\n\n # Parse the device paths.\n for path_type in entry['Path Types']:\n if path_type.get('Linux Device'):\n # Handle multiple Linux device paths. (these are \"sd\" devices)\n if device.get('Linux Device Name') and path_type.get('SCSI Nexus'):\n new_device = copy.deepcopy(device)\n self._devices.append(new_device)\n # Fall through to update this device entry.\n # Initialize information for this (or next) device.\n device['Linux Device Name'] = path_type['Linux Device']\n device['Linux SCSI Nexus'] = path_type['SCSI Nexus']\n if path_type.get('SCSI Device'):\n device['SCSI Device Name'] = path_type['SCSI Device']\n if path_type.get('Device Target Port'):\n device['SAS Address'] = path_type['Device Target Port']\n\n elif path_type.get('SCSI Device'):\n # Handle multiple SCSI device paths. (now, \"sg\" devices only)\n if device.get('SCSI Device Name') and path_type.get('SCSI Nexus'):\n new_device = copy.deepcopy(device)\n self._devices.append(new_device)\n # Fall through to update this device entry.\n # Initialize information for this (or next) device.\n device['SCSI Device Name'] = path_type['SCSI Device']\n device['SCSI Nexus'] = path_type['SCSI Nexus']\n if path_type.get('Device Target Port'):\n device['SAS Address'] = path_type['Device Target Port']\n\n elif path_type.get('DMMP Device') is not None:\n # Initialize information for this device. (limited)\n device['DMMP Device Name'] = path_type['DMMP Device']\n\n # Hack: We don't find a SCSI device if there's no serial number or device ID (WWN).\n # This is observed on Linux VM's, so not common, but we still wish to handle this!\n if not len(device['SCSI Device Name']):\n # Funky DM-MP names are skipped! 
(we deal with sd and/or sg devices only)\n # /dev/mapper/centos_cos--lab--vm01-root\n if not len(device['Linux Device Name']):\n continue\n\n self._devices.append(device)\n\n except RuntimeError as exc:\n self._logger.error(\"Failed to acquire SCSI devices: {0}\".format(exc))\n raise exc", "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, self.unique_id)},\n manufacturer=\"Volumio\",\n model=self._info[\"hardware\"],\n name=self._name,\n sw_version=self._info[\"systemversion\"],\n )", "def _device_sort_key(iface):\n dev = (iface.get(\"device\") or \"\").lower()\n if dev.startswith(\"eth\") or dev.startswith(\"en\"):\n return \"0\" + dev\n if dev.startswith(\"wl\"):\n return \"1\" + dev\n if dev.startswith(\"e\") or dev.startswith(\"w\"):\n return \"2\" + dev\n else:\n return dev", "def _check_deviceline(self):\n # Check if device name is valid\n if self._check_name(self.symbol):\n self.device_name = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '=' is used\n if self._is_equal(self.symbol):\n # Get next symbol\n self.symbol = self.scanner.get_symbol()\n # Check if name has been assigned to a valid device type\n if self._check_validdevice(self.symbol):\n self.device_kind = self.symbol\n self.symbol = self.scanner.get_symbol()\n if self._is_semicolon(self.symbol):\n # No device property\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create device if no previous errors\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind))\n # Send the returned error ID for error reporting\n self._display_semantic_error(device_error)\n self.symbol = self.scanner.get_symbol()\n elif self._is_comma(self.symbol):\n # Device property set\n self.symbol = self.scanner.get_symbol()\n self.device_param, \\\n self.device_paramvalue \\\n = self._check_paramindevice()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create device if no previous errors\n if self._device_type_returner(\n self.device_kind) == \\\n self.devices.SIGGEN:\n # Use symbol attribute 'value' to get parameter\n # value, since the symbol's 'id' attribute\n # would not capture a leading '0' in the signal\n # generator's signal string\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind),\n self.device_paramvalue.value)\n else:\n # For other device types\n device_error = self.devices.make_device(\n self.device_name.id,\n self._device_type_returner(\n self.device_kind),\n self.device_paramvalue.id)\n # Send the returned error ID for error reporting\n self._display_semantic_error(device_error)\n self._check_semicolon_else_skip(self.symbol)\n self.symbol = self.scanner.get_symbol()\n else:\n # Neither semicolon nor comma\n self._display_syntax_error(\"semicoloncomma\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # The device type is not valid\n self._display_syntax_error(\"devicetype\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # No '='\n self._display_syntax_error(\"equal\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n else:\n # The device name is not valid\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n\n return None", "def _get_product_id(device_dict):\n return device_dict['product_id'].split('x')[-1]", "def 
to_device(device, x):\n if device is None:\n return x\n elif device < 0:\n return cuda.to_cpu(x)\n else:\n return cuda.to_gpu(x, device)", "def __init__(self):\n self.Revision = '0'\n self.Serial = None\n try:\n with open('/proc/cpuinfo','r') as f:\n for line in f:\n splitLine = line.split(':')\n if len(splitLine) < 2:\n continue\n key = splitLine[0].strip()\n value = splitLine[1].strip()\n if key == 'Revision':\n self.Revision = value\n if key == 'Serial' and value != len(value) * '0':\n self.Serial = value\n except:\n exception (\"Error reading cpuinfo\")\n self.model = 'Unknown'\n if self.Revision == 'Beta':\n self.model = 'Raspberry Pi Model B (Beta)'\n if self.Revision in ('000d', '000e', '000f', '0002', '0003', '0004', '0005', '0006'):\n self.model = 'Raspberry Pi Model B'\n if self.Revision in ('0007', '0008', '0009'):\n self.model = 'Raspberry Pi Model A'\n if self.Revision in ('0010', '0013', '900032'):\n self.model = 'Raspberry Pi Model B +'\n if self.Revision in ('0011', '0014'):\n self.model = 'Raspberry Pi Compute Module'\n if self.Revision in ('0012', '0015'):\n self.model = 'Raspberry Pi Model A+'\n if self.Revision in ('a01040', 'a01041', 'a21041', 'a22042'):\n self.model = 'Raspberry Pi 2 Model B'\n if self.Revision in ('900092', '900093', '920093'):\n self.model = 'Raspberry Pi Zero'\n if self.Revision in ('9000c1',):\n self.model = 'Raspberry Pi Zero W'\n if self.Revision in ('a02082', 'a22082', 'a32082'):\n self.model = 'Raspberry Pi 3 Model B' \n if self.Revision in ('a020d3'):\n self.model = 'Raspberry Pi 3 Model B+'\n if self.Revision in ('a020a0'):\n self.model = 'Raspberry Pi Compute Module 3'\n if 'Rockchip' in CPU_HARDWARE:\n self.model = 'Tinker Board'\n self.manufacturer = 'Element14/Premier Farnell'\n if self.Revision in ('a01041', '900092', 'a02082', '0012', '0011', '0010', '000e', '0008', '0004', 'a020d3', 'a01040', 'a020a0'):\n self.manufacturer = 'Sony, UK'\n if self.Revision in ('a32082'):\n self.manufacturer = 'Sony, Japan'\n if self.Revision in ('0014', '0015', 'a21041', 'a22082', '920093'):\n self.manufacturer = 'Embest, China'\n if self.Revision in ('0005', '0009', '000f'):\n self.manufacturer = 'Qisda'\n if self.Revision in ('0006', '0007', '000d'):\n self.manufacturer = 'Egoman'\n if self.Revision == '0000':\n if 'Rockchip' in CPU_HARDWARE:\n self.manufacturer = 'ASUS'\n else:\n try:\n with open('/proc/device-tree/model', 'r') as model_file:\n for line in model_file:\n if 'BeagleBone' in line:\n index = line.index('BeagleBone')\n self.manufacturer = line[:index - 1].strip(' \\n\\t\\0')\n self.model = line[index:].strip(' \\n\\t\\0')\n break\n except:\n exception (\"Error reading model\")", "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.tesla_device.id())},\n \"name\": self.tesla_device.car_name(),\n \"manufacturer\": \"Tesla\",\n \"model\": self.tesla_device.car_type,\n \"sw_version\": self.tesla_device.car_version,\n }", "def _get_usb_devices(self):\n\n # Get every device on the bus\n device_re = re.compile(\"Bus\\s+(?P<bus>\\d+)\\s+Device\\s+(?P<device>\\d+).+ID\\s(?P<id>\\w+:\\w+)\\s(?P<tag>.+)$\", re.I)\n df = subprocess.check_output(\"lsusb\")\n devices = []\n\n for i in df.decode().split('\\n'):\n if i:\n info = device_re.match(i)\n if info:\n dinfo = info.groupdict()\n dinfo['device'] = '/dev/bus/usb/%s/%s' % (dinfo.pop('bus'), dinfo.pop('device'))\n devices.append(dinfo)\n\n # Filter only for the STLink devices\n st_link_devices = []\n for device in devices:\n if self.STLINK_VENDOR_ID in device['id']:\n 
st_link_devices.append(device)\n\n self.usb_devices = st_link_devices" ]
[ "0.57974625", "0.5661134", "0.56355417", "0.55662423", "0.5533631", "0.5503334", "0.5503334", "0.5503334", "0.5503334", "0.5503334", "0.5503334", "0.5413959", "0.53457385", "0.5296801", "0.52907497", "0.52587706", "0.52237445", "0.52177995", "0.52067405", "0.517088", "0.5161193", "0.51514477", "0.5135221", "0.5128968", "0.5128023", "0.51059866", "0.50786406", "0.50738555", "0.5057736", "0.505613", "0.504826", "0.50472337", "0.50418895", "0.5039476", "0.5033303", "0.5019515", "0.5016803", "0.49908897", "0.49536788", "0.49506924", "0.4946991", "0.49374735", "0.49321336", "0.49274743", "0.4912814", "0.49105892", "0.49047446", "0.4903383", "0.49003863", "0.48948154", "0.4883973", "0.48721448", "0.48720652", "0.48719013", "0.48565617", "0.48410448", "0.4840291", "0.48387292", "0.4838713", "0.4806959", "0.4803337", "0.47896644", "0.47882393", "0.47802106", "0.4779487", "0.47697735", "0.47458243", "0.47380653", "0.47374913", "0.47348902", "0.47283936", "0.47229365", "0.4719252", "0.4719252", "0.4718739", "0.47181052", "0.47117302", "0.47100955", "0.4705323", "0.46943858", "0.46856758", "0.46847212", "0.46802518", "0.46709913", "0.46705705", "0.46682882", "0.46674818", "0.4664987", "0.46645597", "0.46634173", "0.46551758", "0.46536407", "0.46424633", "0.46413755", "0.46397978", "0.4638418", "0.4636842", "0.46358746", "0.4631822", "0.4627889" ]
0.6276907
0
Since the ventricle is usually not convex, using radial coordinates can be misleading. A simple search deals with the proper order of the points and ensures a single-pixel edge.
def _walk_on_edge(self, coordinates_of_edge):
    sorted_edge = list()
    edge_points = list()
    self.distance_matrix = cdist(coordinates_of_edge, coordinates_of_edge, metric='euclidean')
    self.distance_matrix[self.distance_matrix == 0] = 100
    cur_point = list(min(coordinates_of_edge, key=lambda t: t[1]))
    sorted_edge.append(cur_point)
    while 1:
        try:
            new_point, flag = self._find_closest_point(coordinates_of_edge, cur_point, sorted_edge)
        except TypeError:
            plt.scatter(sorted_edge, s=1)
            plt.xlim((0, 256))
            plt.ylim((-256, 0))
            self._save_failed_qc_image('Search for new point failed')
            break
        if flag:
            edge_points.append(cur_point)
            if len(edge_points) == 2:
                break
            sorted_edge.reverse()
            cur_point = sorted_edge[-1]
        else:
            cur_point = new_point
            sorted_edge.append(cur_point)
    basal_septal_edge = min(edge_points, key=lambda x: x[0])
    if np.all(basal_septal_edge != sorted_edge[0]):
        sorted_edge.reverse()
    return sorted_edge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indices ( self, *r):\n \n if len(r) == 3: # only 3 values given -> use x,y,radius method\n xpos = self.gpos\n xis = []\n yis = []\n dr2 = (xpos[0, :]-r[0])**2 + (xpos[1, :]-r[1])**2\n # array with true/false entries\n inds = dr2 <= r[2]**2 \n for np in arange(self.size)[inds]: # np -- points in x2-circle\n xi, yi = self.index(xpos[0, np], xpos[1, np])\n xis += [xi]\n yis += [yi]\n if not (xis and yis): # if no points in circle, take nearest one\n return self.index(r[0], r[1])\n else:\n return array(xis), array(yis)\n elif len(r) == 4: # rectangular subdomain - old functionality\n xi1, yi1 = self.index(min(r[0], r[2]), min(r[1], r[3]))\n xi2, yi2 = self.index(max(r[0], r[2]), max(r[1], r[3]))\n return s_[xi1:xi2+1], s_[yi1:yi2+1]\n else: # use enveloping polygon\n xpos = self.gpos\n xis = []\n yis = []\n #replaced matplotlib Path by numpy\n #p = Path(array(r).reshape(-1,2))\n #inds = p.contains_points()\n #inds = in_poly(xpos[:2,:].T,array(r).reshape(-1,2))\n poly = Polygon(array(r).reshape(-1,2)[:,0],array(r).reshape(-1,2)[:,1])\n dists = poly.is_inside(xpos[0,:],xpos[1,:]) \n inds = dists >= 0\n for np in arange(self.size)[inds]: # np -- points in x2-circle\n xi, yi = self.index(xpos[0, np], xpos[1, np])\n xis += [xi]\n yis += [yi]\n if not (xis and yis): # if no points inside, take nearest to center\n center = array(r).reshape(-1,2).mean(0)\n return self.index(center[0], center[1])\n else:\n return array(xis), array(yis)\n #return arange(self.size)[inds]", "def indices ( self, *r):\n \n if len(r) == 3: # only 3 values given -> use x,y,radius method\n xpos = self.gpos\n xis = []\n yis = []\n dr2 = (xpos[0, :]-r[0])**2 + (xpos[1, :]-r[1])**2\n # array with true/false entries\n inds = dr2 <= r[2]**2 \n for np in arange(self.size)[inds]: # np -- points in x2-circle\n xi, yi = self.index(xpos[0, np], xpos[1, np])\n xis += [xi]\n yis += [yi]\n if not (xis and yis): # if no points in circle, take nearest one\n return self.index(r[0], r[1])\n else:\n return array(xis), array(yis)\n elif len(r) == 4: # rectangular subdomain - old functionality\n xi1, yi1 = self.index(min(r[0], r[2]), min(r[1], r[3]))\n xi2, yi2 = self.index(max(r[0], r[2]), max(r[1], r[3]))\n return s_[xi1:xi2+1], s_[yi1:yi2+1]\n else: # use enveloping polygon\n xpos = self.gpos\n xis = []\n yis = []\n #replaced matplotlib Path by numpy\n #p = Path(array(r).reshape(-1,2))\n #inds = p.contains_points()\n #inds = in_poly(xpos[:2,:].T,array(r).reshape(-1,2))\n poly = Polygon(array(r).reshape(-1,2)[:,0],array(r).reshape(-1,2)[:,1])\n dists = poly.is_inside(xpos[0,:],xpos[1,:]) \n inds = dists >= 0\n for np in arange(self.size)[inds]: # np -- points in x2-circle\n xi, yi = self.index(xpos[0, np], xpos[1, np])\n xis += [xi]\n yis += [yi]\n if not (xis and yis): # if no points inside, take nearest to center\n center = array(r).reshape(-1,2).mean(0)\n return self.index(center[0], center[1])\n else:\n return array(xis), array(yis)\n #return arange(self.size)[inds]", "def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...", "def find_position(self, xv, yv):\n # Convert position in spheric coord\n phi = xv*self.FOV_img/360/self.img_res\n theta = yv*self.FOV_img_Y/180/self.img_res_Y\n phi2 = phi+(360-self.FOV_img)/2\n theta2 = theta+(180-self.FOV_img_Y)/2\n\n u, v, w = spheric2cart(np.radians(theta2), np.radians(phi2)) # give cartesian coord of pixel\n\n # ignore errors due to /0 -> inf, -inf\n # divide (w/v) and invalid arctan2()\n with np.errstate(all='ignore'): # OPTIMIZE: see comment about pi = -pi and don't matter if -0 or 
0 -> just replace by pi\n beta = -np.arctan(w/v)\n# beta2 = -np.arctan2(w, v)\n\n# v2 = np.dot(rotation_matrix(beta), [u, v, w]) # take 3*3 created matrix and aplly to vector\n matrix = rotation_matrix(beta)\n u2 = matrix[0, 0]*u\n v2 = matrix[1, 1]*v+matrix[1, 2]*w\n w2 = matrix[2, 1]*v+matrix[2, 2]*w\n _, seen_angle = cart2spheric(u2, v2, w2) # return phi in equator \"projection\"\n\n seen_angle = np.degrees(seen_angle)\n seen_angle = np.mod(seen_angle, 360) # define phi [0, 360]\n\n# seen_angle[seen_angle > 360] -= 360\n deviated_angle = np.zeros(seen_angle.shape)\n deviated_angle[seen_angle < 180] = self.interpolation(seen_angle[seen_angle < 180])\n deviated_angle[seen_angle >= 180] = 360 - self.interpolation(360-seen_angle[seen_angle >= 180])\n# np.flip(deviated_angle, 1) \" mais probleme overlap entre left et right\n\n theta = pi/2# *np.ones(deviated_angle.shape)\n phi = np.radians(deviated_angle)\n u3, v3, w3 = spheric2cart(theta, phi) #get cart coord of deviated pixel\n\n matrix = rotation_matrix(-beta)\n u4 = matrix[0, 0]*u3\n v4 = matrix[1, 1]*v3+matrix[1, 2]*w3\n w4 = matrix[2, 1]*v3+matrix[2, 2]*w3\n\n theta, phi = cart2spheric(u4, v4, w4) #give spheric coord of deviated pixel\n\n theta, phi = np.degrees(theta), np.degrees(phi)\n\n phi -= (360-self.FOV_img)/2\n theta -= (180-self.FOV_img_Y)/2\n\n with np.errstate(all='ignore'): # OPTIMIZE\n phi = np.mod(phi, 360) # define phi [0, 360]\n theta = np.mod(theta, 180) # define phi [0, 360]\n\n phi[phi == 360] = 0\n xv2 = phi*360/self.FOV_img*self.img_res\n yv2 = theta*180/self.FOV_img_Y*self.img_res_Y #give deviated angle pixel position\n\n xv2[np.isnan(xv2)] = -1\n yv2[np.isnan(yv2)] = -1\n\n xv2 = np.array(xv2, dtype=int)\n yv2 = np.array(yv2, dtype=int)\n\n return xv2, yv2", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n accu_0 += x * x\n accu_1 += x * y\n accu_2 += x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= 
scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_1 + cov_mat_2\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def get_fan_in(xy=(0, 0), dim_x_l=10, dim_y_l=10, dim_x_u=9, dim_y_u=9, block_x=2, block_y=2, radius=2):\n x = xy[0]\n y = xy[1]\n if dim_x_u > 1:\n factor_x = ((dim_x_l-1)-(block_x-1))/(1.0*(dim_x_u-1))\n else:\n factor_x = ((dim_x_l-1)-(block_x))/2.0\n if dim_y_u > 1:\n factor_y = ((dim_y_l-1)-(block_y-1))/(1.0*(dim_y_u-1))\n else:\n factor_y = ((dim_y_l-1)-(block_y))/2.0\n results = []\n if dim_x_u > 1 and dim_y_u > 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((factor_x*(x))+xx), int((factor_y*(y))+yy)))\n return results\n elif dim_x_u == 1 and dim_y_u > 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((dim_x_l-block_x)/2.0+xx), int((factor_y*(y)+yy))))\n return results\n elif dim_x_u > 1 and dim_y_u == 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((factor_x*(x)+xx)), int((dim_y_l-block_y)/2.0+yy)))\n return results\n elif dim_x_u == 1 and dim_y_u == 1:\n for xx in range(block_x):\n for yy in range(block_y):\n if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:\n continue\n results.append((int((dim_x_l-block_x)/2.0+xx), int((dim_y_l-block_y)/2.0+yy)))\n return results", "def pt(x,y):\n t = sample_side\n global d, ds, samples, mask\n d = depth[y-t:y+t,x-t:x+t]\n\n # This is where I choose which point in the sample to use. I take\n # the minimum, which is the nearest pixel. 
Other possibilities\n # are median, mean, etc.\n if method=='median':\n meand = np.median(d[d<2047])\n if method=='mean':\n meand = np.mean(d[d<2047])\n if method=='min':\n meand = d[d<2047].min()\n if method=='kmeans':\n import Pycluster\n labels, error, nfound = Pycluster.kcluster(d.reshape(-1,1),4)\n labels = labels.reshape(d.shape)\n means = np.array([d[labels==i].mean() for i in range(labels.max()+1)])\n nearest = np.argmin(means)\n mask = labels==nearest\n samples = d[mask]\n\n def radius(target):\n x,y = np.nonzero(d == target)\n return np.sqrt((x[0]-sample_side/2)**2+(y[0]-sample_side/2)**2)\n cands = (samples.min(), samples.max())\n rads = [radius(i) for i in cands]\n\n meand = means.min()\n #meand = cands[np.argmax(rads)]\n #meand = np.median(samples)\n #meand = samples.min() if np.median(samples) > np.mean(samples) else samples.max()\n return x,y,meand,1", "def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]", "def find_cluster(self, point: tuple) -> tuple:\r\n\r\n # set quickness\r\n quickness = 1\r\n\r\n # Create initial bounding box\r\n bb = [point[0], point[1], point[0], point[1]]\r\n\r\n # Get the first direction to expand the box\r\n direction = self.touching_border(bb, quickness)\r\n\r\n # loop until the box has no green on the perimeter\r\n while direction != 'EDGE':\r\n\r\n # Check if there is an error\r\n if bb[2] - bb[0] > self.size[0] / 4 \\\r\n or bb[3] - bb[1] > self.size[1] / 4:\r\n \r\n bb[0] = 0\r\n bb[2] = len(self.bin_pic[0]) - 1\r\n return ('ERROR',\r\n [(x, y)\r\n for x in range(bb[0], bb[2] + 1)\r\n for y in range(bb[1], bb[3] + 1)],\r\n bb)\r\n\r\n # Expand Down and Right\r\n if direction == 'BR':\r\n bb[2] += quickness\r\n bb[3] += quickness\r\n\r\n # Expand Down and Left\r\n elif direction == 'BL':\r\n bb[0] -= quickness\r\n bb[3] += quickness\r\n\r\n # Expand Right\r\n elif direction == 'RIGHT':\r\n bb[2] += quickness\r\n\r\n # Expand Down \r\n elif direction == 'BOTTOM':\r\n bb[3] += quickness\r\n\r\n # Expand Left \r\n elif direction == 'LEFT':\r\n bb[0] -= quickness\r\n\r\n # Expand Up\r\n elif direction == 'TOP':\r\n bb[1] -= quickness\r\n\r\n # Check the area directly around the current box\r\n elif direction == 'NONE':\r\n cntn = False\r\n \r\n for i in range(1, 3):\r\n\r\n # if there is a green pixel just outside of the box,\r\n # expand the box to cover it and continue searching\r\n tb = self.touching_border([bb[0] - i,\r\n bb[1] - i,\r\n bb[2] + i,\r\n bb[3] + i])\r\n \r\n if tb != 'NONE':\r\n direction = tb\r\n cntn = True\r\n break\r\n \r\n if cntn:\r\n continue\r\n\r\n break\r\n \r\n # Default case\r\n else:\r\n raise IndexError(str(direction) + ' is not a valid direction!')\r\n\r\n # Get new direction to expand in\r\n direction = self.touching_border(bb, quickness)\r\n\r\n # Gather all the green pixels within the bounding box \r\n cluster = [(x, y)\r\n for x in range(bb[0], bb[2] + 1)\r\n for y in range(bb[1], bb[3] + 1)\r\n if self.bin_pic[y][x]]\r\n\r\n # Don't count the plant if it's touching the edge of the picture\r\n if direction == 'EDGE':\r\n if len(cluster) > 250:\r\n return (bb, cluster)\r\n else:\r\n return (None, cluster, bb)\r\n \r\n return (bb, cluster)", "def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...", "def get_point(k, refpt):\n i = 0\n while i < k:\n rho, theta = np.random.uniform(r, 2*r), np.random.uniform(0, 
2*np.pi)\n pt = refpt[0] + rho*np.cos(theta), refpt[1] + rho*np.sin(theta), 0\n if not (0 <= pt[0] < width and 0 <= pt[1] < height):\n # This point falls outside the domain of the grid, so try again.\n i += 1\n continue\n if point_valid(pt) and is_on_face(pt, v1, v2, v3):\n return pt\n i += 1\n # We failed to find a suitable point in the vicinity of refpt.\n return False", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return v1.colinear(v2, e)", "def contains ( self, pos ):\n dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2\n # which points are in the circle?\n if self.include_border:\n inds = (dr2 - self.r**2) < self.abs_tol\n else:\n inds = (dr2 - self.r**2) < -self.abs_tol\n \n \n # if there's no poit inside\n if ~inds.any() and self.default_nearest: \n inds[argmin(dr2)] = True\n \n return inds", "def checkForCircle(self, points, tangents):\n if len(points)<10:\n return False, 0\n\n if all(points[0]==points[-1]): # last exactly equals the first.\n # Ignore last point for this check\n points = points[:-1]\n tangents = tangents[:-1]\n #print 'Removed last ', points\n xmin,ymin, w, h = computeBox( points)\n diag2=(w*w+h*h)\n \n diag = sqrt(diag2)*0.5\n norms = numpy.sqrt(numpy.sum( tangents**2, 1 ))\n\n angles = numpy.arctan2( tangents[:,1], tangents[:,0] ) \n #debug( 'angle = ', repr(angles))\n N = len(angles)\n \n deltas = points[1:] - points[:-1] \n deltasD = numpy.concatenate([ [D(points[0],points[-1])/diag], numpy.sqrt(numpy.sum( deltas**2, 1 )) / diag] )\n\n # locate and avoid the point when swicthing\n # from -pi to +pi. 
The point is around the minimum\n imin = numpy.argmin(angles)\n debug(' imin ',imin)\n angles = numpy.roll(angles, -imin)\n deltasD = numpy.roll(deltasD, -imin)\n n=int(N*0.1)\n # avoid fluctuations by removing points around the min\n angles=angles[n:-n]\n deltasD=deltasD[n:-n]\n deltasD = deltasD.cumsum()\n N = len(angles)\n\n # smooth angles to avoid artificial bumps\n angles = smoothArray(angles, n=max(int(N*0.03),2) )\n\n deltaA = angles[1:] - angles[:-1]\n deltasDD = (deltasD[1:] -deltasD[:-1])\n deltasDD[numpy.where(deltasDD==0.)] = 1e-5*deltasD[0]\n dAdD = abs(deltaA/deltasDD)\n belowT, count = True,0\n for v in dAdD:\n if v>6 and belowT:\n count+=1\n belowT = False\n belowT= (v<6)\n\n self.temp = (deltasD,angles, tangents, dAdD )\n fracStraight = numpy.sum(deltasDD[numpy.where(dAdD<0.3)])/(deltasD[-1]-deltasD[0])\n curveLength = deltasD[-1]/3.14\n #print \"SSS \",count , fracStraight\n if curveLength> 1.4 or fracStraight>0.4 or count > 6:\n isCircle =False\n else: \n isCircle= (count < 4 and fracStraight<=0.3) or \\\n (fracStraight<=0.1 and count<5)\n\n if not isCircle:\n return False, 0\n \n # It's a circle !\n radius = points - numpy.array([xmin+w*0.5,ymin+h*0.5])\n radius_n = numpy.sqrt(numpy.sum( radius**2, 1 )) # normalize\n\n mini = numpy.argmin(radius_n) \n rmin = radius_n[mini]\n maxi = numpy.argmax(radius_n) \n rmax = radius_n[maxi]\n # void points around maxi and mini to make sure the 2nd max is found\n # on the \"other\" side\n n = len(radius_n)\n radius_n[maxi]=0 \n radius_n[mini]=0 \n for i in range(1,n/8+1):\n radius_n[(maxi+i)%n]=0\n radius_n[(maxi-i)%n]=0\n radius_n[(mini+i)%n]=0\n radius_n[(mini-i)%n]=0\n radius_n_2 = [ r for r in radius_n if r>0]\n rmax_2 = max(radius_n_2)\n rmin_2 = min(radius_n_2) # not good !!\n anglemax = numpy.arccos( radius[maxi][0]/rmax)*numpy.sign(radius[maxi][1])\n return True, (xmin+w*0.5,ymin+h*0.5, 0.5*(rmin+rmin_2), 0.5*(rmax+rmax_2), anglemax)", "def vignette_sampling_coordinates(\n principal_point: ArrayLike = np.array([0.5, 0.5]),\n aspect_ratio: float = 1,\n diagonal_samples: int = 10,\n diagonal_selection: int = 2,\n edge_samples: int = 10,\n samples_rho: int = 7,\n samples_phi: int = 21,\n radius: float = 0.9,\n radial_bias: float = 1,\n) -> NDArrayFloat:\n\n principal_point = as_float_array(principal_point)\n\n samples = []\n\n diagonal = np.linspace(0, 1, diagonal_samples)\n diagonal = np.hstack(\n [diagonal[1:diagonal_selection], diagonal[-diagonal_selection:-1]]\n )\n samples.append(tstack([diagonal, diagonal]))\n samples.append(tstack([diagonal, 1 - diagonal]))\n\n edge = np.linspace(0, 1, edge_samples)\n samples.append(tstack([edge, zeros(edge_samples)]))\n samples.append(tstack([edge, ones(edge_samples)]))\n samples.append(tstack([zeros(edge_samples), edge])[1:-1])\n samples.append(tstack([ones(edge_samples), edge])[1:-1])\n\n coordinates = np.vstack(samples)\n\n coordinates[..., 0] = LinearInterpolator(\n [0, 0.5, 1], [0, principal_point[0], 1]\n )(coordinates[..., 0])\n coordinates[..., 1] = LinearInterpolator(\n [0, 0.5, 1], [0, principal_point[1], 1]\n )(coordinates[..., 1])\n\n radial_samples = radial_sampling_function(\n samples_rho,\n samples_phi,\n cast(float, 1 + (np.max(principal_point - 0.5) * 2)),\n radial_bias,\n )\n # NOTE: Some randomisation is required to avoid a\n # \"LinAlgError: Singular matrix\" exception raised by\n # \"scipy.interpolate.RBFInterpolator\" definition.\n radial_samples += (\n np.random.default_rng(8).random(radial_samples.shape) - 0.5\n ) / 1000\n radial_samples = 
np.reshape(radial_samples / (2 * 1 / radius), [-1, 2])\n radial_samples[..., 1] *= aspect_ratio\n radial_samples += principal_point\n\n coordinates = np.vstack([coordinates, radial_samples])\n\n coordinates = coordinates[\n np.logical_and(\n np.all(coordinates >= 0, axis=-1),\n np.all(coordinates <= 1, axis=-1),\n )\n ]\n\n return coordinates", "def find_points(self):\n\n points = [\n (self.inner_radius, 0, \"straight\"),\n (self.inner_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.arc_height / 2, \"circle\"),\n (self.mid_radius, 0, \"circle\"),\n (self.outer_radius, -self.arc_height / 2, \"straight\"),\n (self.outer_radius, -self.height / 2, \"straight\"),\n (self.inner_radius, -self.height / 2, \"straight\")\n ]\n\n self.points = points", "def nearest_voxel(center, roi):\n nearest=[]\n min_dist = 10000\n for vxl in roi:\n dist = sum(abs(np.subtract(vxl,center)))/3\n if dist < min_dist:\n min_dist=dist\n nearest=[vxl]\n elif dist==min_dist:\n nearest.append(vxl)\n # print(nearest)\n return nearest[random.randint(0,len(nearest)-1)]", "def parametrized_circle(point_a, point_b, point_c, theta):\n radius, center = shortest_line_to_point(point_a, point_b, point_c)\n # print'center, radius \\n', center, radius\n center_axis = np.subtract(point_a, point_b)\n # print 'center axis %s , radius %s, center %s' % (center_axis, radius, center)\n # center_axis dot <1,1,z> = 0 returns perp vector\n in_plane = norm_vect(np.subtract(point_c, center))\n perp_1 = np.cross(center_axis, in_plane)\n perp_2 = np.cross(center_axis, perp_1)\n # print 'perp dick', perp_1, perp_2\n # norm perpendicular vectors\n perp_1 = norm_vect(perp_1)\n perp_2 = norm_vect(perp_2)\n if -1e-6 > np.dot(perp_1, perp_2) > 1e-6 or -1e-6 > (np.dot(perp_1, center_axis)) > 1e-6 or \\\n -1e-6 > np.dot(perp_2, center_axis) > 1e-6:\n print 'not perpendicular'\n # print np.dot(perp_1, perp_2), np.dot(perp_1, center_axis), np.dot(perp_2, center_axis)\n x = center[0] + (radius * math.cos(theta) * perp_2[0]) + (radius * math.sin(theta) * perp_1[0])\n y = center[1] + (radius * math.cos(theta) * perp_2[1]) + (radius * math.sin(theta) * perp_1[1])\n z = center[2] + (radius * math.cos(theta) * perp_2[2]) + (radius * math.sin(theta) * perp_1[2])\n return [x, y, z]", "def nearest_in_n_sphere(self, value, r):\n return self.nearest_in_bounding_box(value, r)\n \n # This seems right\n # return self.binary_search_find_nearest_neighbors_in_radius(value, r)\n \n # This seems wrong\n # return self.recur_find_nearest_n_neighbor(value, r)", "def radiiPoints(self, R):\n width = 2*R+1\n xspace = np.arange(width)-R\n yspace = np.arange(width)-R\n xx, yy= np.meshgrid(xspace, yspace)\n dist = np.sqrt(xx**2+yy**2)\n xpts = np.nonzero((dist<R+0.5) & (dist>R-0.5))[0]-R\n ypts = np.nonzero((dist<R+0.5) & (dist>R-0.5))[1]-R\n order = np.argsort(np.arctan2(xpts, ypts))\n return xpts[order], ypts[order]", "def find_reference_radials(azimuth, velocity):\n\n def find_min_quadrant(azi, vel, nvalid_gate_qd, nsum_moy):\n return azi[nvalid_gate_qd >= nsum_moy][np.argmin(np.nanmean(np.abs(vel), axis=1)[nvalid_gate_qd >= nsum_moy])]\n\n nvalid_gate = np.sum(~np.isnan(velocity), axis=1)\n nvalid_gate[nvalid_gate < 10] = 0\n nsum_tot = np.sum(~np.isnan(velocity[nvalid_gate > 0, :]))\n nvalid_beam = len(azimuth[nvalid_gate > 0])\n\n nsum_moy = nsum_tot / nvalid_beam\n if nsum_moy > 0.7 * velocity.shape[1]:\n nsum_moy = 0.7 * velocity.shape[1]\n\n try:\n start_beam = find_min_quadrant(azimuth, velocity, 
nvalid_gate, nsum_moy)\n except ValueError:\n start_beam = azimuth[np.argmin(np.nanmean(np.abs(velocity), axis=1))]\n\n nb = np.zeros((4,))\n for i in range(4):\n pos = (azimuth >= i * 90) & (azimuth < (i + 1) * 90)\n try:\n nb[i] = find_min_quadrant(azimuth[pos], velocity[pos, :], nvalid_gate[pos], nsum_moy)\n except ValueError:\n nb[i] = 9999\n\n opposition = start_beam + 180\n if opposition >= 360:\n opposition -= 360\n\n end_beam = nb[np.argmin(np.abs(nb - opposition))]\n\n return start_beam, end_beam", "def pointPotential(x,y,q,posx,posy):\n k = 8.99e9\n V = (k * q) / (sqrt(x**2 + (y - sqrt((posx**2 + posy**2)))**2))\n return V", "def find_bbox_coord(point_x, point_y):\r\n is_good_rect = True\r\n bottom_x, bottom_y = [], []\r\n top_x, top_y = [], []\r\n if len(point_x) < 4:\r\n is_good_rect = False\r\n if len(point_x) == 4:\r\n out_of_repeats_x = []\r\n out_of_repeats_y = []\r\n delta = 10**(-6)\r\n for j in range(len(point_x)): # add delta for the reason of not mess in equal angles\r\n out_of_repeats_x.append(point_x[j] + delta*j)\r\n out_of_repeats_y.append(point_y[j] + delta*j)\r\n point_x, point_y = out_of_repeats_x, out_of_repeats_y\r\n \r\n quadrate_width = ((point_x[1] - point_x[0])**2+(point_y[1] - point_y[0])**2)**0.5\r\n quadrate_height = ((point_x[1] - point_x[2])**2+(point_y[1] - point_y[2])**2)**0.5\r\n aspect_ratio = quadrate_width / quadrate_height\r\n if aspect_ratio > 0.7 and aspect_ratio < 1.3:\r\n is_good_rect = False \r\n ###Aprint('Квадрат. Закрашиваем')\r\n elif quadrate_width * quadrate_height < 100:\r\n is_good_rect = False \r\n ###Aprint('Квадрат. Закрашиваем')\r\n else:\r\n ###Aprint('Прямоугольник')\r\n edge_x, edge_y = point_x, point_y\r\n bottom_x, bottom_y, top_x, top_y, is_good_rect = top_bottom_dots(point_x, point_y, edge_x, edge_y)\r\n \r\n elif len(point_x) > 4:\r\n ###Aprint('Многоугольник')\r\n out_of_repeats_x = []\r\n out_of_repeats_y = []\r\n delta = 10**(-4)\r\n for j in range(len(point_x)): # add delta for the reason of not mess in equal angles\r\n out_of_repeats_x.append(point_x[j] + delta*j)\r\n out_of_repeats_y.append(point_y[j] + delta*j)\r\n point_x, point_y = out_of_repeats_x, out_of_repeats_y\r\n \r\n edge_x, edge_y = find_4_dots(point_x, point_y)\r\n\r\n bottom_x, bottom_y, top_x, top_y, is_good_rect = top_bottom_dots(point_x, point_y, edge_x, edge_y)\r\n \r\n if is_good_rect:\r\n \r\n bottom_edge_x, bottom_edge_y = [], []\r\n for i in bottom_x:\r\n if i in edge_x:\r\n index = bottom_x.index(i)\r\n bottom_edge_x.append(bottom_x[index])\r\n bottom_edge_y.append(bottom_y[index])\r\n bottom_edge_x, bottom_edge_y = zip(*sorted(zip(bottom_edge_x, bottom_edge_y)))\r\n bottom_lowest_point = [bottom_edge_x[0], bottom_edge_y[0]]\r\n\r\n top_edge_x, top_edge_y = [], []\r\n for i in top_x:\r\n if i in edge_x:\r\n index = top_x.index(i)\r\n top_edge_x.append(top_x[index])\r\n top_edge_y.append(top_y[index])\r\n top_edge_x, top_edge_y = zip(*sorted(zip(top_edge_x, top_edge_y)))\r\n top_lowest_point = [top_edge_x[0], top_edge_y[0]]\r\n\r\n bottom_x, bottom_y = Euclidian_distance_sorting(bottom_x, bottom_y, bottom_lowest_point)\r\n top_x, top_y = Euclidian_distance_sorting(top_x, top_y, top_lowest_point)\r\n else:\r\n bottom_x, bottom_y, top_x, top_y = [], [], [], []\r\n \r\n return is_good_rect, bottom_x, bottom_y, top_x, top_y", "def find_reference_radials(azi, vel, debug=False):\n pos_valid = get_valid_rays(vel)\n pos_static = get_static_rays(vel)\n\n # Finding intersects of criteria 1 to 3.\n weight_valid = np.arange(0, len(pos_valid), 1)\n 
weight_static = np.arange(0, len(pos_static), 1)\n\n total_weight = np.zeros(len(pos_valid)) + np.NaN\n for cnt, (one_valid, one_valid_weight) in enumerate(zip(pos_valid, weight_valid)):\n try:\n one_static_weight = weight_static[one_valid == pos_static][0]\n except IndexError:\n one_static_weight = 9999\n\n total_weight[cnt] = one_static_weight + one_valid_weight\n\n pos1 = pos_valid[np.argmin(total_weight)]\n\n# # Finding the 2nd radial of reference\n# pos2 = pos1 + len(azi) // 2\n# if pos2 >= len(azi):\n# pos2 -= len(azi)\n\n try:\n ref2_range_min, ref2_range_max = get_opposite_azimuth(azi[pos1])\n if ref2_range_min < ref2_range_max:\n goodpos = np.where((azi >= ref2_range_min) & (azi <= ref2_range_max))[0]\n else:\n goodpos = np.where((azi >= ref2_range_min) | (azi <= ref2_range_max))[0]\n\n rslt = [(a, total_weight[a == pos_valid][0]) for a in goodpos if a in pos_valid]\n opposite_pos, opposite_weight = zip(*rslt)\n pos2 = opposite_pos[np.argmin(opposite_weight)]\n except Exception:\n pos2 = pos1 + len(azi) // 2\n if pos2 > len(azi):\n pos2 -= len(azi)\n if debug:\n print(f\"References are azimuths {azi[pos1]} and {azi[pos2]}, i.e. azimuthal positions {pos1} and {pos2}.\")\n\n return pos1, pos2", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def _MaskedEucl(self, v, radius, contribute=False):\n # Select only masked xyz / data :\n masked = self.data.mask\n xyz, data = self.xyz[masked, :], self.data.data[masked]\n # Get sign of the x coordinate :\n xsign = np.sign(xyz[:, 0]).reshape(1, -1)\n # Predefined masked euclidian distance :\n nv = v.shape[0]\n fmask = np.ones((v.shape[0], 3, len(data)), dtype=bool)\n\n # For each triangle :\n for k in range(3):\n # =============== EUCLIDIAN DISTANCE ===============\n eucl = cdist(v[:, k, :], xyz).astype(np.float32)\n fmask[:, k, :] = eucl <= radius\n # Contribute :\n if not contribute:\n # Get vertices signn :\n vsign = np.sign(v[:, k, 0]).reshape(-1, 1)\n # Find where vsign and xsign are equals :\n isign = np.logical_and(vsign != xsign, xsign != 0)\n fmask[:, k, :][isign] = False\n # Find where there's sources under radius and need to be masked :\n m = fmask.reshape(fmask.shape[0] * 3, fmask.shape[2])\n idx = np.dot(m, np.ones((len(data),), dtype=bool)).reshape(nv, 3)\n\n return idx", "def _circle_intersection(self, circle, point):\n dist = euclidean_distance((circle[0], circle[1]), point) - circle[2]\n vun = vec2d((circle[0] - point[0]), (circle[1] - point[1]))\n v = vun.normalized()\n\n x, y = (point[0] + dist * v.x), (point[0] + dist * v.x)\n\n return dist, (x, y)", "def 
getClosePoints(self, point, depth=None):\n if not depth:\n depth = self.depth\n\n point = [c if c > 0 else self.size[i]-c for i, c in enumerate(point)]\n point = [c if c < self.size[i] else c % self.size[i] for i, c in enumerate(point)]\n # testSignature = self.grid.getSignature(point, self.spacings)\n testSignature = self.grid.getSignature2(point, self.spacings)\n # print testSignature, point\n # print testSignature\n # return self.tree[tuple(testSignature)]\n\n neighbors = []\n for neighborSignature in self.grid.getNeighborNodes(testSignature):\n neighborSignature = [s if s>=0 else self.maxIndex for s in neighborSignature]\n # neighborSignature = testSignature[:-1] + [neighborSignature]\n try:\n neighbors += self.tree[tuple(neighborSignature)]\n except KeyError:\n pass\n return neighbors", "def getPointsInCircum(r, n=100, h=0, k=0):\n\n points = [(np.cos(2*np.pi/n*x)*r, np.sin(2*np.pi/n*x)*r) for x in range(0, n+1)]\n x, y = list(zip(*points))\n x = np.array(x)\n y = np.array(y)\n x += h\n y += k\n return (x, y)", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def inside_circle(total_count):\n\n x = np.float32(np.random.uniform(size=total_count))\n y = np.float32(np.random.uniform(size=total_count))\n\n radii = ##\n\n count = ##\n\n return count", "def find_coords(self, x, y, x_pos, y_pos):\n # Calculate relative coordinates in the sector for each point\n xA = self.x_values[x_pos, y_pos]\n xB = self.x_values[x_pos + 1, y_pos]\n xC = self.x_values[x_pos, y_pos + 1]\n xD = self.x_values[x_pos + 1, y_pos + 1]\n yA = self.y_values[x_pos, y_pos]\n yB = self.y_values[x_pos + 1, y_pos]\n yC = self.y_values[x_pos, y_pos + 1]\n yD = self.y_values[x_pos + 1, y_pos + 1]\n polarity = 2.0 * self.polarity[x_pos, y_pos] - 1.0\n a = xA\n b = xB - xA\n c = xC - xA\n d = xA - xB - xC + xD\n e = yA\n f = yB - yA\n g = yC - yA\n h = yA - yB - yC + yD\n denom = d * g - h * c\n mu = (h * b - d * f) / denom\n tau = (h * (a - x) - d * (e - y)) / denom\n zeta = a - x + c * tau\n eta = b + c * mu + d * tau\n theta = d * mu\n alpha = (-eta + polarity * np.sqrt(eta**2.0 - 4.0 * zeta * theta)) / (\n 2.0 * theta\n )\n beta = mu * alpha + tau\n\n # Alternate method if there are sectors that are \"too regular\"\n z = np.logical_or(\n np.isnan(alpha), np.isnan(beta)\n ) # These points weren't able to identify coordinates\n if np.any(z):\n these = np.isclose(\n f / b, (yD - yC) / (xD - xC)\n ) # iso-beta lines have equal slope\n if np.any(these):\n kappa = f[these] / b[these]\n int_bot = yA[these] - kappa * xA[these]\n int_top = yC[these] - kappa * xC[these]\n int_these = y[these] - kappa * x[these]\n beta_temp = (int_these - int_bot) / (int_top - int_bot)\n x_left = beta_temp * xC[these] + (1.0 - beta_temp) * xA[these]\n x_right = beta_temp * xD[these] + (1.0 - beta_temp) * xB[these]\n alpha_temp = (x[these] - x_left) / (x_right - x_left)\n beta[these] = beta_temp\n alpha[these] = alpha_temp\n\n # print(np.sum(np.isclose(g/c,(yD-yB)/(xD-xB))))\n\n return alpha, beta", "def test_circular_scatter():\n area = [0, 1000, 0, 1000]\n size = 1000\n x, y = gridder.circular_scatter(area, size, random=False)\n distances = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\n npt.assert_allclose(distances, distances[0]*np.ones(size-1), rtol=1e-09)", "def p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem):\n start_cell = (int((start_point[0] - top_left_cor[0]) / cellsize[0]),\n int((start_point[1] - top_left_cor[1]) / cellsize[1]))\n end_cell = 
(int((end_point[0] - top_left_cor[0]) / cellsize[0]),\n int((end_point[1] - top_left_cor[1]) / cellsize[1]))\n cells = misc.get_line(start_cell, end_cell) \n pnts = []\n elev = []\n \n dem_elv = dem[:,1]\n dem_indx = dem[:,2:4]\n\n for cell in cells:\n x = top_left_cor[0] + cell[0] * cellsize[0] + cellsize[0] / 2\n y = top_left_cor[1] + cell[1] * cellsize[1] + cellsize[1] / 2\n #xy_indx=[str(cell[0]),str(cell[1])]\n z_indx=np.logical_and(np.equal(dem_indx[:,0],cell[0]),np.equal(dem_indx[:,1],cell[1]))\n try:\n z=dem_elv[z_indx][0]\n except (np.sum(z_indx)>1):\n print(\"Oops! That was more than one indices in dem matching the query index (in getCellValue)\")\n #z_indx = [i for i,j in enumerate(dem_indx) if j == xy_indx]\n z = float(dem_elv[z_indx])\n pnts.append((x, y))\n elev.append(z)\n return pnts, elev", "def getYesPoints(pshapes, proj, dx, nmax, touch_center=True):\n\n mxmin = 9e10\n mxmax = -9e10\n mymin = 9e10\n mymax = -9e10\n for pshape in pshapes:\n pxmin, pymin, pxmax, pymax = pshape.bounds\n if pxmin < mxmin:\n mxmin = pxmin\n if pxmax > mxmax:\n mxmax = pxmax\n if pymin < mymin:\n mymin = pymin\n if pymax > mymax:\n mymax = pymax\n\n if not touch_center:\n geodict = GeoDict.createDictFromBox(mxmin, mxmax, mymin, mymax, dx, dx)\n img = rasterizeShapes(pshapes, geodict)\n #now get the numpy array of x/y coordinates where covgrid == 1\n idx = np.where(img == 1)[0]\n x, y = np.unravel_index(idx, (geodict.ny, geodict.nx))\n yespoints = list(zip(x.flatten(), y.flatten()))\n nrows = geodict.ny\n ncols = geodict.nx\n xvar = np.arange(geodict.xmin, geodict.xmax+geodict.dx, geodict.dx)\n yvar = np.arange(geodict.ymin, geodict.ymax+geodict.dy, geodict.dy)\n else:\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n ncols = len(xvar)\n nrows = len(yvar)\n if nmax is not None:\n if ncols*nrows > nmax:\n aspect = ncols/nrows\n ncols = np.sqrt(nmax*aspect)\n nrows = nmax/ncols\n ncols = int(ncols)\n nrows = int(nrows)\n #re-calculate dx here...\n tdx = (mxmax-mxmin)/ncols\n tdy = (mymax-mymin)/nrows\n dx = np.max(tdx, tdy)\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n\n #Get the \"yes\" points to sample from\n yespoints = []\n idx = []\n shapeidx = 0\n if pshapes[0].type == 'Polygon':\n #loop over shapes, projecting each one, then get the sample points\n for pshape in pshapes:\n if not shapeidx % 1000:\n print('Searching polygon %i of %i' % (shapeidx, len(pshapes)))\n shapeidx += 1\n pxmin, pymin, pxmax, pymax = pshape.bounds\n leftcol = np.where((pxmin - xvar) >= 0)[0].argmax()\n rightcol = np.where((xvar - pxmax) >= 0)[0][0]\n bottomrow = np.where((pymin - yvar) >= 0)[0].argmax()\n toprow = np.where((yvar - pymax) >= 0)[0][0]\n xp = np.arange(xvar[leftcol], xvar[rightcol]+dx, dx)\n yp = np.arange(yvar[bottomrow], yvar[toprow]+dx, dx)\n xmesh, ymesh = np.meshgrid(xp, yp)\n xy = list(zip(xmesh.flatten(), ymesh.flatten()))\n for point in xy:\n ix = np.where(xvar == point[0])[0][0]\n iy = np.where(yvar == point[1])[0][0]\n if pshape.contains(Point(point)):\n yespoints.append(point)\n idx.append(np.ravel_multi_index((iy, ix), (nrows, ncols), mode='raise', order='C'))\n else:\n yespoints = []\n for pshape in pshapes:\n yespoints.append(pshape.coords[0])\n\n return (np.array(yespoints), nrows, ncols, xvar, yvar, idx)", "def trackCircle( center, rad, imShape ):\n \n \"\"\"\n center = ccnt\n rad = rd\n inShape = segImg.shape\n debug = False\n \"\"\"\n \n # check if whole circle is inside image\n if (center[0] - rad) < 0 or (center[0] + 
rad) >= imShape[1] or (center[1] - rad) < 0 or (center[1] + rad) >= imShape[0]:\n raise NameError( 'Circle partialy outside the image' )\n \n center = np.array( center )\n \n # start tracking at right side of circle, always pick neigbouring pixel which is closest to tabs radius and stop when came around\n startPoint1 = np.round( center + np.array( [ rad, 0] ) )\n \n currentPoint = startPoint1.copy()\n contour = [ currentPoint ]\n iterNum = 0\n maxIterNum = 1000\n \n def getNextPoint():\n \"\"\"\n gets next point \n \"\"\"\n surroundingPts_local = np.array( [ [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1], [0,1], [1,1] ])\n surroundingPts_global = np.tile( currentPoint, [8,1] ) + surroundingPts_local\n \n if len( contour ) > 1:\n # dont use last\n includeInd = np.sum( surroundingPts_global == contour[-2], 1 ) != 2\n # aditionaly exlude neighbout pts\n excludeInd = np.where( includeInd == False)[0][0]\n if excludeInd == 0:\n includeInd[ [1, 7] ] = False\n elif excludeInd == 7:\n includeInd[ [0, 6] ] = False\n else:\n includeInd[ [ excludeInd-1, excludeInd+1 ] ] = False\n \n surroundingPts_global = surroundingPts_global * np.tile( includeInd, [2,1] ).T\n \n # find closest to demamnded radius\n dists = np.abs( np.sqrt( np.sum( ( surroundingPts_global - np.tile( center, [8,1] ) )**2, 1 ) ) - rad )\n ind = np.argmin( dists )\n return surroundingPts_global[ ind, : ]\n \n while 1:\n # check if max num of iterations passed\n if iterNum == maxIterNum:\n print Warning( 'Reached max num of iterations. Tracking unsuccessful!' )\n #return np.array( contour ).astype(np.int), -1\n break\n \n # get next point\n nextPoint = getNextPoint()\n\n # in first iteraton also remember sesond tracked point.\n if iterNum is 0: \n startPoint2 = nextPoint.copy()\n \n # check if came around\n if iterNum > 2 and ( np.sum(nextPoint == startPoint1) ==2 or np.sum(nextPoint == startPoint2) == 2 ):\n # finished successfuly\n break \n # print iterNum, nextPoint - startPoint1, nextPoint\n \n # add to storage\n contour.append( nextPoint ) \n # increment \n iterNum += 1\n # reassign\n currentPoint = nextPoint.copy()\n\n # return result and successful flag\n return np.array( contour ).astype(np.int)", "def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n # print(p1, region)\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n 
new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)", "def find_potential(n=10,nwalks=200):\n \n boundary = np.zeros((n+1, n+1)) # Store boundary potentials\n v = np.zeros((n+1, n+1)) # Store potential for all positions\n \n # Set the boundary conditions\n for i in range(1,n):\n boundary[0,i] = 10\n boundary[n,i] = 10\n boundary[i,0] = 5\n boundary[i,n] = 5\n # Set the the boundary position that maximizes the potential at [3, 5] to 20\n boundary[3,0] = boundary[4,0] = boundary[5,0] = boundary[6,0] = boundary[7,0] = 20\n #boundary[0,3] = boundary[0,4] = boundary[0,5] = boundary[0,6] = boundary[0,7] = 20\n \n v = np.copy(boundary) # Store potential for all positions\n\n # Compute Greens function for each position\n for x in range(1,n):\n for y in range(1,n):\n position = [x, y] # Position to compute Greens function for\n Greens_func = Greens_function_approxRW(n=n, nwalks=nwalks, start_position=position) # The Greens function\n \n # Find potential at current position\n v_pos = potential_from_Greens(boundary, n=n, G=Greens_func, nwalks=nwalks)\n v[position[1], position[0]] = v_pos\n \n # v is now computed for all locations and can be plotted\n fig = plt.figure()\n plt.title('Maximized potential for [3,5]', fontsize = 18)\n im = plt.imshow(v, cmap=None, interpolation='nearest')\n cb = fig.colorbar(im)\n cb.ax.tick_params(labelsize=14)\n plt.show()", "def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n 
nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;", "def radon_squares(N,theta_vec,S, circle=False):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*N/2\n S[:,1] = (S[:,1])*N/2\n S[:,2] = (S[:,2])*N/2\n S[:,3] = S[:,3]*math.pi/180\n \n [t_vec, grid_t, grid_theta] = build_t_theta_pixel(N,theta_vec, circle = circle);\n [nrow,ncol] = np.shape(S);\n tmp = np.zeros((nrow,len(grid_theta)));\n for i in range(nrow): # cycle on the elements of the phantom\n grid_theta_new = grid_theta - S[i,3];\n grid_t_new = (grid_t - S[i,0]* np.cos(grid_theta) - S[i,1]*np.sin(grid_theta))*2/S[i,2];\n\n for j in range(len(grid_theta)): # angles\n theta_new = grid_theta_new[j]\n t_new = grid_t_new[j]\n if theta_new == 0:\n if abs(t_new)< 1:\n v1= -1;\n v2= 1;\n else:\n v1= 0;\n v2= 0;\n #endif\n else:\n v1= (t_new*np.cos(theta_new)-1)/np.sin(theta_new);\n v2= (t_new*np.cos(theta_new)+1)/np.sin(theta_new);\n #endif\n\n if theta_new == np.pi/2:\n if abs(t_new)< 1:\n h1= -1;\n h2= 1;\n else:\n h1= 0;\n h2= 0;\n #endif\n else:\n h1 = (1-t_new*np.sin(theta_new))/np.cos(theta_new);\n h2 = (-1-t_new*np.sin(theta_new))/np.cos(theta_new);\n #endif\n vmax= np.maximum(v1,v2); # scalar values\n vmin= np.minimum(v1,v2);\n hmax= np.maximum(h1,h2);\n hmin= np.minimum(h1,h2);\n entryval= np.maximum(vmin,hmin);\n exitval= np.minimum(vmax,hmax);\n\n if (exitval-entryval) > 0:\n tmp[i,j]=(.5)*S[i,4]*S[i,2]*(exitval-entryval);\n else:\n tmp[i,j]=0;\n #endif\n #endfor\n #endfor\n radvec = np.sum(tmp,axis=0);\n \n analytical_sinogram = np.transpose(np.reshape(radvec,(len(theta_vec),len(t_vec))));\n\n return analytical_sinogram", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def _adj(self, sino):\n # image coordinate grid\n p_x = torch.linspace(\n -self.n[0] / 2.0 + 0.5,\n self.n[0] / 2.0 - 0.5,\n self.n[0],\n device=self.n.device,\n ).unsqueeze(1)\n p_y = torch.linspace(\n -self.n[1] / 2.0 + 0.5,\n self.n[1] / 2.0 - 0.5,\n self.n[1],\n device=self.n.device,\n ).unsqueeze(0)\n\n # check if coordinate is within circle\n if self.flat:\n max_gamma = torch.atan(\n (self.s_detect.abs() * (self.n_detect / 2.0))\n / (self.d_source + self._d_detect())\n )\n else:\n max_gamma = (self.s_detect.abs() * (self.n_detect / 2.0)) / (\n self.d_source + self._d_detect()\n )\n radius = 
self.d_source * torch.sin(max_gamma)\n p_r = torch.sqrt(p_x * p_x + p_y * p_y)\n mask = p_r <= radius\n\n # use batch and channel dimensions for vectorized interpolation\n original_dim = sino.ndim\n while sino.ndim < 4:\n sino = sino.unsqueeze(0)\n assert sino.shape[-3] == 1 # we can handle only single channel data\n sino = sino.transpose(-4, -3) # switch batch and channel dim\n\n # rotated coordinate grid\n pi = torch.acos(torch.zeros(1)).item() * 2.0\n cs = torch.cos(self.angles * pi / 180.0).unsqueeze(1).unsqueeze(1)\n sn = torch.sin(self.angles * pi / 180.0).unsqueeze(1).unsqueeze(1)\n p_x_r = cs * p_x + sn * p_y\n p_y_r = -sn * p_x + cs * p_y\n\n # find angles and detector positions defining rays through coordinate\n if self.flat:\n grid_d = (\n (self.d_source + self._d_detect())\n * p_x_r\n / (self.d_source - p_y_r)\n )\n else:\n grid_d = (self.d_source + self._d_detect()) * torch.atan(\n p_x_r / (self.d_source - p_y_r)\n )\n grid_a = (\n torch.arange(self.m[0], device=sino.device)\n .unsqueeze(1)\n .unsqueeze(1)\n .expand(-1, self.n[0], self.n[1])\n - self.m[0] / 2.0\n + 0.5\n )\n\n grid_d = grid_d / (\n (self.n_detect / 2.0 - 0.5) * self.s_detect\n ) # rescale valid detector positions to [-1,1]\n grid_a = grid_a / (self.m[0] / 2.0 - 0.5) # rescale angles to [-1,1]\n grid = torch.stack([grid_d, grid_a], dim=-1)\n inter = torch.nn.functional.grid_sample(\n sino.expand(self.m[0], -1, -1, -1), grid, align_corners=True\n )\n\n # compute integral reweighting factors and integrate\n if self.flat:\n weight = (self.d_source + self._d_detect()).pow(2) / (\n self.d_source - p_y_r\n ).pow(2)\n else:\n weight = (self.d_source + self._d_detect()).pow(2) / (\n (self.d_source - p_y_r).pow(2) + p_x_r.pow(2)\n )\n x = mask * (inter * (weight).unsqueeze(1)).sum(dim=0, keepdim=True)\n\n # undo batch and channel manipulations\n x = x.transpose(-4, -3) # unswitch batch and channel dim\n while x.ndim > original_dim:\n x = x.squeeze(0)\n\n return x / self.s_detect.abs()", "def query(self, points):\n voxel_x = np.clip(np.searchsorted(\n self.segments[0], points[:, 0]) - 1, 0, self.x_y_z[0])\n voxel_y = np.clip(np.searchsorted(\n self.segments[1], points[:, 1]) - 1, 0, self.x_y_z[1])\n voxel_z = np.clip(np.searchsorted(\n self.segments[2], points[:, 2]) - 1, 0, self.x_y_z[2])\n voxel_n = np.ravel_multi_index([voxel_x, voxel_y, voxel_z], self.x_y_z)\n\n return voxel_n", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def _point_in_object(self, pos, obj):\n if isinstance(obj, (Rectangle, Circle, Diamond, Triangle)):\n return self._point_in_tris(pos, obj)\n elif isinstance(obj, (ConcentricCircles, FixationDot)):\n return np.any([self._point_in_tris(pos, c) for c in obj._circles])", "def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n 
ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)", "def particle_LJV(R,N,D):\n b = np.zeros(N)\n for i in range(N):\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n b[i] = np.sum(4*((1/r)**12-(1/r)**6))\n Uv = np.sum(b)\n return Uv", "def getSearchSpaceCoords(self):", "def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)", "def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, 
v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)", "def determine_region(x, y, width, height):\n xs = [0, width / 3, 2 * width / 3, width]\n ys = [0, height / 3, 2 * height / 3, height]\n for i in range(3):\n for j in range(3):\n if (x >= xs[j] and x < xs[j + 1]) and (y >= ys[i] and y < ys[i + 1]):\n return i * 3 + j", "def filter_nearby_points(self, point: Tuple[float], radius: float):\n\n nearby = self.tree.query_ball_point(point, radius)\n return nearby", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def _3D_sphere_edges(G, radius):\n # TODO This can be parallelized.\n edges = []\n for (u, pu), (v, pv) in combinations(G.nodes(data=\"pos\"), 2):\n for a, b in zip(pu, pv)):\n if (haversine(a,b)) <= radius:\n edges.append((u, v))\n print(u,v)\n return edges", "def relative_interior_contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for eq in self.equation_generator():\n if not eq.contains(p):\n return False\n\n for ine in self.inequality_generator():\n if not ine.interior_contains(p):\n return False\n\n return True", "def pointfind2(plat, plon, lat, lon, pdif=1):\n\n\tdist_min = 1000000.\n\t\n\t\n\tfor i in range(lon.shape[0]):\n\t\tfor j in range(lon.shape[1]):\n\t\t\tdist = Ngl.gc_dist(plat,plon,lat[i,j],lon[i,j])\n\t\t\tif dist_min > dist:\n\t\t\t\tdist_min = dist\n\t\t\t\ti_min = i\n\t\t\t\tj_min = j\n\t\t\t\tlat_min = lat[i,j]\n\t\t\t\tlon_min = lon[i,j]\n\t\n\tprint(i_min,j_min,lat_min,lon_min)\n\tgg1 = i_min, j_min\n\t\n\treturn(gg1, lat_min, lon_min)", "def voxelize_points(points, pc_bbox_center, voxel_resolution, num_voxels_per_dim, pc_center_in_voxel_grid):\n\n # this is the voxel grid we are going to return\n voxel_grid = np.zeros((num_voxels_per_dim,\n num_voxels_per_dim,\n num_voxels_per_dim), dtype=np.bool)\n\n # take the points and convert them from meters to voxel space coords\n centered_scaled_points = np.floor(\n (points - np.array(pc_bbox_center) + np.array(\n pc_center_in_voxel_grid) * voxel_resolution) / voxel_resolution)\n\n # remove any points that are beyond the area that falls in our voxel grid\n mask = centered_scaled_points.max(axis=1) < num_voxels_per_dim\n 
centered_scaled_points = centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # remove any points that are outside of the region we are voxelizing\n # as they are to small.\n mask = centered_scaled_points.min(axis=1) > 0\n centered_scaled_points = centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid,\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # treat our remaining points as ints, since we are already in voxel coordinate space.\n # this points shoule be things like (5, 6, 7) which represent indices in the voxel grid.\n csp_int = centered_scaled_points.astype(int)\n\n # create a mask from our set of points.\n mask = (csp_int[:, 0], csp_int[:, 1], csp_int[:, 2])\n\n # apply the mask to our voxel grid setting voxel that had points in them to be occupied\n voxel_grid[mask] = 1\n\n return voxel_grid", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]", "def detector(centre_x, centre_y, height_z, radius, distance):\r\n z = np.linspace(0, height_z, 50)\r\n z = [i+distance for i in z]\r\n \r\n theta = np.linspace(0, 2*np.pi, 50)\r\n theta_grid, z_grid=np.meshgrid(theta, z)\r\n \r\n x_grid = radius*np.cos(theta_grid) + centre_x\r\n y_grid = radius*np.sin(theta_grid) + centre_y\r\n return x_grid, y_grid, z_grid", "def inside_circle(total_count):\n\n host_name = MPI.Get_processor_name()\n print(\"Rank {} generating {:n} samples on host {}.\".format(\n rank, total_count, host_name))\n x = np.float64(np.random.uniform(size=total_count))\n y = np.float64(np.random.uniform(size=total_count))\n\n radii = np.sqrt(x*x + y*y)\n\n count = len(radii[np.where(radii<=1.0)])\n\n return count", "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def __contains__(self, point):\n #### Original \n from pyresample.spherical_geometry import point_inside, Coordinate\n corners = 
self.corners\n\n if isinstance(point, tuple):\n return point_inside(Coordinate(*point), corners)\n else:\n return point_inside(point, corners)\n #### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in __contains__')\n #sphpoly = SphPolygon(corners)\n #return sphpoly.intersection(SphPolygon(point), sphpoly)", "def phantom_squares(n_points,S):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*n_points/2\n S[:,1] = S[:,1]*n_points/2\n S[:,2] = S[:,2]*n_points/2\n S[:,3] = S[:,3]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 ) \n nrow,ncol = S.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow)) \n\n for k in range(nrow): #itero sui quadrati\n x_new = x - S[k,0]\n y_new = y - S[k,1]\n\n u = abs(x_new*math.cos(S[k,3])+y_new*math.sin(S[k,3]))\n v = abs(-x_new*math.sin(S[k,3])+y_new*math.cos(S[k,3]))\n\n cond = np.maximum(u,v)\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] < S[k,2]/2):\n phantom1[i,j,k] = S[k,4]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def solve_circle(pts, known_radius, guess, max_error_accepted):\n\n if len(pts) < 2:\n return None\n\n x, y = pts.T\n def error(c):\n xc, yc = c\n return np.sqrt((x-xc)**2 + (y-yc)**2)-known_radius\n\n center, ier = optimize.leastsq(error, guess, maxfev=50) # maxfev limits the number of iterations\n if ier in [1,2,3,4]:\n errs = error(center)\n max_err = np.amax(np.abs(errs))\n if max_err > max_error_accepted:\n return None\n return center\n return None", "def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def _cage_pts(xyz, neighbor_xyzs, sigma, neighbor_diameters, L, M, R):\n pts = rand_sphere(M) * R + xyz\n for nxyz, nsig in zip(neighbor_xyzs, neighbor_diameters):\n dpts = 
np.remainder(pts - nxyz + L / 2.0, L) - L / 2.0\n dists_sq = np.sum(dpts**2, axis=1)\n goodix = dists_sq >= ((nsig + sigma) / 2.0)**2\n pts = pts[goodix, :]\n return pts", "def NN_z(x, y, con_ver, nbr_ver, cellsize):\n gx, gy, elevNNGrid = interpolate_to_grid(con_ver[:, 0], con_ver[:,1], con_ver[:,2], \n interp_type = \"natural_neighbor\", \n hres = cellsize[0])\n elev_NN = elevNNGrid[0, 0]\n if not(np.isnan(elev_NN)):\n elev_i = elev_NN\n else:\n print(\"elev_NN is nan: evaluating else loop\")\n d_nbr = np.zeros(3)\n for n in range(0, 3):\n d_nbr[n] = ((x - nbr_ver[n][0])**2 + (y - nbr_ver[n][1])**2)**0.5\n nearest_ver = nbr_ver[d_nbr.argmax(0)]\n elev_i = nearest_ver[2]\n return elev_i", "def circles_overlapping(x1, y1, x2, y2, r):\n # print(abs((x2-x1)**2 + (y2-y1)**2))\n # print((2*r)**2)\n if (abs((x2-x1)**2 + (y2-y1)**2) > (2*r)**2):\n return False\n else: return True", "def pointfind(plat, plon, lat, lon, pdif = 1):\n\t\n\tfff = 10\n\twhile (fff > 1):\n\t\t\n\t\t#conditions for latitude (lat - 2d array of latitudes)\n\t\tc_lat=(lat>(plat-pdif))&(lat<(plat+pdif))\n\t\t#conditions for longiyude (lon - 2d array of longitudes)\n\t\tc_lon=(lon>(plon-pdif))&(lon<(plon+pdif))\n\t\t\n\t\t#combine both conditions together\n\t\tc_all=c_lat&c_lon\n\t\t\n\t\t#values of the points that fulfil conditions\n\t\tplatf = lat[numpy.nonzero(c_all)]\n\t\tplonf = lon[numpy.nonzero(c_all)]\n\t\t\n\t\t\t\t\n\t\t#indeces of the poin that fulfil conditions \n\t\tg = numpy.nonzero(c_all)\n\t\t\n\t\t\n\t\t#check if we have found uniq solution\n\t\tfff = platf.shape[0]\n\t\t# decrease window to reduce amount of solutions if we have more than one\n\t\t#print(pdif)\n\t\tpdif = pdif-0.001\n\tprint(\"coordinates of the point that fulfil conditions: \"+str(platf)+\" \"+str(plonf))\n\tprint(\"indeces of the point that fulfil conditions: \"+str(g[0])+\" \"+str(g[1]))\n\t\n\treturn(g, platf, plonf)", "def latticepoints(circle_radius, pixel_size):\n\n numlatticepoints = 0\n npixels = int(circle_radius/float(pixel_size))\n for i in range(-npixels, npixels+1, 1):\n for j in range(-npixels, npixels+1, 1):\n if ((i*pixel_size)**2 + (j*pixel_size)**2) <= (np.sqrt(2.*float(npixels*pixel_size)**2))**2:\n #if ((m*pixel_size)**2 + (n*pixel_size)**2) <= npixels**2:\n numlatticepoints = numlatticepoints + 1\n\n return numlatticepoints", "def __findFarestPoint__( self, outPoint ):\n end = outPoint;\n endInside = self.inside( end );\n if endInside: return outPoint;\n start = self.center;\n startInside = self.inside( start );\n \n while( True ):\n if ( utility.euclideanDistSqr( start, end ) <= 4 ):\n return start;\n mid = utility.devide( utility.add( start, end ), 2);\n if self.inside( mid ):\n start = mid;\n else:\n end = mid;", "def free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point 
= sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def challenge2(self):\n # Let's try an octree-type approach\n # For each grid cube we should be able to find whether a nanobot:\n # 1) is not in range (is outside grid cube and not in range of nearest face)\n # 2) is in range of whole cube (all 8 corners are in range)\n # 3) is in range of part of the cube (i.e. not 1 or 2)\n # Root node: figure out extent of whole space\n mins = []\n maxs = []\n for axis in range(3):\n mins.append(min(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n maxs.append(max(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n\n for count in range(len(self.nanobots), 0, -1):\n results = self.search_coord_with_max_nanobots(mins, maxs, [], self.nanobots, count)\n if results and results[0].count >= count:\n break\n\n print(f\"Found {len(results)} octree search results with {results[0].count} nanobots in range.\")\n\n # Find result coord closest to origin\n closest_dist = np.iinfo(np.int32).max\n best_coord = None\n for result in results:\n for corner in itertools.product(*zip(result.mins, result.maxs)):\n d = manhattan_dist(corner, (0, 0, 0))\n if d < closest_dist:\n closest_dist = d\n best_coord = corner\n\n print(f\"Best coord: {best_coord} (dist={manhattan_dist(best_coord, (0, 0, 0))})\")", "def convergentPointCheck(data, velocity):\n import astropy.units as u\n \n parallax = 1000/data.icrs.distance\n ra = data.icrs.ra.to(u.rad)\n dec= data.icrs.dec.to(u.rad)\n pm_ra_cosdec = data.icrs.pm_ra_cosdec*4.74047/parallax\n pm_dec = data.icrs.pm_dec*4.74047/parallax\n\n vpred = vecRot(velocity,'zyx', np.transpose(np.array([-ra, dec, np.zeros(ra.size)]))) # return vr, pm_ra_cosdec, pm_dec\n psi = np.arctan2(vpred[:,2],vpred[:,1]) # angle \n vpred_rot = vecRot(vpred, 'x', -psi)\n vobs = np.transpose([data.icrs.radial_velocity, pm_ra_cosdec, pm_dec])\n vobs_rot = vecRot(vobs, 'x', -psi)\n \n dmu_parallel = vobs_rot[:,1] - vpred_rot[:,1]\n dmu_perpendicular = vobs_rot[:,2] - vpred_rot[:,2]\n \n return dmu_parallel, dmu_perpendicular, psi", "def circleFit( pts ):\n x = pts[:,0] \n y = pts[:,1] \n \n def calc_R(xc, yc):\n \"\"\" calculate the distance of each data points from the center (xc, yc) \"\"\"\n return sqrt((x-xc)**2 + (y-yc)**2)\n\n def f_2b(c):\n \"\"\" calculate the algebraic distance between the 2D points and the mean circle centered at c=(xc, yc) \"\"\"\n Ri = calc_R(*c)\n return Ri - Ri.mean()\n \n def Df_2b(c):\n \"\"\" Jacobian of f_2b\n The axis corresponding to derivatives must be coherent with the col_deriv option of leastsq\"\"\"\n xc, yc = c\n df2b_dc = empty((len(c), x.size)) \n Ri = calc_R(xc, yc)\n df2b_dc[0] = (xc - x)/Ri # dR/dxc\n df2b_dc[1] = (yc - y)/Ri # dR/dyc\n df2b_dc = df2b_dc - df2b_dc.mean(axis=1)[:, newaxis]\n return df2b_dc\n\n center_estimate = np.mean( pts, 0 )\n center_2b, ier = optimize.leastsq( f_2b , center_estimate, Dfun=Df_2b, col_deriv=True)\n \n xc_2b, yc_2b = center_2b\n Ri_2b = calc_R(*center_2b)\n R_2b = Ri_2b.mean()\n residu_2b = np.sum((Ri_2b - R_2b)**2)\n return [xc_2b, yc_2b], R_2b", "def get_interior_points(N=128):\n x1 = sobol.i4_sobol_generate(2, N) - np.array([1, 1])\n x2 = sobol.i4_sobol_generate(2, N) - np.array([1, 0])\n x3 = sobol.i4_sobol_generate(2, 
N) - np.array([0, 1])\n return torch.from_numpy(np.concatenate((x1, x2, x3), 0)).float()", "def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)", "def jarvis_convex_hull(points):\n start_index = np.argmax(points[:, 0]) # Point with the highest y-coordinate\n start_point = points[start_index]\n # result = [start_index[:]]\n result = [start_index]\n added_points = {start_index}\n while True:\n for ref_index, ref_point in enumerate(points):\n exit_ = True\n if ref_index == start_index or ref_index in added_points:\n continue\n\n signs = 0\n threshold = len(points) - 2\n for compare_index, compare_point in enumerate(points):\n if compare_index == ref_index or compare_index == start_index:\n continue\n check = compare(start_point, ref_point, compare_point)\n if abs(check) < 1e-2:\n dist_start_ref = distance(start_point, ref_point)\n dist_start_compare = distance(start_point, compare_point)\n if dist_start_compare > dist_start_ref:\n threshold = threshold + 1\n else:\n threshold = threshold - 1\n continue\n signs = signs + 1 if check > 0 else signs - 1\n\n if abs(signs) < threshold:\n continue\n\n exit_ = False\n result.append(ref_index[:])\n added_points.add(ref_index)\n start_index = ref_index\n break\n\n if exit_:\n return result", "def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return abs(v1.angle - v2.angle) < e", "def distance_great_circle(u, v, r=1.):\n\n lat_1, lon_1 = u\n lat_2, lon_2 = v\n lat_1, lon_1, lat_2, lon_2 = lat_1*np.pi/180, lon_1*np.pi/180, lat_2*np.pi/180, lon_2*np.pi/180\n\n d_lon = np.abs(lon_1 - lon_2)\n\n A = np.power(np.cos(lat_2)*np.sin(d_lon), 2)\n B = np.power(np.cos(lat_1)*np.sin(lat_2) - np.sin(lat_1)*np.cos(lat_2)*np.cos(d_lon), 2)\n C = np.sin(lat_1)*np.sin(lat_2) + np.cos(lat_1)*np.cos(lat_2)*np.cos(d_lon)\n\n return r*np.arctan(np.sqrt(A + B) / C)", "def pos_on_semicircle(x, r, cxy):\n pos = np.sqrt(r ** 2 - (x - cxy[0]) ** 2) + cxy[1]\n\n return pos", "def points_in_circle(c, d):\n if d == 0:\n return set((c,))\n circle = set()\n x, y = (c[0] + d * directions[4][0], c[1] + d * directions[4][1])\n for m in directions:\n for i in range(1, d + 1):\n x, y = x + m[0], y + m[1]\n circle.add((x, y))\n return circle", "def generate_valid_coordinates(radius, dist_apart):\n\n vtx_x = random.randrange(dist_apart, int(WINDOW_WIDTH - radius), dist_apart);\n vtx_y = random.randrange(dist_apart, int(WINDOW_HEIGHT), dist_apart);\n\n count = 0\n while any((abs(vtx[\"x\"] - vtx_x) <= dist_apart) for vtx in VERTICES) and count < 1000:\n vtx_x = random.randrange(dist_apart, int(WINDOW_WIDTH - dist_apart), dist_apart);\n count += 1\n\n count = 0\n while any((abs(vtx[\"y\"] - vtx_y) <= dist_apart) for vtx in VERTICES) and count < 1000:\n vtx_y = random.randrange(dist_apart, int(WINDOW_HEIGHT), dist_apart);\n count += 1\n return vtx_x, vtx_y", "def fit_galaxy(self, ypos, xpos, r_in, r_out = 0):\r\n count_out = []\r\n count_in = []\r\n for j, i in product(np.arange(ypos - (r_out + r_in), ypos + r_out + r_in + 1),np.arange(xpos - (r_out + r_in), xpos + 1 + r_out + r_in)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r_in ** 2 and 0<= j <= self.shapes[0] - 1 and 0<= i <= self.shapes[1] - 1: # make sure points are in a circle\r\n j = int(j)\r\n i = int(i)\r\n if self.raw_image_data[j,i] * self.masked[j,i] == self.raw_image_data[j,i]:\r\n count_in.append(self.raw_image_data[j,i])\r\n self.masked[j,i] = 0 # self.mask_region runs the for loop 
again\r\n if r_in ** 2 < (j - ypos) ** 2 + (i - xpos) ** 2 <= (r_in + r_out)**2 and 0<= j <= (self.shapes[0] - 1) and 0<= i <= self.shapes[1] - 1: # in the outer ring\r\n j = int(j)\r\n i = int(i)\r\n if self.raw_image_data[j,i] * self.masked[j,i] == self.raw_image_data[j,i]: \r\n count_out.append(self.raw_image_data[j][i]) \r\n self.masked[j,i]\r\n return count_in, count_out", "def points_on_circumference(center=(0, 0), r=50, n=100):\n\treturn [\n (\n center[0]+(cos(2 * pi / n * x) * r), \n center[1] + (sin(2 * pi / n * x) * r) \n\n ) for x in range(0, n + 1)]", "def addPoint(self, p):\n p = np.asarray(p)\n idx = len(self.coords)\n # print(\"coords[\", idx,\"] ->\",p)\n self.coords.append(p)\n\n # Search the triangle(s) whose circumcircle contains p\n bad_triangles = []\n for T in self.triangles:\n # Choose one method: inCircleRobust(T, p) or inCircleFast(T, p)\n if self.inCircleFast(T, p):\n bad_triangles.append(T)\n\n # Find the CCW boundary (star shape) of the bad triangles,\n # expressed as a list of edges (point pairs) and the opposite\n # triangle to each edge.\n boundary = []\n # Choose a \"random\" triangle and edge\n T = bad_triangles[0]\n edge = 0\n # get the opposite triangle of this edge\n while True:\n # Check if edge of triangle T is on the boundary...\n # if opposite triangle of this edge is external to the list\n tri_op = self.triangles[T][edge]\n if tri_op not in bad_triangles:\n # Insert edge and external triangle into boundary list\n boundary.append((T[(edge+1) % 3], T[(edge-1) % 3], tri_op))\n\n # Move to next CCW edge in this triangle\n edge = (edge + 1) % 3\n\n # Check if boundary is a closed loop\n if boundary[0][0] == boundary[-1][1]:\n break\n else:\n # Move to next CCW edge in opposite triangle\n edge = (self.triangles[tri_op].index(T) + 1) % 3\n T = tri_op\n\n # Remove triangles too near of point p of our solution\n for T in bad_triangles:\n del self.triangles[T]\n del self.circles[T]\n\n # Retriangle the hole left by bad_triangles\n new_triangles = []\n for (e0, e1, tri_op) in boundary:\n # Create a new triangle using point p and edge extremes\n T = (idx, e0, e1)\n\n # Store circumcenter and circumradius of the triangle\n self.circles[T] = self.circumcenter(T)\n\n # Set opposite triangle of the edge as neighbour of T\n self.triangles[T] = [tri_op, None, None]\n\n # Try to set T as neighbour of the opposite triangle\n if tri_op:\n # search the neighbour of tri_op that use edge (e1, e0)\n for i, neigh in enumerate(self.triangles[tri_op]):\n if neigh:\n if e1 in neigh and e0 in neigh:\n # change link to use our new triangle\n self.triangles[tri_op][i] = T\n\n # Add triangle to a temporal list\n new_triangles.append(T)\n\n # Link the new triangles each another\n N = len(new_triangles)\n for i, T in enumerate(new_triangles):\n self.triangles[T][1] = new_triangles[(i+1) % N] # next\n self.triangles[T][2] = new_triangles[(i-1) % N] # previous", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def radon_ellipses(N,theta_vec, E, tvec_set=None, circle=False):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*N/2\n E[:,1] = E[:,1]*N/2\n E[:,2] = E[:,2]*N/2\n E[:,3] = E[:,3]*N/2\n E[:,4] = 
E[:,4]*math.pi/180\n \n [t_vec, grid_t, grid_theta] = build_t_theta_pixel(N, theta_vec, tvec_set=tvec_set, circle =circle);\n\n (nrowE,ncolE) = E.shape;\n tmp = np.zeros((nrowE,len(grid_theta)))\n for i in range(nrowE):\n grid_theta_new = grid_theta - E[i,4]\n x_new = (E[i,2]*np.cos(grid_theta)+E[i,3]*np.sin(grid_theta))\n y_new = (-E[i,2]*np.sin(grid_theta)+E[i,3]*np.cos(grid_theta))\n grid_t_new = (grid_t -x_new)/E[i,1]\n\n v1 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2 - grid_t_new**2\n cond = v1;\n v2 = np.zeros((v1.shape[0],1))\n for j in range (len(grid_theta)):\n if cond[j] > 0:\n v2[j]=1\n else:\n v2[j]=0\n #endif\n #endfor\n v3 = np.sqrt(v1*v2);\n v4 = np.sin(grid_theta_new)**2+((E[i,0]/E[i,1])**2)*np.cos(grid_theta_new)**2\n tmp[i,:] = np.transpose( 2*E[i,0]*E[i,5]*(v3/v4) )\n #endfor\n radvec = np.sum(tmp,axis = 0);\n analytical_sinogram = np.transpose(np.reshape(radvec,(len(theta_vec),len(t_vec))))\n return analytical_sinogram", "def find_gate_posts(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n kernel = np.ones((5, 5), np.uint8)\n\n # cm_image = cv2.erode(cm_image, kernel, iterations=1)\n kernel = np.ones((5, 5), np.uint8)\n cm_image = cv2.dilate(cm_image, kernel, iterations=3)\n kernel = np.ones((4, 4), np.uint8)\n cm_image = cv2.erode(cm_image, kernel, iterations=1)\n\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circles = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n cm_circles = list(filter(lambda x: (cv2.contourArea(x) > 200\n and cv2.contourArea(x) < 5000),\n cm_circles))\n cm_circles = sorted(cm_circles,\n key=lambda x: (arc_circ(x)),\n reverse=False)\n\n cm_circles = list(filter(lambda x: (cv2.arcLength(x, True)**2/(4\n * math.pi*cv2.contourArea(x)) > 2.5), cm_circles))\n\n if len(cm_circles) < 1:\n print(\"Not enough circles found\")\n return None\n\n filtered_circles = cm_circles[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n circle_positions.append((cX, cY, arc_circ(circle), cv2.arcLength(\n circle, True)**2/(4*math.pi*cv2.contourArea(circle))))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_gate_posts\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions", "def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False", "def search(x, pentagon_list) -> bool:\n l = 0\n r = len(pentagon_list) - 1\n\n while l <= r:\n m = (l + r) // 2\n if x == pentagon_list[m]:\n return True\n\n elif x < pentagon_list[m]:\n r = m - 1\n\n else:\n l = m + 1\n \n return False" ]
[ "0.6597289", "0.6597289", "0.65620327", "0.61580426", "0.6140572", "0.60876256", "0.6040511", "0.59974813", "0.59635466", "0.59572154", "0.5936011", "0.58943766", "0.58940417", "0.586778", "0.5863292", "0.58565116", "0.5846335", "0.5845742", "0.5843002", "0.582725", "0.5824984", "0.5819978", "0.58025765", "0.5772186", "0.5768249", "0.5754432", "0.57453966", "0.5740391", "0.57373595", "0.5734325", "0.57166433", "0.5714772", "0.5704557", "0.5701083", "0.5696862", "0.5688756", "0.5665007", "0.5663048", "0.56610763", "0.56515676", "0.5647515", "0.5638304", "0.5632944", "0.5625783", "0.5624675", "0.56202507", "0.56202507", "0.5615464", "0.56092465", "0.56032526", "0.55972946", "0.5596196", "0.5592077", "0.55822325", "0.5575725", "0.5569378", "0.5569378", "0.55693126", "0.5566772", "0.556327", "0.5558923", "0.5549995", "0.5544038", "0.55403036", "0.552873", "0.55254346", "0.55230117", "0.5521597", "0.55161977", "0.551543", "0.55149907", "0.5510471", "0.5509841", "0.5507054", "0.5502806", "0.55026567", "0.55002654", "0.5500204", "0.54976743", "0.54943293", "0.54920596", "0.54881316", "0.5486845", "0.54847366", "0.54846853", "0.5484098", "0.5483792", "0.5481689", "0.54765075", "0.54709995", "0.5467462", "0.54610926", "0.5460924", "0.5456235", "0.5455372", "0.5452397", "0.5449697", "0.5444595", "0.54387844", "0.54371774", "0.54370606" ]
0.0
-1
Initialisation, where df is a pandas DataFrame, var is the name of the column to study, and init_pars is a dictionary with initial values
def __init__(self, df, Z, Q, H, T, R, d, c, var='dep_var', secondvar=None, method='iterate'):
    self.method = method
    # Initialize all the system matrices as attributes of the class
    self.Z = Z
    self.Q = Q
    self.H = H
    self.T = T
    self.R = R
    self.d = d
    self.c = c
    # same for parameters and objects
    self.df = df
    self.var = var
    # The data itself
    self.y = np.array(df[var].values.flatten())
    self.times = df.index
    # Options for the minimizer
    self.options = {'eps': 1e-09, 'maxiter': 200}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,df, init_pars, var='dep_var', var_name='Volume of Nile'):\n self.df = df\n self.var = var\n self.var_name = var_name\n self.y = np.array(df[var].values.flatten())\n self.times = df.index\n self.pardict = init_pars\n self.options = {'eps':1e-09,\n 'maxiter':2000}", "def init(param):\n MODULE_HELPER.check_parameter(param, key='featureCount_exec', dtype=str)\n MODULE_HELPER.check_parameter(param, key='featureCount_t', dtype=str)\n MODULE_HELPER.check_parameter(param, key='featureCount_id', dtype=str)\n MODULE_HELPER.check_parameter(param, key='featureCount_by_meta', dtype=bool)\n MODULE_HELPER.check_parameter(param, key='Rscript_exec', dtype=str)\n\n #deriving the stranded parameter\n if param['stranded'] == 'reverse':\n param['featureCount_s'] = '2'\n elif param['stranded'] == 'yes':\n param['featureCount_s'] = '1'\n else:\n param['featureCount_s'] = '0'", "def setup_initial_values(self, init_params={}):\n for row in self.panel[1:]:\n for widget in row:\n if widget.name in init_params:\n widget.value = init_params[widget.name]", "def __init__(self, df):\n self.original_data = df\n self.preprocessed_data = pd.DataFrame()", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def __init__(self, df):\n self.df = df", "def init_model_df(self):\n\n self.model_df = pd.DataFrame(columns=self.query_df[self.column_name].unique())\n\n # add _TIMESTAMP column to dataframe\n self.model_df[self.column_index] = self.min_increments\n\n # set row index to _TIMESTAMP\n self.model_df.set_index(self.column_index, inplace=True)", "def initialize_variables(self):\n self.sess.run(self.init)", "def reset_parameters_lecun(self, param_init=0.1):\n logger.info('===== Initialize %s with lecun style =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_lecun_normal(n, p, param_init)", "def _parse_var_initsol(self,varname) :\n\t\tinitsol = self.ss.constraint.initsol\n\t\tparams = getattr(initsol,varname)\n\t\tnvars = len(self.ss.variables) # num of variables\n\n\t\tif varname in ('alpha','beta') : \n\t\t\tself.initsol[varname] = np.ones(nvars)\n\t\t\tkeys = params.keys()\n\t\t\tself.initsol[varname][:] = params['defaultInitialValue']\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+',key)\t:\n\t\t\t\t\tidx = int(key.split('_')[1])\n\t\t\t\t\tself.initsol[varname][idx-1] = params[key]\n\t\telif varname in ('g','h') :\n\t\t\tself.initsol[varname] = np.ones([nvars,nvars])\n\t\t\tkeys = params.keys()\n\t\t\tself.initsol[varname][:] = params['defaultInitialValue']\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+_\\d+',key)\t:\n\t\t\t\t\tidr,idc = map(int,(key.split('_')[1:3]))\n\t\t\t\t\tself.initsol[varname][idr-1][idc-1] = params[key]\n\t\t\n\t\telse :\n\t\t\tlogging.error(\"Unrecognized varname %s quitting..\" \\\n\t\t\t%(varname))\n\t\t\tsys.exit(1)", "def __init__(self, df):\n self.data = df", "def prepare_estimation(model_params_init_file_name, lower, upper):\n\n # Read in data and init file sources\n model_params_df = pd.read_pickle(model_params_init_file_name)\n model_params_df[\"lower\"] = lower\n model_params_df[\"upper\"] = upper\n\n return model_params_df", "def _prepare_initial_params_df(self):\r\n self.logger.info(\"Preparing initial params\")\r\n\r\n # First we must normalize the weights.\r\n #\r\n total_weights = self.optimizer_config.initial_points_pareto_weight \\\r\n + 
self.optimizer_config.initial_points_cached_good_params_weight \\\r\n + self.optimizer_config.initial_points_random_params_weight\r\n assert total_weights > 0\r\n\r\n initial_points_pareto_fraction = self.optimizer_config.initial_points_pareto_weight / total_weights\r\n initial_points_cached_good_fraction = self.optimizer_config.initial_points_cached_good_params_weight / total_weights\r\n\r\n num_initial_points = self.optimizer_config.num_starting_configs\r\n\r\n # Let's start with the pareto points.\r\n #\r\n pareto_params_df = self.pareto_frontier.params_for_pareto_df\r\n if pareto_params_df is None:\r\n pareto_params_df = pd.DataFrame()\r\n\r\n num_desired_pareto_points = math.floor(num_initial_points * initial_points_pareto_fraction)\r\n num_existing_pareto_points = len(pareto_params_df.index)\r\n\r\n if num_existing_pareto_points > 0:\r\n if num_desired_pareto_points < num_existing_pareto_points:\r\n pareto_params_df = pareto_params_df.sample(n=num_desired_pareto_points, replace=False, axis='index')\r\n self.logger.info(f\"Using {len(pareto_params_df.index)} of {num_existing_pareto_points} pareto points as starting configs.\")\r\n else:\r\n self.logger.info(\"There are no existing pareto points.\")\r\n\r\n # Now let's take the cached good points.\r\n #\r\n num_desired_cached_good_points = math.floor(num_initial_points * initial_points_cached_good_fraction)\r\n cached_params_df = pd.DataFrame()\r\n if self._good_configs_from_the_past_invocations_df is not None:\r\n if num_desired_cached_good_points < len(self._good_configs_from_the_past_invocations_df.index):\r\n cached_params_df = self._good_configs_from_the_past_invocations_df.sample(n=num_desired_cached_good_points, replace=False, axis='index')\r\n else:\r\n cached_params_df = self._good_configs_from_the_past_invocations_df.copy(deep=True)\r\n self.logger.info(\r\n f\"Using {len(cached_params_df.index)} out of {len(self._good_configs_from_the_past_invocations_df.index)} \"\r\n f\"cached good configs as starting configs\"\r\n )\r\n else:\r\n self.logger.info(\"No cached params are available.\")\r\n\r\n # Finally, let's generate the random points.\r\n #\r\n num_desired_random_points = num_initial_points - len(pareto_params_df.index) - len(cached_params_df.index)\r\n random_params_df = self.optimization_problem.parameter_space.random_dataframe(num_samples=num_desired_random_points)\r\n self.logger.info(f\"Using {len(random_params_df.index)} random points as starting configs.\")\r\n\r\n initial_params_df = pd.concat([pareto_params_df, cached_params_df, random_params_df])\r\n initial_params_df.reset_index(drop=True, inplace=True)\r\n return initial_params_df", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def init_parameters(\n self, init_fun: Optional[NNInitFunc] = None, *, seed: Optional[PRNGKeyT] = None\n ):\n if init_fun is None:\n init_fun = normal(stddev=0.01)\n\n rng = nkjax.PRNGSeq(nkjax.PRNGKey(seed))\n\n def new_pars(par):\n return jnp.asarray(\n init_fun(rng.take(1)[0], shape=par.shape, dtype=par.dtype),\n dtype=par.dtype,\n )\n\n self.parameters = jax.tree_map(new_pars, self.parameters)", "def __init__(self, df):\n self.df = df\n if 'time' in self.df.columns:\n self.time = True\n self.df = self.df.sort('time')\n else:\n self.time = False\n if 'condition' not in self.df.columns:\n self.df['condition'] = 'all'", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def initial_parameters(ship_data: dict) -> dict:\n\n mask = 
df_parameters[\"brix_lambda\"].notnull()\n df_parameters.loc[mask, \"brix_prime\"] = df_parameters.loc[mask].apply(\n calculate_prime, ship_parameters=ship_data, axis=1\n )\n\n df_parameters[\"prime\"] = df_parameters[\"brix_prime\"]\n\n df_parameters.loc[\"Ydelta\", \"prime\"] = 0.003 # Just guessing\n df_parameters.loc[\"Ndelta\", \"prime\"] = (\n -df_parameters.loc[\"Ydelta\", \"prime\"] / 2\n ) # Just guessing\n\n df_parameters.loc[\"Nu\", \"prime\"] = 0\n df_parameters.loc[\"Nur\", \"prime\"] = 0\n # df_parameters.loc[\"Xdelta\", \"prime\"] = -0.001\n df_parameters.loc[\"Xr\", \"prime\"] = 0\n df_parameters.loc[\"Xrr\", \"prime\"] = 0.000\n df_parameters.loc[\"Xu\", \"prime\"] = 0\n df_parameters.loc[\"Xuu\", \"prime\"] = 0\n df_parameters.loc[\"Xv\", \"prime\"] = 0\n df_parameters.loc[\"Xvr\", \"prime\"] = 0\n df_parameters.loc[\"Yu\", \"prime\"] = 0\n df_parameters.loc[\"Yur\", \"prime\"] = 0.00\n\n df_parameters.loc[\"Nuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Xthrust\", \"prime\"] = 1.0\n df_parameters.loc[\"Yrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xvdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yvdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Nvdeltadelta\", \"prime\"] = 0.0\n\n df_parameters.loc[\"Ythrustdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nthrustdelta\", \"prime\"] = 0.0\n\n parameters = df_parameters[\"prime\"].dropna().to_dict()\n\n return parameters", "def init_columns(cycle_df, datatype):\n (cycle_ind_col, data_point_col, volt_col, curr_col, dis_cap_col, char_cap_col, charge_or_discharge) = col_variables(datatype)\n assert type(cycle_df) == pd.DataFrame\n assert volt_col in cycle_df.columns\n assert dis_cap_col in cycle_df.columns\n assert char_cap_col in cycle_df.columns\n\n cycle_df = cycle_df.reset_index(drop=True)\n cycle_df['dV'] = None\n cycle_df['Discharge_dQ'] = None\n cycle_df['Charge_dQ'] = None\n #cycle_df['Discharge_dQ/dV'] = None\n #cycle_df['Charge_dQ/dV'] = None\n return cycle_df", "def sa_pandas_init(self):\n\n lca = self.lca\n\n ind_activity = 0\n ind_product = 1\n ind_biosphere = 2\n\n cols = []\n rows = []\n inputs = []\n\n #All exchanges in inputs\n for input_ in self.inputs:\n\n if input_ == 'biosphere':\n continue\n\n for i in self.inputs_dict[input_]['tech_params']:\n act = lca.reverse_dict() [ind_activity] [i['col']]\n prod = lca.reverse_dict() [ind_product] [i['row']]\n cols += [ bw.get_activity(act) ['name'] ]\n rows += [ bw.get_activity(prod)['name'] ]\n inputs += [input_]\n for j in self.inputs_dict[input_]['bio_params']:\n act = lca.reverse_dict() [ind_activity] [j['col']]\n bio = lca.reverse_dict() [ind_biosphere] [j['row']]\n cols += [ bw.get_activity(act) ['name'] ]\n rows += [ bw.get_activity(prod)['name'] ]\n inputs += [input_]\n\n if self.parameters != None:\n # All parameters\n parameters_names_list = [name for name in self.parameters_array['name']]\n cols += parameters_names_list\n rows += parameters_names_list\n inputs += ['Parameters']*len(parameters_names_list)\n\n df = pd.DataFrame([inputs, rows, cols], index = ['Inputs', 'Products or flows', 'Activities'])\n df = df.transpose()\n\n self.sensitivity_indices_df = df", "def init_vars(self):\n # type: () -> None\n raise NotImplementedError", "def init_params(self):\n blah", "def _autoInitPars(self):\n for p in self._pars:\n setattr(self,p,self.defaultparval)", "def __init__(self, df, df_test, 
n_days, length, style) :\n self.n_days = n_days\n self.length = length\n self.df = df\n self.df_test = df_test\n self.features = len(df.columns) - 1\n self.style = style\n self.df_true = df_test.copy()", "def __init__(self, df, df_test, n_days, length, style) :\n self.n_days = n_days\n self.length = length\n self.df = df\n self.df_test = df_test\n self.features = len(df.columns) - 1\n self.style = style\n self.df_true = df_test.copy()", "def _data_dep_init(self, inputs):\n from tensorflow.python.ops.nn import moments\n from tensorflow.python.ops.math_ops import sqrt\n\n with variable_scope.variable_scope('data_dep_init'):\n # Generate data dependent init values\n activation = self.layer.activation\n self.layer.activation = None\n x_init = self.layer.call(inputs)\n m_init, v_init = moments(x_init, self.norm_axes)\n scale_init = 1. / sqrt(v_init + 1e-10)\n\n # Assign data dependent init values\n self.layer.g = self.layer.g * scale_init\n self.layer.bias = (-m_init * scale_init)\n self.layer.activation = activation\n self.initialized = True", "def init(*args):\n global dataset\n dataset = args[0]", "def __init__(self, df):\n self.df = df\n self._set_hash()", "def _init_param_source():\n\n amp_sine = 1\n amp_rand = 1\n rollover = 100 # length of data to display\n update_delay = 100 # time between delays of update in ms\n param_source = ColumnDataSource(dict(\n amp_sine=[amp_sine],\n amp_rand=[amp_rand],\n rollover=[rollover],\n update_delay=[update_delay]\n ))\n return param_source", "def _optimizer_state_init(opt_states):\n prefix_list = [\"moments\", \"accum\", \"moment1\", \"moment2\", \"lamb_m\", \"lamb_v\", \"mean_grad\",\n \"mean_square\", \"prev\"]\n for opt_param in opt_states:\n prefix = opt_param.name[:opt_param.name.find(\".\")]\n if opt_param.has_init and (prefix in prefix_list or opt_param.name == \"global_step\"):\n opt_param.init_data()", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_uniform(n, p, param_init)", "def parameter_initialization(self):\n dictsize = settings.PARS.get('numBases')\n numClass = self.train_labels.shape[0] # number of objects\n Dinit = np.empty((self.train_feats.shape[0], 0)) # for C-Ksvd and D-Ksvd\n dictLabel = np.empty((numClass, 0), dtype=np.int)\n numPerClass = dictsize//numClass\n param1 = {\n 'mode': 2,\n 'K': settings.PARS.get('numBases'), # size of the dictionary\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'iter': settings.PARS.get('iterationini')\n }\n param2 = {\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'mode': 2\n }\n\n for classid in range(numClass):\n col_ids = np.array(np.nonzero(self.train_labels[classid, :] == 1)).ravel()\n # ensure no zero data elements are chosen\n data_ids = np.array(np.nonzero(np.sum(self.train_feats[:, col_ids]**2, axis=0) > 1e-6)).ravel()\n\n # Raising an error if any zero lement is found\n if col_ids.shape[0] != data_ids.shape[0]:\n raise DatasetZeroElementFound\n\n # Initilization for LC-KSVD (perform KSVD in each class)\n Dpart = self.train_feats[:, col_ids[np.random.choice(data_ids, numPerClass, replace=False)]]\n param1['D'] = Dpart # initial dictionary\n Dpart = trainDL(self.train_feats[:, col_ids[data_ids]], **param1)\n Dinit = np.c_[Dinit, Dpart]\n labelvector = np.zeros((numClass, 1), dtype=np.int)\n labelvector[classid] = 1\n dictLabel = np.c_[dictLabel, np.tile(labelvector, (1, numPerClass))]\n\n param1['D'] = 
np.asfortranarray(Dinit) # initial dictionary\n # RuntimeError: matrix arg 10 must be a 2d double Fortran Array\n self.train_feats = self.train_feats if np.isfortran(self.train_feats) else np.asfortranarray(self.train_feats)\n Dinit = trainDL(self.train_feats, **param1)\n Xinit = lasso(self.train_feats, Dinit, **param2)\n\n # learning linear classifier parameters\n tmp = np.linalg.inv(Xinit@Xinit.T+np.eye(*(Xinit@Xinit.T).shape))@Xinit\n Winit = tmp@self.train_labels.T\n Winit = Winit.T\n\n Q = np.zeros((dictsize, self.train_feats.shape[1])) # energy matrix\n\n for frameid in range(self.train_feats.shape[1]):\n label_training = self.train_labels[:, frameid]\n maxid1 = label_training.argmax(0)\n\n for itemid in range(Dinit.shape[1]):\n label_item = dictLabel[:, itemid]\n maxid2 = label_item.argmax(0)\n\n if maxid1 == maxid2:\n Q[itemid, frameid] = 1\n\n Tinit = tmp@Q.T\n Tinit = Tinit.T\n\n return Dinit, Winit, Tinit, Q", "def init_locals(self):\n pass", "def __init__(self, df):\n # Transpose dataframe data to make it column oriented\n self.data = df.values.T\n\n assert df.index.tzinfo is not None\n self.tz = df.index.tzinfo\n self.index = df.index.tz_localize(None)\n\n\n self.cols = {}\n for i, col_name in enumerate(df.columns):\n self.cols[col_name] = i", "def __init__(self):\n self.current_year = datetime.date.today().year\n self.random_column_mappings = collections.defaultdict(dict)", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if p.dim() == 1:\n nn.init.constant_(p, 0.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', 0.0))\n elif p.dim() == 2:\n nn.init.uniform_(p, a=-param_init, b=param_init)\n logger.info('Initialize %s with %s / %.3f' % (n, 'uniform', param_init))\n else:\n raise ValueError(n)", "def _initialize_df(self, df):\n df['values'] = (self.tc.instrument_returns['cumulative'] *\n self.tc.starting_cash).mul(self.target_weights, axis=1).values * (1 - self.tc.commission)\n df['allocations'] = self.df['values'].div(df['values'].sum(axis=1), axis=0)\n df['returns'] = (df['values'].sum(axis=1)).pct_change(1).fillna(0)", "def __init__(self, df):\n self.original_data = df\n self.cleaned_data = pd.DataFrame()", "def __init__(self, df,\n target_cols,\n problem_type,\n num_folds = 3,\n shuffle = False,\n random_state = 0):\n\n self.dataframe = df\n self.target_cols = target_cols\n self.num_targets = len(target_cols)\n self.problem_type = problem_type\n self.shuffle = shuffle\n self.num_folds = num_folds\n self.random_state = random_state\n\n if self.shuffle:\n self.dataframe = self.dataframe.sample(frac = 1,\n random_state = self.random_state).reset_index(drop = True)\n\n self.dataframe[\"kfold\"] = -1", "def __init__(\n self,\n x,\n effect_size='EFFECTSIZE',\n p='P',\n snp='SNP',\n gene='GENE',\n annotation=None,\n logp=True\n ):\n\n # checking the validity of the arguments\n\n # Make sure you have effect_size and p columns and that they are of\n # numeric type\n if effect_size not in x.columns.values:\n raise KeyError(\"Column %s not found in 'x' data.frame\"\n % effect_size)\n else:\n if not is_numeric_dtype(x[effect_size].dtype):\n raise TypeError(\"%s column should be numeric. Do you have \"\n \"'X', 'Y', 'MT', etc? 
If so change to \"\n \"numbers and try again.\" % effect_size)\n\n if p not in x.columns.values:\n raise KeyError(\"Column %s not found in 'x' data.frame\" % p)\n else:\n if not is_numeric_dtype(x[p].dtype):\n raise TypeError(\"%s column should be numeric type\" % p)\n else:\n if (x[p] < 0).any():\n raise ValueError(\"Negative p-values found.\"\n \" These must be removed.\")\n if (x[p] > 1).any():\n raise ValueError(\"P-values greater than 1 found. \"\n \"These must be removed.\")\n if np.isnan(x[p]).any():\n raise ValueError(\"NaN p-values found. These must be \"\n \"removed\")\n\n # Create a new DataFrame with columns named after effect_size and p.\n self.data = pd.DataFrame(data=x[[effect_size, p]])\n\n if snp is not None:\n if snp not in x.columns.values:\n # Warn if you don't have a snp column\n raise KeyError(\n \"snp argument specified as %s but column not found in \"\n \"'x' data.frame\" % snp)\n else:\n # If the input DataFrame has a snp column, add it to the new\n # DataFrame\n self.data[snp] = x[snp]\n\n if gene is not None:\n if gene not in x.columns.values:\n # Warn if you don't have a gene column\n raise KeyError(\n \"gene argument specified as %s but column not found in \"\n \"'x' data.frame\" % gene)\n else:\n # If the input DataFrame has a gene column, add it to the new\n # DataFrame\n self.data[gene] = x[gene]\n\n if annotation is not None:\n if annotation not in x.columns.values:\n # Warn if you don't have an annotation column\n raise KeyError(\n \"annotation argument specified as %s but column not \"\n \"found in 'x' data.frame\" % annotation\n )\n else:\n # If the input DataFrame has a gene column, add it to the new\n # DataFrame\n self.data[annotation] = x[annotation]\n\n self.xlabel = \"Effect Size\"\n self.ticks = []\n self.ticksLabels = []\n self.effectSize = effect_size\n self.pName = p\n self.snpName = snp\n self.geneName = gene\n self.annotationName = annotation\n self.logp = logp", "def initialize_(x, spec):\n activate(x, spec, lookup=[nn.init])", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def initialize(self, algo, verbosity=0):\n if isinstance(self.source, pd.DataFrame):\n data = self.source\n else:\n if verbosity > 0:\n print(f\"{self.name}: Reading file {self.source}\")\n data = PandasFileHelper.read_file(self.source, **self._rpars)\n\n if self._trbs is None:\n if self._col_i is not None and self._col_t is None:\n data.reset_index(inplace=True)\n elif self._col_i is None and self._col_t is not None:\n tnames = algo.farm.turbine_names\n inds = [tnames.index(name) for name in data[self._col_t]]\n data[FC.TURBINE] = inds\n self._col_i = FC.TURBINE\n else:\n raise KeyError(\n f\"{self.name}: Please either specify 'col_tinds' or 'col_tnames'\"\n )\n self._trbs = data[self._col_i].to_numpy()\n n_trbs = len(self._trbs)\n\n self._rcols = []\n for v in self._rvars:\n col_vmin = f\"{v}_min\"\n col_vmin = self._colmap.get(col_vmin, col_vmin)\n if col_vmin not in data.columns:\n raise KeyError(\n f\"{self.name}: Missing column '{col_vmin}', maybe add it to 'colmap'?\"\n )\n\n 
col_vmax = f\"{v}_max\"\n col_vmax = self._colmap.get(col_vmax, col_vmax)\n if col_vmax not in data.columns:\n raise KeyError(\n f\"{self.name}: Missing column '{col_vmax}', maybe add it to 'colmap'?\"\n )\n\n self._rcols += [col_vmin, col_vmax]\n\n self._tcols = []\n for v in self._tvars:\n col = self._colmap.get(v, v)\n if col not in data.columns:\n raise KeyError(\n f\"{self.name}: Missing column '{col}', maybe add it to 'colmap'?\"\n )\n self._tcols.append(col)\n\n n_rvars = len(self._rvars)\n self._rdata = data[self._rcols].to_numpy().reshape(n_trbs, n_rvars, 2)\n self._tdata = data[self._tcols].to_numpy()\n\n for vi, v in enumerate(self._rvars):\n if v in self._perds:\n self._rdata[:, vi] = np.mod(self._rdata[:, vi], self._perds[v])\n\n return super().initialize(algo, verbosity)", "def create_initial_parameters(self):\n update_nested_dictionary(\n self.settings,\n {self.highest_lookup: {\n self.highest_sublookup: self.kw\n }})", "def __init__(self, connection_loc, tags_tsdata,\n dummy_var_no):\n self.init_system(connection_loc, tags_tsdata)\n self.removedummyvars(dummy_var_no)\n self.addforwardscale()\n self.addbackwardscale()", "def initialize_df(scenario_index,scenarios_nums):\n df = pd.DataFrame(index=scenarios_nums)\n df.index.name = scenario_index\n return df", "def par_fit(init_file):\n check_presence_init(init_file)\n\n dict_ = read(init_file)\n np.random.seed(dict_[\"SIMULATION\"][\"seed\"])\n\n # We perform some basic consistency checks regarding the user's request.\n check_presence_estimation_dataset(dict_)\n # check_initialization_dict2(dict_)\n # check_init_file(dict_)\n\n # Distribute initialization information.\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n num_treated = dict_[\"AUX\"][\"num_covars_treated\"]\n num_untreated = num_treated + dict_[\"AUX\"][\"num_covars_untreated\"]\n\n _, X1, X0, Z1, Z0, Y1, Y0 = process_data(data, dict_)\n\n if dict_[\"ESTIMATION\"][\"maxiter\"] == 0:\n option = \"init\"\n else:\n option = dict_[\"ESTIMATION\"][\"start\"]\n\n # Read data frame\n\n # define starting values\n x0 = start_values(dict_, data, option)\n opts, method = optimizer_options(dict_)\n dict_[\"AUX\"][\"criteria\"] = calculate_criteria(dict_, X1, X0, Z1, Z0, Y1, Y0, x0)\n dict_[\"AUX\"][\"starting_values\"] = backward_transformation(x0)\n rslt_dict = bfgs_dict()\n if opts[\"maxiter\"] == 0:\n rslt = adjust_output(None, dict_, x0, X1, X0, Z1, Z0, Y1, Y0, rslt_dict)\n else:\n opt_rslt = minimize(\n minimizing_interface,\n x0,\n args=(dict_, X1, X0, Z1, Z0, Y1, Y0, num_treated, num_untreated, rslt_dict),\n method=method,\n options=opts,\n )\n rslt = adjust_output(\n opt_rslt, dict_, opt_rslt[\"x\"], X1, X0, Z1, Z0, Y1, Y0, rslt_dict\n )\n # Print Output files\n print_logfile(dict_, rslt)\n\n if \"comparison\" in dict_[\"ESTIMATION\"].keys():\n if dict_[\"ESTIMATION\"][\"comparison\"] == 0:\n pass\n else:\n write_comparison(data, rslt)\n else:\n write_comparison(data, rslt)\n\n return rslt", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n 
self.columns = table.columns\n self._data = table.data", "def __init__(__self__, *,\n column_name: Optional[str] = None,\n identifier: Optional[str] = None):\n if column_name is not None:\n pulumi.set(__self__, \"column_name\", column_name)\n if identifier is not None:\n pulumi.set(__self__, \"identifier\", identifier)", "def __init__(self,columns_to_fix=[],rows_to_scan='all',keep_dummies=False):\n self.columns_to_fix = columns_to_fix\n self.rows_to_scan = rows_to_scan\n self.keep_dummies = keep_dummies", "def __init__(self, column_id='', data_type='', data_format='',\n constant_value=''):\n self.column_id = column_id\n self.data_type = data_type\n self.data_format = data_format\n self.constant_value = constant_value", "def _setup_dataframe(self, serie, metadata=None):\n header = self.get_data_header(serie, dataset='cnv')\n df = self.get_data_in_frame(serie, header, dataset='cnv')\n df = self.df_handler.map_column_names_of_dataframe(df)\n\n return df", "def __init__(self, df):\n self._binarized_df = None\n self.__schema__ = 'viNet'\n self._confidence = viNetDataframeColumn.confidence.value\n self._predicted = viNetDataframeColumn.predicted.value\n self._groundtruth = viNetDataframeColumn.groundtruth.value\n\n self._validate(df)\n self._dataframe = df\n self._customer = None\n self._windfarm = None\n self._tag = 'Unknown Tag'\n self._config = 'Unknown Config'", "def _init_node_parm(self, key):\n try:\n wf_data_conf = WorkflowDataConfFrame(key)\n self.data_conf = wf_data_conf.conf\n except Exception as e:\n raise Exception(\"dataconf_node_fame._init_node_parm Initializing Error : \" +str(e))", "def _compute_variables(df: EDAFrame, cfg: Config) -> Dict[str, Any]:\n data: Dict[str, Any] = {}\n # variables\n if cfg.variables.enable:\n for col in df.columns:\n try:\n dtype = df.get_eda_dtype(col)\n # Since it will throw error if a numerical column is all-nan,\n # we transform it to categorical column.\n # We also transform to categorical for small cardinality numerical column.\n if df.get_missing_cnt(col) == df.shape[0]:\n srs = df.get_col_as_str(col, na_as_str=True)\n data[col] = nom_comps(srs, cfg)\n elif isinstance(dtype, (Nominal, GeoGraphy, GeoPoint)):\n data[col] = nom_comps(df.frame[col], cfg)\n elif isinstance(dtype, SmallCardNum):\n srs = df.get_col_as_str(col, na_as_str=False)\n data[col] = nom_comps(srs, cfg)\n elif isinstance(dtype, Continuous):\n data[col] = cont_comps(df.frame[col], cfg)\n # elif isinstance(dtype, DateTime):\n # data[col] = {}\n # data[col][\"stats\"] = calc_stats_dt(df.frame[col])\n # data[col][\"line\"] = dask.delayed(_calc_line_dt)(df.frame[[col]], \"auto\")\n else:\n raise ValueError(f\"unprocessed type in column{col}:{dtype}\")\n except:\n print(f\"error happended in column:{col}\", file=sys.stderr)\n raise\n return data", "def __init__(self, df, name):\n try:\n self.df = df\n df.any()\n except AttributeError:\n self.df = df.toPandas()\n\n self.name = name\n\n if self.df.empty:\n raise Exception('Empty Dataset')", "def _initialize(self, data):\n if not isinstance(data, pd.DataFrame):\n raise ValueError(\"Provided data is not a pandas.DataFrame\")\n\n super()._initialize(data)\n \n #if not self.num_cols:\n # raise ValueError(\"Entropy methods are not usable on continous data\")\n \n #prepare discretizer for num values\n self.pid_cols = self.num_cols\n intervals = 200\n bins = 8\n alphas = 0.25\n strategy = \"frequency\"\n n = len(self.num_cols)\n mins = list(data[self.pid_cols].min())\n maxs = list(data[self.pid_cols].max())\n \n self.pid = 
PartitionIncrementalDiscretizer(\n intervals=[intervals]*n, \n mins = mins,\n maxs= maxs,\n alphas=[alphas]*n, \n n_bins=[bins]*n,\n strategy=strategy) \n \n self.pid.fit(data[self.pid_cols])\n self.pid.transform(data[self.pid_cols])\n \n #alter num and cat cols to support default smr methods\n self.num_cols = []\n self.cat_cols += self.pid_cols\n for col in self.pid_cols:\n self.categories[col] = list(range(bins))\n \n self.imp = CategoricalRememberingNaNImputer(categories=self.categories)\n self.pipeline = Pipeline(steps=[\n ('discretizer', self.pid),\n ('imputer', self.imp)])\n \n #init feature dicts\n self.cat_counts = {}\n self.lab_counts = {}\n self.cat_probs = {}\n self.lab_probs = {}\n self.cat_entropies = {}\n self.lab_entropy = 0\n \n #set cat_col dicts\n for col in self.cat_cols:\n self.cat_counts[col] = {}\n self.cat_probs[col] = {}\n for label in self.labels:\n self.cat_counts[col][label] = {}", "def _populate_df(self, df, objs,):\n for obj in objs:\n for prop in df.columns:\n df.loc[obj.name, prop] = getattr(obj, prop)", "def initDataView(self,student_cols):\n self.notebook.initStudentsView(student_cols)\n #self.dataView.initStudentsView(student_cols)", "def __init__(self, *args, **kwargs):\n \n self.dense = True\n\n # Create table\n super().__init__(*args, **kwargs)", "def init_sources(self):\n\n columns = ['some', 'column', 'headers']\n self.columns = columns\n\n self.data_source = ColumnDataSource(data=dict(zip(columns, []*len(columns))))", "def set_data(self, df):\n self.df = df", "def __init__(self, **kwargs):\n perform = kwargs.get(\"perform\", True)\n kwargs[\"perform\"] = False\n self._data = dict(self._DATA)\n self._text = self._DUMMY_TEXT\n self.df = pd.DataFrame()\n self.update(**kwargs)\n if perform:\n self.perform()", "def from_dict(self, data: dict):\n super().from_dict(data)\n if self.data is not None:\n try:\n self.data[\"columns\"] = np.array(\n [int(col) for col in self.data[\"columns\"]]\n )\n except:\n pass\n self.data = pd.DataFrame(\n data=self.data[\"data\"],\n columns=self.data[\"columns\"],\n index=self.data[\"index\"],\n )", "def _init_params_default(self):\n # if there are some nan -> mean impute\n Yimp = self.Y.copy()\n Inan = sp.isnan(Yimp)\n Yimp[Inan] = Yimp[~Inan].mean()\n if self.P == 1:\n C = sp.array([[Yimp.var()]])\n else:\n C = sp.cov(Yimp.T)\n C /= float(self.n_randEffs)\n for ti in range(self.n_randEffs):\n self.getTraitCovarFun(ti).setCovariance(C)", "def __init__(self,columns_to_fix=[],convert_dict={'Y':1,'N':0}):\n self.columns_to_fix = columns_to_fix\n self.convert_dict = convert_dict", "def __init__(self, col, val):\n\n self.__col = col\n self.__val = val", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if 'conv' in n:\n continue\n init_with_uniform(n, p, param_init)", "def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n if 'score.monotonic_energy.v.weight_g' in n or 'score.monotonic_energy.r' in n:\n logger.info('Skip initialization of %s' % n)\n continue\n if 'score.monotonic_energy.conv1d' in 
n:\n logger.info('Skip initialization of %s' % n)\n continue\n if 'score.chunk_energy.v.weight_g' in n or 'score.chunk_energy.r' in n:\n logger.info('Skip initialization of %s' % n)\n continue\n if 'linear_lm_gate.fc.bias' in n and p.dim() == 1:\n nn.init.constant_(p, -1.0)\n logger.info('Initialize %s with %s / %.3f' % (n, 'constant', -1.0))\n continue\n init_with_uniform(n, p, param_init)", "def variable(self, col: str, name: str, init_fn, *init_args) -> Variable:\n if not self._initialization_allowed:\n raise ValueError(\n 'Variables must be initialized in `setup()` or in a method '\n 'wrapped in `@compact`')\n if self._name_taken(name):\n raise errors.NameInUseError('variable', name, self.__class__.__name__)\n v = self.scope.variable(col, name, init_fn, *init_args)\n self._state.children[name] = col\n return v", "def _init_slot_variable(\n self, layer_name, embed_var, slot_name, initial_value\n ):\n if (\n layer_name not in self._tls._slot_variables\n or slot_name not in self._tls._slot_variables[layer_name]\n ):\n slot_var_name = \"%s/%s\" % (embed_var._shared_name, slot_name)\n slot_var = self._opt.add_weight(\n name=slot_var_name,\n shape=(None, None),\n dtype=embed_var.dtype,\n initializer=initial_value,\n trainable=False,\n )\n slot_variables_dict = self._tls._slot_variables.setdefault(\n layer_name, {}\n )\n slot_variables_dict[slot_name] = slot_var\n else:\n slot_var = self._tls._slot_variables[layer_name][slot_name]\n slot_var.assign(initial_value)\n return slot_var", "def set_meta(self, name, value):\n # note sometimes during .view, we won't have this var available\n check_meta = not hasattr(self, '_init_arg_check') or self._init_arg_check\n if check_meta and name in self._init_args:\n # note this is largely a failsafe, we shouldn't get to this\n # point via setattr since it'll match the hasattr(self.pobj, name)\n raise Exception('Cannot have member variables that clash with pandas constructor args')\n object.__setattr__(self, name, value)", "def __init__(self, X_columns, X_dtypes):\n self.X_columns = X_columns\n self.X_dtypes = X_dtypes", "def set_initial_value(self, rel_name, m):\n if not self.is_relation(rel_name):\n raise RelationNameError(rel_name, 'Relation is not defined.')\n expected_shape = self.get_shape(rel_name)\n if m.shape[1] != expected_shape[1]:\n raise ValueError(\n 'relation and initial_value have different columns: %d vs %d' %\n (expected_shape[1], m.shape[1]))\n if self.is_dense(rel_name):\n self._np_initval[rel_name] = m.transpose()\n else:\n self._np_initval[rel_name] = scipy.sparse.coo_matrix(m.transpose())", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def __init__(self, **kwargs):\n\t\tself.vars = kwargs\n\t\tself.old_vars = None", "def compile_dataframe_default(self):\n\t\tdata = [\n\t\t\t['ford','mustang','coupe','A'],\n\t\t\t['chevy','camaro','coupe','B'],\n\t\t\t['ford','fiesta','sedan','C'],\n\t\t\t['ford','focus','sedan','A'],\n\t\t\t['ford','taurus','sedan','B'],\n\t\t\t['toyota','camry','sedan','B']\n\t\t]\n\n\t\tself.data = pd.DataFrame(data, columns = self.data_cols)", "def correct_params():\n params = {\n \"probs\": [0.1, 0.9],\n \"treat\": 2,\n \"data\": pd.DataFrame(data={\"id\": np.arange(100), \"block\": np.arange(100)}),\n \"idx_col\": \"id\",\n }\n return params", "def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n if group['momentum'] != 0:\n 
self.state[p][\"momentum_buffer\"] = torch.zeros_like(\n p, device=\"cpu\"\n ).to(p.device)", "def init_agg(self, col):\n\t\traise NotImplementedError()", "def _parse_initsol(self) :\n\t\tlogging.debug(\"Parsing initsol initial solution\")\n\n\t\t# Init initsol as an empty dict\n\t\tself.initsol = {}\n\n\t\tfor varname in ['alpha','beta','g','h'] : \n\t\t\tself._parse_var_initsol(varname)", "def __init__(self, df):\n self.df = df\n self.min_interval_in_seconds = 99999999999", "def __init__(self):\n\n self.active_df = pd.DataFrame([], columns=['txt_name', 'missing_data_days'])", "def __init__(self, columns=()):\n self.columns = list(columns)\n\n # Create internal dictionary for faster access\n self.column_dict = {}\n\n for column in self.columns:\n self.column_dict[column.column_id] = column", "def __init__(self, df=None):\n\n if df is not None:\n # set attributes with list of unique stimulus combinations / categories\n self.all_stimulus_pairs = df.combo.unique().tolist()\n\n # set attribute for number of unique dimensionality reductions\n self.unique_subspace_dims = df.n_components.unique().tolist()\n\n # get df columns\n self.numeric_keys = df.select_dtypes(include=['float64']).columns\n self.object_keys = df.select_dtypes(include=['object']).columns\n\n # get number of jacknnifes\n self.n_jacks = df.jack_idx.values.astype(np.float).max() + 1\n \n # Collapse over jackknifes. Save mean / sd of all numeric params.\n log.info(\"Collapsing results over jackknifes\")\n # returns a dataframe for numeric results\n self.numeric_results = self._collapse_numeric(df) \n \n # objects = arrays (e.g. arrays of eigenvectors, evals, decoding vectors).\n # returns a dictionary of data frames\n self.array_results = self._collapse_objects(df)", "def __init__(self):\n self.model = None\n self.joined_datasets = None\n self.id_col = None\n self.val_col = None\n self.pop_col = None\n self.total_population_per_unit = None\n self.centroids_of_areal_data = None\n self.prepared_data = None\n self.unknown_area_id = None\n\n # Parameters\n self.lags = None\n self.step = None\n self.min_no_of_observations = None\n self.max_search_radius = None", "def initVariable(self, state):\n self.nb_agent = state.getNumAgents()\n self.first_call = False", "def _init_state(self, init_state=None, init_cov=None): \n ## Initialize the BMI state, assuming \n nS = self.n_states \n if init_state == None: \n init_state = np.mat( np.zeros([nS, 1]) ) \n if self.include_offset: init_state[-1,0] = 1 \n if init_cov == None: \n init_cov = np.mat( np.zeros([nS, nS]) )\n self.state = bmi.GaussianState(init_state, init_cov)", "def initialize(self, numSamples, sampleMethod):\n initSamples = initial_samples(self.lb, self.ub, sampleMethod, numSamples)\n if sum(self.xID) != 0:\n xUB = [\n self.ub[np.where(self.xID == 1)[0][0]]] * len(self.xID)\n xSamples = initial_samples([0] * len(self.xID), xUB, 'rand-wor', numSamples)\n for var in range(len(self.varType)):\n if 'i' in self.varType[var] or 'd' in self.varType[var]:\n initSamples[:, var] = np.rint(initSamples[:, var])\n\n if sum(self.xID) != 0:\n initSamples = initSamples * (self.cID + self.iID + self.dID) + xSamples * self.xID\n return initSamples", "def changeInitValues(self, betas):\n\n if self.name in betas:\n self.initValue = betas[self.name]", "def fill_nid(df, nid_dict):\n assert 'year' in df.columns, \"DataFrame doesn't have a 'year' column\"\n df['nid'] = df['year'].map(nid_dict)\n return df", "def standard_init(self, data):\n comm = self.comm\n H = self.H\n my_y = data['y']\n my_N, D = 
my_y.shape\n\n assert D == self.D\n\n # Calculate averarge W\n W_mean = parallel.allmean(my_y, axis=0, comm=comm) # shape: (D, )\n\n # Calculate data variance\n sigma_sq = parallel.allmean((my_y-W_mean)**2, axis=0, comm=comm) # shape: (D, )\n sigma_init = np.sqrt(sigma_sq).sum() / D # scalar\n\n # Initial W\n noise = sigma_init/4.\n W_init = W_mean + np.random.normal(scale=noise, size=[H, D]) # shape: (H, D)\n\n #Create and set Model Parameters, W columns have the same average!\n model_params = {\n 'W' : W_init, \n 'pi' : 1./H,\n 'sigma' : sigma_init\n }\n\n return model_params", "def _init_tkvars(self,PO):\n for name,param in PO.params().items():\n self._create_tkvar(PO,name,param)", "def _reload(self):\n if os.path.exists(self.filename):\n self.data = pd.read_csv(self.filename)\n else:\n self.data = pd.DataFrame(columns=self.unique_keys)\n\n # Set these default values\n # if 'weight_rescale' not in self.data.columns:\n # self.data['weight_rescale'] = 'none'\n # if 'norm' not in self.data.columns:\n # self.data['norm'] = 'softmax'\n # if 'update' not in self.data.columns:\n # self.data['update'] = 'all'\n # if 'replay' not in self.data.columns:\n # self.data['replay'] = False\n if 'debug' not in self.data.columns:\n self.data['debug'] = False\n\n # if 'tie' not in self.data.columns:\n # self.data['tie'] = False\n\n if 'update_length' not in self.data.columns:\n self.data['update_length'] = 0\n # for key in self.unique_keys:\n # self.data[key] = np.nan\n # Remaining set to None\n # for k in self.check_keys:\n # if k not in self.data.columns:\n # self.data[k] = None", "def record_variable_inits(self):\n old_init = getattr(variables.Variable, '__init__')\n\n def record(*args, **kwargs):\n self._in_variable_creation = True\n old_init(*args, **kwargs)\n self._in_variable_creation = False\n\n setattr(variables.Variable, '__init__', record)\n yield\n setattr(variables.Variable, '__init__', old_init)", "def init_data(title, **kwds):\n if(title in data_conf.keys()):\n data_conf[title].update(kwds)\n else:\n logging.debug(\"Initializing source '%s.'\" % title)\n data_conf[title] = kwds\n if(ipc.mpi.is_slave()):\n ipc.mpi.send('__data_conf__', data_conf)", "def main(dataframe: pd.DataFrame, arg_1: str='nothing') -> pd.DataFrame:\n dataframe[\"placeholder\"] = arg_1\n return dataframe" ]
[ "0.7032874", "0.5726817", "0.55134976", "0.5488105", "0.5354998", "0.5293255", "0.5284768", "0.5283604", "0.52450967", "0.52211875", "0.5218443", "0.52158165", "0.5208098", "0.5172965", "0.51615965", "0.5151945", "0.513557", "0.51037365", "0.5097976", "0.5087936", "0.50831324", "0.50649345", "0.5060887", "0.50424105", "0.50424105", "0.5037035", "0.5026113", "0.49965212", "0.49957886", "0.499071", "0.4985883", "0.49692407", "0.49686044", "0.4962015", "0.4954215", "0.49500427", "0.49490067", "0.49460652", "0.49368906", "0.492108", "0.49160975", "0.49032086", "0.48757467", "0.48737726", "0.4867982", "0.4866919", "0.48655644", "0.48653662", "0.48647177", "0.48567614", "0.48517537", "0.4847826", "0.4839091", "0.4835459", "0.48271966", "0.48103225", "0.48064697", "0.4805001", "0.47950044", "0.47946724", "0.47918212", "0.47846243", "0.47786453", "0.47736362", "0.47696087", "0.47662583", "0.4764104", "0.47626922", "0.47479534", "0.47441047", "0.47422552", "0.47371504", "0.47315136", "0.47307447", "0.47274378", "0.47190508", "0.47153908", "0.47062194", "0.46982956", "0.46961427", "0.4692861", "0.4681286", "0.4680885", "0.4680193", "0.46760738", "0.46719056", "0.46711266", "0.46709472", "0.46708402", "0.4668761", "0.46677673", "0.46667206", "0.4666538", "0.46639362", "0.4660297", "0.46595302", "0.46550623", "0.46530205", "0.46501613", "0.4647847" ]
0.5161708
14
Likelihood function of the state space model, to be optimized by the minimizer. Pass a parameter vector to it and obtain the likelihood
def __llik_fun__(self, par_ini):
    n = len(self.y)
    # Only the normal KFS
    if self.method == 'iterate':
        _, __, P, v, F = self.iterate(plot=False, estimate=True, init_params=par_ini)
    # KFS with regression coefficient (beta)
    elif self.method == 'iterateRegression':
        _, __, P, v, F = self.iterateRegression(plot=False, estimate=True, init_params=par_ini)
    L = -(n / 2)*np.log(2*np.pi) - 0.5 * np.sum(np.log(np.abs(F[1:]))) - 0.5 * np.sum((v[1:] ** 2) / F[1:])  # + np.log(self.P_start)
    # Return the negative likelihood since we minimize it with the minimizer
    return (-1)*L
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def likelihood(\n self,\n observation: np.ndarray,\n state: np.ndarray,\n control_z: Optional[np.ndarray] = None\n ) -> np.matrix:\n pass", "def log_likelihood(self, data, reward_model, bias_params):", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj", "def log_likelihood(self):\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))\r\n D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))\r\n return A + C + D + self.likelihood.Z", "def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)", "def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))", "def state_likelihood(measurement_t, state_t):\n # constants for the measurement model\n rw = 10 ** (-6) # uncetrainty in gyro measurements\n r_theta = 5 * (10 ** (-5)) # uncertainty in accel. 
measurements\n Rd = np.array([[r_theta, 0], # measurement covariance\n [0, rw]])\n C = np.array([[0, 1, 0], [1, 0, 1]]) # measurement matrix\n\n\n # obs_noise = np.random.multivariate_normal(expected_zt, Rd) # drawing sample from ~ N(Cxt, Rd)\n\n expected_zt = np.dot(C, state_t) # expected z_t for each hyp. particle x_t\n k = measurement_t.size # dimension of the vector \n e1 = (2 * np.pi) ** (-k/2) # first expression of likelihood\n\n cov_sqrt_det = np.linalg.det(Rd) ** (-1/2) # square root of the determinant of covariance matrix\n cov_inverse = np.linalg.inv(Rd) # inverse of the covariance matrix\n\n z_mean_diff = (measurement_t - expected_zt) # difference between observed and expected measurement\n z_mean_diff_T = (measurement_t - expected_zt).T # transpose of above\n exponential = np.exp(-0.5 * (z_mean_diff_T @ cov_inverse @ z_mean_diff)) # exponential part of the likelihood function\n\n likelihood = e1 * cov_sqrt_det * exponential # the likelihood L(z_t)\n return likelihood", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood", "def parameterized_likelihood(params: NamedParameters):\n return ParamaterizedLikelihood(params)", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def _build_likelihood(self):\n\n # Get prior KL.\n KL = self.build_prior_KL()\n\n # Get conditionals\n fmean, fvar = self._build_predict(self.X, full_cov=False)\n\n # Get variational expectations.\n var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y) * self.obs_weight\n\n # re-scale for minibatch size\n scale = tf.cast(self.num_data, gp.settings.float_type) / tf.cast(tf.shape(self.X)[0], gp.settings.float_type)\n scale = scale / tf.reduce_mean(self.obs_weight)\n return tf.reduce_sum(var_exp) * scale - KL", "def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def log_likelihood_function(self, instance: af.ModelInstance) -> float:\r\n model_data = self.model_data_from_instance(instance=instance)\r\n fit = self.fit_from_model_data(model_data=model_data)\r\n return fit.log_likelihood", "def log_likelihood(self) -> tf.Tensor:\n # K⁻¹ + GᵀΣ⁻¹G = LLᵀ.\n l_post = self._k_inv_post.cholesky\n num_data = self.observations_index.shape[0]\n\n # Hμ [..., num_transitions + 1, output_dim]\n marginal = self.emission.project_state_to_f(self.prior_ssm.marginal_means)\n marginal = self._drop_batch_shape(marginal)\n\n # y = obs - Hμ [..., num_transitions + 1, output_dim]\n disp = self.observations - marginal\n disp_data = self.sparse_observations - 
self.dense_to_sparse(marginal)\n\n # cst is the constant term for a gaussian log likelihood\n cst = (\n -0.5 * np.log(2 * np.pi) * tf.cast(self.emission.output_dim * num_data, default_float())\n )\n\n term1 = -0.5 * tf.reduce_sum(\n input_tensor=tf.einsum(\"...op,...p,...o->...o\", self._r_inv_data, disp_data, disp_data), axis=[-1, -2]\n )\n\n # term 2 is: ½|L⁻¹(GᵀΣ⁻¹)y|²\n # (GᵀΣ⁻¹)y [..., num_transitions + 1, state_dim]\n obs_proj = self._back_project_y_to_state(disp)\n\n # ½|L⁻¹(GᵀΣ⁻¹)y|² [...]\n term2 = 0.5 * tf.reduce_sum(\n input_tensor=tf.square(l_post.solve(obs_proj, transpose_left=False)), axis=[-1, -2]\n )\n\n ## term 3 is: ½log |K⁻¹| - log |L| + ½ log |Σ⁻¹|\n # where log |Σ⁻¹| = num_data * log|R⁻¹|\n term3 = (\n 0.5 * self.prior_ssm.log_det_precision()\n - l_post.abs_log_det()\n + 0.5 * self._log_det_observation_precision\n )\n\n return tf.reduce_sum(cst + term1 + term2 + term3)", "def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def loglikelihood(self, y):\n raise NotImplementedError", "def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood", "def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)", "def likelihood(self):\n \n raise NotImplementedError()", "def lnprior(self, params):\n self.debug.start_function('lnprior')\n lower_bounds = self.mcmc_version.prior_bounds[:, 0]\n upper_bounds = self.mcmc_version.prior_bounds[:, 1]\n inside_bounds = np.logical_and(params > lower_bounds,\n params < upper_bounds)\n\n if False in inside_bounds:\n self.debug.end_function()\n return self.zero_lhood\n\n if self.has_logz:\n z_input = params[self.param_idxs['logz']]\n else:\n z = params[self.param_idxs['z']]\n z_input = np.log10(z / z_sun)\n\n prior_lhood = np.log(self.z_prior(z_input))\n\n # ===== anisotropy/inclination priors =====\n if self.has_two_f:\n xi_ratio = params[self.param_idxs['f_p']] / params[self.param_idxs['f_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n elif self.has_xi_ratio:\n xi_ratio = params[self.param_idxs['xi_ratio']]\n d_b = params[self.param_idxs['d_b']]\n prior_lhood += np.log(self.xi_ratio_prior(xi_ratio))\n prior_lhood += np.log(self.d_b_prior(d_b))\n\n self.debug.variable('prior_lhood', prior_lhood, formatter='f')\n self.debug.end_function()\n return prior_lhood", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def LLwrapper(params):\n NLL = LogLikelihood(gauss, s)\n return NLL(params[0], params[1])", "def gnll_loss(y, parameter_vector):\n alpha, mu, sigma = slice_parameter_vectors(\n parameter_vector, components\n ) # Unpack parameter vectors\n\n gm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=alpha),\n components_distribution=tfd.Normal(loc=mu, scale=sigma),\n )\n\n log_likelihood = gm.log_prob(tf.transpose(y)) # Evaluate log-probability of y\n\n return -tf.reduce_mean(log_likelihood, axis=-1)", "def get_likelihood(self, sta, obs):\n # obs (32, obs_num, 3, 24, 24) -> (32*obs_num, 3, 24, 24)\n o = obs.permute(0, 1, 4, 2, 3)\n o = o.view(-1, 3, 24, 24)\n e = self.observation_encoder(o)\n # get e 
(32*obs_num, 128)\n # get all the combinations of states and observations\n # -> (32, obs_num, 128)\n e = e.view(obs.size()[0], obs.size()[1], -1)\n # -> (32, obs_num, sta_num, 128)\n e = e.view(obs.size()[0], obs.size()[1], 1, e.size()[2]).repeat(1, 1, sta.size()[1], 1)\n # sta (32, sta_num, 3) -> (32, sta_num, 4)\n s = torch.cat(((sta[:, :, :2] - torch.from_numpy(self.means['s'])[:2]) / torch.from_numpy(self.stds['s'])[:2],\n torch.cos(sta[:, :, 2:3]), torch.sin(sta[:, :, 2:3])), -1)\n # -> (32, obs_num, sta_num, 4)\n s = s.view(s.size()[0], 1, s.size()[1], s.size()[2]).repeat(1, obs.shape[1], 1, 1)\n # get all the combinations of states and observations\n # cat_input (32, obs_num, sta_num, 132)\n cat_input = torch.cat((e, s), -1)\n # -> (32*obs_num*sta_num, 132)\n cat_input = cat_input.view(-1, cat_input.size()[-1])\n\n # get w (32*obs_num*sta_num, 1)\n w = self.likelihood_estimator(cat_input)\n # -> (32, obs_num, sta_num)\n w = w.view(sta.size()[0], obs.size()[1], sta.size()[1])\n\n return w", "def likelihood_func(params, key, coeffs_init, t, y, y_err, planet_parameters):\n \n # Replace coefficients\n if len(params) > 0:\n for index, coeff_key in enumerate(key):\n coeffs_init[coeff_key] = params[index]\n \n # Build transit model\n transit_model = build_transit_model(coeffs_init, t, planet_parameters)\n # Build sigma and chi**2\n sigma = np.mean(y_err)\n chi2 = chi_squared(transit_model, y, y_err)\n likelihood = -len(y)*np.log(sigma) - 0.5*len(y)*np.log(2*np.pi) - .5*chi2\n \n return likelihood", "def __call__(self, params):\n # Construct model for given set of parameters\n mod = self.model(params)\n\n # Input into equation (11) from Anderson (1990)\n # But we want log-likelihood not negative log-likelihood (in MCMC)\n # and so we add the -1.0\n like = np.sum(np.log(mod) + (self.power / mod))\n return -1.0*like", "def get_likelihood(self, discretized=False, state=None):\n if not hasattr(self, 'softmax'):\n self.generate_softmax()\n\n if self.softmax is not None:\n if state is not None:\n return self.softmax.probability(class_=self.softmax_class_label,\n state=state)\n elif discretized:\n return self.softmax.probability(class_=self.softmax_class_label)\n else:\n return self.softmax, self.softmax_class_label\n else:\n logging.error(\"Couldn't generate softmax model for {}\"\n .format(self.__str__()))", "def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)", "def log_likelihood(parameters):\n if len(copula.bounds_param) == 1:\n params = [parameters]\n else:\n param1, param2 = parameters\n params = [param1, param2]\n logl = -np.sum(np.log(copula.get_pdf(psd_obs[0], psd_obs[1], params)))\n return logl", "def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given 
kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def get_likelihood(self, d):\n pos = d.pos - self.parent.pos\n pos = np.dot(rotmat(-self.angle), pos)\n lik = halfnorm.pdf(pos[0],scale=self.length) * \\\n vonmises.pdf(np.arctan2(pos[1],pos[0]),self.vonmisesscale,loc=self.angle)\n #assert lik!=0.0\n return lik", "def log_likelihood(self, state, obs, act):\n indices = np.array([self.Gittins[state['successes'][i], state['failures'][i]] for i in range(self.env.n_arms)])\n greedy_arms = np.where(np.isclose(indices,indices.max()))[0]\n return np.log(1/len(greedy_arms)) if act in greedy_arms else -1e8", "def compute_log_likelihood(self, indicators, weights, l2):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs)) - l2* np.sum(weights[1:]**2)\n return lp", "def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll", "def sp_loglikelihood(psr, Aw, psd): \n if len(psd)*2 != psr.Fmat.shape[1]:\n raise ValueError(\"PSD vector not of appropriate length!\")\n\n Nvec = np.ones(len(psr.toas)) * Aw**2\n return mark3loglikelihood(psr, Nvec, psd.repeat(2))", "def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")", "def log_likelihood(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n log_likelihood = numpy.sum(norm.logpdf(X,parameters['mu'],sigma))\n\n return log_likelihood", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, 
weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def loglikelihood(self, context, continuation):\n pass", "def log_likelihood(cosmo_param, pk_obs, inv_cov):\n pknlfid, kbins, kspace = fiducial_power(cosmo_param)\n \n x = pk_obs - pknlfid\n return -0.5* (x.T @ inv_cov @ x)", "def _pseudo_likelihood(self, v_pos, updates):\n bit_i = theano.shared(value=0, name='bit_i')\n\n fe_xi = self.free_energy(v_pos)\n\n fe_xi_ = self.free_energy(T.set_subtensor(v_pos[:, bit_i],\n 1 - v_pos[:, bit_i]))\n\n updates[bit_i] = (bit_i + 1) % v_pos.shape[1]\n \n return T.mean(v_pos.shape[1] * T.log(T.nnet.sigmoid(fe_xi_ - fe_xi)))", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def log_likelihood_grad_bias(self, data, reward_model, bias_params):", "def neg_log_likelihood(self,params: ndarray) -> float:\n\n return -self.compute_log_likelihood(params)", "def log_likelihood(self):\r\n if self.likelihood.is_heteroscedastic:\r\n A = -0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self._A))\r\n else:\r\n A = -0.5 * self.num_data * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT\r\n B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self._A))\r\n C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))\r\n D = 0.5 * self.data_fit\r\n self._A_part, self._B_part, self._C_part, self._D_part = A, B, C, D\r\n return A + B + C + D + self.likelihood.Z", "def log_likelihood(self, theta):\n raise NotImplementedError()", "def bd_nll(params, trajectory, return_params=False):\n # Extract parameters\n s_fit = params[0]\n t_fit = params[2]\n N_w_fit = int(params[1])\n\n # Exatract inferred clone_sizes from AO/DP ratio\n mean_size, size_range = observations_to_clone_size(AO=trajectory.AO,\n DP=trajectory.DP,\n N_w=N_w_fit)\n\n # Set inferred size as the mean of all possible ranges of observations\n trajectory['inferred_size'] = mean_size\n\n # Compute time_steps\n trajectory['delta_t'] = np.insert(np.diff(trajectory.age),\n 0, trajectory.iloc[0].age - t_fit)\n # Initialise negative log-likelihood computation\n nll = 0\n for i, time_point in trajectory.iterrows():\n # Extract initial clone_size and time difference between observations\n if i == 0:\n init_size = 1\n else:\n init_size = max(trajectory.iloc[i-1].inferred_size, 1)\n\n # Compute AO/DP observation probability\n prob = AO_prob_value(AO=time_point.AO,\n DP=time_point.DP,\n init_size=init_size,\n s=s_fit,\n delta_t=time_point.delta_t,\n N_w=N_w_fit)\n\n # Avoid divide by zero encountered in log warning\n if prob < 1.0e-100:\n prob = 1.0e-100\n\n # Compute negative log likelihood\n nll -= np.log(prob)\n\n if return_params is True:\n return nll, params\n else:\n return nll", "def get_total_log_likelihood(self, x, **kwargs):\n pass", "def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return np.exp(a*ts)*sigmoid(-a)", "def log_prior(self, params):\n # log likelihood function, see:\n # https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Likelihood_function\n variance = self.std ** 2\n ndim = 
params.ndim\n mean_diff = params - self.mean\n scaled_sq_err = jnp.dot(mean_diff, mean_diff) / variance\n # log determinant of covariance matrix\n log_det_cov = 2 * ndim * jnp.log(self.std)\n norm_term = ndim * jnp.log(2 * jnp.pi)\n return -0.5 * (log_det_cov + scaled_sq_err + norm_term)", "def forward(self, input):\n log_likelihood = -0.5 * (math.log(2 * math.pi) + self.sigma2.log() + (input - self.mu) ** 2 / self.sigma2)\n return log_likelihood", "def get_log_likelihood(response_probability, response):\n pass", "def __wrap_likelihood(self, args):\n params = {}\n for i, p in enumerate(self._par_names):\n if not self.fitarg['fix'][p]:\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n else:\n if self._par_islog[p]:\n params[p] = np.power(10., self.fitarg['pinit'][p])\n else:\n params[p] = self.fitarg['pinit'][p]\n return self.return_likelihood(params)", "def f(self, x):\n error = log_likelihood_calc(x[1], x[0], self.data)\n return error", "def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll", "def pl_loglikelihood(psr, Aw, Ar, Si):\n Nvec = np.ones(len(psr.toas)) * Aw**2\n psd = PL_psd(psr.freqs, Ar, 0.5*(3-Si), sig_fL=1.0/(20.0*year))\n return mark3loglikelihood(psr, Nvec, psd)", "def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)", "def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... 
n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.log(self.p_y_given_x)[T.arange(self.y.shape[0]), self.y]", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def L1_log_likelihood_gradient(X, y, B, lmbda):\n pass", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def __compute_log_likelihood(self, outputs, data, boolean):\n end_loc, pi_loc, mu_loc, sigma_loc, rho_loc = outputs\n\n mu1_loc, mu2_loc = mu_loc[:, :, :20], mu_loc[:, :, 20:]\n sig1_loc, sig2_loc = (\n sigma_loc[:, :, :20] + 10e-10,\n sigma_loc[:, :, 20:] + 10e-10,\n )\n\n x1_loc = data[:, 1:, 1].unsqueeze(2).to(self.__device)\n x2_loc = data[:, 1:, 2].unsqueeze(2).to(self.__device)\n x3_loc = data[:, 1:, 0].to(self.__device)\n\n end_loc = end_loc[:, :-1, -1].to(self.__device)\n pi_loc = pi_loc[:, :-1, :].to(self.__device)\n mu1_loc = mu1_loc[:, :-1, :].to(self.__device)\n mu2_loc = mu2_loc[:, :-1, :].to(self.__device)\n sig1_loc = sig1_loc[:, :-1, :].to(self.__device)\n sig2_loc = sig2_loc[:, :-1, :].to(self.__device)\n rho_loc = rho_loc[:, :-1, :].to(self.__device)\n\n boolean = boolean[:, :-1].to(self.__device)\n\n gauss = pi_loc * self.__bivariate_gaussian(\n sig1_loc, sig2_loc, mu1_loc, mu2_loc, x1_loc, x2_loc, rho_loc\n )\n gauss = torch.sum(gauss, dim=2).to(self.__device)\n\n log_lik = torch.sum(\n -boolean * torch.log(gauss + 10e-10)\n - boolean * torch.log(end_loc + 10e-10) * (x3_loc)\n - boolean * torch.log(1 - end_loc + 10e-10) * (1 - x3_loc)\n )\n\n return log_lik", "def log_likelihood_loss(y, tx, w):\n p_1 = sigmoid(tx.dot(w))\n p_0 = np.log(1-p_1)\n p_1 = np.log(p_1)\n return -np.sum((y == 1)*p_1+(y == 0)*p_0)", "def get_likelihood(self, observation, position, direction):\n if self.real_robot and observation == 0.0:\n return 1.0\n\n closest = self.world_model.get_closest_wall(position, direction)\n if closest == None:\n # probability of a false positive is 0\n if observation == 0.0:\n return 1.0\n else:\n return 0.0\n elif 
closest != None and observation == 0.0:\n # probability of missing an obstacle is 0\n return 0.0\n return norm(0, self.model_noise_rate).pdf(abs(position - closest) - observation)", "def log_likelihood_function(self, instance):\r\n\r\n try:\r\n return self.fit_interferometer_for_instance(\r\n instance=instance\r\n ).figure_of_merit\r\n except (\r\n exc.PixelizationException,\r\n exc.InversionException,\r\n exc.GridException,\r\n OverflowError,\r\n ) as e:\r\n raise exc.FitException from e", "def likelihood(self, sign_switch, hyperparam):\n\n self.timer.tic()\n\n if numpy.isscalar(hyperparam):\n hyperparam_ = numpy.array([hyperparam], dtype=float)\n else:\n hyperparam_ = hyperparam\n\n # Check if likelihood is already computed for an identical hyperparam\n if (self.ell is not None) and \\\n (self.ell_hyperparam is not None) and \\\n (hyperparam_.size == self.ell_hyperparam.size) and \\\n numpy.allclose(hyperparam_, self.ell_hyperparam,\n atol=self.hyperparam_tol):\n\n if sign_switch:\n return -self.ell\n else:\n return self.ell\n\n # Get eta\n eta = self._hyperparam_to_eta(hyperparam_)\n\n # Extract scale from hyperparam\n if (not numpy.isscalar(hyperparam_)) and \\\n (hyperparam_.size > self.scale_index):\n\n # Set scale of the covariance object\n scale = self._hyperparam_to_scale(hyperparam_[self.scale_index:])\n self.mixed_cor.set_scale(scale)\n\n if numpy.abs(eta) >= self.max_eta:\n\n # Optimal sigma02 when eta is very large\n sigma02 = self._find_optimal_sigma02()\n\n # Log likelihood\n ell = -0.5*self.rdof * (numpy.log(2.0*numpy.pi) + 1.0 +\n numpy.log(sigma02))\n\n if self.B is None:\n Cinv = numpy.matmul(self.X.T, self.X)\n logdet_Cinv = numpy.log(numpy.linalg.det(Cinv))\n ell += - 0.5*logdet_Cinv\n # else:\n # logdet_B = numpy.log(numpy.linalg.det(self.B))\n # ell += 0.5*logdet_B\n\n else:\n\n # Update Y, C, Mz (all needed for computing optimal sigma2)\n self._update_Y_C_Mz(hyperparam_)\n\n # Find (or update) optimal sigma2\n sigma2 = self._find_optimal_sigma2(hyperparam_)\n\n logdet_Kn = self.mixed_cor.logdet(eta)\n logdet_Cinv = numpy.log(numpy.linalg.det(self.Cinv))\n\n if numpy.isnan(logdet_Kn):\n raise RuntimeError('Logdet of \"Kn\" is nan at eta: %0.3e.'\n % eta)\n\n # Log likelihood\n ell = -0.5*self.rdof * \\\n (numpy.log(2.0*numpy.pi) + 1.0 + numpy.log(sigma2)) \\\n - 0.5*logdet_Kn - 0.5*logdet_Cinv\n\n if self.B is not None:\n # Note that self.B is indeed B1, that is the matrix B without\n # sigma**2.\n logdet_B = numpy.log(numpy.linalg.det(self.B))\n ell += -0.5*logdet_B\n\n # Store ell to member data (without sign-switch).\n self.ell = ell\n self.ell_hyperparam = hyperparam_\n\n # If ell is used in scipy.optimize.minimize, change the sign to obtain\n # the minimum of -ell\n if sign_switch:\n ell = -ell\n\n self.timer.toc()\n\n return ell", "def likelihood(x, n, P):\n if not isinstance(n, int) or (n <= 0):\n raise ValueError('n must be a positive integer')\n if not isinstance(x, int) or (x < 0):\n raise ValueError(\n 'x must be an integer that is greater than or equal to 0')\n if x > n:\n raise ValueError('x cannot be greater than n')\n if not isinstance(P, np.ndarray) or len(P.shape) != 1:\n raise TypeError('P must be a 1D numpy.ndarray')\n if not np.all((P >= 0) & (P <= 1)):\n raise ValueError('All values in P must be in the range [0, 1]')\n nume = np.math.factorial(n)\n deno = (np.math.factorial(x) * (np.math.factorial(n - x)))\n fact = nume / deno\n P_likelihood = fact * (np.power(P, x)) * (np.power((1 - P), (n - x)))\n return P_likelihood", "def nloglikeobs(self, 
params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def get_kl(g_obs, \n vb_params_dict, \n prior_params_dict,\n gh_loc, \n gh_weights,\n e_log_phi = None,\n e_z = None):\n\n # get prior parameters\n dp_prior_alpha = prior_params_dict['dp_prior_alpha']\n allele_prior_lambda_vec = prior_params_dict['allele_prior_lambda_vec']\n \n e_log_sticks, e_log_1m_sticks, \\\n e_log_cluster_probs, e_log_pop_freq = \\\n get_moments_from_vb_params_dict(vb_params_dict,\n gh_loc = gh_loc,\n gh_weights = gh_weights)\n \n \n # joint log likelihood\n e_z_opt, z_nat_param = \\\n get_optimal_z(g_obs, e_log_pop_freq, e_log_cluster_probs)\n \n if e_z is None:\n e_z = e_z_opt\n \n e_loglik = np.sum(e_z * z_nat_param)\n \n # entropy term\n entropy = get_entropy(vb_params_dict, e_z, gh_loc, gh_weights) \n \n # prior term \n e_log_prior = get_e_log_prior(e_log_1m_sticks,\n e_log_pop_freq,\n dp_prior_alpha,\n allele_prior_lambda_vec)\n \n elbo = e_loglik + entropy + e_log_prior\n\n # prior perturbation\n if e_log_phi is not None:\n\n e_log_pert = e_log_phi(vb_params_dict['ind_admix_params']['stick_means'],\n vb_params_dict['ind_admix_params']['stick_infos'])\n \n elbo = elbo + e_log_pert\n \n return -1 * elbo", "def log_likelihood(mu, sigma, y, T):\n ll = 0.\n for yi, Ti in zip(y, T):\n d = yi.size\n log_det_cov = np.linalg.slogdet(sigma[Ti])[1]\n y_minus_mean = yi - mu[Ti]\n term3 = np.dot(y_minus_mean.T.ravel(),\n np.linalg.solve(sigma[Ti], y_minus_mean.T).ravel())\n ll += (-0.5 * d * np.log(2 * np.pi) - 0.5 * log_det_cov - 0.5 * term3)\n return ll", "def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - self.design_matrix@self.mn\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*self.mn.T@self.mn\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik", "def loglikelihood(mean, grad):\n\n # update the global latent_means list\n latent_means[index] = mean\n\n if grad.size > 0:\n # update the gradient\n grad[:] = compute_gradient(\n Y=Y, \n mi=mean, \n latent_Sigmas=latent_Sigmas,\n B1=B1,\n B2=B2,\n ss=ss,\n mu=mu,\n g1=g1,\n g2=g2,\n sigma2=sigma2,\n index=index\n )\n\n a1, a2, a3, a4, a5 = compute_terms(\n Y=Y, \n latent_means=latent_means, \n latent_Sigmas=latent_Sigmas, \n B1=B1, \n B2=B2, \n mu=mu, \n g1=g1, \n g2=g2\n )\n\n scalars = N*q/2 - N*p/2*np.log(TWOPI*sigma2)\n\n total = sum(\n [\n item1 - 1/(2*sigma2)*item2 + (TWOPI)**(1/2-q)*(item3 + item4 + item5) \n for item1, item2, item3, item4, item5 in zip(a1, a2, a3, a4, a5)\n ]\n )\n\n return total + scalars", "def objective_function(self, x):\r\n try:\r\n self._set_params_transformed(x)\r\n self._fail_count = 0\r\n except (LinAlgError, ZeroDivisionError, ValueError) as e:\r\n if self._fail_count >= self._allowed_failures:\r\n raise e\r\n self._fail_count += 1\r\n return np.inf\r\n return -self.log_likelihood() - self.log_prior()", "def get_likelihood(\n self,\n qb,\n inv_fish,\n 
map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n self.clear_precalc()\n use_precalc = all([x is None for x in 
obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if 
dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / (prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. 
Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, 
num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n if self.BICscore is None:\n BIC = 0\n for i, model in enumerate(self.models):\n n = model.n \n k = model.m.num_params\n L = model.m.log_likelihood()\n 
BIC += L - k/2*np.log(n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def log_prob_parameters(self, parameters):\n lp = 0.0\n parameters_model = self.get_parameters_model\n index = 0\n\n for parameter in parameters_model:\n dimension = parameter.dimension\n lp += parameter.log_prior(parameters[index: index + dimension])\n\n index += dimension\n\n if not np.isinf(lp):\n lp += self.log_likelihood(parameters[0], parameters[1], parameters[2:])\n\n return lp", "def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood", "def _log_likelihood(self, theta, f, x, y, yerr):\n sigma2 = yerr**2\n return -0.5*np.sum((y - f(theta, x))**2 / sigma2 + 2*np.log(sigma2))", "def _getCurrentPosteriorLikelihood(self): \n likelihood = 0\n T = self.T\n K= self.K \n final_likelihood = 0\n total_log_lik = 0\n \n for n in range(1,self.N+1):\n # Compute total Likelihood for all Instances P(x1...xn / theta) \n tot_lik = 0\n tot_scale_factor = 0\n \n for i in range(1,self.K+1): \n likelihood = self.posterior_state_trellis[n][(T,i)]\n tot_lik = tot_lik + likelihood\n\n try:\n total_log_lik = math.log(tot_lik) \n except ValueError:\n ipdb.set_trace()\n \n for t in range(1,self.T):\n scale_factor = self.forward_scaling_vector[n][t] \n tot_scale_factor = tot_scale_factor + math.log(scale_factor)\n\n final_likelihood = final_likelihood + (total_log_lik - tot_scale_factor)\n\n return final_likelihood", "def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll", "def lnprob(theta, model, priors, x, y, yerr):\n lp = lnprior(theta, priors)\n if not np.isfinite(lp):\n return -np.inf\n return lp + lnlike(theta, model, x, y, yerr)", "def lnprob(theta, dtarray, dmagarray, sigmaarray):\n lp = lnprior(theta)\n\n if not np.isfinite(lp):\n #if (lp==-(10**32)):\n return -np.inf\n #return -(10**32)\n return lp +lnlike(theta, dtarray, dmagarray, sigmaarray)", "def likelihood(self):\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n noise_penalization_term = -1 / 2 * np.log(\n np.linalg.det(self.cov_matrix))\n\n y = np.linalg.solve(self.cov_matrix, self.list_y)\n y = np.array(self.list_y) @ y\n data_fidelity_term = -1 / 2 * y\n\n nbr_obs_term = - self.n_observation * np.log(2 * np.pi)\n likelihood = (\n noise_penalization_term + data_fidelity_term + nbr_obs_term\n )\n return likelihood", "def relative_likelihood(self):\n \n if self.num_hidden == 0:\n \n return T.exp(-self.compute_energy(self.x, self.batch_size))\n \n if self.num_hidden > 0:\n \n return T.exp(-self.compute_free_energy(self.x))", "def lnprobfn(theta, model=None, obs=None, sps=None, \n nested=False, verbose=verbose):\n\n # Calculate prior probability and exit if not within prior\n # Also if doing nested sampling, do not include the basic priors, \n # since the drawing method includes the prior 
probability\n lnp_prior = model.prior_product(theta, nested=nested)\n if not np.isfinite(lnp_prior):\n return -np.infty\n \n # Generate \"mean\" model\n spec, phot, mfrac = model.mean_model(theta, obs, sps=sps)\n \n # Calculate likelihoods\n lnp_spec = lnlike_spec(spec, obs=obs)\n lnp_phot = lnlike_phot(phot, obs=obs)\n\n return lnp_prior + lnp_phot + lnp_spec", "def log_likelihood_grad_rew(self, data, reward_model, bias_params):", "def log_marginal_likelihood(self, theta=None, gradient=True, opt_flag=False):\n if theta != None:\n self.kernel.hyper = np.exp(theta) if opt_flag else theta\n # self.kernel.hyper\n if gradient:\n K, grad = self.kernel.estimate(self.X_train, gradient=True)\n else:\n K = self.kernel.estimate(self.X_train)\n \n K += self.id_mat * self.sigma_n\n\n try:\n L = np.linalg.cholesky(K)\n except np.linalg.LinAlgError:\n return (np.inf, np.array([0])) if gradient else np.inf\n \n alpha = cho_solve((L, True), self.Y_train)\n \n logl = float(self.Y_train.T.dot(alpha)) / 2\n logl += np.log(np.diag(L)).sum()\n logl += self.ntrain * np.log(2 * np.pi) / 2\n\n \n if gradient:\n logl_grad = alpha.dot(alpha.T) # einsum is slower\n logl_grad -= cho_solve((L, True), self.id_mat)\n logl_grad = 0.5 * np.einsum('ij,ji -> ', logl_grad, grad) #dot prod and trace combined\n return logl, -np.array([logl_grad])\n return logl", "def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))", "def train_likelihood_estimator(self):\n batch_size = self.trainparam['batch_size']\n epochs = self.trainparam['epochs']\n lr = self.trainparam['learning_rate']\n\n self.observation_encoder = self.observation_encoder.double()\n self.likelihood_estimator = self.likelihood_estimator.double()\n if self.use_cuda:\n self.observation_encoder = self.observation_encoder.cuda()\n self.likelihood_estimator = self.likelihood_estimator.cuda()\n\n train_loader = torch.utils.data.DataLoader(\n self.train_set,\n batch_size=batch_size,\n shuffle=True,\n num_workers=self.globalparam['workers'],\n pin_memory=True,\n sampler=None)\n val_loader = torch.utils.data.DataLoader(\n self.eval_set,\n batch_size=batch_size,\n shuffle=False,\n num_workers=self.globalparam['workers'],\n pin_memory=True)\n\n optimizer = torch.optim.Adam(list(self.observation_encoder.parameters())+\n list(self.likelihood_estimator.parameters()), lr)\n\n log_dir = 'likelihood_estimator_log'\n if os.path.exists(log_dir):\n shutil.rmtree(log_dir)\n log_writer = SummaryWriter(log_dir)\n\n check_point_dir = 'likelihood_estimator_checkpoint'\n if not os.path.exists(check_point_dir):\n os.makedirs(check_point_dir)\n\n niter = 0\n for epoch in range(epochs):\n self.observation_encoder.train()\n self.likelihood_estimator.train()\n\n for batch_id, (sta, obs, act) in enumerate(train_loader):\n if self.use_cuda:\n sta = sta.cuda()\n obs = obs.cuda()\n w = self.get_likelihood(sta, obs)\n\n # define loss (correct -> 1, incorrect -> 0) and optimizer\n correct_item = 0\n incorrect_item = 0\n for batch_ind in range(w.size()[0]):\n correct_samples = torch.diag(w[batch_ind])\n incorrect_samples = w[batch_ind] - torch.diag(torch.diag(w[batch_ind]))\n correct_item += torch.sum(-torch.log(correct_samples))\n incorrect_item += torch.sum(-torch.log(1.0 - incorrect_samples))\n loss = correct_item / w.size()[0] + incorrect_item / (w.size()[0]*(w.size()[0]-1))\n\n # log and visualize\n if niter % self.log_freq == 0:\n print('Epoch {}/{}, Batch {}/{}: Train loss: 
{}'.format(epoch, epochs, batch_id, len(train_loader), loss))\n log_writer.add_scalar('train/loss', loss, niter)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n niter += 1\n\n # visualize the output of the model\n if self.visualize and epoch % 10 == 0:\n w = w.data.cpu().numpy()\n for i in range(w.shape[0]):\n plot_measurement(w[batch_id], save_image=True,\n outdir='train_vis/measurement/epoch-{}'.format(epoch),\n batch=batch_id, ind=i)\n\n if epoch % self.test_freq == 0:\n likelihood = self.eval_likelihood_estimator(val_loader)\n print('Epoch {}: Val likelihood: {}'.format(epoch, likelihood))\n log_writer.add_scalar('val/likelihood', likelihood, niter)\n\n if epoch % 10 == 0:\n save_name1 = os.path.join(\n check_point_dir, 'encoder_checkpoint_{}.pth'.format(epoch))\n save_name2 = os.path.join(\n check_point_dir, 'estimator_checkpoint_{}.pth'.format(epoch))\n torch.save(self.observation_encoder.state_dict(), save_name1)\n print('Saved encoder to {}'.format(save_name1))\n torch.save(self.likelihood_estimator.state_dict(), save_name2)\n print('Saved estimator to {}'.format(save_name2))" ]
[ "0.7302444", "0.7227682", "0.7056718", "0.704548", "0.7038803", "0.6894036", "0.68110865", "0.67771643", "0.67480403", "0.669332", "0.6690634", "0.66567886", "0.6648611", "0.6637662", "0.6605316", "0.6598561", "0.65950495", "0.6588439", "0.6587018", "0.65525615", "0.65012264", "0.6464521", "0.64589596", "0.6442059", "0.6407985", "0.64072937", "0.6406942", "0.6364486", "0.6358092", "0.6321471", "0.62578136", "0.624615", "0.62158644", "0.620644", "0.6169606", "0.616442", "0.6160234", "0.6155541", "0.614181", "0.61116785", "0.61089456", "0.60880417", "0.60796875", "0.6077692", "0.6070465", "0.60503095", "0.6048457", "0.60226285", "0.6003014", "0.59961903", "0.59847414", "0.598463", "0.5969516", "0.59420455", "0.5937273", "0.5937254", "0.5926995", "0.58910793", "0.5888464", "0.5882953", "0.58754975", "0.5873367", "0.58688927", "0.5865234", "0.5846335", "0.58458555", "0.5832616", "0.58291566", "0.58259076", "0.58249825", "0.5824169", "0.5824169", "0.58218616", "0.5821119", "0.58132297", "0.5779691", "0.57793134", "0.5770539", "0.57551897", "0.57540566", "0.5751742", "0.57470274", "0.5738893", "0.5733488", "0.57311386", "0.57298166", "0.572154", "0.5718073", "0.5710446", "0.5702189", "0.569314", "0.5688971", "0.5687904", "0.5679832", "0.5673281", "0.56685483", "0.56666", "0.5658569", "0.5656973", "0.5654637" ]
0.6453411
23
Optimize model by maximizing the likelihood
def fit_model(self, phi, omega, sigma_eta, beta=0): # Initialize at the initial values parsed to the class par_ini = [phi, omega, sigma_eta, beta] # Approximate the jabocian for more efficient minimization Lprime = lambda x: approx_fprime(x, self.__llik_fun__, 0.01) if self.method == 'iterateRegression': # Depending on whether we include the regression coefficient, use other optimizer est = minimize(self.__llik_fun__, x0=par_ini, options=self.options, method='Newton-CG', jac=Lprime) else: est = minimize(self.__llik_fun__, x0=par_ini, options=self.options, method='BFGS') # Return optimal parameters return est.x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize(self): \n if self.model == 'ARD':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha, self.beta]),\n args=(self,),\n method='L-BFGS-B',\n bounds=((0, 50), (0, 50)),\n )\n # logger.info(estimate)\n\n # organize into a dict\n result = {\n \"alpha\": estimate.x[0],\n \"beta\": estimate.x[1],\n \"Lik\": estimate.fun,\n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n elif self.model == 'ER':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha]),\n args=(self,),\n method='L-BFGS-B',\n bounds=[(0, 50)],\n )\n\n result = {\n \"alpha\": estimate.x[0],\n \"Lik\": estimate.fun, \n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n else:\n raise Exception('model must be specified as either ARD or ER')\n\n # get scaled likelihood values\n self.log_lik = result[\"negLogLik\"]\n self.tree = self.tree.set_node_values(\n 'likelihood',\n values={\n node.idx: np.array(node.likelihood) / sum(node.likelihood)\n for node in self.tree.idx_dict.values()\n }\n )", "def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def maxlikelihood(self):\n\n chi2 = lambda *args: -2 * lnlikelihood.lnlike(*args) \n # print('***DEBUGGING*** chi2 = ', chi2)\n # print('***DEBUGGING*** self.theta_guess = ', self.theta_guess)\n # print('***DEBUGGING*** self.transinfo = ', self.transinfo)\n # print('***DEBUGGING*** self.wave_b = ', self.wave_b)\n # print('***DEBUGGING*** self.flux_b = ', self.flux_b)\n # print('***DEBUGGING*** self.err_b = ', self.err_b)\n # print('***DEBUGGING*** self.wave_r = ', self.wave_r)\n # print('***DEBUGGING*** self.flux_r = ', self.flux_r)\n # print('***DEBUGGING*** self.err_r = ', self.err_r)\n # print('***DEBUGGING*** self.velres = ', self.velres)\n result = op.minimize(chi2, self.theta_guess,\n args=(self.transinfo, self.wave_b, self.flux_b, self.err_b,\n self.wave_r, self.flux_r, self.err_r, self.velres))\n\n self.theta_ml = result[\"x\"]", "def perform_bayesian_optimization(self):\n bounds = {'hunits': (self.hunits_lower, self.hunits_upper),\n 'embedding_dim': (self.embedding_dim_lower, self.embedding_dim_upper)}\n optimizer = BayesianOptimization(f=self.lstm_score, pbounds=bounds, random_state=1)\n optimizer.maximize(init_points=2, n_iter=self.iterations)\n print(optimizer.max)\n print(optimizer.res)", "def optim_func(params, model):\n if model.model == 'ARD':\n model.alpha, model.beta = params\n lik = model.pruning_algorithm()\n\n else:\n model.alpha = params[0]\n lik = model.pruning_algorithm()\n \n return -lik", "def maximize_marginal_likelihood(kernel, model, optimizer, output_directory,\n testing_data, feature_names, plot_ARD, plot_params):\n\n # run the optimiser with a callback function if user wants to track the parameters\n if plot_params:\n parameter_log = []\n opt_logs = optimizer.minimize(closure=model.training_loss, variables=model.trainable_variables,\n step_callback=(lambda x,y,z: parameter_log.append([x, z])),\n options=dict(maxiter=ci_niter(250)))\n\n else:\n # run the optimiser without a callback function otherwise\n opt_logs = optimizer.minimize(closure=model.training_loss, variables=model.trainable_variables,\n 
options=dict(maxiter=ci_niter(250)))\n\n # set data against which to validate the model\n features_test = testing_data['features']\n affinity_test = testing_data['affinity']\n\n # calculate the predictions and Pearson's R, Spearman's R as well as RMSE\n mean, var = model.predict_f(features_test)\n pearsonsr, pvalue = pearsonr(mean.numpy().flatten(), affinity_test.values)\n spearmansr, spvalue = spearmanr(a=mean.numpy().flatten(), b=affinity_test.values)\n rmse = np.sqrt(mean_squared_error(affinity_test.values, mean.numpy().flatten()))\n\n # write the results to a file\n filename = f'{model.name}_{kernel.name}'+'.csv'\n\n with open(output_directory+'/'+filename, 'w') as out_file:\n out_file.write(f'%Gaussian process regression with a Gaussian likelihood\\n')\n out_file.write(f'%model: {model.name}, kernel: {kernel.name}\\n')\n out_file.write(f'Optimization success: {opt_logs.get(\"success\")} in {opt_logs.get(\"nit\")} iterations, {opt_logs.get(\"message\")}\\n')\n for key, value in gp.utilities.read_values(model).items():\n out_file.write(f'%{key}: {value}\\n')\n out_file.write(f'%loglikelihood: {model.log_marginal_likelihood()}\\n')\n out_file.write(f'%RMSE:{rmse:.3f}\\n')\n out_file.write(f'%Pearson_correlation_coefficient:{pearsonsr:.3f},P-value:{pvalue:.3f}\\n')\n out_file.write(f'%Spearman_correlation_coefficient:{spearmansr:.3f},P-value:{spvalue:.3f}\\n')\n out_file.write('%%%%%%PREDICTIONS%%%%%\\n')\n out_file.write(f'name,f_pred_mean,f_pred_var,y_true,{\",\".join(feature_names)}\\n')\n for i in range(0, len(mean)):\n out_file.write(f'{affinity_test.index.values[i]},{mean.numpy()[i][0]:.4f},{var.numpy()[i][0]:.4f},{affinity_test.values[i]:.4f},{\"\".join(str(i)+\",\" for i in features_test[i].round(4).tolist())[:-1]}\\n')\n out_file.close()\n\n # create the plots that were specified in the arguments to the specified output directory\n if plot_ARD:\n plot_feature_rankings(lengthscales=model.kernel.kernels[0].lengthscales.numpy(),\n feature_names=feature_names, figpath=output_directory+'/feature_relevance.png')\n if plot_params:\n plot_parameter_change(parameter_log=parameter_log, figpath=output_directory+'/parameter_change.png')", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n 
**add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def optimize(self,max_iter=100):\n\n\n for itr in range(max_iter):\n opt_logs = self.opt.minimize(self.tot_neg_elbo,sum([expert.trainable_variables for expert in self.experts],())) \n print(self.neg_elbo)", "def optimize(self):\n \n # converting from batch to local quantities\n if self.dispersion == \"gene-batch\":\n local_dispersion = tf.matmul(self.batch, tf.exp(self.px_r))\n else: \n local_dispersion = tf.exp(self.px_r)\n \n if self.library_mode == \"numeric\":\n local_l_mean = self.library_size_mean\n local_l_var = self.library_size_var\n else:\n local_l_mean = tf.matmul(self.batch, self.library_size_mean)\n local_l_var = tf.matmul(self.batch, self.library_size_var)\n \n \n # VAE loss\n if self.zi:\n recon = log_zinb_positive(self.expression, self.px_rate, local_dispersion, \\\n self.px_dropout)\n else:\n recon = log_nb_positive(self.expression, self.px_rate, local_dispersion)\n \n kl_gauss_z = 0.5 * tf.reduce_sum(\\\n tf.square(self.qz_m) + self.qz_v - tf.log(1e-8 + self.qz_v) - 1, 1)\n\n if self.scalings:\n kl_gauss_l = 0.5 * tf.reduce_sum(\\\n tf.square(self.ql_m - local_l_mean) / local_l_var \\\n + self.ql_v / local_l_var \\\n + tf.log(1e-8 + local_l_var) - tf.log(1e-8 + self.ql_v) - 1, 1)\n \n if self.scalings:\n self.ELBO_gau = tf.reduce_mean(recon - self.kl_scale * kl_gauss_z - kl_gauss_l)\n else:\n self.ELBO_gau = tf.reduce_mean(recon - self.kl_scale * kl_gauss_z)\n \n # MMD loss\n if self.apply_mmd:\n self.mmd = mmd_objective(self.z, self.batch_ind, self.num_batches)\n self.loss = - self.ELBO_gau + self.mmd_scale * self.mmd\n \n else:\n self.loss = - self.ELBO_gau\n \n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n optimizer = self.optimize_algo\n with tf.control_dependencies(update_ops):\n self.train_step = optimizer.minimize(self.loss)", "def softmax_opt(x, incentive, idx_i, theta=1):\n\n # print x\n # Multiplication by theta to make the values of np.exp(.) 
more reasonable\n # y = np.copy(x) * theta\n y = (np.copy(x)/60.0 * -0.086 + 0 * 0.7) * theta\n\n y[idx_i] = (np.copy(x[idx_i])/60.0 * -0.086 + incentive * 0.7) * theta\n # print y\n p = np.minimum(np.maximum(np.exp(y), 1e-20), 1e20) / np.sum(np.minimum(np.maximum(np.exp(y), 1e-20), 1e20), axis=0)\n # print y\n\n # If any element of p is Nan, return equal probablity for all the paths\n if np.isnan(p).any():\n p = np.ones(len(x)) / len(x)\n return p", "def add_objective(self): \n \n if \"CSS\" in self.algorithm:\n \n if self.num_hidden == 0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.add_css_approximation(data_term)\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n normalizer_term = self.compute_energy(self.x_gibbs, \n self.batch_size)\n \n normalizer_term = -T.mean(normalizer_term)\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.compute_free_energy(self.rbm_cd_samples)\n \n normalizer_term = -T.mean(normalizer_term)\n \n # cost is negative log likelihood \n self.cost = T.mean(data_term) + normalizer_term", "def _fitness_model__(self, solution=None, minmax=0):\n return self.objective_func(solution) if minmax == 0 else 1.0 / (self.objective_func(solution) + self.EPSILON)", "def regress(self, model, log):\n self.optimizer_kwargs.update({'jac': self.lossprime,\n 'args': (self.lossprime,)})\n log('Starting parameter optimization.', tic='opt')\n log(' Optimizer: %s' % self.optimizer)\n log(' Optimizer kwargs: %s' % self.optimizer_kwargs)\n x0 = model.vector.copy()\n try:\n self.optimizer(model.get_loss, x0, **self.optimizer_kwargs)\n\n except ConvergenceOccurred:\n log('...optimization successful.', toc='opt')\n return True\n else:\n log('...optimization unsuccessful.', toc='opt')\n if self.lossprime:\n max_lossprime = \\\n max(abs(max(model.lossfunction.dloss_dparameters)),\n abs(min(model.lossfunction.dloss_dparameters)))\n log('...maximum absolute value of loss prime: %.3e'\n % max_lossprime)\n return False", "def fit_null_model_m(self, verbose=True):\n obj = Objective(self)\n res = minimize(neg_log_lik_m0_s2, [0.0, 0.0], method=\"Nelder-Mead\", args=(obj))\n assert res.success is True, \"did not converge\"\n m0_hat = np.exp(res.x[0])\n s2_hat = np.exp(res.x[1])\n self.m0 = m0_hat * np.ones(len(self))\n self.s2 = s2_hat\n self.comp_precision(s2=s2_hat)\n\n # print update\n self.train_loss = neg_log_lik_m0_s2(np.r_[np.log(m0_hat), np.log(s2_hat)], obj)\n if verbose:\n sys.stdout.write(\n (\n \"constant-w/variance fit, \"\n \"converged in {} iterations, \"\n \"train_loss={:.7f}\\n\"\n ).format(res.nfev, self.train_loss)\n )", "def milp(mdp, maxV, zeroConstraints=()):\n m = Model()\n m.setParam('OutputFlag', False)\n\n # convert notation to previous implementation\n S = mdp.S\n A = mdp.A\n R = mdp.rFuncs\n psi = mdp.psi\n T = mdp.T\n alpha = mdp.alpha\n gamma = mdp.gamma\n\n # useful constants\n rLen = len(R)\n M = 10000 # a large number\n Sr = range(len(S))\n Ar = range(len(A))\n\n # decision variables\n x = m.addVars(len(S), len(A), lb=0, name='x')\n z = m.addVars(rLen, vtype=GRB.BINARY, name='z')\n y = m.addVars(rLen, name='y')\n\n # constraints on y\n for i in range(rLen):\n m.addConstr(y[i] <= sum([x[s, a] * R[i](S[s], A[a]) for s in Sr for a in Ar]) - maxV[i] + (1 - z[i]) * M)\n m.addConstr(y[i] <= z[i] * M)\n\n # 
constraints on x (valid occupancy)\n for sp in Sr:\n m.addConstr(sum(x[s, a] * ((s == sp) - gamma * T(S[s], A[a], S[sp])) for s in Sr for a in Ar) == alpha(S[sp]))\n\n # == constraints\n for consIdx in range(len(zeroConstraints)):\n m.addConstr(sum(x[S.index(s), A.index(a)] for s, a in zeroConstraints[consIdx]) == 0)\n # obj\n m.setObjective(sum([psi[i] * y[i] for i in xrange(rLen)]), GRB.MAXIMIZE)\n\n m.optimize()\n\n pi = {(S[s], A[a]): x[s, a].X for s in Sr for a in Ar}\n\n if m.status == GRB.Status.OPTIMAL:\n # return feasible being true and the obj value, opt pi\n # .X attribute is to retrieve the value of the variable\n return pi\n else:\n # simply return infeasible\n raise Exception('milp problem optimal solution not found' + m.status)", "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,\n 'colsample_bytree': hp.uniform('colsample_bytree', 0.3, 1),\n 'reg_alpha': hp.loguniform('reg_alpha', np.log(0.005), np.log(5)) - 0.0001,\n 'reg_lambda': hp.loguniform('reg_lambda', np.log(1), np.log(5)),\n 'bagging_freq': hp.choice('bagging_freq', [0, 1]),\n 'num_leaves': scope.int(hp.uniform('num_leaves', 10, 128)),\n 'n_estimators': 1000,\n 'boosting': 'gbdt',\n 'objective': 'multiclass',\n 'num_class': 12,\n 'metric': 'None',\n 'is_unbalance': 'true',\n # 'min_data_per_group': 1000,\n 'verbose': -1,\n 'random_seed': 42,\n \n }\n\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best = fmin(score_model, space, algo=tpe.suggest,\n # trials=trials,\n max_evals=hyperopt_niters)\n return best", "def optimize(self, iterations=1000):\r\n prev = None\r\n finalx = None\r\n finaly = None\r\n while iterations:\r\n maxei, eis = self.acquisition()\r\n new_y = self.f(maxei)\r\n if maxei == prev:\r\n break\r\n self.gp.update(maxei, new_y)\r\n pycodehack = finaly is None or self.minimize and finaly > new_y\r\n if ((pycodehack or not self.minimize and finaly < new_y)):\r\n finaly = new_y\r\n finalx = maxei\r\n prev = maxei\r\n iterations -= 1\r\n return finalx, finaly", "def relative_likelihood(self):\n \n if self.num_hidden == 0:\n \n return T.exp(-self.compute_energy(self.x, self.batch_size))\n \n if self.num_hidden > 0:\n \n return T.exp(-self.compute_free_energy(self.x))", "def optimize(self, num_restarts=1, max_iters=100, max_f_eval=300.0, method='Anneal'):\n dic = DictVectorizer()\n # flatten the parameters\n init_params,bounds=dic.fit_transform(self.params)\n #we minimise minus the marginal likelihood\n def objective(params_flatten):\n self.params=dic.inverse_transform(params_flatten,bounds)\n val = -self.log_marginal_likelihood()\n return val# we want to maximize it\n \n \n #run ptimisation with multiple restarts\n optml=np.inf\n for i in range(num_restarts):\n #minimise function\n if method=='Anneal':\n res=dual_annealing(objective,bounds, maxiter=max_iters, maxfun=max_f_eval, x0=init_params)\n else:\n \n res = minimize(objective, init_params, \n bounds=bounds, method=method,options={'maxiter': max_iters, 'disp': False})\n #print(\"Iteration \"+str(i)+\" \",-res.fun)\n if res.fun<optml:\n params_best=res.x #init_params \n optml=res.fun\n init_params=bounds[:,0]+(bounds[:,1]-bounds[:,0])*np.random.rand(len(bounds[:,0]))\n print(\"Iteration \"+str(i)+\" \",-res.fun)\n #params_best=res.x\n #optml=res.fun\n self.params=dic.inverse_transform(params_best,bounds)\n return 
-optml", "def _optimize(self, p_loss):\r\n \r\n self._optimizer.zero_grad()\r\n p_loss.backward()\r\n self._optimizer.step()", "def __optimize_adam(self, args, threshold, N_iter, learning_rate, verbose, val_set = None):\n\t\t\t#setting parameters for learning rate\n\t\tbeta1 = .9\t\t#forgetting factor for first moment\n\t\tbeta2 = .999\t#forgetting factor for second moment\n\t\tepsilon = 1e-8\n\t\tm = np.zeros(self.V.shape) #first moment\n\t\tv = np.zeros(self.V.shape) #second moment\n\t\thistory = []\n\t\tif threshold is not None:\n\t\t\tN_iter = 1000000000 # if threshold, no maximum iteration should be used\n\t\tfor i in range(0,N_iter):\n\t\t\tg = self.grad(self.V, args)\n\t\t\tm = beta1*m + (1-beta1)*g\n\t\t\tv = beta2*v + (1-beta2)*np.square(g)\n\t\t\tm_corr = m / (1-beta1)\n\t\t\tv_corr = v / (1-beta2)\n\n\t\t\tupdate = np.divide(m_corr, np.sqrt(v_corr)+epsilon)\n\t\t\tif np.any(np.isnan(update)): #debug\n\t\t\t\tquit()\n\t\t\tself.V = self.V - learning_rate * update\n\t\t\tself.V[:,-1] = np.zeros((self.D+1,))\n\n\t\t\tif isinstance(val_set,tuple):\n\t\t\t\targs_val = (val_set[0], val_set[1], args[2])\n\t\t\t\thistory.append((self.loss(self.V, args), self.loss(self.V, args_val)) ) #(train_err, val_err)\n\t\t\telse:\n\t\t\t\thistory.append((self.loss(self.V, args),))\n\n\t\t\tif verbose:\n\t\t\t\tprint(\"Loss at iter= \",i, history[i])\n\n\t\t\tif threshold is not None and i>10:\n\t\t\t\tif history[-10][-1] - history[-1][-1] < threshold:\n\t\t\t\t\tbreak\n\t\treturn history", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n [a.actor_target(actor_next_state) for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def optimize(self, trial):\r\n num_leaves = trial.suggest_int(\"num_leaves\", 6, 50)\r\n min_child_samples = trial.suggest_int(\"min_child_samples\", 100, 500)\r\n min_child_weight = trial.suggest_uniform(\"min_child_weight\", 1, 7)\r\n subsample = trial.suggest_uniform(\"subsample\", 0.6, 1)\r\n colsample_bytree = trial.suggest_uniform(\"colsample_bytree\", 0.6, 1)\r\n reg_alpha = trial.suggest_uniform(\"reg_alpha\", 0.1, 100)\r\n reg_lambda = trial.suggest_uniform(\"reg_lambda\", 0.1, 100)\r\n\r\n model = LGBMRegressor(\r\n num_leaves=num_leaves,\r\n min_child_samples=min_child_samples,\r\n min_child_weight=min_child_weight,\r\n subsample=subsample,\r\n colsample_bytree=colsample_bytree,\r\n reg_alpha=reg_alpha,\r\n reg_lambda=reg_lambda,\r\n )\r\n\r\n model = 
ModelTrainer(file_object=self.file_object).get_trained_model(\r\n model, self.X_train, self.y_train\r\n )\r\n r_squared, rmse = ModelScorer(file_object=self.file_object).get_model_scores(\r\n model, self.X_test, self.y_test\r\n )\r\n\r\n return r_squared", "def fit(self, ymeas, mmeas=None, alg='optls'): # alg: {'optls','mine'}\n\n # [ X ]*dm = [ dy ]\n # [ a ] [ 0 ] <-- using built-in Ridge model does this\n #\n # [ X ]*dm = [ dy ]\n # [-a ] [ a*m ] <-- but I want this for iterated nonlin problem\n #\n # [ X ]*dm = [ dy ]\n # [-aL ] [ a*L*m ] <-- and more generally I want this (higher-order Tihk)\n #\n # which can be rewritten:\n # G * dm = D (and then loop that from m0 with m=m+dm...)\n\n # X is the Jacobian matrix of derivs of predicted data points wrt model\n # params m, as given by ypred,X=self.fwd_deriv_code(m)...\n\n\n if alg=='optls':\n # https://docs.scipy.org/doc/scipy/reference/optimize.html\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html\n def fun(m):\n mlen = m.size\n L = create_findiff_mtx(mlen,self.beta)\n ypred,J = self.fwd_deriv_code(m) # m: model params vector, J: derivs matrix\n resids = ymeas-ypred\n modelfunc = self.alpha * np.dot(L,m)\n modelfunc = modelfunc.reshape(len(modelfunc),1)\n f = np.squeeze(np.concatenate((resids,modelfunc),axis=0))\n return f\n\n def jac(m):\n mlen = m.size\n L = create_findiff_mtx(mlen,self.beta)\n ypred,J = self.fwd_deriv_code(m) # m: model params vector, J: derivs matrix\n Jreg = self.alpha * L\n Jout = np.concatenate((J,Jreg))\n return Jout\n\n if self.usefindiff:\n jacfn='2-point'\n else:\n jacfn=jac\n if self.verbose:\n verblevel=2\n else:\n verblevel=0\n res = least_squares(fun, np.squeeze(self.minit), jac=jacfn,\n bounds=(0., 3.5), diff_step=None, verbose=verblevel, max_nfev=self.max_nfev,\n method='trf', ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0)\n #ftol=1e-4, xtol=1e-1, gtol=1e-8, x_scale=1.0)\n #ftol=1e0, xtol=1e-01, gtol=1e-01, x_scale=1.0)\n #ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0)\n\n if mmeas is not None:\n testMSE = cplxMSE(res.x.reshape(len(res.x),1),mmeas)\n else:\n testMSE = npl.nan\n ypred,J = self.fwd_deriv_code(res.x.reshape(len(res.x),1))\n ypred=np.log10(ypred)\n residnorm = norm(ypred-ymeas)\n print('resid norm',residnorm)\n L = create_findiff_mtx(len(self.minit),self.beta)\n print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(J.T,J))))) # J'J has real eigvals but kept cplx type\n print('maxeig LL',np.amax(np.linalg.eigvals(np.dot(L.T,L))))\n if self.showplot:\n f, ax = plt.subplots(1, 2, figsize=(11,4))\n # plot the meas and pred data:\n # print('ypred',ypred)\n # print('ymeas',ymeas)\n ax[0].plot(ypred,'r.-')\n ax[0].plot(ymeas,'k.-')\n ax[0].grid()\n #ax[0].set_ylabel('cost')\n #ax[0].set_xlabel('iterations')\n ax[0].set_title('Measured (blk) and predicted (blu) data')\n # plot the init, true, and final model param vectors:\n ax[1].plot(self.minit,'g.-')\n ax[1].plot(res.x,'r.--')\n ax[1].plot(mmeas,'k.--')\n ax[1].grid()\n #ax[1].set_ylabel('model value')\n #ax[1].set_xlabel('indep var')\n ax[1].set_title('Model vectors (true=blk, init=grn, soln=red)')\n\n # return m,cost,misfit,modelnorm,norm(dm),testMSE\n return res.x,res.cost,np.nan,np.nan,np.nan,testMSE\n\n elif alg=='mine':\n cost = []\n m = self.minit\n mlen = len(m)\n if self.verbose:\n print('iter alpha cost norm(dd) norm(dm) dmtol')\n for i in range(self.max_nfev):\n ypred,X = self.fwd_deriv_code(m) # m: model params vector, X: derivs matrix\n if self.usefindiff:\n def tmpfwdcode(m):\n 
return np.squeeze(self.fwd_deriv_code(m)[0])\n X = jacfindiff(tmpfwdcode,m,dx=1.0e-6) # dx=1.0e-6 is problem dependent!\n L = create_findiff_mtx(mlen,self.beta)\n G = np.concatenate((X, -self.alpha*L),axis=0)\n D = np.concatenate((ymeas-ypred, self.alpha*np.dot(L,m)),axis=0)\n misfit = cplxMSE(ymeas, ypred)\n modelnorm = norm(np.dot(L,m))**2\n current_cost = misfit + pow(self.alpha,2)*modelnorm\n dm,res,rnk,sv = lstsq(G,D)\n m = m + dm\n cost.append(current_cost)\n if self.verbose:\n print('%3d %6.1g %10.3f %10.3f %10.2g %6.3g' %\n (i, self.alpha, current_cost, norm(ymeas-ypred), norm(dm), self.dmtol))\n if norm(dm) < self.dmtol:\n break\n self.G = G\n self.ypred = ypred\n if mmeas is not None:\n testMSE = cplxMSE(m,mmeas)\n else:\n testMSE = npl.nan\n print('maxeig JJ',np.real(np.amax(np.linalg.eigvals(np.dot(X.T,X))))) # X'X has real eigvals but kept cplx type\n print('maxeig LL',np.amax(np.linalg.eigvals(np.dot(L.T,L))))\n if self.showplot:\n f, ax = plt.subplots(1, 2, figsize=(11,4))\n # plot the cost (ie loss) per iterations:\n ax[0].semilogy(cost,'.-') # (last element of cost)\n ax[0].grid()\n ax[0].set_ylabel('cost')\n ax[0].set_xlabel('iterations')\n ax[0].set_title('Cost history (misfit^2 + alpha^2*modelnorm^2)')\n # plot the init, true, final, and evolution of model params:\n #print('m',np.squeeze(m.T))\n ax[1].plot(mmeas,'k')\n ax[1].plot(self.minit,'g')\n ax[1].plot(m,'r')\n ax[1].grid()\n #ax[1].set_ylabel('model value')\n ax[1].set_xlabel('indep var')\n ax[1].set_title('Model vectors')\n\n return m,cost[-1],misfit,modelnorm,norm(dm),testMSE", "def optimize(self, maxiter=200):\n for _ in range(maxiter):\n for i in range(self.n_particles):\n x = self.particles_pos[i]\n v = self.velocities[i]\n p_best = self.p_best[i]\n self.velocities[i] = self.update_velocity(x, v, p_best, self.g_best)\n self.particles_pos[i] = self.update_position(x, v)\n # Update the best position for particle i\n if self.func(self.particles_pos[i]) < self.func(p_best):\n self.p_best[i] = self.particles_pos[i]\n # Update the best position overall\n if self.func(self.particles_pos[i]) < self.func(self.g_best):\n self.g_best = self.particles_pos[i]\n return self.g_best, self.func(self.g_best)", "def objective(self,w):\n l = 0\n for i in range(len(self.x)):\n # Each example contributes log(sigma(y_i * x_i . w))\n l -= log(sigmoid(self.y[i] * np.dot(w, self.x[i,:])))\n # regularisation 1/2 * alpha * ||w||^2\n l += 0.5 * self.alpha * np.dot(w,w)\n return l", "def optimize(self, max_iters=1e3, messages=False, use_counter=False,\\\n factr=1e7, pgtol=1e-05):\n logger.debug('Beginning MLE to optimize hyperparams. grad_method=%s'\\\n % self.grad_method)\n\n # setup the optimization\n try:\n x0 = self._transform_parameters(self.parameters)\n assert np.all(np.isfinite(x0))\n except:\n logger.error('Transformation failed for initial values. '\\\n + 'Ensure constraints are met or the value is not too small.')\n raise\n\n # filter out the fixed parameters\n free = np.logical_not(self._fixed_indicies)\n x0 = x0[free]\n\n # setup the counter\n if use_counter:\n self._counter = solver_counter(disp=True)\n else:\n self._counter = None\n\n # run the optimization\n try:\n x_opt, f_opt, opt = fmin_l_bfgs_b(func=self._objective_grad, x0=x0,\\\n factr=factr, pgtol=pgtol, maxiter=max_iters, disp=messages)\n except (KeyboardInterrupt,IndexError):\n logger.info('Keyboard interrupt raised. 
Cleaning up...')\n if self._counter is not None and self._counter.backup is not None:\n self.parameters = self._counter.backup[1]\n logger.info('will return best parameter set with'\\\n + 'log-likelihood = %.4g' % self._counter.backup[0])\n else:\n logger.info('Function Evals: %d. Exit status: %s' % (f_opt, opt['warnflag']))\n # extract the optimal value and set the parameters to this\n transformed_parameters = self._previous_parameters \n transformed_parameters[free] = x_opt\n self.parameters = self._untransform_parameters(transformed_parameters)\n return opt", "def _optimize(self, objective):\n # Initial value\n initial = self.get_initial()[0]\n\n if self.vector_to_matrix_transform is not None:\n initial = self.vector_to_matrix_transform(initial)\n\n if self.solver_type is 'NelderMead' or self.solver_type is 'ParticleSwarm':\n initial = None\n\n # Create tensorflow variable\n if self.matrix_manifold_dimension is None:\n x_tf = tf.Variable(tf.zeros(self.dimension, dtype=tf.float64))\n else:\n x_tf = tf.Variable(tf.zeros([self.matrix_manifold_dimension, self.matrix_manifold_dimension], dtype=tf.float64))\n\n # Cost function for pymanopt\n def objective_fct(x):\n if self.matrix_to_vector_transform_tf is not None:\n # Reshape x from matrix to vector form to compute the objective function (tensorflow format)\n x = self.matrix_to_vector_transform_tf(x, self.matrix_manifold_dimension)\n return objective(x)[0]\n\n # Transform the cost function to tensorflow function\n cost = tf.py_function(objective_fct, [x_tf], tf.float64)\n\n # Gradient function for pymanopt\n def objective_grad(x):\n if self.matrix_to_vector_transform is not None:\n # Reshape x from matrix to vector form to compute the gradient\n x = self.matrix_to_vector_transform(x)\n\n # Compute the gradient\n grad = np.array(objective(x)[1])[0]\n\n if self.vector_to_matrix_transform is not None:\n # Reshape the gradient in matrix form for the optimization on the manifold\n grad = self.vector_to_matrix_transform(grad)\n return grad\n\n # Define pymanopt problem\n problem = pyman.Problem(manifold=self.manifold, cost=cost, egrad=objective_grad, arg=x_tf, verbosity=2)\n\n # Optimize the parameters of the problem\n opt_x, opt_log = self.solver.solve(problem, x=initial)\n\n if self.matrix_to_vector_transform is not None:\n # Reshape the optimum from matrix to vector form\n opt_x = self.matrix_to_vector_transform(opt_x)\n\n # Format the result to fit with GPflowOpt\n result = sc_opt.OptimizeResult(x=opt_x, fun=opt_log['final_values']['f(x)'], nit=opt_log['final_values']['iterations'], message=opt_log['stoppingreason'], success=True)\n\n return result", "def maximize_loglik(model_params: Union[CupidParams, CupidParamsCSHeteroxy, CupidParamsFcmnl],\n x_init: np.ndarray,\n lower: Optional[np.ndarray] = None,\n upper: Optional[np.ndarray] = None,\n checkgrad: Optional[bool] = False,\n verbose: Optional[bool] = False,\n fixed_vars: Optional[List[int]] = None,\n fixed_vals: Optional[List[float]] = None,\n options: Optional[Dict] = {'iprint': 1}) -> Tuple[float, np.ndarray, int]:\n n_params = x_init.size\n try:\n kc = KN_new()\n except:\n bs_error_abort(\"Failed to find a valid Knitro license.\")\n\n KN_add_vars(kc, n_params)\n\n # bounds, if any\n if lower is None:\n # not necessary since infinite\n KN_set_var_lobnds(kc, xLoBnds=np.full(n_params, -KN_INFINITY))\n else:\n KN_set_var_lobnds(kc, xLoBnds=lower)\n if upper is None:\n KN_set_var_upbnds(kc, xUpBnds=np.full(n_params, KN_INFINITY))\n else:\n KN_set_var_upbnds(kc, xUpBnds=upper)\n\n # Define an 
initial point. If not set, Knitro will generate one.\n KN_set_var_primal_init_values(kc, xInitVals=x_init)\n\n if fixed_vars is not None:\n assert fixed_vals is not None\n KN_set_var_fxbnds(kc, fixed_vars, fixed_vals)\n\n cb = KN_add_eval_callback(kc, evalObj=True, funcCallback=log_likelihood)\n\n KN_set_cb_user_params(kc, cb, model_params)\n\n KN_set_cb_grad(kc, cb, objGradIndexVars=KN_DENSE,\n gradCallback=grad_log_likelihood)\n\n KN_set_int_param(kc, KN_PARAM_OUTLEV, KN_OUTLEV_ALL)\n\n if checkgrad:\n # Perform a derivative check.\n KN_set_int_param(kc, KN_PARAM_DERIVCHECK, KN_DERIVCHECK_ALL)\n\n # Solve the problem.\n nStatus = KN_solve(kc)\n\n loglik_val, estimates = print_optimization_results(kc)\n\n print_stars()\n print(f\" Value of log-likelihood: {loglik_val: > 8.3f}\\n\")\n print()\n\n return loglik_val, np.array(estimates), nStatus", "def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1", "def optimization(logits, y, population, embedding, alpha):\n if FLAGS.uniform_weights:\n weights = tf.ones(shape=tf.shape(population))\n else:\n weights = tf.where(\n tf.greater(population, 0.01), tf.fill(tf.shape(population), 0.16),\n tf.fill(tf.shape(population), 2.5))\n if not FLAGS.propensity_weights:\n weights = tf.sigmoid(tf.matmul(embedding, alpha)) * weights\n weights /= tf.reduce_mean(weights)\n loss = tf.losses.hinge_loss(labels=y, logits=logits, weights=weights)\n optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n return optimizer, loss", "def maximize(self):\n raise NotImplementedError", "def gp_optimize_threshold(gp_model, X_val, y_val, X_scaler, y_scaler, optimize_for=\"profits\"): \n y_hat, conf = gp_model.predict(X_val)\n regressed_payment = y_scaler.inverse_transform(y_hat).reshape(-1)\n loan_amt = X_scaler.inverse_transform(X_val)[:,0]\n\n # This ratio is a guage of how likely a person will pay back.\n # It is compared with a threshold to determine whether or not to loan.\n payment_to_loan_ratio = regressed_payment / loan_amt\n\n # Sort in descending order\n sorted_ind = np.argsort(-payment_to_loan_ratio)\n sorted_payment_to_loan_ratio = payment_to_loan_ratio[sorted_ind]\n X_sorted, y_sorted = X_val[sorted_ind,:], y_val[sorted_ind]\n\n threshold, highest_opt_val = 0, 0\n for i, thresh in enumerate(sorted_payment_to_loan_ratio): \n X_loanee = X_sorted[:i+1,:]\n y_loanee = y_sorted[:i+1]\n \n loan_amt_loanee = np.sum(X_scaler.inverse_transform(X_loanee)[:,0])\n payments_loanee = np.sum(y_loanee)\n\n # Optimize for different values\n if optimize_for == \"profits\":\n opt_val = payments_loanee - loan_amt_loanee\n elif optimize_for == \"profit_percentage\":\n opt_val = (payments_loanee - loan_amt_loanee) / loan_amt_loanee\n else:\n raise Exception(\"Illegal optimize_for value: %s\" % optimize_for)\n\n # Keep track of highest value (that is being optimized for)\n if opt_val > highest_opt_val:\n threshold = thresh\n highest_opt_val = opt_val\n return threshold", "def optimize(self):\n prm = (self.b,self.c)\n d = self.d\n no = int(d*d)\n bounds = [(-1,1)]*no\n resG = differential_evolution(inpSc.entBias, bounds, args = prm, popsize = 40, disp = False)\n\n xOpt = resG.x\n xOpt = xOpt/(np.linalg.norm(xOpt))\n\n #Refine the global optimization by performing a second local optimizaiton\n x0 = xOpt\n\n res = minimize(inpSc.entBias, x0, args = prm, method='BFGS', options={'disp': False})\n xOpt = res.x\n xOpt = xOpt/(np.linalg.norm(xOpt))\n self.rhoOp = inpSc.getMat(xOpt, d)\n self.Q1 = -res.fun", "def 
fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2", "def softmax_loss(x, y):\n\n eps = 1e-5\n \n N,C = x.shape\n p = softmax(x)\n llikelihood = -np.log(p[range(N),y] + eps)\n# print(llikelihood)\n loss = np.sum(llikelihood) / N\n\n dx = p\n dx[range(N),y] -= 1\n dx = dx/N\n \n return loss, dx", "def log_likelihood(self, data, reward_model, bias_params):", "def maximize(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'max',method,**kwargs)", "def solve(self):\n start = timer()\n # encode into milp\n me = MILPEncoder(MILPSolver.prob,\n MILPSolver.params.logger.LOGFILE, \n MILPSolver.params.INTRA_DEP_CONSTRS,\n MILPSolver.params.INTER_DEP_CONSTRS)\n if MILPSolver.lp == True:\n gmodel = me.lp_encode()\n else:\n gmodel = me.encode()\n # Set gurobi parameters\n pgo = 1 if MILPSolver.params.PRINT_GUROBI_OUTPUT == True else 0\n gmodel.setParam('OUTPUT_FLAG', pgo)\n tl = MILPSolver.params.TIMEOUT\n if tl != -1 : gmodel.setParam('TIME_LIMIT', tl)\n if not MILPSolver.params.DEFAULT_CUTS: \n MILPSolver.disable_default_cuts(gmodel)\n gmodel._vars = gmodel.getVars()\n # set callback cuts \n MILPSolver.id_form = IdealFormulation(MILPSolver.prob,\n gmodel, \n MILPSolver.params.IDEAL_FREQ,\n MILPSolver.params.logger.LOGFILE)\n MILPSolver.dep_cuts = DepCuts(MILPSolver.prob,\n gmodel,\n MILPSolver.params.DEP_FREQ,\n MILPSolver.params.INTRA_DEP_CUTS,\n MILPSolver.params.INTER_DEP_CUTS,\n MILPSolver.sip_params,\n MILPSolver.params.logger.LOGFILE)\n # Optimise\n if MILPSolver.params.callback_enabled() and MILPSolver.lp == False:\n gmodel.optimize(MILPSolver._callback)\n else:\n gmodel.optimize()\n\n runtime = timer() - start\n cex = None \n if MILPSolver.status == SolveResult.BRANCH_THRESHOLD:\n result = SolveResult.BRANCH_THRESHOLD\n elif gmodel.status == GRB.OPTIMAL:\n cex_shape = MILPSolver.prob.spec.input_layer.input_shape\n cex = np.zeros(cex_shape)\n for i in itertools.product(*[range(j) for j in cex_shape]):\n cex[i] = MILPSolver.prob.spec.input_layer.out_vars[i].x\n result = SolveResult.UNSATISFIED\n elif gmodel.status == GRB.TIME_LIMIT:\n result = SolveResult.TIMEOUT\n elif gmodel.status == GRB.INTERRUPTED:\n result = SolveResult.INTERRUPTED\n elif gmodel.status == GRB.INFEASIBLE or gmodel.status == GRB.INF_OR_UNBD:\n result = SolveResult.SATISFIED\n else:\n result = SolveResult.UNKNOWN\n \n # MILPSolver.logger.info('Verification problem {} solved, '\n # 'LP: {}, '\n # 'time: {:.2f}, '\n # 'result: {}.'\n # .format(MILPSolver.prob.id,\n # MILPSolver.lp,\n # runtime,\n # result.value))\n \n return SolveReport(result, runtime, cex)", "def mle(model):\n dist = model.dist\n x, c, n, t = (model.data['x'], model.data['c'],\n model.data['n'], model.data['t'])\n const = model.fitting_info['const']\n trans = model.fitting_info['transform']\n inv_trans = model.fitting_info['inv_trans']\n init = model.fitting_info['init']\n fixed_idx = 
model.fitting_info['fixed_idx']\n offset = model.offset\n lfp = model.lfp\n zi = model.zi\n\n if hasattr(dist, 'mle'):\n return dist.mle(x, c, n, t, const, trans,\n inv_trans, init, fixed_idx, offset)\n\n results = {}\n\n \"\"\"\n Need to flag entries where truncation is inf or -inf so that the autograd\n doesn't fail. Because autograd fails if it encounters any inf, nan, -inf\n etc even if they don't affect the gradient. A must for autograd\n \"\"\"\n t_flags = np.ones_like(t)\n t_mle = copy.copy(t)\n # Create flags to indicate where the truncation values are infinite\n t_flags[:, 0] = np.where(np.isfinite(t[:, 0]), 1, 0)\n t_flags[:, 1] = np.where(np.isfinite(t[:, 1]), 1, 0)\n # Convert the infinite values to a finite value to ensure\n # the autodiff functions don't fail\n t_mle[:, 0] = np.where(t_flags[:, 0] == 1, t[:, 0], 1)\n t_mle[:, 1] = np.where(t_flags[:, 1] == 1, t[:, 1], 1)\n\n results['t_flags'] = t_flags\n results['t_mle'] = t_mle\n\n # Create the objective function\n def fun(params, offset=False, lfp=False,\n zi=False, transform=True, gamma=0.):\n x_mle = np.copy(x)\n if transform:\n params = inv_trans(const(params))\n\n if offset:\n gamma = params[0]\n params = params[1:]\n else:\n # Use the assumed value\n pass\n\n if zi:\n f0 = params[-1]\n params = params[0:-1]\n else:\n f0 = 0.\n\n if lfp:\n p = params[-1]\n params = params[0:-1]\n else:\n p = 1.\n\n inf_c_flags, x_mle = _create_censor_flags(x_mle, gamma, c, dist)\n return dist.neg_ll(x_mle, c, n, inf_c_flags,\n t_mle, t_flags, gamma, p, f0, *params)\n\n old_err_state = np.seterr(all='ignore')\n use_initial = False\n\n if zi:\n def jac(x, offset, lfp, zi, transform):\n return approx_fprime(x, fun, np.sqrt(np.finfo(float).eps),\n offset, lfp, zi, transform)\n hess = None\n else:\n jac = jacobian(fun)\n hess = hessian(fun)\n\n res = minimize(fun, init, args=(offset, lfp, zi, True),\n method='Newton-CG', jac=jac, hess=hess)\n\n if (res.success is False) or (np.isnan(res.x).any()):\n res = minimize(fun, init, args=(offset, lfp, zi, True),\n method='BFGS', jac=jac)\n\n if (res.success is False) or (np.isnan(res.x).any()):\n res = minimize(fun, init, args=(offset, lfp, zi, True))\n\n if 'Desired error not necessarily' in res['message']:\n print(\"Precision was lost, try:\"\n + \"\\n- Using alternate fitting method\"\n + \"\\n- visually checking model fit\"\n + \"\\n- change data to be closer to 1.\", file=sys.stderr)\n\n elif (not res.success) | (np.isnan(res.x).any()):\n print(\"MLE Failed: Try making the values of the data closer to \"\n + \"1 by dividing or multiplying by some constant.\"\n + \"\\n\\nAlternately try setting the `init` keyword in the `fit()`\"\n + \" method to a value you believe is closer.\"\n + \"A good way to do this is to set any shape parameter to 1. 
\"\n + \"and any scale parameter to be the mean of the data \"\n + \"(or it's inverse)\"\n + \"\\n\\nModel returned with inital guesses\", file=sys.stderr)\n\n use_initial = True\n\n if use_initial:\n p_hat = inv_trans(const(init))\n else:\n p_hat = inv_trans(const(res.x))\n\n if offset:\n results['gamma'] = p_hat[0]\n params = p_hat[1:]\n parameters_for_hessian = copy.copy(params)\n else:\n results['gamma'] = 0\n params = p_hat\n parameters_for_hessian = copy.copy(params)\n\n if zi:\n results['f0'] = params[-1]\n params = params[0:-1]\n else:\n results['f0'] = 0.\n params = params\n\n if lfp:\n results['p'] = params[-1]\n results['params'] = params[0:-1]\n else:\n results['p'] = 1.\n results['params'] = params\n\n try:\n if zi or lfp:\n results['hess_inv'] = None\n else:\n results['hess_inv'] = inv(hess(parameters_for_hessian,\n *(False, lfp, zi,\n False, results['gamma'])))\n except np.linalg.LinAlgError:\n results['hess_inv'] = None\n\n results['_neg_ll'] = res['fun']\n results['res'] = res\n\n np.seterr(**old_err_state)\n\n return results", "def _optimize_mpt(self, mu, sigma, q, gamma, max_leverage, last_b):\n sigma = np.matrix(sigma)\n mu = np.matrix(mu).T\n\n # regularization parameter for singular cases\n ALPHA = 0.001\n\n def maximize(mu, sigma, q):\n n = len(last_b)\n\n P = matrix(2 * (sigma + ALPHA * np.eye(n)))\n q = matrix(-q * mu + 2 * ALPHA * np.matrix(last_b).T)\n G = matrix(-np.eye(n))\n h = matrix(np.zeros(n))\n\n if max_leverage is None or max_leverage == float('inf'):\n sol = solvers.qp(P, q, G, h)\n else:\n if self.allow_cash:\n G = matrix(np.r_[G, matrix(np.ones(n)).T])\n h = matrix(np.r_[h, matrix([self.max_leverage])])\n sol = solvers.qp(P, q, G, h, initvals=last_b)\n else:\n A = matrix(np.ones(n)).T\n b = matrix(np.array([max_leverage]))\n sol = solvers.qp(P, q, G, h, A, b, initvals=last_b)\n\n if sol['status'] != 'optimal':\n logging.warning(\"Solution not found for {}, using last weights\".format(last_b.name))\n return last_b\n\n return np.squeeze(sol['x'])\n\n def maximize_with_penalization(b, last_b, mu, sigma, q, gamma):\n n = len(mu)\n c = np.sign(b - last_b)\n sigma = matrix(sigma)\n mu = matrix(mu)\n\n P = 2 * (sigma + ALPHA * matrix(np.eye(n)))\n qq = 2 * sigma * matrix(last_b) - q * mu + matrix(gamma * c)\n\n G = matrix(np.r_[-np.diag(c), np.eye(n), -np.eye(n)])\n h = matrix(np.r_[np.zeros(n), self.max_leverage - last_b, last_b])\n\n A = matrix(np.ones(n)).T\n b = matrix([self.max_leverage - sum(last_b)])\n\n sol = solvers.qp(P, qq, G, h, A, b, initvals=np.zeros(n))\n\n return np.squeeze(sol['x']) + np.array(last_b)\n\n try:\n b = maximize(mu, sigma, q)\n except ValueError:\n b = last_b\n\n # second optimization for fees\n if (gamma != 0).any() and (b != last_b).any():\n b = maximize_with_penalization(b, last_b, mu, sigma, q, gamma)\n return b", "def maximize_one(self, gamma, xisum, c, x_digits):\n log_likelihood = np.log(c).sum()\n self._i = gamma[0] / gamma[0].sum()\n self._t = (xisum.T / xisum.sum(1)).T\n self._e = np.dot(x_digits, gamma) / gamma.sum(0)\n return log_likelihood", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in 
range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recursive_prob = rp)\r\n model_likelihoods = []\r\n for i in df.index:\r\n c = df.context[i]\r\n trial_choice = df.subj_ts[i]\r\n conf = model.calc_posterior(c)\r\n model_likelihoods.append(conf[trial_choice])\r\n return np.array(model_likelihoods)\r\n \r\n def bias_errfunc(params,df):\r\n rp = params['rp']\r\n tsb = params['tsb']\r\n #minimize\r\n return abs(np.sum(np.log(bias_fitfunc(rp,tsb,df)))) #single value\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('rp', value = .6, min = 0, max = 1)\r\n if bias == True:\r\n fit_params.add('tsb', value = 1, min = 0)\r\n else:\r\n fit_params.add('tsb', value = 1, vary = False, min = 0)\r\n out = lmfit.minimize(bias_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(out)\r\n return out.values\r\n \r\n elif mode == \"midline\":\r\n #Fitting Functions\r\n def midline_errfunc(params,df):\r\n eps = params['eps'].value\r\n context_sgn = np.array([max(i,0) for i in df.context_sign])\r\n choice = df.subj_ts\r\n #minimize\r\n return -np.sum(np.log(abs(abs(choice - (1-context_sgn))-eps)))\r\n \r\n #Fit bias model\r\n #attempt to simplify:\r\n fit_params = lmfit.Parameters()\r\n fit_params.add('eps', value = .1, min = 0, max = 1)\r\n midline_out = lmfit.minimize(midline_errfunc,fit_params, method = 'lbfgsb', kws= {'df': data})\r\n lmfit.report_fit(midline_out)\r\n return midline_out.values", "def solve(self):\n max_iter = 1000\n iter_count = 0\n yhat = self.predict()\n loss = self.cross_entropy(yhat)\n gradloss = self.cross_entropy_gradient(yhat)\n while la.norm(gradloss) > 1e-6 and iter_count < max_iter:\n alpha = 1.0\n slope = la.norm(gradloss)**2\n beta_new = self.beta + alpha * gradloss\n yhat = self.predict(beta=beta_new)\n loss_new = self.cross_entropy(yhat)\n while loss_new < loss + 1e-4 * alpha * slope:\n alpha = alpha / 2\n beta_new = self.beta + alpha * gradloss\n yhat = self.predict(beta=beta_new)\n loss_new = self.cross_entropy(yhat)\n self.beta = beta_new\n loss = loss_new\n gradloss = self.cross_entropy_gradient(yhat)\n iter_count += 1", "def estimate_params(self, thresh=1e-5, max_iter=15):\n em = EM(self.obs, self.theta, thresh=thresh, max_iter=max_iter)\n self.estimate = em.estimate_params()\n self.likelihood = em.lhood\n self.initial_likelihood = em.calculate_likelihood(theta=self.theta)", "def propose_optimize():\n pass", "def _optimize_model(self):\n # Check that there is enough plays in self.experiences\n if len(self.memory) < self.batch_size:\n return 0\n\n # Select self.batch_size random experience\n transitions = random.sample(self.memory, self.batch_size)\n batch = 
Transition(*zip(*transitions))\n\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n next_state_batch = torch.cat(batch.next_state)\n reward_batch = torch.cat(batch.reward)\n done_batch = torch.cat(batch.done)\n\n # Compute Q(s, a) for all state\n q_values = self.policy_net(state_batch).gather(1, action_batch)\n\n # Compute Q(s_{t+1}) for all next_state\n next_q_values = torch.zeros(self.batch_size, device=device)\n with torch.no_grad():\n next_q_values[~done_batch] = self.target_net(\n next_state_batch[~done_batch]).max(1)[0].detach()\n\n # Compute expected Q-value\n expected_q_values = (next_q_values * self.gamma) + reward_batch\n\n # Compute loss\n loss = F.mse_loss(q_values, expected_q_values.unsqueeze(1))\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss.item()", "def optimize(self):\n \n if self.verbose:\n print('Starting grid search with bounds: [' + \\\n ';'.join(['%5g to %5g']*len(self.steps))%tuple([(self.steps[i][0], self.steps[i][-1]) for i in range(len(self.steps))]) +']')\n\n for params in self._get_next_point():\n self.transform.set_params(params)\n\n v, _ = self.measure.value_and_derivatives(self.transform)\n\n if v < self.best_value:\n self.best_value = v\n self.best_params = params\n# print('New best value %2.4f at ('%v, ', '.join(['%8.3f']*len(params))%tuple(params), ')')\n\n self.value_history.append(v)\n self.last_value = v\n self.iteration += 1\n\n if self.report_freq > 0 and (self.iteration % self.report_freq == 0) and self.report_func is not None:\n self.report_func(self)\n\n # Set the best transform\n self.transform.set_params(self.best_params)\n self.last_value = self.best_value\n return self.best_value", "def objective_function(self, x):\r\n try:\r\n self._set_params_transformed(x)\r\n self._fail_count = 0\r\n except (LinAlgError, ZeroDivisionError, ValueError) as e:\r\n if self._fail_count >= self._allowed_failures:\r\n raise e\r\n self._fail_count += 1\r\n return np.inf\r\n return -self.log_likelihood() - self.log_prior()", "def best_fit(self, **kwargs):\n n_fit_p = len(self.fit_parameters)\n n_wc = len(self.fit_wc_names)\n if n_fit_p + n_wc == 1:\n def f(x):\n return -self.log_likelihood([x])\n opt = scipy.optimize.minimize_scalar(f, **kwargs)\n else:\n def f(x):\n return -self.log_likelihood(x)\n if 'x0' not in kwargs:\n x0 = np.zeros(n_fit_p + n_wc)\n if n_fit_p > 1:\n x0[:n_fit_p] = self.get_central_fit_parameters\n opt = minimize_robust(f, x0, **kwargs)\n else:\n opt = minimize_robust(f, **kwargs)\n if not opt.success:\n raise ValueError(\"Optimization failed.\")\n else:\n return {'x': opt.x, 'log_likelihood': -opt.fun}", "def _objective(self, params, model_ID, model_dict, X, y, **kwargs):\n model = model_dict['model']\n param_grid = model_dict['param_grid'].copy()\n params = params.copy()\n \n obj_verbose = max(0,self.verbose-2)\n \n type_X = str(type(X))\n \n if 'dask' in type_X:\n X = X.compute()\n y = y.compute()\n \n if obj_verbose>=2:\n print('params',params)\n \n params_transform, model = self._update_model_params(params, \n model_ID,\n model, \n param_grid)\n type_model = str(type(model))\n \n if obj_verbose>=2:\n print('params_transform',params_transform)\n if 'sklearn' in type_model or 'xgboost' in type_model:\n \n cv_scores = _sklearn_model_selection.cross_val_score(model, X, y,\n scoring= self.scoring['metric'],\n cv = self.cv,\n n_jobs= self.n_jobs,\n verbose = obj_verbose\n )\n\n else: #using neural net function\n import tensorflow as _tf\n #check for 
kwargs\n epochs = 100\n batch_size = 32\n callbacks = [_tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience =10)]\n for item in kwargs.items():\n if 'epochs' in item[0]: \n epochs = item[1]\n elif 'batch_size' in item[0]: \n batch_size = item[1]\n elif 'callbacks' in item[0]: \n callbacks = item[1] \n cv_scores = _NeuralNet.cross_val_score(model,\n batch_size,\n epochs,\n X, y,\n callbacks,\n scoring = self.scoring['metric'],\n cv = self.cv,\n verbose= obj_verbose)\n \n cv_score = _np.mean(cv_scores)\n \n if 'sklearn' in type_model or 'xgboost' in type_model:\n if self.scoring['maximize']==True or self.scoring['metric']==None:\n cv_score = 1/cv_score \n else:\n if self.scoring['maximize']==True and self.scoring['metric']!=None :\n cv_score = 1/cv_score \n \n objective = {'loss': cv_score,\n 'params': params,\n 'status': _hyperopt.STATUS_OK,\n 'eval_time': _time.time()}\n return objective", "def minimize(self):\n self.normalize()\n p0s = self.spacedvals(method='random')\n if self.n_spots > 1:\n opts = self.multifit(p0s)\n else:\n opts = self.singlefit(p0s)\n self.yf = [self.solve(theta) for theta in opts]\n self.bestps = opts\n return opts", "def optimize(self, X, y):\n print(\"Performing TPOT genetic optimization.\")\n self.model.fit(X, y)\n self.optimized = True", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def __max_step(self, batch):\n\n # short hand and optimizer\n policy = self.mc.get('policy', target=False)\n policy_parameters = list(policy.parameters())\n opt_outer = optim.Adam(policy_parameters, lr=self.outer_lr)\n\n # optimize\n opt_outer.zero_grad()\n obj = -self.obj(batch)\n obj.backward()\n opt_outer.step()", "def optimize(self, x0):\n (result,f,d) = fmin_l_bfgs_b(lambda x:self.costFun(x), np.ravel(x0),lambda x: self.gradFun(x))\n print(\"optimization completed with cost: \" + str(f))\n return result.reshape(self.inp_shape)", "def optimize_model(self, gradient_clamping_value=None):\n # TODO: worry about this later\n # if self.kwargs['use_PER']:\n # # Create batch with 
PrioritizedReplayBuffer/PER:\n # transitions, importance_sampling_weights = self.replayBuffer.sample(self.batch_size)\n # batch = EXPPER(*zip(*transitions))\n # importance_sampling_weights = torch.from_numpy(importance_sampling_weights)\n # if self.use_cuda:\n # importance_sampling_weights = importance_sampling_weights.cuda()\n\n self.optimizer.zero_grad()\n transitions, batch = self.sample_from_replay_buffer(self.batch_size)\n\n next_state_batch, state_batch, action_batch, reward_batch, \\\n non_terminal_batch = self.create_tensors_for_optimization(batch,\n use_cuda=self.use_cuda)\n\n dqn_loss = compute_loss(states=state_batch,\n actions=action_batch,\n next_states=next_state_batch,\n rewards=reward_batch,\n non_terminals=non_terminal_batch,\n model=self.model,\n target_model=self.target_model,\n gamma=self.GAMMA,\n use_double=self.use_double,\n use_dueling=self.use_dueling,\n iteration_count=self.target_update_count)\n\n dqn_loss.backward()\n\n if gradient_clamping_value is not None:\n torch.nn.utils.clip_grad_norm(self.model.parameters(), gradient_clamping_value)\n\n self.optimizer.step()\n\n # TODO: Worry about this later\n #loss_per_item = dqn_loss\n #loss_np = loss_per_item.cpu().data.numpy()\n #if self.kwargs['use_PER']:\n # for (idx, new_error) in zip(batch.idx, loss_np):\n # new_priority = self.replayBuffer.priority(new_error)\n # self.replayBuffer.update(idx, new_priority)\n\n #return loss_np", "def forwardPropagateBest(self, inputMatrix, index):\r\n return 1.0/(1.0+np.exp(-np.einsum('ji, li->lj', self.WeightMatrixT[index], inputMatrix)-self.BiasVector[index]))", "def fit_brody_mle(s: fArr) -> float:\n # use negative log-likelihood because we want to minimize\n # log_like = lambda beta: -np.sum(log_brody(s, beta))\n log_like = lambda beta: -np.sum(brody_dist(s, beta))\n opt_result = minimize_scalar(\n log_like, bounds=(1e-5, 1.0 - 1e-5), method=\"Bounded\", options=dict(xatol=1e-4)\n )\n if not opt_result.success:\n raise RuntimeError(\"Optimizer failed to find optimal Brody fit.\")\n return float(opt_result.x)", "def optimize(self, maxiter):\n for iteration in range(maxiter):\n self.sortParticles()\n self.phi = int(phiMin + iteration *((phiMax - phiMin) / float(maxiter)))\n self.cluster()\n #self.ConnectClusters()\n for i in range(self.n_particles):\n x = self.particles_pos[i]\n v = self.velocities[i]\n p_best = self.p_best[i]\n self.velocities[i] = self.update_velocity(x, v, p_best , self.g_best , self.getLbestOfCluster(self.getClusterOfParticle(i)) , i)\n self.particles_pos[i] = self.update_position(x, v)\n # Update the best position for particle i\n if self.func(self.particles_pos[i]) < self.func(p_best):\n self.p_best[i] = self.particles_pos[i]\n # Update the best position overall\n if self.func(self.particles_pos[i]) < self.func(self.g_best):\n \n self.g_best = self.particles_pos[i]\n return self.g_best, self.func(self.g_best)", "def MAXED(N, sigma2, R, f_def, params):\n\n # pull out algorithm-specific parameters\n Omega = params['Omega']\n\n # create the function that we will maximize, Z\n def Z(lam, N, sigma2, R, f_def, Omega):\n \"\"\"A function, the maximization of which is equivalent to the\n maximization of \"\"\"\n\n A = - np.sum(f_def * np.exp(- np.sum((lam * R.T).T, axis=0)))\n B = - (Omega * np.sum(lam**2 * sigma2))**(0.5)\n C = - np.sum(N * lam)\n\n # negate because it's a minimization\n return - (A + B + C)\n\n # create a lambda\n lam = np.ones(len(N))\n\n # apply the simulated annealing to the Z\n mk = {'args': (N, sigma2, R, f_def, Omega)}\n lam = 
basinhopping(Z, lam, minimizer_kwargs=mk).x\n\n # back out the spectrum values from the lam\n return f_def * np.exp(-np.sum((lam * R.T).T, axis=0))", "def compute_maximisation( self, X, Z, O ):\n\n raise NotImplementedError", "def scipy_minus_objective(w,all_vector_graphs,all_correct_rows,\\\n all_batches,sigma=None,perceptron=None):\n if perceptron:\n perceptron._obj_iter += 1\n obj = 0.0\n index = 0\n for vector_graphs,correct_rows,batches in zip(all_vector_graphs,all_correct_rows,all_batches):\n all_scores = vector_graphs * w\n sum_log_Z = 0.0\n for batch in batches:\n batch_scores = all_scores[batch]\n sum_log_Z += logsumexp(batch_scores) #np.log(np.exp(batch_scores).sum())\n obj += all_scores[correct_rows].sum() - sum_log_Z\n index += 1\n if index % 100 == 0:\n print('Objective '+str(index)+' processed')\n obj = obj / len(all_vector_graphs)\n if sigma != None:\n obj += - 0.5 * sigma * (w * w).sum()\n print('Objective:'+str(obj))\n return -1.0 * obj", "def get_likelihood(\n self,\n qb,\n inv_fish,\n map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n 
alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n self.clear_precalc()\n use_precalc = all([x is None for x in obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape 
since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / (prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n 
for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if 
betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n 
return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n pass", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best", "def optimize_model(data_dir):\n # Read data\n X_train, y_train = read_vectorized_features(data_dir, subset=\"train\")\n\n # Filter unlabeled data\n train_rows = (y_train != -1)\n\n # read training dataset\n X_train = X_train[train_rows]\n y_train = y_train[train_rows]\n\n # score by roc auc\n # we're interested in low FPR rates, so we'll consider only the AUC for FPRs in [0,5e-3]\n score = make_scorer(roc_auc_score, max_fpr=5e-3)\n\n # define search grid\n param_grid = {\n 'boosting_type': ['gbdt'],\n 'objective': ['binary'],\n 'num_iterations': [500, 1000],\n 'learning_rate': [0.005, 0.05],\n 'num_leaves': [512, 1024, 2048],\n 'feature_fraction': [0.5, 0.8, 1.0],\n 'bagging_fraction': [0.5, 0.8, 1.0]\n }\n model = lgb.LGBMClassifier(boosting_type=\"gbdt\", n_jobs=-1, silent=True)\n\n # each row in X_train appears in chronological order of \"appeared\"\n # so this works for progrssive time series splitting\n progressive_cv = TimeSeriesSplit(n_splits=3).split(X_train)\n\n grid = GridSearchCV(estimator=model, cv=progressive_cv, param_grid=param_grid, scoring=score, n_jobs=1, verbose=3)\n grid.fit(X_train, y_train)\n\n return grid.best_params_", "def objective(\n X_train: CSVData,\n Y_train: CSVData,\n model: str,\n log_dir: str,\n smote: bool,\n outlier: str,\n config: Dict[str, Union[float, int]],\n) -> Dict[str, Any]:\n try:\n smote_fn = get_smote_fn(**config) if smote else None # type:ignore\n model = choose_model(model, log_dir, **config) # type:ignore\n\n outlier_detection = (\n get_outlier_detection(outlier, **config) if outlier is not None else None # type:ignore\n )\n\n # Keep k low for faster evaluation\n score = evaluate_model(\n model,\n X_train,\n Y_train,\n k=5,\n smote_fn=smote_fn,\n outlier_detection=outlier_detection,\n )\n\n # We need to maximize score, so minimize the negative\n return {\"loss\": -score, \"status\": STATUS_OK}\n\n except Exception:\n return {\"loss\": 0, \"status\": STATUS_FAIL}", "def optimize(self):\n\t\ts1,a1,r1,s2 = self.ram.agg_sample(self.batch_size)\n\n\t\ts1 = Variable(torch.from_numpy(s1))\n\t\ta1 = Variable(torch.from_numpy(a1))\n\t\tr1 = Variable(torch.from_numpy(r1))\n\t\ts2 = Variable(torch.from_numpy(s2))\n\n\t\tfor i in range(self.critic_step):\n\t\t\t# ---------------------- optimize critic ----------------------\n\t\t\t# Use target actor exploitation policy here for loss evaluation\n\t\t\t\n\t\t\t# a2 = self.target_actor.forward(s2).detach()\n\t\t\t# next_val = torch.squeeze(self.target_critic.forward(s2, a2).detach())\n\t\t\t\n\t\t\t# y_exp = r + gamma*Q'( s2, pi'(s2))\n\t\t\ty_expected = r1 #+ GAMMA*next_val\n\t\t\t# y_pred = Q( s1, a1)\n\t\t\ty_predicted = torch.squeeze(self.critic.forward(s1, a1))\n\t\t\t# compute critic loss, and update the critic\n\t\t\t#print(y_predicted,y_expected,\"hi\")\n\t\t\tloss_critic = F.smooth_l1_loss(y_predicted, y_expected.squeeze())\n\t\t\tself.critic_optimizer.zero_grad()\n\t\t\tloss_critic.backward()\n\t\t\tself.critic_optimizer.step()\n\n\t\t# ---------------------- optimize actor ----------------------\n\t\tpred_a1 = 
self.actor.forward(s1)\n\t\tloss_actor = -1*torch.sum(self.critic.forward(s1, pred_a1))\n\t\tself.actor_optimizer.zero_grad()\n\t\tloss_actor.backward()\n\t\tself.actor_optimizer.step()\n\n\t\tutils.soft_update(self.target_actor, self.actor, TAU)\n\t\tutils.soft_update(self.target_critic, self.critic, TAU)\n\n\t\t# if self.iter % 100 == 0:\n\t\tif self.batch_size > 1:\n\t\t\ty_1 = y_predicted.data.numpy()[0]\n\t\t\tr_1 = r1.data.numpy()[0]\n\t\telse:\n\t\t\ty_1 = y_predicted.data.numpy()\n\t\t\tr_1 = r1.data.numpy()\n\t\tprint ('Iteration :- ', self.iter, ' Loss_actor :- ', loss_actor.data.numpy(),\\\n\t\t\t' Loss_critic :- ', loss_critic.data.numpy(), ' Critic Pred Reward :- ', y_1, ' Actual Reward :- ', r_1)\n\t\tself.iter += 1", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def learn(self, Xtrain, ytrain):\n pass\n self.weights = np.zeros(Xtrain.shape[1],)\n\n ### YOUR CODE HERE\n \n lmbd = self.params['lamb']\n \n numsamples = Xtrain.shape[0]\n # Xless = Xtrain[:,self.params['features']]\n Xless = Xtrain\n self.weights = np.random.rand(Xless.shape[1])\n err = 10000;\n #cw =0;\n tolerance = 10*np.exp(-4)\n i=0;\n \n \n w1 = self.weights\n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](self.weights)\n # print(cw)\n errors = []\n runtm = []\n epch = []\n \n err = 1\n iteration= 1000\n #tm= time.time()\n while (abs(cw-err)>tolerance) and (i <iteration):\n err = cw\n g = self.logit_cost_grad(cw_v, Xless, ytrain)\n obj = cw\n j=0\n ita = -1* self.params['stepsize']\n w = self.weights\n # w1 = np.add(w,np.dot(ita,g))\n while(j<iteration):\n w1 = np.add(w,np.dot(ita,g))\n # cw_v =(np.dot(Xless, w1)-ytrain)\n # cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, w1.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)+lmbd * self.regularizer[0](w1)\n ## print (cw)\n \n if(cw<np.absolute(obj-tolerance)): ############################################\n break\n ita = 0.7*ita\n j=j+1\n \n if(j==iteration):\n self.weights=w\n ita =0\n else:\n self.weights = w1\n \n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)\n #tm1 = time.time()-tm\n #runtm.append(tm1)\n #err = cw\n errors.append(err)\n i=i+1\n epch.append(i)", "def optimize(self,s,max_steps,td_n):\r\n \r\n s1, d1, a1, r1, s2, done = self.ram.sample(self.batch_size)\r\n s1 = torch.from_numpy(s1).float()\r\n #print(s1.shape)\r\n d1 = torch.from_numpy(d1).float()\r\n d1 = d1.unsqueeze(1)\r\n d1.requires_grad = True\r\n d1_copy = d1.detach().cpu().numpy()\r\n self.mse_logic.append(d1_copy[-1][-1])\r\n a1 = torch.from_numpy(a1).float()\r\n #print(a1.shape)\r\n r1 = torch.from_numpy(r1).float()\r\n r1 = torch.squeeze(r1)\r\n #print(r1.size())\r\n s2 = torch.from_numpy(s2).float()\r\n done = torch.from_numpy(done).float()\r\n self.t = s\r\n self.T = max_steps\r\n self.td_n = td_n\r\n self.td_tau = self.t-self.td_n+1\r\n if self.td_tau >= 0:\r\n for i in range(self.td_tau+1, min(self.td_tau+self.td_n, self.T)):\r\n self.G = self.gamma**(i-self.td_tau-1)*(r1)\r\n if self.td_tau+self.td_n < self.T:\r\n #-----------optimize critic -----------------------------------\r\n a2 = self.target_actor.forward(s2).detach()\r\n noise = a1.data.normal_(0,0.2)\r\n noise = noise.clamp(-0.5,0.5)\r\n a2 = (a2+noise).clamp(-1,1)\r\n next_val = 
torch.squeeze(self.target_critic.forward(s2,a2).detach())\r\n y_expected = self.G + self.gamma**(self.td_n)*next_val*(1-done)\r\n y_predicted = torch.squeeze(self.critic.forward(s1, a1))\r\n # compute critic loss, and update the critic\r\n loss_critic = self.l_loss(y_predicted, y_expected.detach()).unsqueeze(0)\r\n self.critic_optimizer.zero_grad()\r\n loss_critic.backward()\r\n self.critic_optimizer.step()\r\n \r\n #-----------optimize actor --------------------------------------\r\n pred_a1 = self.actor.forward(s1)\r\n pred_a1_copy = pred_a1.detach().numpy()\r\n self.actor_logic.append(pred_a1_copy[-1][-1])\r\n loss_actor = -self.critic.forward(s1, pred_a1).mean()\r\n #entropy = torch.mean(pred_a1*torch.log(pred_a1))\r\n loss_policy = loss_actor\r\n self.loss_l1_list.append(loss_policy.item())\r\n mse_policy = self.target_critic(s1,d1).mean()\r\n loss_mse = self.mse(loss_actor, mse_policy)\r\n self.loss_mse_list.append(loss_mse.item())\r\n loss = sum([(1-self.lambda_mse)*loss_policy, self.lambda_mse*loss_mse])\r\n self.loss_final_list.append(loss.item())\r\n self.actor_optimizer.zero_grad()\r\n loss.backward()\r\n self.actor_optimizer.step()\r\n \r\n soft_update(self.target_actor, self.actor, self.tau)\r\n soft_update(self.target_critic, self.critic, self.tau)\r\n \r\n #self.actor.state_dict(self.target_actor.state_dict())\r\n #self.critic.state_dict(self.target_critic.state_dict())\r", "def fit_gp(self):\n # Put things into training mode.\n self.gpf_core.float()\n self.likelihood.train()\n # Now use Adam by default.\n optimizer = torch.optim.Adam([{'params': self.gpf_core.parameters()}],\n lr=0.1)\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood,\n self.gpf_core)\n # TODO: Allow length of training to be an option.\n for _ in range(500):\n optimizer.zero_grad()\n output = self.gpf_core(self.tensor_x)\n loss = -mll(output, self.tensor_y)\n loss.backward()\n optimizer.step()", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def makeOptimizationStep(modelNet, targetNet, gameMemory, optimizer):\n statesBatch, actionsBatch, nextStatesBatch, rewardsBatch, terminalMask = gameMemory.getBatch()\n currentQValues = modelNet(statesBatch).gather(1, actionsBatch.unsqueeze(1))\n nextQValues = targetNet(nextStatesBatch).max(1)[0].detach()\n nextQValues[terminalMask == 1] = 0\n expectedQValues = rewardsBatch + nextQValues * DISCOUNT_FACTOR\n expectedQValues = torch.tensor(expectedQValues).unsqueeze(1).to(DEVICE)\n loss = F.smooth_l1_loss(currentQValues, expectedQValues)\n optimizer.zero_grad()\n loss.backward()\n for param in modelNet.parameters():\n param.grad.data.clamp_(-1, 1)\n optimizer.step()", "def _update_model(self, X_all, Y_all):\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.set_XY(X_all, Y_all)\n\n # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...\n if self.max_iters > 0:\n # --- update the model maximizing the marginal likelihood.\n if self.optimize_restarts==1:\n self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)\n else:\n self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)", "def maximize_loglik_fcmnl_MPEC(model_params: Union[CupidParams, CupidParamsCSHeteroxy, CupidParamsFcmnl],\n x_init: np.ndarray,\n lower: Optional[np.ndarray] = None,\n upper: Optional[np.ndarray] = None,\n checkgrad: Optional[bool] = False,\n verbose: Optional[bool] = 
False,\n fixed_vars: Optional[List[int]] = None,\n fixed_vals: Optional[List[float]] = None,\n options: Optional[Dict] = {'iprint': 1}) -> Tuple[float, np.ndarray, int]:\n n_paramsU = x_init.size\n bases_surplus = model_params.bases_surplus\n ncat_men, ncat_women, n_bases = bases_surplus.shape\n n_pars_b_men, n_pars_b_women = model_params.n_pars_b_men, model_params.n_pars_b_women\n n_pars_b = n_pars_b_men + n_pars_b_women\n n_thetas = n_pars_b + n_bases\n\n try:\n kc = KN_new()\n except:\n bs_error_abort(\"Failed to find a valid Knitro license.\")\n\n KN_add_vars(kc, n_paramsU)\n\n # bounds, if any\n if lower is None:\n # not necessary since infinite\n KN_set_var_lobnds(kc, xLoBnds=np.full(n_paramsU, -KN_INFINITY))\n else:\n KN_set_var_lobnds(kc, xLoBnds=lower)\n if upper is None:\n KN_set_var_upbnds(kc, xUpBnds=np.full(n_paramsU, KN_INFINITY))\n else:\n KN_set_var_upbnds(kc, xUpBnds=upper)\n\n n_prod_categories = ncat_men * ncat_women\n # Add the constraints and set the rhs and coefficients\n n_cons = n_prod_categories\n KN_add_cons(kc, n_cons)\n KN_set_con_eqbnds(kc, cEqBnds=[0.0] * n_cons)\n\n # Define an initial point. If not set, Knitro will generate one.\n KN_set_var_primal_init_values(kc, xInitVals=x_init)\n\n if fixed_vars is not None:\n assert fixed_vals is not None\n KN_set_var_fxbnds(kc, fixed_vars, fixed_vals)\n\n cb = KN_add_eval_callback(kc, evalObj=True,\n indexCons=np.arange(n_prod_categories),\n funcCallback=log_likelihood_fcmnl_MPEC)\n\n KN_set_cb_user_params(kc, cb, model_params)\n\n # c(x,y) has derivatives in thetas, U(x,t), U(z,y)\n # dc(x,y)/dU(x,y) may only appear once\n n_args_jac = n_thetas + ncat_men + ncat_women - 1\n n_jac = n_cons * n_args_jac\n cbjacIndexCons = np.repeat(np.arange(n_prod_categories), n_args_jac)\n cbjacIndexVars = np.zeros(n_prod_categories * n_args_jac, int)\n i = 0\n for iman in range(ncat_men):\n for iwoman in range(ncat_women):\n # derivatives in thetas\n cbjacIndexVars[i:(i+n_thetas)] = np.arange(n_thetas)\n # derivatives in [iman, jwoman]\n cbjacIndexVars[(i + n_thetas):(i+n_thetas+ncat_women)] = \\\n n_thetas + iman*ncat_women + np.arange(ncat_women)\n # derivatives in [jman, iwoman] except [iman, iwoman]\n list_men = list(range(ncat_men))\n del list_men[iman]\n cbjacIndexVars[(i + n_thetas + ncat_women):(i+n_args_jac)] = \\\n n_thetas + iwoman + ncat_women*np.array(list_men, int)\n i += n_args_jac\n\n print(cbjacIndexCons.shape)\n print(cbjacIndexVars.shape)\n\n KN_set_cb_grad(kc, cb, objGradIndexVars=KN_DENSE,\n jacIndexCons=cbjacIndexCons, jacIndexVars=cbjacIndexVars,\n gradCallback=grad_log_likelihood_fcmnl_MPEC)\n\n KN_set_int_param(kc, KN_PARAM_OUTLEV, KN_OUTLEV_ALL)\n\n if checkgrad:\n # Perform a derivative check.\n KN_set_int_param(kc, KN_PARAM_DERIVCHECK, KN_DERIVCHECK_ALL)\n\n # Solve the problem.\n nStatus = KN_solve(kc)\n\n loglik_val, estimates = print_optimization_results_MPEC(kc)\n\n print_stars()\n print(f\" Value of log-likelihood: {loglik_val: > 8.3f}\\n\")\n print()\n\n return loglik_val, np.array(estimates), nStatus", "def objective(hyperparams): \n global iteration #necessary with a global variable because of implementation from hyperopt. 
\n iteration += 1\n\n result = run_model(hyperparams, iteration)\n loss = -result #transform to loss in order to minimize\n\n return {'loss': loss, 'hyperparams': hyperparams, 'iteration': iteration, 'status': STATUS_OK}", "def linearFitWithOutliers(x, y, e, outtriangle='linear.png'):\n # theta will be an array of length 2 + N, where N is the number of points\n # theta[0] is the intercept, theta[1] is the slope,\n # and theta[2 + i] is the weight g_i\n def log_prior(theta):\n #g_i needs to be between 0 and 1\n if (all(x > 0. for x in theta[2:]) and all(x < 1. for x in theta[2:])) and \\\n 0. < theta[0] < 10. and 0. < theta[1] < 0.1:\n return 0\n else:\n return -np.inf # recall log(0) = -inf\n\n def log_likelihood(theta, x, y, e, sigma_B):\n dy = y - theta[0] - theta[1] * x\n g = np.clip(theta[2:], 0, 1) # g<0 or g>1 leads to NaNs in logarithm\n logL1 = np.log(g) - 0.5 * np.log(2 * np.pi * e ** 2) - 0.5 * (dy / e) ** 2\n logL2 = np.log(1 - g) - 0.5 * np.log(2 * np.pi * sigma_B ** 2) - 0.5 * (dy / sigma_B) ** 2\n return np.sum(np.logaddexp(logL1, logL2))\n\n def log_posterior(theta, x, y, e, sigma_B):\n return log_prior(theta) + log_likelihood(theta, x, y, e, sigma_B)\n\n\n #find starting point\n def squared_loss(theta, x=x, y=y, e=e):\n dy = y - theta[0] - theta[1] * x\n return np.sum(0.5 * (dy / e) ** 2)\n theta1 = optimize.fmin(squared_loss, [0, 0], disp=False)\n\n ndim = 2 + len(x) # number of parameters in the model\n nwalkers = 200 # number of MCMC walkers\n nburn = 5000 # \"burn-in\" period to let chains stabilize\n nsteps = 50000 # number of MCMC steps to take\n\n # set theta near the maximum likelihood, with\n starting_guesses = np.zeros((nwalkers, ndim))\n starting_guesses[:, :2] = np.random.normal(theta1, 1, (nwalkers, 2))\n starting_guesses[:, 2:] = np.random.normal(0.5, 0.1, (nwalkers, ndim - 2))\n\n #initiate sampler\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x, y, e, 20])\n\n # Run a burn-in and set new starting position\n print \"Burning-in...\"\n pos, prob, state = sampler.run_mcmc(starting_guesses, nburn)\n best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)\n sampler.reset()\n\n print \"Running an improved estimate...\"\n pos, prob, state = sampler.run_mcmc(pos, nburn)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n sampler.reset()\n print \"Running MCMC...\"\n pos, prob, state = sampler.run_mcmc(pos, nsteps, rstate0=state)\n print \"Mean acceptance fraction:\", np.mean(sampler.acceptance_fraction)\n\n #sample shape = (nwalkers, nsteps, ndim)\n sample = sampler.chain.reshape(-1, ndim)\n\n params = np.mean(sample[:, :2], 0)\n g = np.mean(sample[:, 2:], 0)\n outliers = (g < 0.5)\n\n #Get the index with the highest probability\n maxprob_index = np.argmax(prob)\n\n #Get the best parameters and their respective errors and print best fits\n params_fit = pos[maxprob_index][:2]\n errors = [sampler.flatchain[:, i].std() for i in xrange(ndim)][:2]\n\n fig = triangle.corner(sample, labels=['intercept' , 'slope'] + len(x)*['Gi',])\n fig.savefig(outtriangle)\n plt.close()\n\n\n return params, params_fit, errors, outliers", "def minimize(self, func, grad, x0, args=()):\n learning_rate = self._learning_rate\n best_x = x = x0\n best_value = func(x, *args)\n iters_without_improve = 0\n\n for iteration in range(self._max_iterations):\n gradient = grad(x, *args)\n\n # If 
absolute values of all partial derivatives are equal to 0 with specified accuracy, then parameters are\n # close enough to the minimum and there is no need to continue gradient descent.\n if np.abs(gradient).max() <= self._accuracy:\n break\n\n x = x - learning_rate * gradient\n\n # If new values of x haven't lead to decrease of the function value for the specified number of iteration,\n # the x is reverted to its previous best value and the learning rate is reduced\n value = func(x, *args)\n if value > best_value:\n iters_without_improve += 1\n if iters_without_improve >= self._lr_reduce_patience:\n x = best_x\n learning_rate *= self._lr_reduce_factor\n else:\n iters_without_improve = 0\n best_value = value\n best_x = x\n\n return best_x", "def _optimize(self, v):\n v0, prob_h_v0, vk, prob_h_vk = self._gibbs_sampling(v)\n W_grad, a_grad, b_grad = self._compute_gradients(v0, prob_h_v0, vk, prob_h_vk)\n para_update = [tf.assign(self.W, tf.add(self.W, self.learning_rate*W_grad)),\n tf.assign(self.a, tf.add(self.a, self.learning_rate*a_grad)),\n tf.assign(self.b, tf.add(self.b, self.learning_rate*b_grad))]\n error = tf.metrics.mean_squared_error(v0, vk)[1]\n return para_update, error", "def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE HERE ***", "def hyperopt_func(model_dict, model_param_names, training_param_names, param_space, datasets, max_evals=30):\n tester = fitness(model_dict, model_param_names, training_param_names, datasets)\n trials = Trials()\n \n timer_start = timer()\n best = fmin(fn=tester.objective, \n space=param_space, \n algo=tpe.suggest, \n max_evals=max_evals, \n trials=trials, \n rstate=np.random.RandomState(50))\n timer_end = timer()\n print('Total training time (min):',(timer_end-timer_start)/60)\n results = sorted(trials.results, key = lambda x: x['loss'])\n return results", "def objective(self,w):\n diffs = self.get_y_times_diffs(self.get_split_weights(w))\n #print diffs, sigmoid(diffs)\n obj = -np.sum(np.log(sigmoid(diffs))) #negative, since minimising\n # regularisation\n obj += 0.5 * self.alpha * np.dot(w[:self.interp_index[0]], w[:self.interp_index[0]])\n return obj", "def minimize(self):\n pass", "def softmax(x): \n e_x = np.exp(x - np.max(x)) \n return e_x / e_x.sum()", "def train_MLE(self) -> None:\n result = differential_evolution(self.neg_log_likelihood,\n bounds=self.initial_params_bounds)\n self.mle_params = result['x']\n print(result)", "def softmax(x):\n e_x = np.exp(x - np.max(x))\n return 
e_x / e_x.sum(axis=0)", "def fit(self, Y, STATUS, ntop=100, nrecent=100, nmax=400, ntopmu=100, ntopvar=100, nkmeans=300, nkeamnsdata=5000,\n lam=1e-6):\n X = self.X\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n tested = [i for i in range(self.n) if STATUS[i] == 2]\n ytested = Y[tested].reshape(-1)\n self.y_max = np.max(ytested)\n # each 10 fits we update the hyperparameters, otherwise we just update the data which is a lot faster\n if np.mod(self.update_counter, self.updates_per_big_fit) == 0:\n print('fitting hyperparameters')\n # how many training points are there\n ntested = len(tested)\n # if more than nmax we will subsample and use the subsample to fit hyperparametesr\n if ntested > nmax:\n # subsample is uniion of 100 best points, 100 most recent points and then random points \n top = list(np.argsort(ytested)[-ntop:])\n recent = list(range(ntested - nrecent, ntested))\n topandrecent = list(set(top + recent))\n rand = list(\n np.random.choice([i for i in range(ntested) if i not in topandrecent], nmax - len(topandrecent),\n False))\n testedtrain = topandrecent + rand\n ytrain = ytested[testedtrain]\n train = [tested[i] for i in testedtrain]\n else:\n train = tested\n ytrain = ytested\n \n # use GPy code to fit hyperparameters to minimize NLL on train data\n mfy = GPy.mappings.Constant(input_dim=self.d, output_dim=1) # fit dense GPy model to this data\n ky = GPy.kern.RBF(self.d, ARD=True, lengthscale=np.ones(self.d))\n self.GP = GPy.models.GPRegression(X[train], ytrain.reshape(-1, 1), kernel=ky, mean_function=mfy)\n self.GP.optimize('bfgs')\n # strip out fitted hyperparameters from GPy model, because cant do high(ish) dim sparse inference\n self.mu = self.GP.flattened_parameters[0]\n self.a = self.GP.flattened_parameters[1]\n self.l = self.GP.flattened_parameters[2]\n self.b = self.GP.flattened_parameters[3]\n # selecting inducing points for sparse inference \n print('selecting inducing points')\n # get prediction from GPy model \n self.py = self.GP.predict(X)\n # points with 100 highest means\n topmu = [untested[i] for i in np.argsort(self.py[0][untested].reshape(-1))[-ntopmu:]]\n # points with 100 highest uncertatinty\n topvar = [untested[i] for i in np.argsort(self.py[1][untested].reshape(-1))[-ntopvar:]]\n # combine with train set above to give nystrom inducing points (inducing points that are also actual trainingdata points) \n nystrom = topmu + topvar + train\n # also get some inducing points spread throughout domain by using kmeans\n # kmeans is very slow on full dataset so choose random subset \n # also scale using length scales l so that kmeans uses approproate distance measure\n kms = KMeans(n_clusters=nkmeans, max_iter=5).fit(\n np.divide(X[list(np.random.choice(untested, nkeamnsdata))], self.l))\n # matrix of inducing points \n self.M = np.vstack((X[nystrom], np.multiply(kms.cluster_centers_, self.l)))\n # dragons...\n # email james.l.hook@gmail.com if this bit goes wrong!\n print('fitting sparse model')\n DXM = euclidean_distances(np.divide(X, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_XM = self.a * np.exp(-DXM / 2)\n DMM = euclidean_distances(np.divide(self.M, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_MM = self.a * np.exp(-DMM / 2) + np.identity(self.M.shape[0]) * lam * self.a\n self.B = self.a + self.b - np.sum(np.multiply(np.linalg.solve(self.SIG_MM, self.SIG_XM.T), self.SIG_XM.T),0)\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + 
np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n else:\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n self.update_counter += 1\n \"\"\" \n key attributes updated by fit \n \n self.SIG_XM : prior covarience matrix between data and inducing points\n self.SIG_MM : prior covarience matrix at inducing points\n \n self.SIG_MM_pos : posterior covarience matrix at inducing points\n self.mu_M_pos : posterior mean at inducing points \n \n \"\"\"", "def maximization(X, g):\n if not verify(X, g):\n return None, None, None\n n, d = X.shape\n k, _ = g.shape\n m = np.zeros((k, d))\n S = np.empty((k, d, d))\n pi = np.zeros((k, ))\n for i in range(k):\n Nk = np.sum(g[i])\n pi[i] = Nk / n\n gi = g[i].reshape(1, n)\n m[i] = np.sum(np.matmul(gi, X), axis=0) / Nk\n Df = X - m[i]\n S[i] = np.dot(gi * Df.T, Df) / Nk\n return(pi, m, S)", "def fit_greedy(data, nnbr=10, threshold=0.05, refit=refit_pll):\n n,m = data.shape;\n L = np.zeros((n,n)) # initialize parameters\n scores = np.zeros(n) \n data = data.astype(int)\n for i in range(n):\n Ni = []\n while (len(Ni)<nnbr):\n Vi = (0*data[i,:] + sum(data[j,:]*(2**jj) for jj,j in enumerate(Ni))).astype(int)\n Vsz = int(Vi.max()+1)\n for j in range(n):\n if j==i or j in Ni: scores[j]=0.; continue\n pIJV = Factor( [Var(0,2),Var(1,2),Var(2,Vsz)] , 0.)\n # pIJV[data[i,:],data[j,:],Vi] += 1. 
# Test??\n for k in range(m): pIJV[data[i,k],data[j,k],Vi[k]] += 1.\n pV = pIJV.marginal([2]); pV /= (pV.sum()+1e-20);\n pIJV /= (pIJV.sum([0])+1e-20)\n scores[j] = ((pIJV.condition({0:1,1:1})-pIJV.condition({0:1,1:0})).abs()*pV).sum()\n jmax = int(np.argmax(scores))\n if scores[jmax] < threshold: break\n Ni.append(jmax)\n # TODO: prune back each list?\n #print(i,\" : \",Ni)\n L[i,Ni] = 1.\n L = L*L.T # \"and\" connectivity: keep only if edges (i,j) and (j,i) present?\n model = Ising(L);\n refit(model,data)\n return model", "def _optfn(self, x):\n\n logger.debug(\" optfn(theta=%s)\", str(x))\n\n wmx = max(self.weights) * self.weighttrunc\n\n ip = []\n for i,w in enumerate(self.weights):\n if w < wmx:\n continue\n ip.append((i,w,x))\n\n if self.pool is None:\n itr = map(self.worker.loglik_grad, ip)\n else:\n itr = self.pool.imap_unordered(_pool_loglik_grad, ip, 10)\n\n if self._prior_shape is None:\n ll = 0.\n grad = np.zeros(len(x))\n else:\n ll = sum(sp.special.xlogy(self._prior_shape-1,x)-(x/self._prior_scale))\n grad = (self._prior_shape - 1)/x - 1/self._prior_scale\n\n for l,g in itr:\n ll += l\n grad += g\n\n logger.debug(\" optfn=%g\", ll)\n\n return -ll, -grad", "def optimize(self, X_train, y_train, X_val, y_val):\n\n # To learn more about XGBoost parameters, head to this page:\n # https://github.com/dmlc/xgboost/blob/master/doc/parameter.md\n\n def objective_function(params):\n\n params['n_estimators'] = int(params['n_estimators'])\n\n self.logger.info(\"Training with params: \")\n self.logger.info(params)\n\n watchlist = [(X_train, y_train), (X_val, y_val)]\n\n model = xgb.XGBClassifier(**params)\n\n model.fit(\n X_train,\n y_train,\n eval_set=watchlist,\n verbose=self.verbose,\n early_stopping_rounds=100)\n\n predictions = model.predict_proba(\n X_val, ntree_limit=model.best_iteration + 1)[:, 1]\n\n valid_score = self.optimized_metric(y_val, predictions)\n\n self.logger.info(\"\\tScore {0}\\n\\n\".format(valid_score))\n self.logger.info(\"-------- End Iteration --------\")\n\n # The score function should return the loss (1-score)\n # since the optimize function looks for the minimum\n loss = 1 - valid_score\n\n return {'loss': loss, 'status': STATUS_OK}\n\n space = {\n 'n_estimators': hp.quniform('n_estimators', 100, 1000, 10),\n 'eta': hp.quniform('eta', 0.01, 0.4, 0.02),\n # A problem with max_depth casted to float instead of int with\n # the hp.quniform method.\n 'max_depth': hp.choice('max_depth', np.arange(1, 14, dtype=int)),\n 'min_child_weight': hp.quniform('min_child_weight', 1, 10, 1),\n 'subsample': hp.quniform('subsample', 0.5, 1, 0.05),\n 'gamma': hp.quniform('gamma', 0.1, 1, 0.05),\n 'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),\n 'lambda': hp.quniform('lambda', 0.1, 1, 0.2),\n }\n # Use the fmin function from Hyperopt to find the best hyperparameters\n best_hyperparams = self.__get_best_hyperparameters(\n fmin(\n objective_function,\n dict(self.fix_hyperparameters, **space),\n algo=tpe.suggest,\n max_evals=self.max_evals))\n self.best_hyperparams = best_hyperparams\n self.logger.info(\"Best parameters are:\\n\")\n self.logger.info(best_hyperparams)", "def minimize(self,x0=None):\n import time\n start_time = time.time()\n tmp,total_par,lik_grad = self.minimize_both_vers(numerical=False,x0=x0)\n if tmp['success']==False:\n print(\"Probably a problem with gradient, do numerical\")\n tmp,total_par,lik_grad = self.minimize_both_vers(x0=tmp['x'],numerical=True)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n self.lengthscale = 
total_par[0]\n self.variance = total_par[1]\n self.gstds = total_par[2]\n tmp['fx']=np.array([total_par[0],total_par[1],total_par[2]])\n return tmp,total_par,lik_grad", "def run(self, C, p0 = None):\n global algorithm \n algorithm = AdaptiveMM(self.g, C, p0 = p0, lambda0 = 2000)\n solve()", "def evaluationFunction(individual, modelOmega, mean):\n logValue = float('Inf')\n genomeModel = models.model.newModel(modelOmega[0].definitions)\n genomeModel.bins = list(individual)\n modelLambda = models.model.newModel(modelOmega[0].definitions)\n modelLambda.bins = calcNumberBins(genomeModel.bins, mean)\n for i in range(len(modelOmega)):\n tempValue = calcLogLikelihood(modelLambda, modelOmega[i])\n calcLogLikelihood.cache_clear()\n if tempValue < logValue:\n logValue = tempValue\n return logValue," ]
[ "0.7011556", "0.67614967", "0.6661641", "0.64753014", "0.6324813", "0.6313172", "0.62755656", "0.62447", "0.6160573", "0.6111164", "0.60700196", "0.60524917", "0.60361964", "0.6025766", "0.6025012", "0.6023183", "0.60038626", "0.600292", "0.598566", "0.59753746", "0.59354776", "0.5916593", "0.59025437", "0.590035", "0.5892111", "0.5890576", "0.5886195", "0.5880765", "0.5879071", "0.58708185", "0.5870202", "0.5869746", "0.5869692", "0.58594924", "0.5856766", "0.58449054", "0.5842594", "0.5833418", "0.58318204", "0.582677", "0.582244", "0.5811013", "0.5810227", "0.58044165", "0.57868123", "0.5786283", "0.5779739", "0.57750267", "0.57724804", "0.5762971", "0.57575107", "0.57493746", "0.5742719", "0.57320946", "0.5727708", "0.5725752", "0.57182884", "0.5715191", "0.5714503", "0.57095605", "0.57060534", "0.5701408", "0.5688027", "0.56866914", "0.567584", "0.5666699", "0.565991", "0.56588584", "0.5658036", "0.56570685", "0.56471664", "0.56298375", "0.56291676", "0.56291676", "0.56233823", "0.56225556", "0.56179917", "0.56173134", "0.5611909", "0.56109387", "0.5598984", "0.5596861", "0.5596539", "0.5593116", "0.5584542", "0.5581393", "0.55785275", "0.5578105", "0.55731714", "0.5567685", "0.5566612", "0.55641073", "0.5562296", "0.555944", "0.55566734", "0.55492777", "0.5547602", "0.5547321", "0.5546109", "0.55458236" ]
0.5914919
22
Iterate over the observations and update the filtered values after each iteration
def iterate(self, plot=True, estimate=False, init_params=None):
    # Create empty arrays to store values
    F = np.zeros(len(self.y))
    a = np.zeros(len(self.y))
    v = np.zeros(len(self.y))
    P = np.zeros(len(self.y))

    # Initialize at the initial values parsed to the class
    if estimate == True:
        self.T = np.array(init_params[0])
        self.c = np.array(init_params[1])
        self.R = np.array(init_params[2])
        # self.H = np.array(init_params[0])
        # self.Q = np.array(init_params[1])

    P[0] = self.P_start
    a[0] = self.a_start

    # Iterate over the observations
    for t in range(0, len(self.y) - 1):
        # Kalman filter equations
        v[t] = self.y[t] - self.Z * a[t] - self.d
        F[t] = self.Z * P[t] * self.Z.transpose() + self.H
        a_t = a[t] + ((P[t] * self.Z.transpose()) / F[t]) * v[t]
        a[t + 1] = self.T * a_t + self.c
        P_t = P[t] - ((P[t] * self.Z.transpose()) / F[t]) * self.Z * P[t]
        P[t + 1] = self.T * P_t * self.T.transpose() + self.R * self.Q * self.R.transpose()

    F[-1] = P[-1] + self.H
    v[-1] = self.y[-1] - a[-1]

    # Obtain std error of prediction from variance
    std = np.sqrt((P * self.H) / (P + self.H))

    return a, std, P, v, F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, other):\n for filter, value in other.items():\n self.__setitem__(filter, value)", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def update(self, updates, predicate):\n for row in self.rows:\n if predicate(row):\n for column, new_value in updates.items():\n row[column] = new_value", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata", "def UpdateSet(self, dataset):\r\n for data in dataset:\r\n self.UpdateOddsRatioVsNoNorm(data)", "def filter_update(observations, observation_matrix, observation_variance,\n observation_indices, observation_count,\n state_mean, state_covariance):\n\n sigma = 0.\n detf = 0.\n n_observation = np.int(observation_count)\n for i in range(n_observation):\n observation_index = int(observation_indices[i])\n obsmat = observation_matrix[observation_index, :]\n innovation = (observations[observation_index]\n - np.dot(obsmat, state_mean))\n dot_statecov_obsmat = np.dot(state_covariance, obsmat)\n innovation_covariance = (np.dot(obsmat, dot_statecov_obsmat)\n + observation_variance[observation_index])\n kgain = dot_statecov_obsmat / innovation_covariance\n state_covariance = (state_covariance\n - (np.outer(kgain, kgain)\n * innovation_covariance))\n\n state_mean = state_mean + kgain * innovation\n\n sigma = sigma + (innovation ** 2 / innovation_covariance)\n detf = detf + np.log(innovation_covariance)\n\n return (state_mean, state_covariance, sigma, detf)", "def update_filter(self, measurement, robot_pos):\n new_weights = []\n for p in self.particles:\n p = self.__move(p, self.MOTION)\n angle = self.__measurement(p, robot_pos)\n prob = self.__measurement_prob(angle, measurement,\n self.sense_noise)\n new_weights.append(prob)\n new_weights = np.array(new_weights)\n new_weights /= np.sum(new_weights) # normalized weights\n self.weights = new_weights\n\n # if self.__neff() > self.n / 2:\n self.particles = self.__resample()", "def set_observations(self, factor):\r\n relevant_obs = set(self.obs).intersection(set(factor.get_variables()))\r\n if relevant_obs:\r\n factor.set_observations({x:self.obs[x] for x in relevant_obs})", "def filter(self, update):\n\n raise NotImplementedError", "def ObserveEvidence(self,E):\r\n for v,e in E.iteritems():\r\n indx, = (self.var==v).nonzero() # find i where f.var[i]=v\r\n if indx.size != 0:\r\n A = 
I2A(range(len(self.val)), self.card) # all assignments\r\n A = A[:,indx].flatten() # only interested in 'indx' element of each row\r\n self.val[A != e] = 0", "def update(self, predictions, real_vals):\n for pred, actual in zip(predictions, real_vals):\n self.cm.loc[actual, pred] += 1", "def filter(self, observable):", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def update_values(self):\n # have to reset params to 0 when recalculating\n self.total_weight = self.total_price = self.total_value = self.total_fitness = 0\n for index, value in enumerate(self.item_stats):\n if value == 1:\n self.total_weight += self.items[index].weight\n self.total_price += self.items[index].price\n self.total_value += self.items[index].value\n self.total_fitness += self.items[index].fitness", "def update(self, z):\n raise NotImplementedError('Must implement an update step for the filter.')", "def run_batch_filter(self):\n if self.speed_data is None and self.flow_data is None and self.traveltime_data is None:\n print(\n 'Warning: The measurement data must be set before running the batch filter: use function self.set_meas_data()')\n\n # =======================================================================\n # the initial ensembles, which should have been set externally\n X_init = np.matrix(np.zeros((self.dim_state, self.num_ensembles)))\n print(\n 'Setting initial ensembles: rho {0}; qin {1}; qout {2}'.format(self.init_rho, self.init_qin, self.init_qout))\n for ens in range(0, self.num_ensembles):\n X_init[self.x_index['density'][0]:\n self.x_index['density'][self.num_cells - 1], ens] = self.init_rho\n X_init[self.x_index['qin'], ens] = self.init_qin\n X_init[self.x_index['qout'], ens] = self.init_qout\n\n # print('setted qin {0}; qout {1}'.format(X_init[self.x_index['qin'], ens], X_init[self.x_index['qout'], ens] ))\n # add noise to each ensemble\n X_init[:, ens] += np.matrix(np.random.multivariate_normal(\n np.zeros(self.dim_state), self.Q)).reshape((self.dim_state, 1))\n\n self.set_initial_ensembles(X_init)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the initial state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n self.qout_obs.append(np.nan)\n\n # The enKF runs at the finest time grid\n # for each step, update the system\n for step in range(0, self.num_steps):\n\n # update status\n sys.stdout.write('\\r')\n sys.stdout.write('Status: filtering step {0}/{1}'.format(step, self.num_steps))\n sys.stdout.flush()\n # print('Status: filtering step {0}'.format(step))\n\n cur_time = (step + 1) * self.dur_steps\n\n # get the effective measurement\n eff_flow, eff_speed, eff_traveltime = self.__get_eff_meas(cur_time)\n\n # build the observation index\n self.y_index, self.dim_obs, y_obs, cov_noise = self.__build_obs_index(eff_flow, eff_speed, eff_traveltime)\n\n # update the estimate for this step\n est_state = self.update_estimate(y_obs, cov_noise, cur_time)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the updated state\n if 
self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_entrance_sensor in self.y_index['flow'].keys():\n self.qin_obs.append(y_obs[self.y_index['flow'][self.__debug_entrance_sensor]])\n # print('y_index[flow]:{0}'.format(self.y_index['flow'].keys()))\n # print('y_obs[ y_index[flow][entrance] ]:{0}'.format(\n # y_obs[ self.y_index['flow'][self.__debug_entrance_sensor]],\n # self.__debug_entrance_sensor))\n else:\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_exit_sensor in self.y_index['flow'].keys():\n self.qout_obs.append(y_obs[self.y_index['flow'][self.__debug_exit_sensor]])\n else:\n self.qout_obs.append(np.nan)\n # =======================================================================\n # save the estimated state\n self.est_state_all[:, step] = est_state\n\n # decouple and save into self.est_density, self.est_speed, self.est_queue, self.est_traveltime\n self.est_density[:, step] = est_state[0:self.num_cells, 0]\n\n # the speed is computed using the fundamental diagram\n for cell_id in range(0, self.num_cells):\n # use the static FD at this step\n self.est_speed[cell_id, step] = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n self.est_density[cell_id, step])\n\n # REMARK: the queue and travel time a post-processed from the speed field.\n # They are computed in cross_evaluation class for all algorithms\n # the queue length starts from the first cell with speed below queue_threshold to the end of road\n # index = (self.est_speed[:, step] <= self.queue_threshold)\n #\n # # filter out the outliers\n # index_smoothed = deepcopy(index)\n # outlier_max = 3\n # counter = 0\n # for i in range(0, len(index)):\n #\n # if index[i] == True:\n # # trigger the coutner\n # counter += 1\n # elif index[i] == False and counter != 0:\n # if counter <= outlier_max:\n # # found outliers\n # index_smoothed[ i-counter : i ] = False\n # # reset counter\n # counter = 0\n #\n # # if i != 0 and i != len(index)-1:\n # # if sum( index[i-1:i+3] ) >=2:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == 0:\n # # if sum(index[0: 5] ) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == len(index)-1:\n # # if sum(index[ i-4 :len(index)]) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n #\n # if sum(index_smoothed) <= 3: # use 4 to suppress false alarms\n # # if less or equal then 2 cells are in congestion, it may be caused by noise.\n # self.est_queue[step] = 0\n # else:\n # # if step > 105 and step < 115:\n # # print(sum(index_smoothed))\n # # print(index_smoothed)\n # # print(index)\n #\n # self.est_queue[step] = \\\n # self.len_cells*( self.num_cells - np.argmax(index_smoothed) )\n # # try:\n # # first_cong_cell_id = [x[0] for x in enumerate( self.est_speed[:,step] ) if x[1] < self.queue_threshold][0]\n # # except IndexError:\n # # # no congested cell\n # # first_cong_cell_id = self.num_cells\n # # # the estimated queue length\n # # self.est_queue[step] = self.len_cells*( self.num_cells - first_cong_cell_id )\n #\n # # the travel time 
estimate is computed by summing up the travel time in each cell\n # self.est_traveltime[step] = np.sum(self.len_cells/self.est_speed[:,step])\n\n\n # =======================================================================\n # DEBUG\n # plot the update\n if self.__debug:\n plot_len = 19\n # qin\n if False:\n if not np.isnan(self.qin_obs[-1]):\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n positions_f = np.arange(0, len(self.qin_f)) - 0.1\n positions_a = np.arange(0, len(self.qin_a)) + 0.1\n positions_obs = np.arange(0, len(self.qin_obs))\n # predicted as red\n bp = ax1.boxplot(self.qin_f[-plot_len:],\n positions=positions_f[-plot_len:], widths=0.15,\n patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#FF4633', linewidth=1)\n # change fill color\n # box.set( facecolor = '#FF4633' )\n # corrected as green\n bp = ax1.boxplot(self.qin_a[-plot_len:],\n positions=positions_a[-plot_len:], widths=0.15, patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#07891B', linewidth=1)\n # change fill color\n # box.set( facecolor = '#07891B' )\n # measurement as blue\n ax1.scatter(positions_obs[-plot_len:], self.qin_obs[-plot_len:], color='b', marker='o', s=40,\n label='Observation')\n ax1.set_title('qin')\n # x_ticks = np.arange(0, len(self.qin_f))\n # ax1.set_xticks(x_ticks[-plot_len:])\n plt.show()\n\n # qout\n if False:\n if not np.isnan(self.qout_obs[-1]):\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n positions_f = np.arange(0, len(self.qout_f)) - 0.1\n positions_a = np.arange(0, len(self.qout_a)) + 0.1\n positions_obs = np.arange(0, len(self.qout_obs))\n # predicted as red\n bp = ax2.boxplot(self.qout_f[-plot_len:], positions=positions_f[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#FF4633')\n # corrected as green\n bp = ax2.boxplot(self.qout_a[-plot_len:], positions=positions_a[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#07891B')\n # measurement as blue\n ax2.scatter(positions_obs[-plot_len:], self.qout_obs[-plot_len:], color='b', marker='o', s=30,\n label='Observation')\n ax2.set_title('qout')\n # x_ticks = np.arange(0, len(self.qout_f))\n # ax2.set_xticks(x_ticks[-plot_len:])\n\n plt.show()\n\n # plot the estimated qin and qout\n if self.__debug:\n if True:\n qin = np.squeeze(np.array(self.est_state_all[self.x_index['qin'], :]))\n qin_meas = np.array(self.qin_obs)[1:]\n print(len(qin), len(qin_meas))\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n t = np.arange(len(qin))\n ax1.plot(t, qin, 'r-', label='Estimated')\n not_nan = ~np.isnan(qin_meas)\n ax1.plot(t[not_nan], qin_meas[not_nan], 'b', label='Measured')\n ax1.legend()\n ax1.grid(True)\n ax1.set_title('qin')\n\n plt.draw()\n\n if True:\n qout = np.squeeze(np.array(self.est_state_all[self.x_index['qout'], :]))\n qout_meas = np.array(self.qout_obs)[1:]\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n t = np.arange(len(qout))\n ax2.plot(t, qout, 'r-', label='Estimated')\n not_nan = ~np.isnan(qout_meas)\n ax2.plot(t[not_nan], qout_meas[not_nan], 'b', label='Measured')\n ax2.set_title('qout')\n ax2.legend()\n ax2.grid(True)\n plt.draw()", "def _update_edges_filtered(self, change=None):\n 
self._edges_filter.val_range = self.edges_range\n edges_ids = self._edges_filter.val_ids\n self.edges_ids = self._filter_edges(edges_ids)", "def update(self, obs, q):\n for o in obs:\n prob = np.exp(-70)\n if self.landmarks:\n # find the data association with ML\n prob, landmark_idx, ass_obs, ass_jacobian, ass_adjcov = self.find_data_association(o)\n if prob < self.TOL:\n # create new landmark\n self.create_landmark(o)\n else:\n # update corresponding EKF\n self.update_landmark(np.transpose(np.array([o])), landmark_idx, ass_obs, ass_jacobian, ass_adjcov)\n else:\n # no initial landmarks\n self.create_landmark(o)\n self.weight *= prob\n \n q.put([self]) ###", "def update_filter_params(self, fh):\n (self.data_timestamp, self.framerate,\n self.l, self.d, self.gamma,\n self.eps, self.alex, self.traceswitch) = (fh.attrs['data_timestamp'], fh.attrs['framerate'],\n fh.attrs['l'], fh.attrs['d'], fh.attrs['gamma'],\n fh.attrs['eps'], fh.attrs['alex'], fh.attrs['traceswitch'])", "def after_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_each(dataset)", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def __setitem__(self, query_filter, value):\n saved_items = []\n for index, query in enumerate(self.__bound_queries):\n saved_items.append(query.get(query_filter, None))\n try:\n query[query_filter] = value\n except:\n for q, old_value in itertools.izip(self.__bound_queries[:index],\n saved_items):\n if old_value is not None:\n q[query_filter] = old_value\n else:\n del q[query_filter]\n raise", "def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x", "def update(self, iteration):\n pass", "def before_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters:\r\n a_filter.before_each(dataset)", "def update_cards(self, filterdict, values, multiple=True):\n\n self._collection.update(self._constrain_keys(filterdict),\n {'$set': self._constrain_keys(values)},\n multi=multiple)", "def fill_obs(self, observation_table, data_store):\n for obs in observation_table:\n events = data_store.obs(obs_id=obs['OBS_ID']).events\n\n # TODO: filter out (mask) possible sources in the data\n # for now, the observation table should not contain any\n # run at or near an existing source\n\n self.counts_cube.fill_events([events])\n self.livetime_cube.data += events.observation_live_time_duration", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def filter(self, *args):\n from .elements import EqualClauseElement\n for a in args:\n for c in self._criterion:\n if isinstance(c, EqualClauseElement) and isinstance(a, EqualClauseElement) and \\\n c.attribute.node == a.attribute.node and c.attribute.label == a.attribute.label:\n c.value = a.value\n break\n 
else:\n self._criterion.append(a)\n return self", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def process_dataset(dataset, func):\n new_dataset = copy.copy(dataset)\n del new_dataset[\"val\"]\n new_dataset.update(func(dataset))\n return new_dataset", "def update(self, observed, axis):\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n total = 0\n \n # zero out irrelevant data and calculate total for observed sample\n for each in self._table:\n if (axis == 1 and each[1] != observed) or (axis == 0 and each[0] != observed):\n self._table[each] = 0\n else:\n total += self._table[each]\n \n # normalize the revelant data\n for each in self._table:\n if (axis == 1 and each[1] == observed) or (axis == 0 and each[0] == observed):\n self._table[each] = self._table[each] / total\n\n #\n # END OF YOUR CODE\n # ------------------------------------------------------------------------- ", "def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated", "def apply_filters(self):\n hurst_cut = 0\n coint_cut = 0\n half_life_cut = 0\n mean_cross_cut = 0\n\n # Create an empty list for pairs that pass the filter tests\n validated_pairs = []\n\n # Create all the pairs combination\n self.create_pair_differences()\n\n # Print the number of potential pairs\n print(f\"Number of potential pairs in before filter: {len(self.__pairs_data)}\")\n\n for pair in self.__pairs_data:\n # Select the stocks from the pair\n stock1 = pair[0]\n stock2 = pair[1]\n\n # Test the hurst filter\n if self.hurst_filter(self, stock1=stock1, stock2=stock2):\n hurst_cut += 1\n if self.engel_filter(self, stock1=stock1, stock2=stock2):\n coint_cut += 1\n if self.half_life_filter(self, stock1=stock1, stock2=stock2):\n half_life_cut += 1\n if self.mean_cross_filter(self, stock1=stock1, stock2=stock2):\n mean_cross_cut += 1\n validated_pairs.append([stock1, stock2])\n\n print(f\"Hurst filter pass: {hurst_cut}\")\n print(f\"Co-integration filter pass: {coint_cut}\")\n print(f\"Half-life filter pass: {half_life_cut}\")\n print(f\"Mean-cross filter pass: {mean_cross_cut}\")\n print(f\"Final Number of validated pairs: {len(validated_pairs)}\")\n print(\"The final validated pairs are: \")\n print(validated_pairs)\n\n # Save it to the attribute\n self.__validated_pairs = validated_pairs\n self.__validated_pairs_diff = self.__pair_diff[self.symbolize_pairs(self.__validated_pairs)]", "def test_updating_multiple_records_through_filter_with_arg_value(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n 
)\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all({\"last_name\": \"Fraud\"})\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def _update_all(self, criteria: Q, *args, **kwargs):\n raise NotImplementedError", "def updateIntensities (self,listAtoms):\r\n \r\n for i in range(len(listAtoms)):\r\n for j in range(len(listAtoms[i].spikeArray)):\r\n self.mol[i].spikeArray[j].intensity = listAtoms[i].spikeArray[j].intensity", "def updateFitness(self):\r\n for candidate in self.candidates:\r\n candidate.updateFitness()\r\n return", "def resample(self):\n self.particles = ParticleFilter.weighted_values(self.particles,\n [p.weight for p in self.particles],\n len(self.particles))\n for p in self.particles:\n p.weight = 1./len(self.particles)", "def update_all(self, stream: Iterator[Mapping[str, np.ndarray]]) -> Self:\n\n for element in stream:\n self.update(element)\n\n return self", "def update_all(self, stream: Iterator[Mapping[str, np.ndarray]]) -> Self:\n\n for element in stream:\n self.update(element)\n\n return self", "def filter(self, observations):\n\n (_, _, _, x_filtered, P_filtered) = filter(self.F, self.Q, self.H, self.R, self.x_0, self.P_0, observations)\n return x_filtered, P_filtered", "def filter_(self,fltr:torch.tensor):\n self.container = self.container[:,fltr]\n self.count_hist = self.count_hist[fltr]", "def update_individuals(individuals, eval_results):\n for ind, res in zip(individuals, eval_results):\n ind.fitness.values = res[0]\n ind.matching_node_pairs = res[1]\n ind.gtp_precisions = res[2]", "def _update_step(self, *, observations: types.ObservationsTorch) -> None:", "def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()", "def filter(self, filters):", "def filter(self, filter_dict):\n pass", "def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value", "def prefilter(self, filt=None, verbose=False):\n erased = []\n if verbose:\n msg = 'Prior to filter, we have {} cells.'.format(len(self.cells))\n print(msg)\n # check for supplementary observables to be computed\n raw_obs, func_obs = set_observable_list(filters=[filt, ])\n\n # compute suppl obs for all cells\n if raw_obs:\n for cell in self.cells:\n for obs in raw_obs:\n cell.build(obs)\n for cell in self.cells:\n if filt is not None:\n if not filt(cell):\n erased.append(cell)\n # make 
Colony.add_cell_recursive non functional\n cell.bpointer = None\n if cell.parent:\n cell.parent.childs.remove(cell)\n # make daughter cells new roots\n for ch in cell.childs:\n ch.bpointer = None\n if verbose:\n msg = '{} cells do not pass filter.'.format(len(erased))\n print(msg)\n for cell in erased:\n self.cells.remove(cell) # otherwise would be considered root\n # clean-up actions for computing extra obs\n # extra obs computation depends on tree decomposition\n # this will be done in lineage.get_timeseries()\n for cell in self.cells:\n for obs in raw_obs:\n del cell._sdata[obs.label]\n if verbose:\n msg = 'After filtering, we get {} cells.'.format(len(self.cells))\n print(msg)\n# self.metadata.filters.append(repr(boofunc))\n return", "def run(self, threshold=1e-3, max_iters=100):\n for msr in self.msrs:\n # find state transition matrix and propagate state\n phi_p, state_prop = self._compute_stm(msr.time, self.phis[-1])\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, state_prop)\n\n # update information and N\n self._update_info_and_n(y_i, h_tilde, phi_p, msr.cov)\n\n # add everything to the appropriate lists\n #new_cov = np.matmul(phi_p, np.matmul(self.cov_list[-1], phi_p))\n #self.cov_list.append(new_cov)\n self.prop_state_list.append(state_prop)\n self.estimates.append(state_prop)\n self.phis.append(phi_p)\n self.times.append(msr.time)\n\n # compute correction\n self.iters += 1\n x_hat_0 = np.linalg.solve(self.fisher_info[-1], self.N[-1])[0]\n\n # check for convergence\n if np.linalg.norm(x_hat_0) <= threshold:\n print(\"Batch filter converged in {} Iterations\".format(self.iters))\n self.cov_batch = np.linalg.inv(self.fisher_info[-1])\n\n elif self.iters >= max_iters:\n raise StopIteration(\"max_iters: {} reached without convergence\".format(max_iters))\n\n else:\n # reset everything and try again\n updated_istate = np.add(self.prop_state_list[0], np.transpose(x_hat_0))\n # fixes a strange bug wher the size of updated istate was changing\n updated_istate = np.resize(updated_istate, (1, len(self.istate)))[0]\n\n self.prop_state_list = [updated_istate]\n #self.cov_list = [self.apriori]\n self.fisher_info = [self.fisher_info[0]]\n self.pert_vec = np.subtract(self.pert_vec, x_hat_0)\n self.N = [np.matmul(self.apriori, self.pert_vec)]\n self.phis = [self.phis[0]]\n self.estimates = [updated_istate]\n self.times = [0]\n self.run()", "def _UpdateDataSetValues( self ):\n pass", "def set_filter_for_eval(self, x_filter):\n self.x_filter = x_filter\n\n entity_size = len(self.ent_to_idx)\n reln_size = len(self.rel_to_idx)\n\n first_million_primes_list = []\n curr_dir, _ = os.path.split(__file__)\n with open(os.path.join(curr_dir, \"prime_number_list.txt\"), \"r\") as f:\n logger.debug('Reading from prime_number_list.txt.')\n line = f.readline()\n i = 0\n for line in f:\n p_nums_line = line.split(' ')\n first_million_primes_list.extend([np.int64(x) for x in p_nums_line if x != '' and x != '\\n'])\n if len(first_million_primes_list) > (2 * entity_size + reln_size):\n break\n\n # subject\n self.entity_primes_left = first_million_primes_list[:entity_size]\n # obj\n self.entity_primes_right = first_million_primes_list[entity_size:2 * entity_size]\n # reln\n self.relation_primes = first_million_primes_list[2 * entity_size:(2 * entity_size + reln_size)]\n\n self.filter_keys = []\n # subject\n self.filter_keys = [self.entity_primes_left[self.x_filter[i, 0]] for i in range(self.x_filter.shape[0])]\n # obj\n self.filter_keys = [self.filter_keys[i] * 
self.entity_primes_right[self.x_filter[i, 2]]\n for i in range(self.x_filter.shape[0])]\n # reln\n self.filter_keys = [self.filter_keys[i] * self.relation_primes[self.x_filter[i, 1]]\n for i in range(self.x_filter.shape[0])]\n\n self.is_filtered = True", "def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. \"\n \"All values for these features must be 0.\"\n ],\n )", "def particle_filter(particle_set_t, measurement_t):\n global count\n n_samples, dim = particle_set_t.shape # no of particles and dimension of each particle\n\n pred_state = np.zeros((n_samples, dim), dtype=\"float64\") # store the predicted state \n weights = np.zeros(n_samples, dtype=\"float64\") # corresponding weights for resampling\n\n particle_set_t1 = np.zeros((n_samples, dim), dtype=\"float64\") # next iteration of particles\n\n\n # this loop calculates \\bar{X_t}, i.e. the predicted belief.\n for n in range(n_samples):\n # predicted motion step:\n xn_t1 = sample_motion_model(particle_set_t[n]) # 3x1 vector: hypothetical state\n\n # measurement correction step:\n weight_xn_t1 = state_likelihood(measurement_t, xn_t1) # scalar value\n\n pred_state[n] = xn_t1\n weights[n] = weight_xn_t1\n\n \n # It was observed that if all weights are 0, the resampling step breaks. \n # Thus, adding a uniform distribution. This is obviously a very bad idea \\ \n # as the true state can easily be discarded in the resampling step: TODO!\n if np.sum(weights) > 0.0:\n weights = weights/np.sum(weights) # normalize array only when sum in not 0\n else:\n print(\"possile divergence!\")\n weights[:] = 1 / n_samples # if sum is 0 then assign uniform distribution throughout\n\n\n # the resampling step:\n # indices = monte_carlo.residual_resample(weights)\n indices = monte_carlo.stratified_resample(weights)\n count += 1\n print(count)\n\n # new particle set is particles at index locations\n for i, index in enumerate(indices):\n particle_set_t1[i] = pred_state[index]\n\n return particle_set_t1", "def filter(self,state0):\n ok,tchi2 = True,0.\n state = state0.copy()\n ii = 0\n for node in self.nodes:\n zrun = node.zrun\n ok,state,F,Q = self.model.propagate(state,zrun)\n if (not ok):\n warning(\"kfilter.filter not possible to filter at \",(ii,zrun))\n debug(\"kfilter.filter i,ok,chi2 \",(ii,ok,tchi2))\n return ok,tchi2\n node.F = F\n node.Q = Q\n node.setstate('pred',state)\n fstate,fchi2 = node.predict(state)\n node.setstate('filter',fstate)\n node.setchi2('filter',fchi2)\n tchi2+=fchi2\n self.model.user_filter(node)\n state = node.getstate('filter').copy()\n ii+=1\n self.status='filter'\n debug(\"kfilter.filter ok,chi2 \",(ok,tchi2))\n return ok,tchi2", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n 
FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n #update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def update(self, **kwargs):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"update {}\".format(item))\n item.update(**kwargs)", "def train_filter(filt, U, Y):\n n = len(U)\n X = []\n R = []\n for i in range(n):\n x_hat_bar = filt.time_update(U[i])\n x_hat, P = filt.measurement_update(Y[i], x_hat_bar)\n print(i, x_hat, P)\n X.append(x_hat)\n R.append(P)\n return np.array(X), R", "def test_updating_record_through_filter(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update(last_name=\"Fraud\")\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def do_filter(self):\n return np.sum(np.multiply(self._past_values, self._impulse_response))", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def updateValues(self, otherDialog):\n for index in range(len(self._values)):\n for oval in otherDialog._values:\n if self._values[index].getID() == oval.getID():\n self._values[index].combine( oval, intelligent=False )\n break", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def observe(self, observation):\n # shallow copy observation (deep copy can be expensive)\n obs = observation.copy()\n batch_idx = 
self.opt.get('batchindex', 0)\n self.observation = obs\n #self.answers[batch_idx] = None\n return obs", "def _update_raw_data(params):\n from scipy.signal import filtfilt\n start = params['t_start']\n stop = params['raw'].time_as_index(start + params['duration'])[0]\n start = params['raw'].time_as_index(start)[0]\n data_picks = _pick_data_channels(params['raw'].info)\n data, times = params['raw'][:, start:stop]\n if params['projector'] is not None:\n data = np.dot(params['projector'], data)\n # remove DC\n if params['remove_dc'] is True:\n data -= np.mean(data, axis=1)[:, np.newaxis]\n if params['ba'] is not None:\n data[data_picks] = filtfilt(params['ba'][0], params['ba'][1],\n data[data_picks], axis=1, padlen=0)\n # scale\n for di in range(data.shape[0]):\n data[di] /= params['scalings'][params['types'][di]]\n # stim channels should be hard limited\n if params['types'][di] == 'stim':\n norm = float(max(data[di]))\n data[di] /= norm if norm > 0 else 1.\n # clip\n if params['clipping'] == 'transparent':\n data[np.logical_or(data > 1, data < -1)] = np.nan\n elif params['clipping'] == 'clamp':\n data = np.clip(data, -1, 1, data)\n params['data'] = data\n params['times'] = times", "def _filter_satisfied(self, update_setd=False):\n\n model = self.oracle.get_model()\n setd = set()\n\n for i, cl in enumerate(self.soft):\n if not self.satc[i]:\n if self._satisfied(cl, model):\n self.satc[i] = True\n self.ss_assumps.append(self.sels[i])\n else:\n setd = setd.union(set(cl))\n\n if update_setd:\n self.setd = list(setd)", "def _put_resolved_booleans_into_filter(\n filter_operator: Operator, model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues]\n) -> None:\n for operator in _model_filter_in_operator_generator(filter_operator):\n model_filter = operator.unresolved_value\n operator.resolved_value = model_filters_to_resolved_values.get(\n model_filter, BooleanValues.UNKNOWN\n )", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def set_observations(self, oseries):\n self.oseries_index = oseries.index\n observations_masked = np.ma.array(oseries,\n mask=(~np.isfinite(oseries)))\n (n_timesteps, dimobs) = observations_masked.shape\n self.observation_indices = np.zeros((n_timesteps, dimobs),\n dtype=np.float64)\n self.observation_count = np.zeros(n_timesteps, dtype=np.int64)\n self.observations = np.zeros((n_timesteps, dimobs), dtype=np.float64)\n\n for t in range(n_timesteps):\n observation = observations_masked[t]\n # add large value to find all finite non-masked values\n obstmp = observation + 1e10\n obsindices = obstmp.nonzero()[0]\n self.observation_count[t] = len(obsindices)\n\n if (len(obsindices) > 0):\n for i, _ in enumerate(obsindices):\n obsid = int(obsindices[i])\n self.observations[t, obsid] = observation[obsid]\n self.observation_indices[t, i] = obsid", "def updateObservation(self, obs):\n self.settingsDb.updateObservation(self.observationsTableName(), obs)", "def update(self):\n\n terms_toRemove = []\n\n for termIndex, [term_constantFactor, term_unknowns_attributeAddresses] in enumerate(self.LHS):\n\n # Check if coefficient is 0 - then no need to process any of the unknowns since term will be 0 anyways\n if term_constantFactor == 0:\n terms_toRemove.append(termIndex)\n continue # continue to next term, no need to resolve the unknowns of this term since the product will be 0 anyways\n\n # Check if any unknowns became known\n unknowns_toRemove = []\n for unknown_attributeAddress in term_unknowns_attributeAddresses:\n attribute = 
getattr_fromAddress(*unknown_attributeAddress)\n if isNumeric(attribute):\n # object.attribute which had previously been identified as unknown now has a value, add it to the constant factor product and remove from the unknowns\n self.LHS[termIndex][0] *= attribute # multiply it with the constant factor product\n unknowns_toRemove.append([termIndex, unknown_attributeAddress])\n for termIndex, unknown_attributeAddress in unknowns_toRemove: # remove unknowns which have become known in the end\n # removing in the end not to tamper with the iteration of the above loop\n self.LHS[termIndex][1].remove(unknown_attributeAddress)\n\n # Move constants to RHS\n if self.LHS[termIndex][1] == []:\n # if term has no unknowns, it is a constant, move to RHS\n self.RHS -= self.LHS[termIndex][0]\n self.LHS.pop(termIndex)\n\n for termIndex in reversed(terms_toRemove): # reversed - otherwise would tamper with indices of items identified for removal\n self.LHS.pop(termIndex)\n\n self._gatherUnknowns()", "def filter(self):\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def update(self, values, train, eta=.1):\n\t\tfor X, y_true in zip(values, train):\n\t\t\tprediction = self.activate(X)\n\t\t\terror = y_true - prediction\n\t\t\tweight_update = error * eta * X\n\t\t\tself.weights += weight_update", "def finalize(self):\n # this forces only some filters will be used for feature computation\n # this is not ideal, but a necessary stop-gap while we revise\n # the PropertyTable SQL\n self._good_filters = ['u', 'g', 'r', 'i', 'z', 'Y']\n self._good_filter_wave = np.array([3569.5, 4766.5, 6214.5, 7544.5, 8707.5, 10039.5])\n\n use_filters = set(self._good_filters) & self.filters\n if not self.filters.issubset(use_filters):\n message = 'Number of useful filters ({}) does not equal number available filters ({}) - some filters will not be used'.format(\n ''.join(use_filters), ''.join(self.filters))\n warnings.warn(message, RuntimeWarning)\n self.filters = set(use_filters)\n mask = np.array([True if x in self.filters else False for x in self.passband])\n\n if mask.size: # Not empty arrays\n self.time = self.time[mask]\n self.flux = self.flux[mask]\n self.fluxErr = self.fluxErr[mask]\n self.obsId = self.obsId[mask]\n self.passband = self.passband[mask]\n self.zeropoint = self.zeropoint[mask]\n for key in self._extra_cols:\n val = getattr(self, key)\n setattr(self, key, val[mask])\n\n self.nobs = len(self.time)\n if self.nobs == 0:\n message = 'Object {} with locus ID {} has no good observations.'.format(self.objectId, self.locusId)\n raise ValueError(message)\n\n return self._remove_flux_extinction()", "def step_filter(self, v, imu_meas, meas):\n\n if (imu_meas is not None) and (meas is None):\n xp, Pp = self.prediction(v, imu_meas)\n self.x = xp\n self.P_t = Pp\n return xp\n\n elif (imu_meas is None) and (meas is not None):\n z_t = self.transformMeasurement(meas)\n self.x = z_t\n return z_t\n\n else: # both measurements contain values\n xp, Pp = self.prediction(v, imu_meas)\n z_t = self.transformMeasurement(meas)\n x, P = self.update(z_t, xp, Pp)\n\n self.x = x\n self.P_t = P\n return x", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def _apply_filter(self, fn=lambda ngram, freq: False):\n tmp_ngram = 
FreqDist()\n for ngram, freq in self.ngram_fd.items():\n if not fn(ngram, freq):\n tmp_ngram[ngram] = freq\n self.ngram_fd = tmp_ngram", "def step_filter(self, v, imu_meas, z_t):\n # YOUR CODE HERE\n pass", "def parameter_update(self, X, X_mask):\n data_with_mask = np.hstack([X, X_mask])\n X_pred, X_MC_preds = self.predict(data_with_mask)\n X[X_mask] *= self.keep_coeff\n X[X_mask] += self.weight_update_coeff * X_pred[X_mask]\n return X, X_MC_preds", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "def _set_values(loop_data):\n \n # These are the indexes of all the data that are unassigned\n value_indexes = loop_data.ix[(loop_data.loop==0)&(loop_data.tag==0)&(loop_data.stop==0)].index\n loop_data[u'value'] = 0\n loop_data.ix[value_indexes,u'value'] = 1\n \n # These are the indexes of data who follow either a loop, label, or stop tag\n value_indexes_begin = loop_data.ix[(value_indexes-1)].ix[loop_data.value==0].index + 1\n \n # The first rows of each data correspond to data for their respective loops\n loop_max = loop_data.loop.max()\n loop_range = np.arange(loop_max-1, -1, -1)\n \n for idx in value_indexes_begin:\n loop_data.ix[idx:idx+len(loop_range)-1,u'value'] += loop_range\n \n return loop_data", "def update_with_observation(particles, landmark_list):\r\n for p in particles:\r\n for i in range(np.shape(landmark_list)[1]):\r\n p.update_particle(landmark_list[:, i])\r\n\r\n return particles", "def process_observation(self, observation):\n return observation", "def process_observation(self, observation):\n return observation", "def iterate(self, data):\n \n # Append data to self.data\n self.data = np.append(self.data, data)\n \n for i, d in enumerate(data):\n update = self.current*self.likelihood(d)\n self.current = self._normalize(update)\n self.posterior = np.concatenate((self.posterior,[self.current]))\n \n print(str(len(data)) + \" iterations completed!\")\n \n return None", "async def _update_values(self, model: Model):\n\n raise NotImplementedError", "def Iterate(self):\n\t\tfor atom in self.atoms:\n\t\t\tself.UpdateAtom(atom)", "def update_state(self):\n # return initial state if no observation was yet\n if len(self.obs_history) == 0:\n return self.kf.initial_state_mean, self.kf.initial_state_covariance\n\n hist = np.ma.masked_array(self.obs_history, mask=np.zeros((1,)))\n for i in range(len(hist)):\n if hist[i] == -1e8:\n hist[i] = np.ma.masked\n\n # print(hist, hist.shape)\n return self.kf.filter(hist)", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def apply_filters(\n isovar_result,\n filter_thresholds={},\n filter_flags=[]):\n filter_values = OrderedDict(isovar_result.filter_values.items())\n new_filter_values = evaluate_filters(\n isovar_result,\n filter_thresholds=filter_thresholds,\n filter_flags=filter_flags)\n filter_values.update(new_filter_values)\n return isovar_result.clone_with_updates(filter_values=filter_values)", "def healthcare_filter(df_all): \n #get requested assets under healthcare tag \n df_filtered = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_filtered = df_filtered.append(df_all.loc[row]) #if so, save in df \n if '\"healthcare\"=>\"doctor\"' in df_filtered[\"asset\"][row]:\n 
df_filtered[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"healthcare\"=>\"pharmacy\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'pharmacy'\n elif '\"healthcare\"=>\"hospital\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'hospital'\n elif '\"healthcare\"=>\"clinic\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'clinic'\n elif '\"healthcare\"=>\"dentist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'dentist'\n elif '\"healthcare\"=>\"physiotherapist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'physiotherapist'\n elif '\"healthcare\"=>\"alternative\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'alternative'\n elif '\"healthcare\"=>\"laboratory\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'laboratory'\n elif '\"healthcare\"=>\"optometrist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'optometrist'\n elif '\"healthcare\"=>\"rehabilitation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'rehabilitation'\n elif '\"healthcare\"=>\"blood_donation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'blood_donation'\n elif '\"healthcare\"=>\"birthing_center\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'birthing_center'\n else:\n df_filtered = df_filtered.drop(index=row)\n \n return df_filtered", "def filter_patients(self):\n\n if self.dataset is None:\n self.dataset = h5py.File(self.filename, 'r')['dataset']\n \n # Find feature indices belonging to specific criteria\n inclusion_info = self.filter_params['inclusion']\n # exclusion_info = self.filter_params['exclusion']\n case_control_info = self.filter_params['case_control']\n\n inclusion_inds = self.check_criteria(inclusion_info, case_control=False)\n # exclusion_inds = self.check_criteria(exclusion_info, case_control=False)\n case_inds, control_inds = self.check_criteria(case_control_info, case_control=True)\n\n filtered_inds = {}\n # inclusion_exclusion_inds = np.setdiff1d(inclusion_inds, exclusion_inds)\n filtered_inds['case'] = np.intersect1d(inclusion_inds, case_inds)\n filtered_inds['control'] = np.intersect1d(inclusion_inds, control_inds)\n\n return filtered_inds", "def test_updating_multiple_records_through_filter_with_kwarg_value(\n self, test_domain\n ):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all(last_name=\"Fraud\")\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert 
u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def train_filter2(filt, U, Y):\n n = len(U)\n X, R = [], []\n for i in range(n):\n print(i)\n filt.predict(fx_args=(U[i],))\n filt.update(Y[i])\n X.append(filt.x.copy())\n R.append(filt.P.copy())\n X = np.array(X)\n X, R, K = filt.rts_smoother(X, R, fx_args=(U[i],))\n return X, R", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def forc_model(self):\n lag1_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag1_by_store')\n lag2_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag2_by_store')\n for add in self.X.address1.unique():\n add_mask = self.X.address1 == add\n foo = self.X[ add_mask ].sort_values('visit_date', ascending=False)\n top_index = foo.index[0]\n clust = int(foo.cluster.values[0])\n # get values from last visit for store\n base_input = foo[self.model_mask_cols].values[0]\n base_actual = self.y[top_index]\n lag2_val = base_input[lag1_loc]\n lag1_val = base_actual\n\n for i in range(1, self.num_periods + 1):\n model = self.model_list[clust]\n inputs = base_input\n inputs[lag1_loc] = lag1_val\n inputs[lag2_loc] = lag2_val\n \n pred = model.predict(inputs.reshape(1, -1))\n self._update_cust_table(add, i, pred)\n \n lag2_val = lag1_val\n lag1_val = pred" ]
[ "0.63613224", "0.62198365", "0.6080035", "0.60798997", "0.60306054", "0.58865666", "0.5883915", "0.58141124", "0.5800074", "0.5787996", "0.5541149", "0.5536493", "0.55307084", "0.5513393", "0.55123764", "0.5507698", "0.54158753", "0.5414475", "0.5403888", "0.53936756", "0.5388999", "0.53880715", "0.5363586", "0.5355547", "0.5351123", "0.534992", "0.53498495", "0.5347381", "0.5285174", "0.52653944", "0.5261038", "0.5215623", "0.5215302", "0.52017486", "0.52017146", "0.5200738", "0.51895565", "0.51877266", "0.518692", "0.51809186", "0.5177147", "0.517705", "0.517705", "0.51713455", "0.5161349", "0.5150539", "0.51468694", "0.5144904", "0.51423365", "0.513975", "0.51379853", "0.51308095", "0.51275927", "0.51274836", "0.5118717", "0.51145047", "0.51115704", "0.51114297", "0.5109708", "0.5100439", "0.50909036", "0.509037", "0.5087856", "0.5087688", "0.50621223", "0.5049711", "0.5049681", "0.5045099", "0.5040932", "0.50364006", "0.5034882", "0.50339144", "0.5019592", "0.5012907", "0.50115496", "0.50065666", "0.50048995", "0.5004228", "0.49995285", "0.49954176", "0.4990749", "0.49808678", "0.49754924", "0.4966135", "0.49632093", "0.49604845", "0.4956182", "0.4955419", "0.4955419", "0.49450874", "0.49429345", "0.4934977", "0.49320674", "0.49267983", "0.49172685", "0.49075392", "0.48998687", "0.48974884", "0.48974785", "0.48944095", "0.48937657" ]
0.0
-1
Iterate over the observations and update the filtered values after each iteration
def iterateRegression(self, plot=True, estimate=False, init_params=None):
    # Create empty arrays to store values
    F = np.zeros(len(self.y))
    a_b = np.zeros((2,len(self.y)))  # alphas and betas
    v = np.zeros(len(self.y))
    P = np.zeros((len(self.y),2,2))

    # Initialize at the initial values parsed to the class
    if estimate == True:
        self.T = np.array([[init_params[0],0],[0,1]])
        self.c = np.vstack(([init_params[1],0]))
        self.R = np.vstack(([init_params[2]],[0]))
        self.a_start = np.vstack(([self.alpha_mean], [init_params[3]]))
    P[0,:,:] = self.P_start
    a_b[:,0:1] = self.a_start

    # Iterate
    for t in range(0, len(self.y) - 1):
        # Slightly different updating equations for KF since we now have regression coefficient
        v[t] = self.y[t] - np.dot(self.Z[:,t:t+1].T,a_b[:,t]) - self.d
        F[t] = np.dot(np.dot(self.Z[:,t:t+1].T, P[t]),self.Z[:,t:t+1]) + self.H
        a_t = a_b[:,t:t+1] + np.dot(P[t],self.Z[:,t:t+1] / F[t]) * v[t]
        a_b[:,t + 1:t+2] = np.dot(self.T, a_t) + self.c
        P_t = P[t] - np.dot((np.dot(P[t],self.Z[:,t:t+1]) / F[t]),np.dot(self.Z[:,t:t+1].T, P[t]))
        P[t + 1,:,:] = np.dot(np.dot(self.T, P_t),self.T.transpose()) + np.dot(self.R * self.Q,self.R.transpose())
    F[-1] = np.dot(np.dot(self.Z[:,-1:].T, P[-1]),self.Z[:,-1:]) + self.H
    v[-1] = self.y[-1] - a_b[0,-1:]

    # Obtain std error of prediction from variance
    std = np.sqrt((P[:,0,0] * self.H) / (P[:,0,0] + self.H))

    return a_b, std, P, v, F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, other):\n for filter, value in other.items():\n self.__setitem__(filter, value)", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)", "def update(self, updates, predicate):\n for row in self.rows:\n if predicate(row):\n for column, new_value in updates.items():\n row[column] = new_value", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata", "def UpdateSet(self, dataset):\r\n for data in dataset:\r\n self.UpdateOddsRatioVsNoNorm(data)", "def filter_update(observations, observation_matrix, observation_variance,\n observation_indices, observation_count,\n state_mean, state_covariance):\n\n sigma = 0.\n detf = 0.\n n_observation = np.int(observation_count)\n for i in range(n_observation):\n observation_index = int(observation_indices[i])\n obsmat = observation_matrix[observation_index, :]\n innovation = (observations[observation_index]\n - np.dot(obsmat, state_mean))\n dot_statecov_obsmat = np.dot(state_covariance, obsmat)\n innovation_covariance = (np.dot(obsmat, dot_statecov_obsmat)\n + observation_variance[observation_index])\n kgain = dot_statecov_obsmat / innovation_covariance\n state_covariance = (state_covariance\n - (np.outer(kgain, kgain)\n * innovation_covariance))\n\n state_mean = state_mean + kgain * innovation\n\n sigma = sigma + (innovation ** 2 / innovation_covariance)\n detf = detf + np.log(innovation_covariance)\n\n return (state_mean, state_covariance, sigma, detf)", "def update_filter(self, measurement, robot_pos):\n new_weights = []\n for p in self.particles:\n p = self.__move(p, self.MOTION)\n angle = self.__measurement(p, robot_pos)\n prob = self.__measurement_prob(angle, measurement,\n self.sense_noise)\n new_weights.append(prob)\n new_weights = np.array(new_weights)\n new_weights /= np.sum(new_weights) # normalized weights\n self.weights = new_weights\n\n # if self.__neff() > self.n / 2:\n self.particles = self.__resample()", "def set_observations(self, factor):\r\n relevant_obs = set(self.obs).intersection(set(factor.get_variables()))\r\n if relevant_obs:\r\n factor.set_observations({x:self.obs[x] for x in relevant_obs})", "def filter(self, update):\n\n raise NotImplementedError", "def ObserveEvidence(self,E):\r\n for v,e in E.iteritems():\r\n indx, = (self.var==v).nonzero() # find i where f.var[i]=v\r\n if indx.size != 0:\r\n A = 
I2A(range(len(self.val)), self.card) # all assignments\r\n A = A[:,indx].flatten() # only interested in 'indx' element of each row\r\n self.val[A != e] = 0", "def update(self, predictions, real_vals):\n for pred, actual in zip(predictions, real_vals):\n self.cm.loc[actual, pred] += 1", "def filter(self, observable):", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def update_values(self):\n # have to reset params to 0 when recalculating\n self.total_weight = self.total_price = self.total_value = self.total_fitness = 0\n for index, value in enumerate(self.item_stats):\n if value == 1:\n self.total_weight += self.items[index].weight\n self.total_price += self.items[index].price\n self.total_value += self.items[index].value\n self.total_fitness += self.items[index].fitness", "def update(self, z):\n raise NotImplementedError('Must implement an update step for the filter.')", "def run_batch_filter(self):\n if self.speed_data is None and self.flow_data is None and self.traveltime_data is None:\n print(\n 'Warning: The measurement data must be set before running the batch filter: use function self.set_meas_data()')\n\n # =======================================================================\n # the initial ensembles, which should have been set externally\n X_init = np.matrix(np.zeros((self.dim_state, self.num_ensembles)))\n print(\n 'Setting initial ensembles: rho {0}; qin {1}; qout {2}'.format(self.init_rho, self.init_qin, self.init_qout))\n for ens in range(0, self.num_ensembles):\n X_init[self.x_index['density'][0]:\n self.x_index['density'][self.num_cells - 1], ens] = self.init_rho\n X_init[self.x_index['qin'], ens] = self.init_qin\n X_init[self.x_index['qout'], ens] = self.init_qout\n\n # print('setted qin {0}; qout {1}'.format(X_init[self.x_index['qin'], ens], X_init[self.x_index['qout'], ens] ))\n # add noise to each ensemble\n X_init[:, ens] += np.matrix(np.random.multivariate_normal(\n np.zeros(self.dim_state), self.Q)).reshape((self.dim_state, 1))\n\n self.set_initial_ensembles(X_init)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the initial state\n if self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n self.qout_obs.append(np.nan)\n\n # The enKF runs at the finest time grid\n # for each step, update the system\n for step in range(0, self.num_steps):\n\n # update status\n sys.stdout.write('\\r')\n sys.stdout.write('Status: filtering step {0}/{1}'.format(step, self.num_steps))\n sys.stdout.flush()\n # print('Status: filtering step {0}'.format(step))\n\n cur_time = (step + 1) * self.dur_steps\n\n # get the effective measurement\n eff_flow, eff_speed, eff_traveltime = self.__get_eff_meas(cur_time)\n\n # build the observation index\n self.y_index, self.dim_obs, y_obs, cov_noise = self.__build_obs_index(eff_flow, eff_speed, eff_traveltime)\n\n # update the estimate for this step\n est_state = self.update_estimate(y_obs, cov_noise, cur_time)\n\n # =======================================================================\n # DEBUG\n # save the qin and qout in the corresponding probe data\n # save the updated state\n if 
self.__debug:\n self.qin_f.append(np.squeeze(np.array(self.X_f[self.x_index['qin'], :])).tolist())\n self.qin_a.append(np.squeeze(np.array(self.X_a[self.x_index['qin'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_entrance_sensor in self.y_index['flow'].keys():\n self.qin_obs.append(y_obs[self.y_index['flow'][self.__debug_entrance_sensor]])\n # print('y_index[flow]:{0}'.format(self.y_index['flow'].keys()))\n # print('y_obs[ y_index[flow][entrance] ]:{0}'.format(\n # y_obs[ self.y_index['flow'][self.__debug_entrance_sensor]],\n # self.__debug_entrance_sensor))\n else:\n self.qin_obs.append(np.nan)\n\n self.qout_f.append(np.squeeze(np.array(self.X_f[self.x_index['qout'], :])).tolist())\n self.qout_a.append(np.squeeze(np.array(self.X_a[self.x_index['qout'], :])).tolist())\n if 'flow' in self.y_index.keys() and self.__debug_exit_sensor in self.y_index['flow'].keys():\n self.qout_obs.append(y_obs[self.y_index['flow'][self.__debug_exit_sensor]])\n else:\n self.qout_obs.append(np.nan)\n # =======================================================================\n # save the estimated state\n self.est_state_all[:, step] = est_state\n\n # decouple and save into self.est_density, self.est_speed, self.est_queue, self.est_traveltime\n self.est_density[:, step] = est_state[0:self.num_cells, 0]\n\n # the speed is computed using the fundamental diagram\n for cell_id in range(0, self.num_cells):\n # use the static FD at this step\n self.est_speed[cell_id, step] = self.__rho2v(self.vm_cells[cell_id, 0], self.beta_cells[cell_id, 0],\n self.rhoc_cells[cell_id, 0], self.wc_cells[cell_id, 0],\n self.est_density[cell_id, step])\n\n # REMARK: the queue and travel time a post-processed from the speed field.\n # They are computed in cross_evaluation class for all algorithms\n # the queue length starts from the first cell with speed below queue_threshold to the end of road\n # index = (self.est_speed[:, step] <= self.queue_threshold)\n #\n # # filter out the outliers\n # index_smoothed = deepcopy(index)\n # outlier_max = 3\n # counter = 0\n # for i in range(0, len(index)):\n #\n # if index[i] == True:\n # # trigger the coutner\n # counter += 1\n # elif index[i] == False and counter != 0:\n # if counter <= outlier_max:\n # # found outliers\n # index_smoothed[ i-counter : i ] = False\n # # reset counter\n # counter = 0\n #\n # # if i != 0 and i != len(index)-1:\n # # if sum( index[i-1:i+3] ) >=2:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == 0:\n # # if sum(index[0: 5] ) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n # # elif i == len(index)-1:\n # # if sum(index[ i-4 :len(index)]) >= 3:\n # # index_smoothed[i] = True\n # # else:\n # # index_smoothed[i] = False\n #\n # if sum(index_smoothed) <= 3: # use 4 to suppress false alarms\n # # if less or equal then 2 cells are in congestion, it may be caused by noise.\n # self.est_queue[step] = 0\n # else:\n # # if step > 105 and step < 115:\n # # print(sum(index_smoothed))\n # # print(index_smoothed)\n # # print(index)\n #\n # self.est_queue[step] = \\\n # self.len_cells*( self.num_cells - np.argmax(index_smoothed) )\n # # try:\n # # first_cong_cell_id = [x[0] for x in enumerate( self.est_speed[:,step] ) if x[1] < self.queue_threshold][0]\n # # except IndexError:\n # # # no congested cell\n # # first_cong_cell_id = self.num_cells\n # # # the estimated queue length\n # # self.est_queue[step] = self.len_cells*( self.num_cells - first_cong_cell_id )\n #\n # # the travel time 
estimate is computed by summing up the travel time in each cell\n # self.est_traveltime[step] = np.sum(self.len_cells/self.est_speed[:,step])\n\n\n # =======================================================================\n # DEBUG\n # plot the update\n if self.__debug:\n plot_len = 19\n # qin\n if False:\n if not np.isnan(self.qin_obs[-1]):\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n positions_f = np.arange(0, len(self.qin_f)) - 0.1\n positions_a = np.arange(0, len(self.qin_a)) + 0.1\n positions_obs = np.arange(0, len(self.qin_obs))\n # predicted as red\n bp = ax1.boxplot(self.qin_f[-plot_len:],\n positions=positions_f[-plot_len:], widths=0.15,\n patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#FF4633', linewidth=1)\n # change fill color\n # box.set( facecolor = '#FF4633' )\n # corrected as green\n bp = ax1.boxplot(self.qin_a[-plot_len:],\n positions=positions_a[-plot_len:], widths=0.15, patch_artist=False)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#07891B', linewidth=1)\n # change fill color\n # box.set( facecolor = '#07891B' )\n # measurement as blue\n ax1.scatter(positions_obs[-plot_len:], self.qin_obs[-plot_len:], color='b', marker='o', s=40,\n label='Observation')\n ax1.set_title('qin')\n # x_ticks = np.arange(0, len(self.qin_f))\n # ax1.set_xticks(x_ticks[-plot_len:])\n plt.show()\n\n # qout\n if False:\n if not np.isnan(self.qout_obs[-1]):\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n positions_f = np.arange(0, len(self.qout_f)) - 0.1\n positions_a = np.arange(0, len(self.qout_a)) + 0.1\n positions_obs = np.arange(0, len(self.qout_obs))\n # predicted as red\n bp = ax2.boxplot(self.qout_f[-plot_len:], positions=positions_f[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#FF4633')\n # corrected as green\n bp = ax2.boxplot(self.qout_a[-plot_len:], positions=positions_a[-plot_len:], widths=0.18,\n patch_artist=True)\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=1)\n # change fill color\n box.set(facecolor='#07891B')\n # measurement as blue\n ax2.scatter(positions_obs[-plot_len:], self.qout_obs[-plot_len:], color='b', marker='o', s=30,\n label='Observation')\n ax2.set_title('qout')\n # x_ticks = np.arange(0, len(self.qout_f))\n # ax2.set_xticks(x_ticks[-plot_len:])\n\n plt.show()\n\n # plot the estimated qin and qout\n if self.__debug:\n if True:\n qin = np.squeeze(np.array(self.est_state_all[self.x_index['qin'], :]))\n qin_meas = np.array(self.qin_obs)[1:]\n print(len(qin), len(qin_meas))\n fig1 = plt.figure(figsize=(10, 5), dpi=100)\n ax1 = fig1.add_subplot(111)\n t = np.arange(len(qin))\n ax1.plot(t, qin, 'r-', label='Estimated')\n not_nan = ~np.isnan(qin_meas)\n ax1.plot(t[not_nan], qin_meas[not_nan], 'b', label='Measured')\n ax1.legend()\n ax1.grid(True)\n ax1.set_title('qin')\n\n plt.draw()\n\n if True:\n qout = np.squeeze(np.array(self.est_state_all[self.x_index['qout'], :]))\n qout_meas = np.array(self.qout_obs)[1:]\n fig2 = plt.figure(figsize=(10, 5), dpi=100)\n ax2 = fig2.add_subplot(111)\n t = np.arange(len(qout))\n ax2.plot(t, qout, 'r-', label='Estimated')\n not_nan = ~np.isnan(qout_meas)\n ax2.plot(t[not_nan], qout_meas[not_nan], 'b', label='Measured')\n ax2.set_title('qout')\n ax2.legend()\n ax2.grid(True)\n plt.draw()", "def _update_edges_filtered(self, change=None):\n 
self._edges_filter.val_range = self.edges_range\n edges_ids = self._edges_filter.val_ids\n self.edges_ids = self._filter_edges(edges_ids)", "def update(self, obs, q):\n for o in obs:\n prob = np.exp(-70)\n if self.landmarks:\n # find the data association with ML\n prob, landmark_idx, ass_obs, ass_jacobian, ass_adjcov = self.find_data_association(o)\n if prob < self.TOL:\n # create new landmark\n self.create_landmark(o)\n else:\n # update corresponding EKF\n self.update_landmark(np.transpose(np.array([o])), landmark_idx, ass_obs, ass_jacobian, ass_adjcov)\n else:\n # no initial landmarks\n self.create_landmark(o)\n self.weight *= prob\n \n q.put([self]) ###", "def update_filter_params(self, fh):\n (self.data_timestamp, self.framerate,\n self.l, self.d, self.gamma,\n self.eps, self.alex, self.traceswitch) = (fh.attrs['data_timestamp'], fh.attrs['framerate'],\n fh.attrs['l'], fh.attrs['d'], fh.attrs['gamma'],\n fh.attrs['eps'], fh.attrs['alex'], fh.attrs['traceswitch'])", "def after_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_each(dataset)", "def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)", "def __setitem__(self, query_filter, value):\n saved_items = []\n for index, query in enumerate(self.__bound_queries):\n saved_items.append(query.get(query_filter, None))\n try:\n query[query_filter] = value\n except:\n for q, old_value in itertools.izip(self.__bound_queries[:index],\n saved_items):\n if old_value is not None:\n q[query_filter] = old_value\n else:\n del q[query_filter]\n raise", "def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x", "def update(self, iteration):\n pass", "def before_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters:\r\n a_filter.before_each(dataset)", "def update_cards(self, filterdict, values, multiple=True):\n\n self._collection.update(self._constrain_keys(filterdict),\n {'$set': self._constrain_keys(values)},\n multi=multiple)", "def fill_obs(self, observation_table, data_store):\n for obs in observation_table:\n events = data_store.obs(obs_id=obs['OBS_ID']).events\n\n # TODO: filter out (mask) possible sources in the data\n # for now, the observation table should not contain any\n # run at or near an existing source\n\n self.counts_cube.fill_events([events])\n self.livetime_cube.data += events.observation_live_time_duration", "def reset_filter(self):\n arlen = len(self.variant_list)\n self.filter = np.zeros((arlen, arlen)) == 0", "def filter(self, *args):\n from .elements import EqualClauseElement\n for a in args:\n for c in self._criterion:\n if isinstance(c, EqualClauseElement) and isinstance(a, EqualClauseElement) and \\\n c.attribute.node == a.attribute.node and c.attribute.label == a.attribute.label:\n c.value = a.value\n break\n 
else:\n self._criterion.append(a)\n return self", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def process_dataset(dataset, func):\n new_dataset = copy.copy(dataset)\n del new_dataset[\"val\"]\n new_dataset.update(func(dataset))\n return new_dataset", "def update(self, observed, axis):\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n total = 0\n \n # zero out irrelevant data and calculate total for observed sample\n for each in self._table:\n if (axis == 1 and each[1] != observed) or (axis == 0 and each[0] != observed):\n self._table[each] = 0\n else:\n total += self._table[each]\n \n # normalize the revelant data\n for each in self._table:\n if (axis == 1 and each[1] == observed) or (axis == 0 and each[0] == observed):\n self._table[each] = self._table[each] / total\n\n #\n # END OF YOUR CODE\n # ------------------------------------------------------------------------- ", "def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated", "def apply_filters(self):\n hurst_cut = 0\n coint_cut = 0\n half_life_cut = 0\n mean_cross_cut = 0\n\n # Create an empty list for pairs that pass the filter tests\n validated_pairs = []\n\n # Create all the pairs combination\n self.create_pair_differences()\n\n # Print the number of potential pairs\n print(f\"Number of potential pairs in before filter: {len(self.__pairs_data)}\")\n\n for pair in self.__pairs_data:\n # Select the stocks from the pair\n stock1 = pair[0]\n stock2 = pair[1]\n\n # Test the hurst filter\n if self.hurst_filter(self, stock1=stock1, stock2=stock2):\n hurst_cut += 1\n if self.engel_filter(self, stock1=stock1, stock2=stock2):\n coint_cut += 1\n if self.half_life_filter(self, stock1=stock1, stock2=stock2):\n half_life_cut += 1\n if self.mean_cross_filter(self, stock1=stock1, stock2=stock2):\n mean_cross_cut += 1\n validated_pairs.append([stock1, stock2])\n\n print(f\"Hurst filter pass: {hurst_cut}\")\n print(f\"Co-integration filter pass: {coint_cut}\")\n print(f\"Half-life filter pass: {half_life_cut}\")\n print(f\"Mean-cross filter pass: {mean_cross_cut}\")\n print(f\"Final Number of validated pairs: {len(validated_pairs)}\")\n print(\"The final validated pairs are: \")\n print(validated_pairs)\n\n # Save it to the attribute\n self.__validated_pairs = validated_pairs\n self.__validated_pairs_diff = self.__pair_diff[self.symbolize_pairs(self.__validated_pairs)]", "def test_updating_multiple_records_through_filter_with_arg_value(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n 
)\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all({\"last_name\": \"Fraud\"})\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def _update_all(self, criteria: Q, *args, **kwargs):\n raise NotImplementedError", "def updateIntensities (self,listAtoms):\r\n \r\n for i in range(len(listAtoms)):\r\n for j in range(len(listAtoms[i].spikeArray)):\r\n self.mol[i].spikeArray[j].intensity = listAtoms[i].spikeArray[j].intensity", "def updateFitness(self):\r\n for candidate in self.candidates:\r\n candidate.updateFitness()\r\n return", "def resample(self):\n self.particles = ParticleFilter.weighted_values(self.particles,\n [p.weight for p in self.particles],\n len(self.particles))\n for p in self.particles:\n p.weight = 1./len(self.particles)", "def update_all(self, stream: Iterator[Mapping[str, np.ndarray]]) -> Self:\n\n for element in stream:\n self.update(element)\n\n return self", "def update_all(self, stream: Iterator[Mapping[str, np.ndarray]]) -> Self:\n\n for element in stream:\n self.update(element)\n\n return self", "def filter(self, observations):\n\n (_, _, _, x_filtered, P_filtered) = filter(self.F, self.Q, self.H, self.R, self.x_0, self.P_0, observations)\n return x_filtered, P_filtered", "def filter_(self,fltr:torch.tensor):\n self.container = self.container[:,fltr]\n self.count_hist = self.count_hist[fltr]", "def update_individuals(individuals, eval_results):\n for ind, res in zip(individuals, eval_results):\n ind.fitness.values = res[0]\n ind.matching_node_pairs = res[1]\n ind.gtp_precisions = res[2]", "def _update_step(self, *, observations: types.ObservationsTorch) -> None:", "def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()", "def filter(self, filters):", "def filter(self, filter_dict):\n pass", "def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value", "def prefilter(self, filt=None, verbose=False):\n erased = []\n if verbose:\n msg = 'Prior to filter, we have {} cells.'.format(len(self.cells))\n print(msg)\n # check for supplementary observables to be computed\n raw_obs, func_obs = set_observable_list(filters=[filt, ])\n\n # compute suppl obs for all cells\n if raw_obs:\n for cell in self.cells:\n for obs in raw_obs:\n cell.build(obs)\n for cell in self.cells:\n if filt is not None:\n if not filt(cell):\n erased.append(cell)\n # make 
Colony.add_cell_recursive non functional\n cell.bpointer = None\n if cell.parent:\n cell.parent.childs.remove(cell)\n # make daughter cells new roots\n for ch in cell.childs:\n ch.bpointer = None\n if verbose:\n msg = '{} cells do not pass filter.'.format(len(erased))\n print(msg)\n for cell in erased:\n self.cells.remove(cell) # otherwise would be considered root\n # clean-up actions for computing extra obs\n # extra obs computation depends on tree decomposition\n # this will be done in lineage.get_timeseries()\n for cell in self.cells:\n for obs in raw_obs:\n del cell._sdata[obs.label]\n if verbose:\n msg = 'After filtering, we get {} cells.'.format(len(self.cells))\n print(msg)\n# self.metadata.filters.append(repr(boofunc))\n return", "def run(self, threshold=1e-3, max_iters=100):\n for msr in self.msrs:\n # find state transition matrix and propagate state\n phi_p, state_prop = self._compute_stm(msr.time, self.phis[-1])\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, state_prop)\n\n # update information and N\n self._update_info_and_n(y_i, h_tilde, phi_p, msr.cov)\n\n # add everything to the appropriate lists\n #new_cov = np.matmul(phi_p, np.matmul(self.cov_list[-1], phi_p))\n #self.cov_list.append(new_cov)\n self.prop_state_list.append(state_prop)\n self.estimates.append(state_prop)\n self.phis.append(phi_p)\n self.times.append(msr.time)\n\n # compute correction\n self.iters += 1\n x_hat_0 = np.linalg.solve(self.fisher_info[-1], self.N[-1])[0]\n\n # check for convergence\n if np.linalg.norm(x_hat_0) <= threshold:\n print(\"Batch filter converged in {} Iterations\".format(self.iters))\n self.cov_batch = np.linalg.inv(self.fisher_info[-1])\n\n elif self.iters >= max_iters:\n raise StopIteration(\"max_iters: {} reached without convergence\".format(max_iters))\n\n else:\n # reset everything and try again\n updated_istate = np.add(self.prop_state_list[0], np.transpose(x_hat_0))\n # fixes a strange bug wher the size of updated istate was changing\n updated_istate = np.resize(updated_istate, (1, len(self.istate)))[0]\n\n self.prop_state_list = [updated_istate]\n #self.cov_list = [self.apriori]\n self.fisher_info = [self.fisher_info[0]]\n self.pert_vec = np.subtract(self.pert_vec, x_hat_0)\n self.N = [np.matmul(self.apriori, self.pert_vec)]\n self.phis = [self.phis[0]]\n self.estimates = [updated_istate]\n self.times = [0]\n self.run()", "def _UpdateDataSetValues( self ):\n pass", "def set_filter_for_eval(self, x_filter):\n self.x_filter = x_filter\n\n entity_size = len(self.ent_to_idx)\n reln_size = len(self.rel_to_idx)\n\n first_million_primes_list = []\n curr_dir, _ = os.path.split(__file__)\n with open(os.path.join(curr_dir, \"prime_number_list.txt\"), \"r\") as f:\n logger.debug('Reading from prime_number_list.txt.')\n line = f.readline()\n i = 0\n for line in f:\n p_nums_line = line.split(' ')\n first_million_primes_list.extend([np.int64(x) for x in p_nums_line if x != '' and x != '\\n'])\n if len(first_million_primes_list) > (2 * entity_size + reln_size):\n break\n\n # subject\n self.entity_primes_left = first_million_primes_list[:entity_size]\n # obj\n self.entity_primes_right = first_million_primes_list[entity_size:2 * entity_size]\n # reln\n self.relation_primes = first_million_primes_list[2 * entity_size:(2 * entity_size + reln_size)]\n\n self.filter_keys = []\n # subject\n self.filter_keys = [self.entity_primes_left[self.x_filter[i, 0]] for i in range(self.x_filter.shape[0])]\n # obj\n self.filter_keys = [self.filter_keys[i] * 
self.entity_primes_right[self.x_filter[i, 2]]\n for i in range(self.x_filter.shape[0])]\n # reln\n self.filter_keys = [self.filter_keys[i] * self.relation_primes[self.x_filter[i, 1]]\n for i in range(self.x_filter.shape[0])]\n\n self.is_filtered = True", "def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. \"\n \"All values for these features must be 0.\"\n ],\n )", "def particle_filter(particle_set_t, measurement_t):\n global count\n n_samples, dim = particle_set_t.shape # no of particles and dimension of each particle\n\n pred_state = np.zeros((n_samples, dim), dtype=\"float64\") # store the predicted state \n weights = np.zeros(n_samples, dtype=\"float64\") # corresponding weights for resampling\n\n particle_set_t1 = np.zeros((n_samples, dim), dtype=\"float64\") # next iteration of particles\n\n\n # this loop calculates \\bar{X_t}, i.e. the predicted belief.\n for n in range(n_samples):\n # predicted motion step:\n xn_t1 = sample_motion_model(particle_set_t[n]) # 3x1 vector: hypothetical state\n\n # measurement correction step:\n weight_xn_t1 = state_likelihood(measurement_t, xn_t1) # scalar value\n\n pred_state[n] = xn_t1\n weights[n] = weight_xn_t1\n\n \n # It was observed that if all weights are 0, the resampling step breaks. \n # Thus, adding a uniform distribution. This is obviously a very bad idea \\ \n # as the true state can easily be discarded in the resampling step: TODO!\n if np.sum(weights) > 0.0:\n weights = weights/np.sum(weights) # normalize array only when sum in not 0\n else:\n print(\"possile divergence!\")\n weights[:] = 1 / n_samples # if sum is 0 then assign uniform distribution throughout\n\n\n # the resampling step:\n # indices = monte_carlo.residual_resample(weights)\n indices = monte_carlo.stratified_resample(weights)\n count += 1\n print(count)\n\n # new particle set is particles at index locations\n for i, index in enumerate(indices):\n particle_set_t1[i] = pred_state[index]\n\n return particle_set_t1", "def filter(self,state0):\n ok,tchi2 = True,0.\n state = state0.copy()\n ii = 0\n for node in self.nodes:\n zrun = node.zrun\n ok,state,F,Q = self.model.propagate(state,zrun)\n if (not ok):\n warning(\"kfilter.filter not possible to filter at \",(ii,zrun))\n debug(\"kfilter.filter i,ok,chi2 \",(ii,ok,tchi2))\n return ok,tchi2\n node.F = F\n node.Q = Q\n node.setstate('pred',state)\n fstate,fchi2 = node.predict(state)\n node.setstate('filter',fstate)\n node.setchi2('filter',fchi2)\n tchi2+=fchi2\n self.model.user_filter(node)\n state = node.getstate('filter').copy()\n ii+=1\n self.status='filter'\n debug(\"kfilter.filter ok,chi2 \",(ok,tchi2))\n return ok,tchi2", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n 
FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n #update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def update(self, **kwargs):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"update {}\".format(item))\n item.update(**kwargs)", "def train_filter(filt, U, Y):\n n = len(U)\n X = []\n R = []\n for i in range(n):\n x_hat_bar = filt.time_update(U[i])\n x_hat, P = filt.measurement_update(Y[i], x_hat_bar)\n print(i, x_hat, P)\n X.append(x_hat)\n R.append(P)\n return np.array(X), R", "def test_updating_record_through_filter(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update(last_name=\"Fraud\")\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def do_filter(self):\n return np.sum(np.multiply(self._past_values, self._impulse_response))", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def updateValues(self, otherDialog):\n for index in range(len(self._values)):\n for oval in otherDialog._values:\n if self._values[index].getID() == oval.getID():\n self._values[index].combine( oval, intelligent=False )\n break", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def observe(self, observation):\n # shallow copy observation (deep copy can be expensive)\n obs = observation.copy()\n batch_idx = 
self.opt.get('batchindex', 0)\n self.observation = obs\n #self.answers[batch_idx] = None\n return obs", "def _update_raw_data(params):\n from scipy.signal import filtfilt\n start = params['t_start']\n stop = params['raw'].time_as_index(start + params['duration'])[0]\n start = params['raw'].time_as_index(start)[0]\n data_picks = _pick_data_channels(params['raw'].info)\n data, times = params['raw'][:, start:stop]\n if params['projector'] is not None:\n data = np.dot(params['projector'], data)\n # remove DC\n if params['remove_dc'] is True:\n data -= np.mean(data, axis=1)[:, np.newaxis]\n if params['ba'] is not None:\n data[data_picks] = filtfilt(params['ba'][0], params['ba'][1],\n data[data_picks], axis=1, padlen=0)\n # scale\n for di in range(data.shape[0]):\n data[di] /= params['scalings'][params['types'][di]]\n # stim channels should be hard limited\n if params['types'][di] == 'stim':\n norm = float(max(data[di]))\n data[di] /= norm if norm > 0 else 1.\n # clip\n if params['clipping'] == 'transparent':\n data[np.logical_or(data > 1, data < -1)] = np.nan\n elif params['clipping'] == 'clamp':\n data = np.clip(data, -1, 1, data)\n params['data'] = data\n params['times'] = times", "def _filter_satisfied(self, update_setd=False):\n\n model = self.oracle.get_model()\n setd = set()\n\n for i, cl in enumerate(self.soft):\n if not self.satc[i]:\n if self._satisfied(cl, model):\n self.satc[i] = True\n self.ss_assumps.append(self.sels[i])\n else:\n setd = setd.union(set(cl))\n\n if update_setd:\n self.setd = list(setd)", "def _put_resolved_booleans_into_filter(\n filter_operator: Operator, model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues]\n) -> None:\n for operator in _model_filter_in_operator_generator(filter_operator):\n model_filter = operator.unresolved_value\n operator.resolved_value = model_filters_to_resolved_values.get(\n model_filter, BooleanValues.UNKNOWN\n )", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def set_observations(self, oseries):\n self.oseries_index = oseries.index\n observations_masked = np.ma.array(oseries,\n mask=(~np.isfinite(oseries)))\n (n_timesteps, dimobs) = observations_masked.shape\n self.observation_indices = np.zeros((n_timesteps, dimobs),\n dtype=np.float64)\n self.observation_count = np.zeros(n_timesteps, dtype=np.int64)\n self.observations = np.zeros((n_timesteps, dimobs), dtype=np.float64)\n\n for t in range(n_timesteps):\n observation = observations_masked[t]\n # add large value to find all finite non-masked values\n obstmp = observation + 1e10\n obsindices = obstmp.nonzero()[0]\n self.observation_count[t] = len(obsindices)\n\n if (len(obsindices) > 0):\n for i, _ in enumerate(obsindices):\n obsid = int(obsindices[i])\n self.observations[t, obsid] = observation[obsid]\n self.observation_indices[t, i] = obsid", "def updateObservation(self, obs):\n self.settingsDb.updateObservation(self.observationsTableName(), obs)", "def update(self):\n\n terms_toRemove = []\n\n for termIndex, [term_constantFactor, term_unknowns_attributeAddresses] in enumerate(self.LHS):\n\n # Check if coefficient is 0 - then no need to process any of the unknowns since term will be 0 anyways\n if term_constantFactor == 0:\n terms_toRemove.append(termIndex)\n continue # continue to next term, no need to resolve the unknowns of this term since the product will be 0 anyways\n\n # Check if any unknowns became known\n unknowns_toRemove = []\n for unknown_attributeAddress in term_unknowns_attributeAddresses:\n attribute = 
getattr_fromAddress(*unknown_attributeAddress)\n if isNumeric(attribute):\n # object.attribute which had previously been identified as unknown now has a value, add it to the constant factor product and remove from the unknowns\n self.LHS[termIndex][0] *= attribute # multiply it with the constant factor product\n unknowns_toRemove.append([termIndex, unknown_attributeAddress])\n for termIndex, unknown_attributeAddress in unknowns_toRemove: # remove unknowns which have become known in the end\n # removing in the end not to tamper with the iteration of the above loop\n self.LHS[termIndex][1].remove(unknown_attributeAddress)\n\n # Move constants to RHS\n if self.LHS[termIndex][1] == []:\n # if term has no unknowns, it is a constant, move to RHS\n self.RHS -= self.LHS[termIndex][0]\n self.LHS.pop(termIndex)\n\n for termIndex in reversed(terms_toRemove): # reversed - otherwise would tamper with indices of items identified for removal\n self.LHS.pop(termIndex)\n\n self._gatherUnknowns()", "def filter(self):\n M, p, q = self.M, self.p, self.q\n x = self.x\n idx = len(self.x) - (p + 1)\n x_ = self.x_prev + (x[idx + p] - x[idx - q]) / M\n self.t_.append(self.t[idx])\n self.t_filtered.append(self.t[idx])\n self.x_.append(x_)\n self.x_filtered.append(x_)\n self.x_prev = x_", "def update_filters(self, **kwargs):\n self._FILTERS = kwargs", "def update(self, values, train, eta=.1):\n\t\tfor X, y_true in zip(values, train):\n\t\t\tprediction = self.activate(X)\n\t\t\terror = y_true - prediction\n\t\t\tweight_update = error * eta * X\n\t\t\tself.weights += weight_update", "def finalize(self):\n # this forces only some filters will be used for feature computation\n # this is not ideal, but a necessary stop-gap while we revise\n # the PropertyTable SQL\n self._good_filters = ['u', 'g', 'r', 'i', 'z', 'Y']\n self._good_filter_wave = np.array([3569.5, 4766.5, 6214.5, 7544.5, 8707.5, 10039.5])\n\n use_filters = set(self._good_filters) & self.filters\n if not self.filters.issubset(use_filters):\n message = 'Number of useful filters ({}) does not equal number available filters ({}) - some filters will not be used'.format(\n ''.join(use_filters), ''.join(self.filters))\n warnings.warn(message, RuntimeWarning)\n self.filters = set(use_filters)\n mask = np.array([True if x in self.filters else False for x in self.passband])\n\n if mask.size: # Not empty arrays\n self.time = self.time[mask]\n self.flux = self.flux[mask]\n self.fluxErr = self.fluxErr[mask]\n self.obsId = self.obsId[mask]\n self.passband = self.passband[mask]\n self.zeropoint = self.zeropoint[mask]\n for key in self._extra_cols:\n val = getattr(self, key)\n setattr(self, key, val[mask])\n\n self.nobs = len(self.time)\n if self.nobs == 0:\n message = 'Object {} with locus ID {} has no good observations.'.format(self.objectId, self.locusId)\n raise ValueError(message)\n\n return self._remove_flux_extinction()", "def step_filter(self, v, imu_meas, meas):\n\n if (imu_meas is not None) and (meas is None):\n xp, Pp = self.prediction(v, imu_meas)\n self.x = xp\n self.P_t = Pp\n return xp\n\n elif (imu_meas is None) and (meas is not None):\n z_t = self.transformMeasurement(meas)\n self.x = z_t\n return z_t\n\n else: # both measurements contain values\n xp, Pp = self.prediction(v, imu_meas)\n z_t = self.transformMeasurement(meas)\n x, P = self.update(z_t, xp, Pp)\n\n self.x = x\n self.P_t = P\n return x", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def _apply_filter(self, fn=lambda ngram, freq: False):\n tmp_ngram = 
FreqDist()\n for ngram, freq in self.ngram_fd.items():\n if not fn(ngram, freq):\n tmp_ngram[ngram] = freq\n self.ngram_fd = tmp_ngram", "def step_filter(self, v, imu_meas, z_t):\n # YOUR CODE HERE\n pass", "def parameter_update(self, X, X_mask):\n data_with_mask = np.hstack([X, X_mask])\n X_pred, X_MC_preds = self.predict(data_with_mask)\n X[X_mask] *= self.keep_coeff\n X[X_mask] += self.weight_update_coeff * X_pred[X_mask]\n return X, X_MC_preds", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "def _set_values(loop_data):\n \n # These are the indexes of all the data that are unassigned\n value_indexes = loop_data.ix[(loop_data.loop==0)&(loop_data.tag==0)&(loop_data.stop==0)].index\n loop_data[u'value'] = 0\n loop_data.ix[value_indexes,u'value'] = 1\n \n # These are the indexes of data who follow either a loop, label, or stop tag\n value_indexes_begin = loop_data.ix[(value_indexes-1)].ix[loop_data.value==0].index + 1\n \n # The first rows of each data correspond to data for their respective loops\n loop_max = loop_data.loop.max()\n loop_range = np.arange(loop_max-1, -1, -1)\n \n for idx in value_indexes_begin:\n loop_data.ix[idx:idx+len(loop_range)-1,u'value'] += loop_range\n \n return loop_data", "def update_with_observation(particles, landmark_list):\r\n for p in particles:\r\n for i in range(np.shape(landmark_list)[1]):\r\n p.update_particle(landmark_list[:, i])\r\n\r\n return particles", "def process_observation(self, observation):\n return observation", "def process_observation(self, observation):\n return observation", "def iterate(self, data):\n \n # Append data to self.data\n self.data = np.append(self.data, data)\n \n for i, d in enumerate(data):\n update = self.current*self.likelihood(d)\n self.current = self._normalize(update)\n self.posterior = np.concatenate((self.posterior,[self.current]))\n \n print(str(len(data)) + \" iterations completed!\")\n \n return None", "async def _update_values(self, model: Model):\n\n raise NotImplementedError", "def Iterate(self):\n\t\tfor atom in self.atoms:\n\t\t\tself.UpdateAtom(atom)", "def update_state(self):\n # return initial state if no observation was yet\n if len(self.obs_history) == 0:\n return self.kf.initial_state_mean, self.kf.initial_state_covariance\n\n hist = np.ma.masked_array(self.obs_history, mask=np.zeros((1,)))\n for i in range(len(hist)):\n if hist[i] == -1e8:\n hist[i] = np.ma.masked\n\n # print(hist, hist.shape)\n return self.kf.filter(hist)", "def _update_data(self):\n for attribute in [\"flow_rate\"]:\n self._data[attribute] = self._connection.measure", "def apply_filters(\n isovar_result,\n filter_thresholds={},\n filter_flags=[]):\n filter_values = OrderedDict(isovar_result.filter_values.items())\n new_filter_values = evaluate_filters(\n isovar_result,\n filter_thresholds=filter_thresholds,\n filter_flags=filter_flags)\n filter_values.update(new_filter_values)\n return isovar_result.clone_with_updates(filter_values=filter_values)", "def healthcare_filter(df_all): \n #get requested assets under healthcare tag \n df_filtered = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_filtered = df_filtered.append(df_all.loc[row]) #if so, save in df \n if '\"healthcare\"=>\"doctor\"' in df_filtered[\"asset\"][row]:\n 
df_filtered[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"healthcare\"=>\"pharmacy\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'pharmacy'\n elif '\"healthcare\"=>\"hospital\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'hospital'\n elif '\"healthcare\"=>\"clinic\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'clinic'\n elif '\"healthcare\"=>\"dentist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'dentist'\n elif '\"healthcare\"=>\"physiotherapist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'physiotherapist'\n elif '\"healthcare\"=>\"alternative\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'alternative'\n elif '\"healthcare\"=>\"laboratory\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'laboratory'\n elif '\"healthcare\"=>\"optometrist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'optometrist'\n elif '\"healthcare\"=>\"rehabilitation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'rehabilitation'\n elif '\"healthcare\"=>\"blood_donation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'blood_donation'\n elif '\"healthcare\"=>\"birthing_center\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'birthing_center'\n else:\n df_filtered = df_filtered.drop(index=row)\n \n return df_filtered", "def filter_patients(self):\n\n if self.dataset is None:\n self.dataset = h5py.File(self.filename, 'r')['dataset']\n \n # Find feature indices belonging to specific criteria\n inclusion_info = self.filter_params['inclusion']\n # exclusion_info = self.filter_params['exclusion']\n case_control_info = self.filter_params['case_control']\n\n inclusion_inds = self.check_criteria(inclusion_info, case_control=False)\n # exclusion_inds = self.check_criteria(exclusion_info, case_control=False)\n case_inds, control_inds = self.check_criteria(case_control_info, case_control=True)\n\n filtered_inds = {}\n # inclusion_exclusion_inds = np.setdiff1d(inclusion_inds, exclusion_inds)\n filtered_inds['case'] = np.intersect1d(inclusion_inds, case_inds)\n filtered_inds['control'] = np.intersect1d(inclusion_inds, control_inds)\n\n return filtered_inds", "def test_updating_multiple_records_through_filter_with_kwarg_value(\n self, test_domain\n ):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all(last_name=\"Fraud\")\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert 
u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def train_filter2(filt, U, Y):\n n = len(U)\n X, R = [], []\n for i in range(n):\n print(i)\n filt.predict(fx_args=(U[i],))\n filt.update(Y[i])\n X.append(filt.x.copy())\n R.append(filt.P.copy())\n X = np.array(X)\n X, R, K = filt.rts_smoother(X, R, fx_args=(U[i],))\n return X, R", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def forc_model(self):\n lag1_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag1_by_store')\n lag2_loc = self.X[self.model_mask_cols].columns.get_loc('shrink_value_per_day_lag2_by_store')\n for add in self.X.address1.unique():\n add_mask = self.X.address1 == add\n foo = self.X[ add_mask ].sort_values('visit_date', ascending=False)\n top_index = foo.index[0]\n clust = int(foo.cluster.values[0])\n # get values from last visit for store\n base_input = foo[self.model_mask_cols].values[0]\n base_actual = self.y[top_index]\n lag2_val = base_input[lag1_loc]\n lag1_val = base_actual\n\n for i in range(1, self.num_periods + 1):\n model = self.model_list[clust]\n inputs = base_input\n inputs[lag1_loc] = lag1_val\n inputs[lag2_loc] = lag2_val\n \n pred = model.predict(inputs.reshape(1, -1))\n self._update_cust_table(add, i, pred)\n \n lag2_val = lag1_val\n lag1_val = pred" ]
[ "0.63613224", "0.62198365", "0.6080035", "0.60798997", "0.60306054", "0.58865666", "0.5883915", "0.58141124", "0.5800074", "0.5787996", "0.5541149", "0.5536493", "0.55307084", "0.5513393", "0.55123764", "0.5507698", "0.54158753", "0.5414475", "0.5403888", "0.53936756", "0.5388999", "0.53880715", "0.5363586", "0.5355547", "0.5351123", "0.534992", "0.53498495", "0.5347381", "0.5285174", "0.52653944", "0.5261038", "0.5215623", "0.5215302", "0.52017486", "0.52017146", "0.5200738", "0.51895565", "0.51877266", "0.518692", "0.51809186", "0.5177147", "0.517705", "0.517705", "0.51713455", "0.5161349", "0.5150539", "0.51468694", "0.5144904", "0.51423365", "0.513975", "0.51379853", "0.51308095", "0.51275927", "0.51274836", "0.5118717", "0.51145047", "0.51115704", "0.51114297", "0.5109708", "0.5100439", "0.50909036", "0.509037", "0.5087856", "0.5087688", "0.50621223", "0.5049711", "0.5049681", "0.5045099", "0.5040932", "0.50364006", "0.5034882", "0.50339144", "0.5019592", "0.5012907", "0.50115496", "0.50065666", "0.50048995", "0.5004228", "0.49995285", "0.49954176", "0.4990749", "0.49808678", "0.49754924", "0.4966135", "0.49632093", "0.49604845", "0.4956182", "0.4955419", "0.4955419", "0.49450874", "0.49429345", "0.4934977", "0.49320674", "0.49267983", "0.49172685", "0.49075392", "0.48998687", "0.48974884", "0.48974785", "0.48944095", "0.48937657" ]
0.0
-1
I have some docs
def docs():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def document(self):\n ...", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def docs(self):\n self._doc_info = DocumentationURL()\n self._doc_info.show()", "def get_documentation(self, *args, **dargs):\n pass", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def get_docs(self):\n return get_view_description(self.callback)", "def documento():\r\n\tpass", "def show_documentation(self):\n self.docs = documentation.Documentation()", "def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")", "def __call__(self, doc):\n return doc", "def getDocsList(self):\n return self.docs_list", "def getDoc(self):\r\n return self.__doc__", "def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()", "def merge_docs(self):", "def init_doc(self):\n raise NotImplementedError()", "def items(self):\n return self.docs.items()", "def documents(self):\r\n return doc.Documents(self)", "def test_doc():\n pass", "def build_document(self):\n pass", "def test_docstring(self):\n self.assertTrue(len(FileStorage.__doc__) > 1)\n self.assertTrue(len(FileStorage.all.__doc__) > 1)\n self.assertTrue(len(FileStorage.new.__doc__) > 1)\n self.assertTrue(len(FileStorage.save.__doc__) > 1)\n self.assertTrue(len(FileStorage.reload.__doc__) > 1)", "def test_client_document_list(self):\n pass", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def test_all_documents(self):", "def test_doc1(self):\n assert models.review.__doc__ is not None", "def fini_doc(self):\n raise NotImplementedError()", "def test_method_docs(self):\n for func in dir(DBStorage):\n self.assertTrue(len(func.__doc__) > 0)", "def document(self, **kw):\r\n \r\n for p in self.documents(**kw):\r\n return p", "def has_doc() -> None:", "def documentation():\n return auto.html()", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def __call__(self, doc: Doc) -> Doc:\n return doc", "def test_module_doc(self):\n self.assertTrue(len(db_storage.__doc__) > 0)", "def get_doc(self) -> Documentation:\n r : Documentation = [self.get_doc_string()]\n r_src = \"\"\n if hasattr(self,\"_path\"): r_src += \"locally at '%s'\" % (str(self._path))\n if self.url is not None: r_src += \" remote url(orig) '%s'\" % (self.url)\n r_src += \" remote url(parsed) '%s'\" % (self.git_url.as_string())\n if self.branch is not None: r_src += \" branch '%s'\" % (self.branch)\n r.append(r_src)\n r_stages = []\n for (sn,s) in self.stages.items():\n r_stages.append(sn)\n pass\n r_stages.sort()\n if len(r_stages)>0:\n r.append(\"Stages: %s\"%(\" \".join(r_stages)))\n pass\n return r", "def __iter__(self):\n return self.docs.__iter__()", "def func_doc():", "def test_docstrings(self):\n for obj in dir(self.storage):\n self.assertGreater(len(obj.__doc__), 1)", "def test_findDocumentation(self):\n doc = 
self.builder._findChanges(\n self.project, self.builder._DOC)\n self.assertEquals(\n doc,\n [(40, 'foo.bar.Baz.quux'),\n (41, 'writing Foo servers')])", "def iter_documents(self):\n raise NotImplementedError", "def run_docs(self, *docs):\n self.docs = docs\n self.run()", "def test_documentation(self):\n doc = City.__doc__\n self.assertGreaterEqual(len(doc), 1)", "def documentation_only():\n pass", "def docs(self, history=[]):\n if self._doc_cache is None:\n self._doc_cache = {}\n for lib in [\"BuiltIn\"]:\n self._load_libdoc(lib)\n self._update_imports(history)\n\n docs = dict(**self._doc_cache, **self._history_docs(history))\n\n return docs", "def doc(obj):\n return Documentation.fromObject(obj).first", "def test_method_docs(self):\n for func in dir(BaseModel):\n self.assertTrue(len(func.__doc__) > 0)", "def __doc__(self, ???):", "def triple_quote_docs():\n return", "def __doc__(self):\n return self.fget.__doc__", "def test_docstring(self):\n self.assertTrue(len(BaseModel.__doc__) > 1)\n self.assertTrue(len(BaseModel.__init__.__doc__) > 1)\n self.assertTrue(len(BaseModel.__str__.__doc__) > 1)\n self.assertTrue(len(BaseModel.save.__doc__) > 1)\n self.assertTrue(len(BaseModel.to_dict.__doc__) > 1)", "def get_dashmanager_docs():\n\n\tref_docs = get_registered_docs_for_dashmanager()\n\treturn {\n\t\t\"ref_docs\" : json.dumps(ref_docs)\n\t}", "def find_document(self):\n pass", "def test_document_retrieval(self):", "def test_doc2(self):\n assert Review.__doc__ is not None", "def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)", "def test_method_docs(self):\n for func in dir(Amenity):\n self.assertTrue(len(func.__doc__) > 0)", "def get_doc(self):\n return self.p", "def test_module_doc(self):\n self.assertTrue(len(models.amenity.__doc__) > 0)", "def test_docstring(self):\n self.assertIsNotNone(Review.__doc__)\n self.assertIsNotNone(Review.text.__doc__)", "def test_module_doc(self):\n self.assertTrue(len(base_model.__doc__) > 0)", "def test_method_docs(self):\n for func in dir(Base):\n self.assertTrue(len(func.__doc__) > 0)", "def test_docstring(self):\n self.assertTrue(len(City.__doc__) > 1)\n self.assertTrue(len(City.__init__.__doc__) > 1)\n self.assertTrue(len(City.__str__.__doc__) > 1)\n self.assertTrue(len(City.save.__doc__) > 1)\n self.assertTrue(len(City.to_dict.__doc__) > 1)", "def test_module_doc(self):\n self.assertTrue(len(r.__doc__) > 10)", "def pythondoc(self, irc, msg, args, num, req):\n self.googleq('http://docs.python.org/library/', req, num, irc)", "def test_doc3(self):\n methods = [\"__init__\", \"__str__\", \"to_dict\", \"save\"]\n for key in Review.__dict__.keys():\n if key in methods:\n assert key.__doc__ is not None", "def inherits_doc():\n pass", "def __len__(self):\n return len(self.docs)", "def test_get_documents_populated(index_with_documents):\n response = index_with_documents().get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20", "def main_docstring():", "def DocString():\n return", "def doc(self):\n return \"\\n\".join(self.docLines)", "def docLines(self):\n summary, description = self._getDocParts()\n if description:\n return summary + [\"\"] + description\n return summary", "def test_module_doc(self):\n self.assertTrue(len(base.__doc__) > 0)", "def get_docs_and_page():\n _, *args = sys.argv[:]\n if len(args) > 0:\n print(pydoc.getdoc(*args))\n return pydoc.getdoc(*args)", "def test_document_listing(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create a topic 
and product\n t = topic(save=True)\n p = product(save=True)\n\n # Create 3 documents with the topic and product and one without\n for i in range(3):\n doc = revision(is_approved=True, save=True).document\n doc.topics.add(t)\n doc.products.add(p)\n doc = revision(is_approved=True, save=True).document\n\n self.refresh()\n\n # GET the page and verify the content\n url = reverse('products.documents', args=[p.slug, t.slug])\n r = self.client.get(url, follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(3, len(doc('#document-list li')))", "def dummy(doc):\r\n return doc", "def getDocuments(self):\n return self.objectValues('Multifile')", "def test_method_docs(self):\n for func in dir(User):\n self.assertTrue(len(func.__doc__) > 0)", "async def docs(self, ctx):\n embed = discord.Embed(title = \"Documentation\", description = \"[Click here to visit our documentation!](https://dhb-documentation.readthedocs.io/en/latest/index.html)\", color = discord.Color.blurple())\n await ctx.send(embed = embed)", "def _get_documents(self) -> Iterable[dict]:\n\n return self._db[\"documents\"]", "def doc(self):\n return {'_id': self._id,\n 'text': self.text}", "def setup_docs(self):\n for arg in self.args:\n self.log.debug(\"Processing arg %s\" % arg)\n if isinstance(arg, dexy.doc.Doc) or isinstance(arg, dexy.doc.PatternDoc):\n doc = arg\n\n elif isinstance(arg, list):\n if not isinstance(arg[0], basestring):\n raise Exception(\"First arg %s should be a string\" % arg[0])\n if not isinstance(arg[1], dict):\n raise Exception(\"Second arg %s should be a dict\" % arg[1])\n\n if not \"*\" in arg[0]:\n doc = dexy.doc.Doc(arg[0], **arg[1])\n else:\n # This is a pattern doc or real doc TODO better way to verify?\n doc = dexy.doc.PatternDoc(arg[0], **arg[1])\n\n elif isinstance(arg, basestring):\n doc = dexy.doc.PatternDoc(arg)\n\n else:\n raise Exception(\"unknown arg type %s for arg %s\" % (arg.__class__.__name__, arg))\n\n doc.wrapper = self\n doc.setup()\n\n self.docs.append(doc)", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def main(rc):\n with store_client(rc) as sclient:\n for doc in rc.documents:\n sclient.copydoc(doc)", "def test_init_doc(self):\n self.assertTrue(\n len(Review.__init__.__doc__) > 10\n )", "def get_doc(self):\n return self._doc", "def test_module_doc(self):\n self.assertTrue(len(user.__doc__) > 0)", "def has_test_docs(self):\n pass", "def documentation(self) -> str:\n return pulumi.get(self, \"documentation\")", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def rawDoc(self):\n return self.namespace[\"__doc__\"]", "def show_documents():\n\n document = Document(connection=connection, cursor=cursor)\n\n all_documents = document.get_all_documents()\n\n context = {\n 'all_documents': all_documents\n }\n\n return render_template('pages/tables/documents.html', **context)", "def oparl_documentsss():\n start_time = time.time()\n jsonp_callback = request.args.get('callback', None)\n ref = request.args.get('reference', '')\n references = ref.split(',')\n if references == ['']:\n references = None\n output = request.args.get('output', '').split(',')\n rs = util.get_rs()\n q = request.args.get('q', '*:*')\n fq = request.args.get('fq', '')\n sort = request.args.get('sort', 'score desc')\n start = int(request.args.get('start', '0'))\n numdocs = int(request.args.get('docs', '10'))\n date_param = request.args.get('date', '')\n 
get_attachments = 'attachments' in output\n get_thumbnails = 'thumbnails' in output and get_attachments\n get_consultations = 'consultations' in output\n get_facets = 'facets' in output\n #get_relations = 'relations' in output\n request_info = {} # Info über die Anfrage\n query = False\n docs = False\n submission_ids = []\n # TODO: entscheiden, was mit get_relations passiert\n \"\"\"\n Anhand der übergebenen Parameter wird entschieden, ob eine ES-Suche\n durchgeführt wird, oder ob die Abfrage direkt anhand von Kennungen\n (references) erfolgen kann.\n \"\"\"\n \n if references is None:\n # Suche wird durchgeführt\n # (References-Liste via Suchmaschine füllen)\n query = db.query_submissions(rs=rs, q=q, fq=fq, sort=sort, start=start,\n docs=numdocs, date=date_param, facets=get_facets)\n if query['numhits'] > 0:\n submission_ids = [x['_id'] for x in query['result']]\n else:\n docs = []\n else:\n # Direkte Abfrage\n request_info = {\n 'references': references\n }\n request_info['output'] = output\n\n # Abrufen der benötigten Dokumente aus der Datenbank\n if references is not None:\n docs = db.get_submissions(rs=rs, references=references,\n get_attachments=get_attachments,\n get_consultations=get_consultations,\n get_thumbnails=get_thumbnails)\n elif len(submission_ids) > 0:\n docs = db.get_submissions(rs=rs, submission_ids=submission_ids,\n get_attachments=get_attachments,\n get_consultations=get_consultations,\n get_thumbnails=get_thumbnails)\n\n ret = {\n 'status': 0,\n 'duration': int((time.time() - start_time) * 1000),\n 'request': request_info,\n 'response': {}\n }\n if docs:\n ret['response']['documents'] = docs\n ret['response']['numdocs'] = len(docs)\n if query and 'maxscore' in query:\n ret['response']['maxscore'] = query['maxscore']\n for n in range(len(docs)):\n docs[n]['reference'] = docs[n]['identifier']\n del docs[n]['identifier']\n\n if query:\n ret['response']['numhits'] = query['numhits']\n if get_facets and 'facets' in query:\n ret['response']['facets'] = query['facets']\n \n ret['response']['start'] = start\n ret['request']['sort'] = sort\n ret['request']['fq'] = fq\n\n json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)\n if jsonp_callback is not None:\n json_output = jsonp_callback + '(' + json_output + ')'\n response = make_response(json_output, 200)\n response.mimetype = 'application/json'\n response.headers['Expires'] = util.expires_date(hours=24)\n response.headers['Cache-Control'] = util.cache_max_age(hours=24)\n return response", "def stats_docs(self, host):\n\n s = self.get_stats(host, 'docs')\n\n data = {\n 'count': s['count'],\n 'deleted': s['deleted']\n }\n\n return data", "def test_client_document_retrieve(self):\n pass", "def test_doc_fun(self):\n for fun in self.functions:\n self.assertTrue(len(fun.__doc__) > 0)", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def get_documentation():\n return send_file(base_dir / \"static/documentation.html\", \"text/html; charset=UTF-8\")", "def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content 
+= ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)", "def documentation():\n return render_template('help.html')" ]
[ "0.7454927", "0.7446354", "0.7446354", "0.7446354", "0.73413706", "0.7109946", "0.71089756", "0.7055562", "0.70452785", "0.69460195", "0.68773216", "0.6874397", "0.6845154", "0.6833291", "0.67916155", "0.66953266", "0.6682665", "0.66622543", "0.66396433", "0.66306525", "0.65615904", "0.6557985", "0.65171283", "0.6508592", "0.64965737", "0.64489305", "0.6444214", "0.6439048", "0.64372736", "0.6412439", "0.64006406", "0.64002484", "0.64002484", "0.6392618", "0.6392585", "0.6390416", "0.638531", "0.63829464", "0.6378155", "0.63779277", "0.636866", "0.6356126", "0.6352414", "0.63412344", "0.63404906", "0.6323771", "0.63144785", "0.62987155", "0.62887514", "0.6283713", "0.6282232", "0.6257088", "0.62552184", "0.6254228", "0.6237822", "0.6237822", "0.6215817", "0.62150836", "0.6212114", "0.6203372", "0.6178516", "0.6178182", "0.6168165", "0.61668193", "0.61622065", "0.61616796", "0.6137896", "0.61282635", "0.61206347", "0.61148804", "0.6107362", "0.61064005", "0.610368", "0.6101252", "0.60979706", "0.6097739", "0.60947406", "0.60904086", "0.6084152", "0.6075395", "0.6069028", "0.60647666", "0.6060398", "0.60601354", "0.60575044", "0.6048922", "0.6036132", "0.6027402", "0.60257095", "0.60221684", "0.6009887", "0.60061496", "0.6002498", "0.59966713", "0.5992064", "0.59917545", "0.59912497", "0.59910685", "0.59905046", "0.59899664" ]
0.87407255
0
get filename as argument from user or if not given, take filename interactively. Return file handle
def get_filename_as_agrv_if_no_ask(prompt):
    Found = False
    ln = len(sys.argv)
    while not Found:
        if ln < 2:
            file = input( prompt)
        else:
            file = sys.argv[1]
        try:
            RFH = open(file)
            Found = True
        except FileNotFoundError:
            print("%%Error! File not found!")
            ln = 1
            # break
    return RFH
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obtain_filename():\n file_wanted = input(\"Filename? \")\n return file_wanted", "def askInputFile():\n while True:\n print(\"Enter a valid .txt file\")\n # Try until a plain-text file is provided.\n try:\n fileName = easygui.fileopenbox(\"Enter a .txt file\",\n \"Open file\",\n default=\"C:\\\\\",\n filetypes=[\"*.txt\"])\n if fileName == None:\n raise \n except :\n pass\n else:\n return fileName", "def open_infile(infilename):\n if not infilename or infilename == \"-\":\n return sys.stdin\n else:\n return open(infilename, \"r\")", "def getInputFilename():\n\n argvList = sys.argv\n # print \"argvList=%s\"%(argvList)\n return argvList[0]", "def file_path():\n file_name = input(\"Enter the file name:\")\n return file_name", "def askopenfilename():\r\n file_opt = options = {}\r\n options['defaultextension'] = '.csv'\r\n options['filetypes'] = [('all files', '.*'), ('csv files', '.csv')]\r\n options['initialdir'] = os.getcwd()\r\n options['initialfile'] = 'profile.csv'\r\n options['title'] = 'choose file'\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**file_opt)\r\n\r\n # open file on your own\r\n return filename", "def get_filename():\n filename = input(\"Filename? \")\n while not filename:\n filename = input(\"Filename? \")\n return filename", "def file_name_request(self):\n self.file_name = input(\"What is the name of the input file?\\n>>>\")", "def choose_file():\r\n import tkinter\r\n from tkinter import filedialog\r\n\r\n root_window = tkinter.Tk()\r\n root_window.withdraw()\r\n\r\n return filedialog.askopenfilename()", "def get_input_file():\n if len(sys.argv) < 3:\n return -1\n return sys.argv[2]", "def ask_file(message=\"Select file for open.\", title=None):\n return dialog(\"ask_file\", message=message, title=title)", "def AskForFileName():\n file_doesnot_exsit = True\n file_name = None\n while file_doesnot_exsit:\n try:\n file_name = input(\"What is the name of the input file?\")\n file = open(file_name, 'r')\n file_doesnot_exsit = False\n except FileNotFoundError:\n print(\"File is not found\")\n return file_name", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def choosefile():\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**options)\r\n #print filename, '*****'\r\n\r\n # open file on your own\r\n if filename:\r\n #return open(filename, 'r')\r\n tasks.upload_chosen = filename", "def cli(ctx, filename):\n ctx.obj['filename'] = filename\n click.echo(f'Passed filename: {filename}')", "def get_local_filename_arg(self):\n\ttry:\n\t arg = sys.argv[2]\n\t local_filename = str(arg) \n\texcept IndexError:\n\t print \"Please provide the name under which the received file is to be stored locally.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\n\telse:\n\t return local_filename", "def _filename(self):\n logger.debug(\"Popping Filename browser\")\n return filedialog.askopenfilename(**self._kwargs)", "def get_file_read_arg(self):\n\ttry:\n\t arg = sys.argv[1]\n\t file_read = str(arg)\n\texcept IndexError:\n\t print \"Please provide the name of the file that you wish to receive.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\") \n\tif (len(file_read) > 100):\n\t print \"Name of file must be equal to or less than 100 characters.\"\n\t sys.exit(\"Example usage:\\n\\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0\")\n\telse:\n\t return file_read", "def askopenfilename():\n\n 
file_opt = options = {}\n options['defaultextension'] = '.*'\n options['initialdir'] = 'User\\\\'\n options['initialfile'] = ''\n options['parent'] = root\n options['title'] = 'choose file'\n options['multiple'] = 1\n\n # get filename\n filename = tk.filedialog.askopenfilename(**file_opt)\n\n if filename:\n self.sourcefile = filename\n if len(filename) is 1:\n file_path_var.set(filename)\n else:\n file_path_var.set(\n \"Multiple files, including {}\".format(filename[0]))", "def open_any(filename):\n if filename == '-':\n fh = sys.stdin\n elif filename[-3:] == '.gz':\n fh = GzipFile(filename, 'r')\n elif filename[-4:] == '.bz2':\n fh = BZ2File(filename, 'r')\n else:\n fh = open(filename, 'r')\n\n return fh", "def openInputFile(infile, *args):\n if infile is None:\n logging.info(\"Reading input from STDIN\")\n return sys.stdin\n\n if isinstance(infile, str):\n if urlRE.match(infile):\n import urllib2\n return urllib2.urlopen(infile)\n if len(infile)>3 and infile[-3:]=='.gz':\n import gzip\n return gzip.GzipFile(infile,'rb')\n elif len(infile)>4 and infile[-4:]=='.bz2':\n import bz2\n return bz2.BZ2File(infile,'rb')\n else:\n return open(infile,'rt')\n else:\n return infile", "def init():\n global file_name\n global interactive\n\n # Allowed names for file_name\n names = [\"common\", \"specific\", \"global\"]\n\n try: # check if file_name exists as given by user\n file_name, interactive = args.get_args()\n except FileNotFoundError: # user can correct if previously wrong\n print('Files does not exist, file_name should be in : {}'.format(names))\n file_name = input('Enter file_name : \\n')\n file_name = '../data/' + file_name + '.fits'\n interactive = input('Interactive mode [True/False] \\n') # adjustment needed to define interactive without calling get_args()\n if interactive == 'True':\n interactive = True\n else :\n interactive = False\n return(file_name, interactive)", "def open_and_read_file():\n\n file_path = sys.argv[1]\n input_file = open(file_path).read()\n return input_file\n\n # your code goes here\n\n #return \"This should be a variable that contains your file text as one long string\"", "def prompt_open(file_name, file_mode):\n\n\tif file_mode.lower() != \"r\" and file_mode.lower() != \"w\":\n\t\tprint(\"That is not a mode the file can be opened in\")\n\t\treturn \"-1\"\n\n\ttry:\n\t\tfile = open(file_name, file_mode)\n\t\treturn file\n\texcept:\n\t\tprint(\"Sorry that's not a valid file\")\n\t\treturn \"-1\"", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def get_source():\n if len(sys.argv) > 1:\n return open(sys.argv[1])\n else:\n return sys.stdin", "def get_path_via_file_ui():\n\n import Tkinter as tk\n import tkFileDialog as filedialog\n root = tk.Tk()\n root.withdraw()\n return filedialog.askopenfilename()", "def get_file():\n # Main Loop\n while True:\n filename = input(\"Please enter the name of the file you want to work on: \")\n # Check if file exists...\n if path.exists(filename):\n print(\"File sucessfully retrieved. Returning to previous menu...\")\n print()\n return filename\n \n print(\"That file does not exist in your current directroy. 
Try again.\")\n print()", "def menu_Open():\n asdf = tkFileDialog.askopenfilename()\n print(asdf)", "def getFile():\n from tkinter import Tk, filedialog\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n return(filedialog.askopenfilenames())", "def _get_file_object(inputfile=None):\n if type(inputfile) == str:\n return open(inputfile, 'r')\n return inputfile", "def open_file(self):\n filepath = askopenfilename(filetypes=[(\"Image Files\", (\"*.jpg\", \"*.png\")), (\"All Files\", \"*.*\")])\n if not filepath:\n return\n return filepath", "def open_file():\r\n the_file = input (\"Enter a file name: \")\r\n while True:\r\n try: \r\n fp = open (the_file, \"r\")\r\n return fp\r\n except FileNotFoundError:\r\n the_file = input (\"Error. Enter a file name: \")", "def get_stream(fname):\n if fname == '-':\n return sys.stdin\n\n if not os.path.isfile(fname):\n print(f\"file not found: {fname}\")\n sys.exit(1)\n\n return open(fname)", "def _get_infile(filepath):\n # type: (Text) -> BinaryIO\n if filepath is None:\n return sys.stdin\n else:\n if not os.path.exists(filepath):\n raise OSError('File does not exist: {}'.format(filepath))\n return open(filepath, 'r')", "def get_filename():\n filename = None\n while filename is None:\n filename = input(\"Enter a data file name: \")\n if not os.path.isfile(filename): #if the file doesn't exist\n print(\"Invalid File Name Entered!\")\n filename = None\n \n infile = open(filename)\n lines = infile.readlines()\n infile.close()\n return (lines, filename)", "def filepicker():\n import tkinter as tk\n from tkinter import filedialog\n\n root = tk.Tk()\n root.withdraw()\n\n file_path = filedialog.askopenfilename()\n return file_path", "def read_file(self):\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n self.filename = askopenfilename(title='Select Hospital Text File') # show an \"Open\" dialog box and return the path to the selected file", "def choose_file(fname=None, env=None, choices=[]):\n if fname:\n fname = os.path.expanduser(fname)\n if os.path.exists(fname):\n return fname\n else:\n raise LookupError(f'No such file {fname}')\n elif env and env in os.environ:\n fname = os.path.expanduser(os.environ[env])\n if os.path.exists(fname):\n return fname\n else:\n raise LookupError(f'No such file {env} = {fname}')\n else:\n ch = []\n for c in choices:\n fname = os.path.expanduser(str(c))\n if os.path.exists(fname):\n return fname\n else:\n ch.append(fname)\n raise LookupError('File not found (tried {}{})'.format(\n env + (', ' if choices else '') if env else '', ', '.join(ch)))", "def open_input(self, fn):\n\treturn (None, None)", "def openInputFile(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot open input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(filename))", "def get_input_file():\n\n filename = input('Input the file name to save data to: ') + '.csv'\n return filename", "def open_file(file_name):\n pass", "def choosefile(self, diagtitle):\r\n root = Tk()\r\n root.withdraw()\r\n sfile = tkFileDialog.askopenfilename(\r\n parent=root,\r\n filetypes = [('.TXT files', '.txt')],\r\n title=diagtitle )\r\n return sfile", "def ask_file(window_title):\n root = tk.Tk()\n root.withdraw()\n media_info_path = os.path.join(os.path.realpath(__file__))\n if ASK_DLL_LOCATION:\n return filedialog.askopenfile(title=window_title).name\n else:\n return 
os.path.join(os.path.dirname(__file__), 'modules', 'pymediainfo', 'pymediainfo', 'MediaInfo.dll')", "def on_open_file(self):\n return tkFileDialog.askopenfilename(\n filetypes=[('default', '*.txt'), ('All files', '*.*')])", "def open_file(filename=\"default.txt\", filepath=\"default_path\"):\n\n if filepath == \"default_path\":\n filepath = \"\"\n\n try:\n fp = open(filepath + filename, \"r+\") # Opens file for reading and writing\n return fp\n except IOError:\n l.error(str(filepath + filename) + \" is not an existing file.\")", "def get_existing_file(msg, skip=False):\n inp = None\n while inp is None:\n inp = raw_input(msg)\n if skip and len(inp) == 0:\n return None\n if not os.path.isfile(inp):\n print \"Not a file:\", inp\n inp = None\n return inp", "def existingFile(filename):\n if not os.path.exists(filename):\n raise argparse.ArgumentTypeError(\"{0} does not exist\".format(filename))\n return filename", "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n\n filename = tkinter.filedialog.askopenfilename(parent=self.tk)\n if filename:\n self.readwtf(filename)\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "def GetFile(file_name):\n\n the_file = None\n\n try:\n the_file = open(file_name, 'rb')\n \n except IOError:\n the_file = None\n \n return the_file", "def get_input_file(self, *args, refsep='$', docopy=True):\n # filename = self.get_data(*args, docopy=docopy)\n filename = args[1]\n ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE',\n 'DGEOFILE', 'MDRIZTAB'])\n print(\"Looking for REF_FILES: {}\".format(ref_files))\n\n for ref_file in ref_files:\n if ref_file.strip() == '':\n continue\n if refsep not in ref_file: # Local file\n refname = self.get_data('customRef', ref_file)\n else: # Download from FTP, if applicable\n refname = os.path.join(ref_file)\n if self.use_ftp_crds:\n download_crds(refname, self.timeout)\n return filename", "def start_file(filename):\n from spyderlib.qt.QtGui import QDesktopServices\n from spyderlib.qt.QtCore import QUrl\n\n # We need to use setUrl instead of setPath because this is the only\n # cross-platform way to open external files. 
setPath fails completely on\n # Mac and doesn't open non-ascii files on Linux.\n # Fixes Issue 740\n url = QUrl()\n url.setUrl(filename)\n return QDesktopServices.openUrl(url)", "def open_file():\n\n file_path = tkfd.askopenfilename(\n title='Select a file to open',\n filetypes=[('Secret', '*.secret'), ('Text', \"*.txt\")],\n )\n if not file_path:\n return\n fp = Path(file_path)\n filename = fp.stem\n category, subject = filename.split(' - ')\n message = fp.read_text()\n if fp.suffix == '.secret':\n password = tksd.askstring(\n 'Enter Password',\n 'Enter the password used to '\n 'encrypt the file.'\n )\n message = weaksauce_decrypt(message, password)\n\n cat_var.set(category)\n subject_var.set(subject)\n message_inp.delete('1.0', tk.END)\n message_inp.insert('1.0', message)", "def existing_file(value):\n is_file = os.path.isfile(value)\n if value == \"\" or not is_file:\n argparse.ArgumentTypeError(\n \"Must specify an existing file for input\")\n return value", "def is_file(filename):\n if not os.path.isfile(filename):\n msg = \"{0} is not a file\".format(filename)\n raise argparse.ArgumentTypeError(msg)\n else:\n return filename", "def display():\r\n name = input(\"Enter the filename:\\n\")\r\n if name==\"42.txt\":\r\n print(f42)\r\n elif name == \"1015.txt\":\r\n print(f1015)\r\n else:\r\n print(\"File not found\")", "def load_file(self):\n return tkinter.filedialog.askopenfilename(defaultextension=\".txt\")", "def request_file():\n \n from tkinter import Tk\n from tkinter.filedialog import askopenfilename\n \n # Make a top-level instance and hide from user.\n root = Tk()\n root.withdraw()\n\n # Make it almost invisible - no decorations, 0 size, top left corner.\n root.overrideredirect(True)\n root.geometry('0x0+0+0')\n\n # Show window again and lift it to top so it can get focus, otherwise dialogs will end up behind the terminal.\n root.deiconify()\n root.lift()\n root.focus_force()\n\n # Show an \"Open\" dialog box and return the path to the selected file\n file_path = askopenfilename(initialdir='./IR_Datasets/',\n title='Excel to Read',\n filetypes=(('New Excel', '*xlsx'), ('Old Excel', '*.xls')),\n parent=root)\n\n # Get rid of the top-level instance once to make it actually invisible.\n root.destroy()\n \n return file_path", "def getFileName(self, textEntry):\n textEntry.setText(QtGui.QFileDialog.getOpenFileName())\n textEntry.emit(QtCore.SIGNAL('FILE_SELECTED'))", "def askOpenFile(dirname=\".\"):\n\n import Tkinter,tkFileDialog\n root = Tkinter.Tk()\n file = tkFileDialog.askopenfile(parent=root,mode='rb',title='Choose a file',initialdir=dirname)\n return file", "def main(filename: str, /) -> None:", "def open_file(self):\n try:\n filename = tkFileDialog.askopenfilename()\n file = open(filename)\n self.image_window.status.config(text='Opened: ' + filename)\n return file\n except:\n self.status.config(text='You fool!')\n tkMessageBox.showwarning(\"Open file\",\n \"Cannot open file \" + filename)\n return None", "def get_fileName(path):\n fileName = input('Select data file from ' + ','.join(os.listdir(path)) + ' ')\n return fileName", "def get_existing_filename(existing_files: List[str]) -> str:\n\n # Ask user which file only if there are multiple files\n\n if len(existing_files) == 1:\n return existing_files[0]\n\n questions = [\n {\n 'type': 'list',\n 'name': 'target_filename',\n 'message': 'Which file do you want to load ?',\n 'choices': existing_files\n }\n ]\n return prompt(questions, style=custom_style_2)[\"target_filename\"]", "def read_file_name(command):\n try:\n 
my_file.read_filename(command[1])\n except FileNotFoundError:\n print('The file {} cannot be found'.format(command[1]))", "def choose_file(self):\n pass", "def get_valid_filename(msg):\r\n\r\n filename = input(msg)\r\n while not os.path.exists(filename):\r\n print(\"That file does not exist.\")\r\n filename = input(msg)\r\n return filename", "def comdlg32_GetOpenFileName(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpofn\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def open_file(file_name):\n\n try:\n return open(file_name, 'rt')\n except Exception as e:\n raise UserException(\"unable to open file {0}\".format(file_name),\n str(e))", "def open_file(self, event=None):\n file = fd.askopenfile(title=\"Choose file to open\",\n filetypes=[(\"Python(default)\", \"*.py\"), (\"Text\", \"*.txt\"),\n (\"Java\", \"*.java\"), (\"JavaScript\", \"*.js\"),\n (\"HTML\", \"*.html\"), (\"CSS\", \"*.css\"),\n (\"All files\", \"*.*\")])\n if file is None:\n return\n else:\n if imghdr.what(\n file.name): # if file is image return image type otherwise return None if file is not an image type\n from project_explorer import ProjectExplorer\n ProjectExplorer().open_image(file.name)\n else:\n self.add_tab(file=file.name, open_file=1)\n from syntax_highlight import Highlighting\n Highlighting().highlight2()", "def openFileExplorer(self, caption=''):\n\n file_path = None\n file_path, idk = QFileDialog.getOpenFileName(caption=caption)\n\n if file_path == '':\n file_path = None\n\n return file_path", "def get_prog_file():\n get_file()\n ## Executa\n file = ARGS.output\n os.system(\"chmod +x \" + file)\n subprocess.call([file])", "def locatefile(self):\r\n dm = DialogManager()\r\n print \"Opening file chooser ...\"\r\n file = dm.choosefile(\"Choose Raw File\")\r\n return file", "def getFilename(self,timeout=None):\n self.show(timeout,modal=True)\n self.exec_()\n if self.result() == QtGui.QDialog.Accepted:\n files = map(str,self.selectedFiles())\n if self.fileMode() == QtGui.QFileDialog.ExistingFiles:\n return files\n else:\n return files[0]\n else:\n return None", "def main():\n file_requested = obtain_filename()\n process_command(file_requested)", "def open_file(extensions, mode):\n validfile = False\n while validfile == False:\n try:\n filename = raw_input().strip(\"\\\"' \")\n validextension = False\n for i in extensions:\n if filename.endswith(i):\n validfile = True\n break\n if validfile == True:\n filename = os.path.join(os.getcwd(), filename)\n f = open(filename, mode)\n else:\n s = \"\"\n if len(extensions) == 1:\n s = extensions[0]\n elif len(extensions) == 2:\n s = extensions[0] + \" or \" + extensions[1]\n elif len(extensions) > 2:\n for i in extensions[:-1]:\n s = s + i + \", \"\n s = s + \"or \" + extensions[-1]\n if filename.endswith(\".zip\") or filename.endswith(\".7z\"):\n print \"File name must end in {}. Do not use compressed files. Enter a new file:\".format(s)\n else:\n print \"File name must end in {}. Enter a new file:\".format(s)\n except IOError:\n print \"File does not exist. Enter in a valid file name:\"\n return f", "def open_input_file(input_file):\n if input_file:\n if not os.path.isfile(input_file):\n sys.stderr.write(\n \"ERROR! Input file (%s) is not a normal file.\\n\" % input_file)\n sys.exit(1)\n try:\n return codecs.open(input_file, \"r\", \"utf8\")\n except:\n sys.stderr.write(\n \"ERROR! 
Could not open input file (%s) for reading:\\n\" % input_file)\n raise\n else:\n return sys.stdin", "def open_file(entry):\n entry.delete(0, 'end')\n file = askopenfile(mode ='r', filetypes =[('PDF Files', '*.pdf')])\n if file is not None: \n entry.insert(0, file.name)", "def define_file() -> str:\n\n print('===SELECT FILE===')\n filenames = next(walk(IMAGES), (None, None, []))[2]\n filenames.append('Exit the program')\n command = menu(filenames)\n if int(command) == len(filenames):\n exit()\n else:\n return filenames[int(command) - 1]", "def file_open(*args, **kwargs):\n return open(*args, **kwargs)", "def GetFilename(title, filename = \"\"):\r\n return _hiew.HiewGate_GetFilename(title, filename)", "def smart_open(filename, *args, **kwargs):\n return LOADERS.get(os.path.splitext(filename)[1], open)(filename, *args, **kwargs)", "def open_file(self: object) -> None:\n self.file = filedialog.askopenfilename(\n initialdir= os.getcwd(),title=\"Select File\",filetypes=(\n (\"Text Files\", \"*.txt\"),(\"all files\",\"*.*\")))\n\n if self.file:\n messagebox.showinfo(\"Selected file\", \"You have selected %s\"%(\n self.file))", "def open_file_dialog(gui, progress_bar):\n\n # File dialog\n fname, ftype = QFileDialog\\\n .getOpenFileName(caption=\"Open URDF File\",\n filter=\"Supported files (*.urdf *.dhparams)\"\n \";;All files (*)\",\n directory=path + '../Examples')\n if fname == '':\n return\n global robot_obj\n # Open the file\n if fname.split(\".\")[-1].lower() == \"urdf\":\n with open(fname) as file:\n urdf_obj = URDF.URDF(file)\n robot_obj = cr.RobotURDF(urdf_obj, progress_bar)\n\n elif fname.split(\".\")[-1].lower() == \"dhparams\":\n dh_obj = dh(fname)\n robot_obj = cr.RobotDH(dh_obj)\n init_gui_from_robot(gui, robot_obj)", "def get_fh(filename, mode):\n fh = None\n try:\n if mode == 'r':\n fh = open(filename,'r')\n elif mode == 'w':\n fh = open(filename,'w')\n else:\n raise ValueError('Command should be r or w')\n except IOError as e:\n print(e)\n except ValueError as e:\n print(e)\n return fh", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def showInputFileInExplorer(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot show input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(filename)))", "def open_file(file_name, mode):\n try:\n the_file = open(file_name, mode)\n except IOError as e:\n print(\"the file can't be open\", file_name, \"the program will be finished\\n\", e)\n sys.exit()\n else:\n return the_file", "def get_output_file_name(argn=2, std_name='output.txt'):\n try:\n name = sys.argv[argn]\n except IndexError:\n name = std_name\n print(\"Warning: no output file name received. 
Output will be\"\n \" written to '%s'.\" % name)\n return name", "def _open(args):\n p = Path(args.uri)\n if p.is_file():\n uri = p.resolve().as_uri()\n else:\n # hope the user has provided a valid URI\n uri = args.uri\n\n print(f'opening {uri}')\n args.service.open(uri)", "def filename(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_filename(self, *args)", "def askFilename():\n# print(\"\\nDo you have the file already?\"+\n# \"\\nYes - proceed\\t\\t No - go back to main menu\")\n# choice = input(\"(Y/N) \")\n# if choice.upper() == \"N\":\n# filename = None\n# elif choice.upper() == \"Y\": \n print(\"\\nInsert file name (without the filetype)\")\n print(\"(PRESS CTRL+C IF THERE IS NO FILE YET!!)\")\n fileOpt = input(\"or press enter if saved on default name: \") \n if fileOpt != \"\":\n filename = fileOpt+\".txt\"\n else:\n print(\"\\n\\nFinding file...\")\n print(\"\\n\\nWhich party is it for?\")\n print(\"A. Labor\\t\\t B. Liberal\")\n partyOpt = input(\"Selected party is (A/B): \")\n list1 = [\"A\", \"B\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A/B): \")\n marginOpt = input(\"\\nWhat was the margin used? (enter as int) \")\n if partyOpt.upper() == \"A\":\n filename = \"LaborParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n elif partyOpt.upper() == \"B\":\n filename = \"LiberalParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n return filename", "def filename(self) -> Optional[str]:\n ...", "def send_file_name():\n if value.get() == \"----------------------\":\n messagebox.showinfo(\"Choose File\", \"Please choose a file to edit.\", parent=app_frame)\n return\n elif len(entries) != 0:\n messagebox.showinfo(\"Warning!\", \"You must first close the current file!\", parent=app_frame)\n return\n\n events = get_file(value.get())\n # Call display_lr_assignments() and send events file to be displayed in the application window\n display_lr_assignments(events)", "def get_file_path():\n root = tk.Tk()\n root.withdraw()\n file_path = filedialog.askopenfilename(filetypes=[(\"Excel file\", \"*.xlsx\")])\n return file_path", "def open(file):\n args = {\"file\": file}\n send_command(\"open\", args)", "def selectfile (f_name):\n global file_name\n file_name = \"\"\n for i in f_name:\n file_name = file_name + i\n try:\n file = open(file_name,\"r\")\n dictionnary = False\n print(\"Now using {0} as base file\".format(file_name))\n file.close()\n except:\n print(\"Are you kidding me? That file doesn't even exist, could you please try again?\")\n return", "def play(filename):\n if sys.platform == \"win32\":\n os.startfile(filename)\n else:\n opener =\"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, filename])", "def _get_filename_from_dialog(file_type):\n\n if file_type is 'res':\n caption = 'Select a results file.'\n filter = 'Adams Results Files (*.res)'\n # Bring up a dialog for the user to select a results file\n filename = PyQt4.QtGui.QFileDialog.getOpenFileName(caption=caption, filter=filter)\n\n elif file_type is 'csv':\n caption='Select location to save the csv results file.'\n filter='CSV Files (*.csv)'\n # Bring up a dialog for the user to select a results file\n filename = PyQt4.QtGui.QFileDialog.getSaveFileName(caption=caption, filter=filter) \n\n return filename" ]
[ "0.73346573", "0.72095716", "0.7022069", "0.69514996", "0.68601674", "0.68402874", "0.6771454", "0.67425793", "0.6699415", "0.6610038", "0.66019046", "0.6577786", "0.6573504", "0.65729046", "0.6548677", "0.6542653", "0.6541863", "0.653708", "0.6503757", "0.65007627", "0.64802676", "0.6442664", "0.64343554", "0.63803226", "0.63379484", "0.6323981", "0.63214576", "0.6291532", "0.62796736", "0.62664354", "0.62581426", "0.6248894", "0.6235451", "0.62298054", "0.6197978", "0.61835253", "0.61835015", "0.6154466", "0.6153664", "0.61435777", "0.6105153", "0.606593", "0.6048966", "0.6025789", "0.60236067", "0.59855485", "0.59845096", "0.5970155", "0.5950574", "0.59192896", "0.59043795", "0.58997566", "0.58877873", "0.5877058", "0.5874593", "0.5854362", "0.58482707", "0.58445346", "0.58362186", "0.5826135", "0.5811371", "0.5808242", "0.5807927", "0.57680213", "0.5759183", "0.5753348", "0.5734968", "0.57314116", "0.5727204", "0.5722768", "0.5715738", "0.5710026", "0.5703809", "0.57035357", "0.5699069", "0.56974316", "0.5692355", "0.56877", "0.5687392", "0.56848705", "0.56838703", "0.5681357", "0.5675174", "0.5668804", "0.56631833", "0.56527066", "0.56454337", "0.5640067", "0.56305623", "0.562501", "0.5623871", "0.56222177", "0.56136763", "0.56077975", "0.55987656", "0.55822873", "0.5578809", "0.5576966", "0.5571158", "0.5564444" ]
0.70722294
2
prints a list in which each element is a pair of 2 items as (str, int) in a neat format [example list = [ ("abc",1), ("efghij",20), ... etc. ] ]
def tabular_formatted_printing(data_list):
    n = len(data_list)
    max = 0
    for i in range(0,n):
        if int(len(data_list[i][0])) > max:
            max = len(data_list[i][0])
    for i in range(0,n):
        if int(len(data_list[i][0])) < max:
            space = max - len(data_list[i][0])
        else:
            space = 0
        print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_list(numbers):\n print(' '.join(map(str, numbers)))", "def _list_to_printable(value):\n fixed_items = []\n for item in value:\n if isinstance(item, (int, float)):\n fixed_items.append(str(item))\n elif item == None:\n fixed_items.append(\"NULL\")\n elif isinstance(item, UNICODE_TYPE):\n fixed_items.append(\"'%s'\" % item.replace(\"'\", \"''\"))\n elif isinstance(item, BYTES_TYPE):\n fixed_items.append(\"'%s'\" % tostr(item.replace(\"'\", \"''\")))\n else:\n raise Exception(\"Unsupported type '%s' given to _list_to_printable\" % type(item))\n\n return '(' + ','.join(fixed_items) + ')'", "def _list_to_printable(value):\n fixed_items = []\n for item in value:\n if type(item) in (int, long, float):\n fixed_items.append(str(item))\n elif item == None:\n fixed_items.append(\"NULL\")\n elif type(item) == unicode:\n fixed_items.append(\"'%s'\" % item.replace(\"'\", \"''\"))\n elif type(item) == str:\n fixed_items.append(\"'%s'\" % str_to_unicode(item.replace(\"'\", \"''\")))\n else:\n raise Exception, \"Unsupported type '%s' given to _list_to_printable\" % type(item)\n\n return '(' + ','.join(fixed_items) + ')'", "def print_list(l):\n print('[' + ', '.join([x.__str__() for x in l]) + ']')", "def printPairList(values, lab1, lab2, precision, offset=16):\n\tprint(lab1.ljust(offset, \" \") + lab2)\n\tfor (v1, v2) in values:\n\t\tsv1 = toStr(v1, precision).ljust(offset, \" \")\n\t\tsv2 = toStr(v2, precision)\n\t\tprint(sv1 + sv2)", "def format_tuple(data):\n return \",\".join([str(item) for item in data])", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def info(self, list: list[int], /) -> list[int]:", "def listToStringFormat(self, list) ->str:\n string = ''\n for element in list:\n string = string + str(element) + \"\\n\"\n return string", "def format_list(list1, fmt = '%16s', delimiter = \",\"):\n string1 = delimiter.join(fmt % h for h in list1) + '\\n'\n return string1", "def show_list(self, desc, lst, writeln):\n if not lst:\n return\n val = ', '.join([list_escape(v) for v in lst])\n writeln(\"%s: %s\" % (desc, val))", "def display(n):\n print ' -> '.join(map(str, to_list(n)))", "def display(n):\n print ' -> '.join(map(str, to_list(n)))", "def scapy_fields_FieldListField_i2repr(self, pkt, x):\n\treturn repr([self.field.i2repr(pkt, v) for v in x])", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def main():\n sampleTuple = (100, 200, 300)\n print(tupleStrFormat(sampleTuple))", "def print_list(self, items):\n\t\tstrtype = unicode if self.encoding else bytes\n\t\titems = map(strtype, items)\n\t\twidth = self.get_width()\n\t\tlines = []\n\t\tsep = strtype(' ')\n\t\tfor item in items:\n\t\t\tif lines:\n\t\t\t\tnew = lines[-1] + sep + item\n\t\t\t\tif len(new) <= width:\n\t\t\t\t\tlines[-1] = new\n\t\t\t\t\tcontinue\n\t\t\tlines.append(item)\n\t\tself.write(strtype('\\n').join(lines))", "def format_output(list_to_output):\n return \" \".join(str(item) for item in list_to_output)", "def ex_list(data):\n return tuple(data)", "def print_list(arr: list, format: str = \"{}: {}\", l_type: ListTypes = ListTypes.NUMERIC_ORDERED) -> str:\n\n result = \"\"\n for i, e in enumerate(arr):\n result += get_list_entry_str(e, i, format, l_type) + '\\n'\n\n return result[:-1]", "def str(self) -> List[Tuple[str, str]]:\n kl = self.keys()\n vl = self.values()\n return [str(kl[idx]) + \",\" + str(vl[idx]) for idx in range(len(kl))]", "def _render_list_to_string(self, alist):\n return 
\",\".join(self._render_row(alist))", "def viewList(list):\n for i in list:\n print i", "def print_list(self):\n self.print_avec_separateur(\" \")", "def _join_list_of_list(lst):\n\n int_list = [list(map(int, each)) for each in lst]\n # print(*int_list, sep=\"\\n\")\n str_list = [\",\".join(map(str, each)) for each in int_list]\n # print(*int_list, sep=\"\\n\")\n # print(str_list)\n final_str = \" \".join(str_list)\n # print(final_str)\n return final_str", "def print_list(list):\n for char in list:\n print(char, end = \", \")\n print(\"\\n\")", "def list_to_string(list):\n if len(list) == 1:\n string = '{}x1'.format(list[0])\n elif list[1:] == list[:-1]:\n string = '{}x{}'.format(list[1], len(list))\n else:\n string = ''\n for i in range(len(list) - 1):\n string += str(list[i]) + ','\n string += str(list[-1])\n return string", "def int_repr(arr):\n return list(map(list2int, arr))", "def print(listing: typing.Iterable[typing.Any]) -> None:\n listing = tuple(str(i) for i in listing)\n if not listing:\n return\n width = max(len(i) for i in listing) + 2\n count = min(shutil.get_terminal_size().columns // width, len(listing))\n for row in itertools.zip_longest(*(listing[i::count] for i in range(count)), fillvalue=''):\n print(*(f'{c:<{width}}' for c in row), sep='')", "def number_list(l):\n return ['{i:>{s}}. {v}'.format(s=len(str(len(l))), i=i+1, v=l[i]) for i in range(len(l))]", "def format_list(list):\n return \" \".join(str(tok) for tok in list)", "def i2s(i):\n return \"[%0.3f,%0.3f]\" % (i[0], i[1])", "def str_tuple(item):\n return \"{}:{}\".format(item[0], item[1])", "def format_number_list(x):\n return ' '.join([format_number(y) for y in x])", "def _to_string(self, lst, indent=''):\n result = []\n for elem in lst:\n if isinstance(elem, list):\n if len(elem) > 0:\n result.append('\\n')\n result.append(self._to_string(elem, indent + ' '))\n elif isinstance(elem, float):\n result.append('%.6f' % elem)\n elif isinstance(elem, basestring):\n for char in ('(', ')', ' '):\n if char in elem:\n result.append('\"%s\"' % elem)\n break\n else:\n result.append(str(elem))\n elif elem is not None:\n result.append(str(elem))\n return indent + '(' + ' '.join(result) + ')\\n' + indent", "def pretty_print(count_list):\n\tfor i in range(len(count_list)):\n\t\tif (count_list[i] > 0):\n\t\t\tprint(chr(i+ord('a')),count_list[i],sep = \": \", end =\"\\n\")", "def print_list(items):\n for element in items:\n print(element)", "def print_list_algos(algos:tuple):\n for i, name_algo in enumerate(algos):\n print(\" {} - {}\".format(i + 1, name_algo))", "def collatz_print (a) :\n return (str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\" for i, j, v in a)", "def stringify_list(rows, separator='\\t'):\n return (separator.join(toolz.map(text_type, row)) for row in rows)", "def print_results(list_object1, list_object2):\n STUDENT_COLUMN = 16\n GENERAL_COLUMN = 14\n\n print()\n print(\"{:>{}}\".format(\"Student ID\",STUDENT_COLUMN),end=\"\")\n\n for i in range(len(list_object1)):\n print(\"{:>{}}\".format(list_object1[i][0],GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(\"Course grade\",GENERAL_COLUMN))\n\n for tuple_element in list_object2:\n\n print(\"{:>{}}\".format(tuple_element[0],STUDENT_COLUMN),end=\"\")\n\n for i, value in enumerate(tuple_element[1]):\n print(\"{:>{}}\".format(value,GENERAL_COLUMN),end=\"\")\n \n print(\"{:>{}}\".format(round(tuple_element[-1],2),GENERAL_COLUMN))", "def pretty_print(results: List[Tuple[str, torch.Tensor]]):\n for item in results:\n print(\"...[%.2f] - %s\" % (item[1], 
item[0]))", "def format(lis):\n if lis:\n return \";\".join(\",\".join(str(i) for i in n) for n in lis)\n else:\n return \"NULL\"", "def _format_list(param_list: Iterable[Any]):\n fmt_list = []\n for item in param_list:\n if isinstance(item, str):\n fmt_list.append(f\"'{item}'\")\n else:\n fmt_list.append(f\"{item}\")\n return \",\".join(fmt_list)", "def formatter(t: tuple):\n s = 'The {} numbers are: ' + '{}, '*(len(t)-1) + '{}'\n return s.format(len(t),*t)", "def display_data(data):\n\n index = 0\n for details in data:\n index += 1\n print(\"{5:1}{0}. {1:10} in {2:15} priority {3:>3}\".format(index, *details))", "def format_list(my_list):\r\n\treturn \", \".join(my_list[::2]) + (\" and \" + my_list[-1])", "def my_formatter(numbers):\n my_list_of_numbers = \", \".join(\"{:d}\".format(my_num) for (my_num) in numbers)\n results = f\"My list of numbers is: {my_list_of_numbers}.\"\n return results", "def listToStr(lst):\n return ','.join(lst)", "def _tupleListToStrings(self):\n graphColorStrings = []\n previousSelection = self.colorlist.GetSelection()\n print(repr(self.graphColors))\n if isinstance(self.graphColors, str):\n self.graphColors = eval(self.graphColors)\n for col in self.graphColors:\n col1 = '%.2f' % float(col[0])\n col2 = '%.2f' % float(col[1])\n col3 = '%.2f' % float(col[2])\n graphColorStrings.append(', '.join([col1, col2, col3]))\n self.colorlist.SetItems(graphColorStrings)\n if 0 <= previousSelection < len(graphColorStrings):\n self.colorlist.SetSelection(previousSelection)\n return graphColorStrings", "def print_as_numbered_list(items):\n\n i = 1\n\n for item in items:\n print(f\"{i}. {item}\")\n i +=1", "def __str__(self):\n s = \"\"\n for x in range(self.length):\n line = []\n #print (f\"x: {x}\")\n for y in range(self.length):\n #print (f\"y: {y}\")\n line.append(str(self.lst[x*self.length +y][1])) \n #print (line)\n s += \" | \".join(line) + \"\\n\"\n return (s)", "def encode_int_list(L):\n return str(L).replace(\" \", \"\")", "def view_list(my_list):\n\n for item in my_list:\n print my_list.index(item)+1 + \". \" + item", "def print_list(list_to_parse: list):\n for item in list_to_parse:\n print(item)", "def print_as_numbered_list(items):\n\n i = 1\n\n for item in items:\n print(f\"{i}. 
{item}\")\n i += 1", "def print_list(input_):\n for item in input_:\n print(item, end = \" \")\n print(\"\")", "def print_list(words):\r\n for w in words:\r\n print w,\r\n print", "def display(self):\n res = \"(\"\n curr = self.head\n while curr:\n val = curr.val\n if type(val) is str:\n val = \"'\" + val + \"'\"\n else:\n val = str(val)\n res += val\n if curr.next:\n res += ', '\n curr = curr.next\n return res + ')'", "def ListToStr(val):\n return ''.join(['%c' % c for c in val])", "def tupleStrFormat(tupl):\n string = \"this is a tuple (\"\n for element in tupl:\n string += str(element) + \", \"\n string += \")\"\n return string", "def getformat(self) -> List[str]:\r\n\r\n if isinstance(self.listaTipos, list) is False:\r\n raise TypeError(f\"{self.listaTipos} has to be a list({type(self.listaTipos)})\")\r\n if len(self.listaTipos) != 10:\r\n raise ValueError(f\"{self.listaTipos} needs to have 10 elements ({len(self.listaTipos)})\")\r\n\r\n saida = []\r\n for _ in self.listaTipos:\r\n saida.append(f\"{_}\")\r\n return saida", "def filter_marks(lst):\n integers = []\n rest = []\n\n for ele in lst: # interate over list\n\n # 'type' will returns type of element\n # if type of element is integer append\n # element to integers list\n if type(ele) is int:\n integers.append(ele)\n else:\n # otherwise append in rest list\n rest.append(ele)\n\n # multiple comma seprated values are returned\n # as tuple in python\n return integers, rest", "def numListar(lista):\r\n num_list=str()\r\n for num in lista:\r\n num = int(num)\r\n num_list=num_list+str(num)\r\n return num_list", "def formatter(in_tuple):\n length = len(in_tuple)\n form_string = (\"the {} numbers are: \" + \", \".join([\"{}\"]*length)).format(length, *in_tuple)\n return form_string.format(in_tuple)", "def list_to_text(ingridients_list):\n to_return = \"List\\n\"\n for (ingridient, quantity) in ingridients_list:\n to_return = f\"{to_return}{ingridient.name} {quantity}\\n\"\n return to_return", "def format_list2(list1, sfmt = '%16s', nfmt = '%16.8e', delimiter = ','):\n outlist = []\n for h in list1:\n try:\n outlist.append(nfmt % h)\n except TypeError:\n outlist.append(sfmt % h)\n \n string1 = delimiter.join(outlist) + '\\n'\n return string1", "def listOptions(lst):\n for k, e in enumerate(lst,1):\n print(\"{:^15}{:<10}\".format(k,e))", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = ''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def display(self):\n container = []\n current = self.head\n while current is not None:\n container.append(current.val)\n current = current.next\n print(tuple(container))\n return tuple(container)", "def format_for_scikit(labels, dataset):\n nd = []\n l = [int(lab) for lab in labels]\n for i in dataset:\n tmp = [int(v) for v in i.values()]\n nd.append(tmp)\n return l,nd", "def print_num_list(self, l):\n self.print_newline()\n for num, item in enumerate(l):\n self._write(\" %i. 
%s\\n\" % (num + 1, item))\n num += 1\n self.print_newline()", "def processed(N:int)->tuple:\n l1= str(N)\n a,b = '',''\n for i in range(len(l1)):\n if l1[i] == '4':\n a+='2'\n b+='2'\n else:\n a+=str(l1[i])\n b+='0'\n return int(a), int(b)", "def stringer(list):\n\tstring = \"\"\n\tfor x in list:\n\t\tstring = string + str(x)\n\treturn string", "def get_list_of_int2(self):\n pass", "def out(lst, max_width=100, index=False, spaces=3, ret=False):\n # Not even a list - just print\n if not isinstance(lst, (list,tuple)):\n print lst\n return\n\n # List of lists of same size\n strs = []\n if all([isinstance(l, (list,tuple)) for l in lst]) and all([len(l) == len(lst[0]) for l in lst]):\n L = len(lst[0])\n temp_strs = []\n for l in lst:\n temp_line = []\n for x in l:\n temp_line.append(str(x))\n temp_strs.append(temp_line)\n fields_sizes = []\n for i in range(L):\n temp_size = []\n for ts in temp_strs:\n temp_size.append(len(ts[i]))\n fields_sizes.append(temp_size)\n widths = [min(max(fs),max_width) for fs in fields_sizes]\n for i,l in enumerate(lst):\n temp = ''\n for j,x in enumerate(l):\n temp += temp_strs[i][j].ljust(widths[j])+' '*spaces\n strs.append(temp)\n\n else:\n for l in lst:\n strs.append(str(l))\n\n if index:\n index_width=len(str(len(strs)))\n for i in range(len(strs)):\n strs[i] = str(i).rjust(index_width)+':'+' '*spaces + strs[i]\n\n s = '\\n'.join(strs)\n\n if (ret == False):\n print s\n else:\n return s", "def NFelt(a):\n return \",\".join([str(c) for c in list(a)])", "def print_digits(digit_list):\n for digit in digit_list:\n print(digit)", "def print_bul_list(self, l):\n self.print_newline()\n for i in l:\n self._write(\" - %s\\n\" % i)\n self.print_newline()", "def display_phrasewise_list(prob_dict):\n print(\"***********Phrase pairs and their ranks*****************\")\n for f_phrase in prob_dict:\n e_phrases = prob_dict[f_phrase]\n s = [(phrase, e_phrases[phrase]) for phrase in sorted(e_phrases, key=e_phrases.get, reverse=True)]\n print(f_phrase ,\"->\",s)\n print(\"----------------------------------------------------------------------\")", "def list_stringify(inlist):\n outlist = []\n for item in inlist:\n if not isinstance(item, (tuple, list)):\n if not isinstance(item, basestring):\n item = str(item)\n else:\n item = list_stringify(item)\n outlist.append(item)\n return outlist", "def format_list(my_list):\n \n new_list = my_list[2: -1]\n new_list = new_list[: : 2]\n new_list = [my_list[0]] + new_list\n new_list = new_list + [\"and \" + my_list[-1]]\n \n string = ', '.join(new_list)\n print(string)", "def FormatOutput( data ):\n nColumn = len(data[0])\n nRow = len(data)\n\n i = 0\n oData = []\n while i < nRow:\n tmp = ', '.join( str(x) for x in data[i] )\n oData.append(tmp)\n i += 1\n \n return oData", "def list_2_string(l, name='List'):\n buff = io.StringIO()\n print_list(l, name=name, output=buff)\n return buff.getvalue()", "def print_list(data):\n for i, line in enumerate(data):\n print(\"Linha {}: {}\".format(i, line))", "def display(self):\n new_list = []\n\n for trans in self.log:\n interne = []\n # apply a 2 decimals float format\n interne.extend([f'{trans.good_type}',\n f'{trans.good_value:.2f}',\n f'{trans.quantity:.2f}',\n f'{trans.sign}',\n f'{trans.total_value:.2f}'])\n new_list.append(list(interne))\n return new_list", "def get_string_from_tuple_list(lstTuples, number):\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack", "def rearrange_digits(input_list):\n sorted_list=mergesort(input_list)[::-1]\n\n first_num=''\n second_num=''\n\n 
for i,item in enumerate(sorted_list):\n if i%2 == 0:\n first_num+=str(item)\n else:\n second_num+=str(item)\n # print([int(first_num),int(second_num)])\n return [int(first_num),int(second_num)]", "def listToString(L):\r\n S = ''\r\n for x in L:\r\n S += str(x)\r\n return S", "def print_list_index(iterable_item):\n if str(type(iterable_item)) == \"<class 'str'>\":\n characters = list(iterable_item)\n for i in enumerate(characters):\n print(characters[i], \":\", i)\n if str(type(iterable_item)) == \"<class 'list'>\":\n for i in enumerate(iterable_item):\n print(iterable_item[i], \":\", i)", "def to_string(student_list):\n student_info = \"\"\n for student in student_list:\n student_info += f\"{str(student)}\\n\"\n return student_info", "def write(lst):\n # TODO", "def format_rich_list(rich_text_list):\n list_style = rich_text_list.get(\"style\")\n list_indent = rich_text_list.get(\"indent\")\n list_items = []\n for idx, elem in enumerate(rich_text_list.get(\"elements\", [])):\n elem_text = format_rich_text(elem)\n elem_text = \"\\u3000\" * list_indent \\\n + get_numbering(idx+1, list_style, list_indent) \\\n + \" \" + elem_text\n list_items.append(elem_text)\n return \"\\n\".join(list_items) + \"\\n\"", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def repr_long_list(seq):\n if len(seq) < 8:\n return repr(seq)\n else:\n return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]", "def __str__(self):\n data_string = \"\"\n for list_el in self.data_list:\n for inner_list_el in list_el:\n data_string += str(inner_list_el)\n data_string += \"\\t\"\n data_string += \"\\n\"\n return data_string", "def __str__(self):\n lst = []\n for key in self.forward:\n for val in self.forward[key]:\n lst.append((key, val))\n return str(lst)", "def printList(arr):\n for word in arr:\n print(f\" {word}\", end=\"\\n\\r\")", "def prettyPrintListHelper_ (l, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(l)\r\n if entries==0 :\r\n stream.write(\"[ ]\")\r\n return\r\n \r\n # Recursive case\r\n stream.write(\"[\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n for ii in xrange(0,entries) :\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n specialStream_(l[ii], stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n if pretty_print : indentOut_(stream, indent); \r\n stream.write(\"]\")", "def pretty_print(output: list):\n for movie in output:\n for item in movie.items():\n print(item[0]+\":\", item[1])\n print()", "def list_str(lis):\r\n as_str = \"\"\r\n for item in lis:\r\n as_str += \" \" + str(item) + \",\"\r\n return as_str[:-1]" ]
[ "0.6564532", "0.6426839", "0.6420148", "0.6390392", "0.6295237", "0.6276649", "0.6256306", "0.6188778", "0.617754", "0.61772645", "0.6162995", "0.61229706", "0.61229706", "0.6106786", "0.60617495", "0.60488445", "0.6039565", "0.60372084", "0.6031298", "0.6028703", "0.6027985", "0.60045195", "0.5977026", "0.59603864", "0.59511197", "0.59203815", "0.591829", "0.59142375", "0.5902983", "0.58927864", "0.58922297", "0.58900523", "0.5882701", "0.58723134", "0.58626896", "0.58572733", "0.58551633", "0.5846961", "0.5824465", "0.58241314", "0.5817596", "0.58094835", "0.5795171", "0.5782589", "0.5771329", "0.5765411", "0.57632124", "0.57630837", "0.57630706", "0.5755923", "0.5751123", "0.5742339", "0.5732483", "0.5728502", "0.5725004", "0.5720709", "0.5715134", "0.57091284", "0.57051283", "0.5703617", "0.5702083", "0.5696912", "0.56927174", "0.567669", "0.5671765", "0.56699884", "0.5667605", "0.56670696", "0.566237", "0.56508607", "0.5644462", "0.56398356", "0.5633828", "0.5629279", "0.562873", "0.5625577", "0.56242687", "0.5610418", "0.55919135", "0.55897534", "0.5581129", "0.55749947", "0.55727077", "0.55650055", "0.55547446", "0.55390906", "0.5539062", "0.55319685", "0.552894", "0.55268073", "0.55263793", "0.5522909", "0.552186", "0.55040497", "0.5504042", "0.5500477", "0.549672", "0.54849565", "0.54810596", "0.54707533", "0.5469916" ]
0.0
-1
Running cost function (Lagrangian)
def lagr(self, x): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_cost(AL, Y):\n pass", "def compute_cost(AL, Y):\n pass", "def compute_cost_derivative(AL, Y):\n pass", "def compute_cost(AL, Y, parameters ,lambd):\n L = len(parameters) // 2\n m = Y.shape[1]\n cost = -1 / m * np.sum(np.nan_to_num(Y * np.log(AL) + (1-Y) * np.log(1-AL)))\n cost+= 0.5*(lambd/m)*sum(np.linalg.norm(parameters['W' + str(i)])**2 for i in range(1,L))\n return cost", "def cost(self) -> float:", "def compute_cost(AL, Y):\n m = Y.shape[1]\n log_val = np.multiply(Y,np.log(AL)) + np.multiply(1-Y,np.log(1-AL))\n cost = np.sum(log_val) * (-1/m)\n cost = np.squeeze(cost)\n return cost", "def compute_cost(self, r):\n self.r_max = 1\n return np.exp(1 / (np.power(r, 2) - np.power(self.r_max, 2))) if r < self.r_max else 0", "def costFun(self, S, x):", "def cost(self):\n\t\treturn self.g + self.h", "def calculate_total_cost(state):\n pass", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def get_lr_cost(self):\n\n\t\tlabels = self.get_symbolic_expected_rewards()\n\n\t\treturn -theano.tensor.mean(\n\t\t\ttheano.tensor.log(labels)[\n\t\t\t\ttheano.tensor.arange(self.symbolic_output.shape[0]),\n\t\t\t\tself.symbolic_output])", "def final_state_cost(x_tape):\n return np.linalg.norm(deviation_from_upright_equilibrium(x_tape[:, -1]))", "def compute_cost(self, AL, Y):\n m = Y.shape[1]\n cost = (-1 / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))\n cost = np.squeeze(cost)\n\n return cost", "def compute_cost(AL, Y):\n \n m = Y.shape[1]\n\n # Compute loss from aL and y.\n ### START CODE HERE ### (≈ 1 lines of code)\n cost = - ( np.sum( np.multiply( Y, np.log(AL)) + np.multiply( (1 - Y), np.log(1 - AL))))/Y.shape[1]\n ### END CODE HERE ###\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. 
this turns [[17]] into 17).\n assert(cost.shape == ())\n \n return cost", "def final_cost(self, x):\n return self.x_M_x(x[-1,:,:],self.R)", "def calculate_total_cost(state):\r\n return state.cost()", "def costFunction(R, W):\n costFunc = 0\n for i in range(0, len(R)):\n for j in range(i, len(R)):\n costFunc += costBetweenNodes(R, W, i, j)\n return costFunc", "def checkCostFunction(lbd=0):\n # Create small problem\n X_t = np.random.rand(4, 3)\n Theta_t = np.random.rand(5, 3)\n\n # Zap out most entries\n Y = X_t.dot(Theta_t.T)\n Y[np.random.rand(Y.shape[0], Y.shape[1]) > .5] = 0\n R = np.zeros(Y.shape)\n R[Y == 0] = 1\n\n # Run Gradient Checking\n X = np.random.randn(X_t.shape[0], X_t.shape[1])\n Theta = np.random.randn(Theta_t.shape[0], Theta_t.shape[1])\n num_users = Y.shape[1]\n num_movies = Y.shape[0]\n num_features = Theta_t.shape[1]\n\n def Jfunc(t):\n return cofiCostFunc(t, Y, R, num_users, num_movies, num_features, lbd)\n\n numgrad = computeNumericalGradient(Jfunc, np.r_[X.flatten(), Theta.flatten()])\n\n cost, grad = cofiCostFunc(np.r_[X.flatten(), Theta.flatten()], Y, R, num_users, num_movies, num_features, lbd)\n\n print(np.c_[numgrad, grad])\n print('The above two columns you get should be very similar.')\n print('(Left-Your Numerical Gradient, Right-Analytical Gradient)\\n')\n\n diff = np.linalg.norm(numgrad-grad)/np.linalg.norm(numgrad+grad)\n print('If your cost function implementation is correct, then')\n print('the relative difference will be small (less than 1e-9).')\n print('Relative Difference: %g\\n' % diff)", "def compute_cost(AL, Y):\n m = AL.shape[1]\n cost = 1./m*(-Y*np.log(AL)-(1.0 - Y)*np.log(1.0 - AL))\n cost = np.squeeze(cost)\n return cost", "def compute_cost(X, y, theta, lambd):\n assert(theta.shape[0] == X.shape[1])\n \n m = X.shape[0]\n grad = np.zeros(y.shape)\n J = 0\n \n output = sigmoid(np.dot(X, theta))\n\n J = np.sum(- y * np.log(output) - (1 - y) * np.log(1 - output)) / m + lambd / (2 * m) * np.sum(np.square(theta[1:]))\n\n grad = np.dot(X.T, (output - y)) / m\n \n grad[1:] = grad[1:] + lambd / m * theta[1:]\n\n return J, grad", "def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction", "def cost_derivative(output_activations, y):\n return (output_activations - y)", "def lrCostFunction(theta,X,y, lambda_reg):\n m = np.size(y)\n grad = np.zeros(np.size((theta)))\n J_base, grad = costFunction(theta, X, y)\n \n\n reg_cost = (lambda_reg / (2.0 * m)) * np.sum(theta[1:] ** 2)\n \n reg_gradient = (lambda_reg / m) * theta\n reg_gradient[0] = 0\n cost = J_base + reg_cost\n return cost, grad + reg_gradient", "def lr_cost_function(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> float:\n\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n J = 0\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost of a particular choice of theta.\n # You should set J to the cost.\n\n # =============================================================\n return J", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n 
Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def running_cost(self, t_span, step_size, x):\n cost = 0\n ones = torch.ones(x.shape[1], 1, device=x.device)\n for idx,t in enumerate(t_span):\n input_nn = torch.cat([ones*t, x[idx,:,:]], 1)\n X_C_X = self.x_M_x(x[idx,:,:], self.C)\n alpha_t = self.func_ode.alpha(input_nn)\n alpha_D_alpha = self.x_M_x(alpha_t, self.D)\n cost += step_size*(X_C_X + alpha_D_alpha)\n return cost", "def lr_cost_function(theta, X, y, learning_rate):\n m = len(y) # number of training examples\n # You need to return the following variables correctly \n J = 0\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta))\n\n #Compute cost:\n first = (1/m)*np.sum(np.dot(-y.T, np.log(h)) - np.dot((1-y).T, np.log(1-h)), axis=0)\n second = (learning_rate/(2*m))*np.sum(theta[1:]**2)\n J = first + second\n \n #Compute gradient\n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return float(J), grad", "def cost(self, cost_object, target):\n\n return cost_object.f(self.a[-1], target).mean(axis=0).sum()", "def _cost_refueling(self):\n if self.number_of_courses % self.refueling_frequency == 0 & self.number_of_courses != 0:\n lowest_amount = self.refueling_liter_range[0] # take a minimum value\n highest_amount = self.refueling_liter_range[1] # take a maximum value\n refueled_petrol = randint(lowest_amount, highest_amount)\n cost = refueled_petrol * self.petrol_cost\n return cost\n else:\n return 0", "def cost(self, state: Grid2D.State): # pylint: disable=no-self-use\n return 1", "def step(self, closure=None):\n orig_loss,err,pred = closure()\n loss = orig_loss\n\n group = self.param_groups[0]\n lr = group['lr']\n decay_lr = group['decay_lr']\n max_iter = group['max_iter']\n reg = group['reg']\n backtrack = group['backtrack']\n bt_alpha = group['bt_alpha']\n bt_beta = group['bt_beta']\n sketch_size = group['sketch_size']\n tolerance = group['tolerance']\n\n #import pdb; pdb.set_trace()\n n = err.shape[0] #batch size\n #If sketching the jacobian, randomly select [sketch_size] samples\n \n if sketch_size is not None:\n idx = torch.randperm(n)[:sketch_size]\n else:\n idx = torch.arange(n) #Don't sketch, use all samples\n \n w0 = nn.utils.parameters_to_vector(self._params) #weight parameters in vector form\n \n #Compute Gauss-Newton vector product \n grad, ggnvp = _make_ggnvp(err,self._params,w0,n,reg,idx) #return gradient in vector form + ggnvp function\n #Solve for the Conjugate Gradient Direction\n dw, cost_log = _conjugate_gradient(ggnvp, grad, max_iter, tolerance)\n\n #Perform backtracking line search\n val = loss + 0.5 * reg * torch.norm(w0)**2\n fprime = -1*dw @ grad\n \n self.grad_update += 1\n if backtrack > 0:\n t = lr\n\n #TODO: If using backtracking, get new loss with (w0 - t*dw) as network parameters\n bts = 0\n alpha = bt_alpha\n beta = bt_beta \n while (loss + 0.5 * reg * torch.norm(w0 - t*dw)**2 > val + alpha * t * fprime):\n t = beta * t\n bts += 1\n if bts > backtrack:\n print('Maximum backtracking reached, accuracy not guaranteed')\n break\n elif decay_lr: #decay lr\n t = lr/np.maximum(1, self.grad_update-10)\n else: #use lr step-size\n t = lr\n\n print('step size: {}'.format(t))\n\n #Update the model parameters\n self._add_grad(-t, dw)\n \n return val, pred", "def fn(x):\n if x == 0: return 0\n if x < 0: return -inf \n return max(fn(x - c) * 10 + i + 1 for i, c in enumerate(cost))", "def 
get_cost(self, Y, T):\n return - np.multiply(T, np.log(Y)).sum() / Y.shape[0]", "def compute_gradients(self, logits, target):\n\n target_length = target.shape[0]\n num_time_steps = logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n\n # expand labels by inserting a blank between each pair\n normalized_logits = softmax(logits)\n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n alpha = self.compute_forward_variables(normalized_logits, target) \n beta = self.compute_backward_variables(normalized_logits, target)\n\n # rescale\n alpha = alpha / np.sum(alpha, axis=0)\n beta = beta / np.sum(beta, axis=0)\n alphabeta = alpha * beta\n print \"alpha\"\n print alpha\n\n # compute zt\n z = Counter()\n for t in xrange(num_time_steps):\n for s, k in enumerate(l):\n z[t] += alphabeta[s, t] / normalized_logits[t, k]\n \n # normalized_logits is time steps t by labels k\n # alpha is 2 * target_length - 1 by time steps\n lab_zk = np.zeros_like(normalized_logits)\n for s, k in enumerate(l):\n for t in xrange(num_time_steps):\n lab_zk[t, k] += alphabeta[s, t]\n\n grad = normalized_logits\n for k in xrange(target.shape[0]):\n for t in xrange(num_time_steps):\n ytk = normalized_logits[t, k]\n constant = 1.0 / (ytk * z[t])\n grad[t, k] = ytk - constant * lab_zk[t, k]\n \n return grad", "def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining: (float)\n :return: (float) current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def ensemble_cost(x_tapes):\n per_trajectory_costs = [final_state_cost(x_tape) for x_tape in x_tapes]\n # Return the L1-norm of the per-trajectory-costs, scaled by the number of\n # trajectories.\n return np.linalg.norm(per_trajectory_costs, 1) / len(x_tapes)", "def objective(rp,n=5000,C=-2*10**11,a=300,b=1):\n l = log(rp)/n\n r = exp(l)\n rm1 = r-1\n return (rp-1)*((a-b*n)*rm1 + 1) - C*(rm1)*(rm1)\n #return rm1", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def total_cost(self):\n return np.einsum('i->', self.c[self.s])", "def Hstep_cost_function(H): \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost", "def eval_cost(self, params, **kwargs):\n raise NotImplementedError", "def _cost_method(self, *args, **kwargs):\n\n cost_val = 0.5 * np.linalg.norm(self.obs_data - self.op(args[0])) ** 2\n\n if 'verbose' in kwargs and kwargs['verbose']:\n print(' - DATA FIDELITY (X):', cost_val)\n\n return cost_val", "def cost(self, dgvel):\n J_ = self._controlled_frame.jacobian[3:6,:]\n J = param(value=matrix(J_))\n dJ = self._controlled_frame.djacobian[3:6,:]\n gvel = self._world.gvel\n Pdes = self._target_frame.pose[0:3,3]\n cf = self._controlled_frame\n dVdes = 10.*dot(cf.pose[0:3,0:3].T, Pdes - cf.pose[0:3,3]) -\\\n 2.*sqrt(10.)*dot(J_, self._world.gvel)\n return norm2(J*dgvel + param(value=matrix(dot(dJ, gvel) - dVdes)))", 
"def get_cost(self) -> float:\n return math.e / self.fitness", "def prediction_cost(a, y):\n return np.sum(-(y * np.log(a) + (1 - y) * np.log(1 - a)))", "def obs_cost_fn(self, state):\n # Weights for different terms\n W_PUSHER = 1\n W_GOAL = 2\n W_DIFF = 5\n\n length = state.shape[0]\n # pusher_x, pusher_y = state[:, 0], state[:, 1]\n box_x, box_y = state[:, 2], state[:, 3]\n # goal_x, goal_y = np.tile(self.goal[0], (length, 1)), np.tile(self.goal[1], (length, 1))\n\n pusher = state[:, 0:2]\n box = state[:, 2:4]\n goal = np.tile(self.goal, (length, 1))\n goal_x, goal_y = goal[:, 0], goal[:, 1]\n\n d_box = np.linalg.norm(pusher - box, axis=1, ord=2)\n d_goal = np.linalg.norm(box - goal, axis=1, ord=2)\n\n\n # pusher_box = np.array([box_x - pusher_x, box_y - pusher_y])\n # box_goal = np.array([goal_x - box_x, goal_y - box_y])\n # d_box = np.sqrt(np.dot(pusher_box, pusher_box))\n # d_goal = np.sqrt(np.dot(box_goal, box_goal))\n diff_coord = np.abs(box_x / (box_y + EPSILON) - goal_x / (goal_y + EPSILON))\n # the -0.4 is to adjust for the radius of the box and pusher\n return W_PUSHER * np.max([d_box - 0.4, np.zeros(len(d_box))], axis=0) + W_GOAL * d_goal + W_DIFF * diff_coord", "def lbfgsb(cost_func, x0, args=(), delta_func = 0):\n\n return minimize(fun = cost_func, x0 = x0, args = args,\n method = 'L-BFGS-B', jac = delta_func).x", "def cost(self, Y, A):\n loss1 = Y * np.log(A)\n m = Y.shape[1]\n cost = -1 * np.sum(loss1) / m\n return cost", "def calculate_cost(x, y, weights):\r\n predictions = compute_prediction(x, weights)\r\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\r\n return cost", "def l2_reg_cost(cost, lambtha, weights, L, m):\n sumWeights = 0\n for i in range(1, L + 1):\n sumWeights += np.linalg.norm(weights['W' + str(i)])\n return cost + sumWeights * lambtha / (2 * m)", "def CostFunction(self, out, V, P, params):\n u = self.u\n p = self.p\n puni = self.puni\n xd = self.xd\n xa = self.xa\n l = self.l\n Lagrange_Tracking = 0\n Lagrange_Regularisation = 0\n\n # input regularization\n for name in set(u.keys()):\n Lagrange_Regularisation += puni['weights',name][0]*ca.mtimes((u[name]-p['ref',name]).T,u[name]-p['ref',name])\n\n Lagrange_Regularisation += puni['weights','AoA']*out['AoA']**2\n Lagrange_Regularisation += puni['weights','sslip']*out['sslip']**2\n\n # --- Initialization tracking\n for name in set(xd.keys())- set(['R','E','Drag']):\n Lagrange_Tracking += puni['weights',name][0]*ca.mtimes((xd[name]-p['ref',name]).T,xd[name]-p['ref',name])\n for k in range(9):\n Lagrange_Tracking += ca.reshape(puni['weights','R'][0]*ca.mtimes((xd['R']-p['ref','R']).T,xd['R']-p['ref','R']),9,1)[k]\n\n\n Lagrange_Tracking = ca.Function('lagrange_track', [xd,xa,u,p,puni,l],[Lagrange_Tracking])\n Lagrange_Regularisation = ca.Function( 'lagrange_reg', [xd,xa,u,p,puni,l],[Lagrange_Regularisation])\n\n\n Tracking = 0\n Regularisation = 0\n\n\n for k in range(self.nk): # V['XA',k,0] is not same time step as V['Xd',k,0] but same result\n ftrack = Lagrange_Tracking(V['Xd',k,0], V['XA',k,0], V['U',k], P['p',k,0],P['puni'], V['l'])\n Tracking += ftrack\n\n freg = Lagrange_Regularisation(V['Xd',k,0], V['XA',k,0], V['U',k], P['p',k,0],P['puni'], V['l'])\n Regularisation += freg\n\n E_final = 10. 
* V['Xd',-1,-1,'E'] # for maximising final energy\n Tracking_Cost = (1-P['toggle_to_energy']) * Tracking #* 1e-3 # Tracking of initial guess\n Regularisation_Cost = Regularisation # Regularisation of inputs\n Lift_Cost = 0.5*V['vlift']**2 #* 1e2 # Regularisation of inputs\n Energy_Cost = P['toggle_to_energy'] * (E_final/params['sref'])/V['tf']\n SOSCFix = 10. * V['Xd',self.nk/4,0,'q',1]**2\n\n Cost = 0\n Cost = (Tracking_Cost + Regularisation_Cost + Lift_Cost + SOSCFix)/float(self.nk) + Energy_Cost\n\n return Cost", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def lr_cost_function_grad(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> np.ndarray:\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n grad = np.zeros(theta.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the partial derivatives and set grad to the partial\n # derivatives of the cost w.r.t. each parameter in theta.\n\n # =============================================================\n return grad", "def l2_reg_cost(cost, lambtha, weights, L, m):\n Frobenius = 0\n for k, v in weights.items():\n if k[0] == \"W\":\n Frobenius += np.linalg.norm(v)\n return cost + (lambtha/(2*m)) * Frobenius", "def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. 
\n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def __compute_cost(self, x, y):\n\n predictions = self.__compute_prediction(x)\n cost = np.mean(-y * np.log(predictions) - (1 - y) * np.log(1 - predictions))\n\n return cost", "def b_gradient_descent(self, LB,UB,eta, tol,iter):\n bgd=[]\n bgd_x=[LB]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n new_x=LB\n bgd_x.append(LB)\n \n for i in range(iter):\n for j in np.arange(LB,UB,0.1):\n prev_x=new_x\n new_x=prev_x-(learn_rate*first_derivative(prev_x))\n #print(\"i = \",j,\"gradient =\",(learn_rate*first_derivative(j)),iteration)\n iteration=iteration+1\n #print(iteration)\n if iteration >=iter:\n break \n if new_x <= tol:\n #print(\"new_x = \",new_x,\"gradient =\",(learn_rate*first_derivative(prev_x)), iteration) \n break\n \n \n \n \n #print(new_x)\n bgd_x.append(new_x)\n \n \n bgd.append(bgd_x)\n bgd.append(new_x)\n bgd.append(iteration)\n\n return bgd", "def return_l_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target 
angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[(k3/2)*U**2*dt]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[k1*(1/2)*(X[0]-TargetAngle)**2*dt]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt:\\\n np.matrix([[k2*(1/2)*(X[1]-TargetAngularVelocity)**2*dt]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def step_cost(self, state, action, result=None):\n return 1 # Override this if actions have different costs", "def cost_function(theta, X, y):\n\n l = None\n #######################################################################\n # TODO: #\n # Compute and return the cost l of a particular choice of #\n # theta. #\n # #\n #######################################################################\n thetaX = np.dot(X, theta)\n g = tanh(thetaX)-y\n l = np.sum(g*g) / X.shape[0]\n \n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return l", "def cost(self, start_value, end_value):\n cost = np.linalg.norm(start_value - end_value)\n return cost", "def return_running_cost_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. 
'\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.trapz((k3/2)*U**2,dx=dt)\n else:\n result1 = lambda X,U,dt: 0\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.trapz(k1*(1/2)*(X[0,1:]-TargetAngle)**2,dx=dt)\n else:\n result2 = lambda X,U,dt: 0\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt:\\\n np.trapz(k2*(1/2)*(X[1,1:]-TargetAngularVelocity)**2,dx=dt)\n else:\n result3 = lambda X,U,dt: 0\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def add_cost(self):\n \n # Get Pij, probability j will be i's neighbor\n self._get_Pij()\n \n def _add_to_cumSum(Idx, cumsum):\n \n \"\"\"Add patient to log partial likelihood sum \"\"\"\n \n # Get survival of current patient and corresponding at-risk cases\n # i.e. those with higher survival or last follow-up time\n Pred_thisPatient = self.T[Idx]\n Pred_atRisk = self.T[self.At_Risk[Idx]:tf.size(self.T)-1]\n \n # Get Pij of at-risk cases from this patient's perspective\n Pij_thisPatient = self.Pij[Idx, self.At_Risk[Idx]:tf.size(self.T)-1]\n \n # exponentiate and weigh Pred_AtRisk\n Pij_thisPatient = tf.pow(Pij_thisPatient, self.KAPPA)\n Pred_atRisk = tf.multiply(tf.exp(Pred_atRisk), Pij_thisPatient)\n \n # Get log partial sum of prediction for those at risk\n LogPartialSum = tf.log(tf.reduce_sum(Pred_atRisk))\n \n # Get difference\n Diff_ThisPatient = tf.subtract(Pred_thisPatient, LogPartialSum)\n \n # Add to cumulative log partial likeliood sum\n cumsum = tf.add(cumsum, Diff_ThisPatient)\n \n return cumsum\n \n def _add_if_observed(Idx, cumsum):\n \n \"\"\" Add to cumsum if current patient'd death time is observed \"\"\"\n \n with tf.name_scope(\"add_if_observed\"):\n cumsum = tf.cond(tf.equal(self.O[Idx], 1), \n lambda: _add_to_cumSum(Idx, cumsum),\n lambda: tf.cast(cumsum, tf.float32)) \n \n Idx = tf.cast(tf.add(Idx, 1), tf.int32)\n \n return Idx, cumsum\n \n def _penalty(W):\n \n \"\"\"\n Elastic net penalty. 
Inspired by: \n https://github.com/glm-tools/pyglmnet/blob/master/pyglmnet/pyglmnet.py\n \"\"\"\n \n with tf.name_scope(\"Elastic_net\"):\n \n # Lasso-like penalty\n L1penalty = self.LAMBDA * tf.reduce_sum(tf.abs(W))\n \n # Compute the L2 penalty (ridge-like)\n L2penalty = self.LAMBDA * tf.reduce_sum(W ** 2)\n \n # Combine L1 and L2 penalty terms\n P = 0.5 * (self.ALPHA * L1penalty + (1 - self.ALPHA) * L2penalty)\n \n return P\n \n \n with tf.variable_scope(\"loss\"):\n \n cumSum = tf.cast(tf.Variable([0.0]), tf.float32)\n Idx = tf.cast(tf.Variable(0), tf.int32)\n \n # Go through all uncensored cases and add to cumulative sum\n c = lambda Idx, cumSum: tf.less(Idx, tf.cast(tf.size(self.T)-1, tf.int32))\n b = lambda Idx, cumSum: _add_if_observed(Idx, cumSum)\n Idx, cumSum = tf.while_loop(c, b, [Idx, cumSum])\n \n # cost is negative weighted log likelihood\n self.cost = -cumSum\n \n # Add elastic-net penalty\n self.cost = self.cost + _penalty(self.W)", "def cofiCostFunc(params, Y, R, num_users, num_movies, num_features, lbd):\n X = np.reshape(params[:num_movies*num_features], (num_movies, num_features))\n Theta = np.reshape(params[num_movies*num_features:], (num_users, num_features))\n\n # J=sum((X*Theta'-Y)^2) where R[i,j]==1\n h = X.dot(Theta.T)-Y\n M = h**2\n J = (M*R).sum()/2\n reg = lbd/2*((X**2).sum()+(Theta**2).sum())\n J = J+reg\n\n X_grad = (h*R).dot(Theta)+lbd*X\n Theta_grad = (h*R).T.dot(X)+lbd*Theta\n\n grad = np.r_[X_grad.flatten(), Theta_grad.flatten()]\n return J, grad", "def compute_cost(self, del_u : list, u : list):\n print(\"ym: \", self.ym, \"yn: \", self.yn)\n self.cost = 0.0\n\n self.ym = self.d_model.ym\n self.yn = self.d_model.yn\n\n # FIXME : this is supposed to be from N1 to N2\n self.cost+= (self.ym[0] - self.yn[0])\n angle_diff = (self.ym[1] - self.yn[1])\n if angle_diff > np.pi:\n angle_diff -= 2*np.pi\n if angle_diff < -np.pi:\n angle_diff += 2*np.pi\n self.cost += angle_diff\n\n for j in range(self.Nu):\n self.cost += (self.ym[j] - self.yn[j])**2\n\n for j in range(self.Nu):\n self.cost += self.lambd[j]*(del_u[j])**2\n\n for j in range(self.Nu):\n self.cost += self.s / (u[j] + self.r / 2.0 - self.b) + self.s / (self.r/2.0 + self.b - u[j]) - 4.0 / self.r\n\n return self.cost", "def fixed_cost(self):\n return np.einsum('i->', self.c[self.f])", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def _calc_rebl_cost(self, ozone, max_cost=7):\n dist = Veh._get_dist_to_all_zones(ozone)[[\"DOLocationID\", \"trip_distance_meter\"]]\n # dist = veh._get_dist_to_all_zones(veh.ozone)[[\"DOLocationID\", \"trip_distance_meter\"]]\n # this is the costliest operation! 
\n dist[\"costs\"] = ((dist.trip_distance_meter * self.data_obj.FUEL_COST).apply(\n lambda x: np.around(x, 1))) / max_cost\n # dist[\"costs\"] = dist[\"costs\"].apply(lambda x: np.around(x, 1))\n # dist[\"costs\"] /= max_cost\n\n return dist", "def cost_function(params, count):\n circuit = models.Circuit(nqubits)\n for l in range(layers):\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(0, nqubits - 1, 2):\n circuit.add(gates.CZ(q, q + 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(1, nqubits - 2, 2):\n circuit.add(gates.CZ(q, q + 1))\n circuit.add(gates.CZ(0, nqubits - 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n\n cost = 0\n circuit.set_parameters(\n params\n ) # this will change all thetas to the appropriate values\n for i in range(len(ising_groundstates)):\n final_state = circuit(np.copy(ising_groundstates[i]))\n cost += np.real(encoder.expectation(final_state.state()))\n\n if count[0] % 50 == 0:\n print(count[0], cost / len(ising_groundstates))\n count[0] += 1\n\n return cost / len(ising_groundstates)", "def compute_cost(A3, Y):\n m = A3.shape[1]\n cost = -1 / m * (np.dot(Y, np.log(A3).T) + np.dot(1 - Y, np.log(1 - A3).T));\n return cost", "def fn(i, cost):\n if cost >= target or i == len(toppingCosts): return cost\n return min(fn(i+1, cost), fn(i+1, cost+toppingCosts[i]), key=lambda x: (abs(x-target), x))", "def cost_function(self, config_samples):\n cost = self.work_tracker(config_samples)\n return cost", "def return_lu_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. 
'\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.matrix([[k3*U*dt]])\n else:\n result1 = lambda X,U,dt: np.matrix([[0]])\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.matrix([[0]])\n else:\n result2 = lambda X,U,dt: np.matrix([[0]])\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt: np.matrix([[0]])\n else:\n result3 = lambda X,U,dt: np.matrix([[0]])\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def prediction_cost(a, y):\n return np.sum((a - y) ** 2) / 2", "def cost_fun(x, problem):\n j = 0\n if problem['use_log_bar']:\n c = ineqconstr(x, problem)\n j += np.sum(logbarrierfunc(0.1, c, problem['use_sigma']))\n\n x, t_final = matrify(x, problem)\n if problem['T']!=0:\n j += np.sum([problem['cost_fun_single'](x[:, :, i], t_final, problem) for i in range(problem['Nv'])])\n else:\n j = t_final\n return j", "def compute_cost(n: int,\n lam: float,\n dE: float,\n L: int,\n Lxi: int,\n chi: int,\n beta: int,\n stps: int,\n verbose: bool = False) -> Tuple[int, int, int]:\n\n # The number of bits used for the second register.\n nxi = np.ceil(np.log2(n // 2))\n\n # The number of bits for the contiguous register.\n nLxi = np.ceil(np.log2(Lxi + n // 2))\n\n # The number of bits used for the first register.\n nL = np.ceil(np.log2(L + 1))\n\n # The power of 2 that is a factor of L + 1\n eta = power_two(L + 1)\n\n oh = [0] * 20\n for p in range(20):\n # JJG note: arccos arg may be > 1\n v = np.round(np.power(2,p+1) / (2 * np.pi) * arccos(np.power(2,nL) /\\\n np.sqrt((L + 1)/2**eta)/2))\n oh[p] = np.real(stps * (1 / (np.sin(3 * arcsin(np.cos(v * 2 * np.pi / \\\n np.power(2,p+1)) * \\\n np.sqrt((L + 1)/2**eta) / np.power(2,nL)))**2) - 1) + 4 * (p + 1))\n\n # Bits of precision for rotation\n br = int(np.argmin(oh) + 1)\n\n # The following costs are from the list starting on page 50.\n\n # The cost for preparing an equal superposition for preparing the first\n # register in step 1 (a). We double this cost to account for the inverse.\n cost1a = 2 * (3 * nL + 2 * br - 3 * eta - 9)\n\n # The output size for the QROM for the first state preparation in Eq. (C27)\n bp1 = nL + chi\n\n # The cost of the QROM for the first state preparation in step 1 (b) and\n # its inverse.\n cost1b = QR(L + 1, bp1)[1] + QI(L + 1)[1]\n\n # The cost for the inequality test, controlled swap and their inverse in\n # steps 1 (c) and (d)\n cost1cd = 2 * (chi + nL)\n\n # The total cost for preparing the first register in step 1.\n cost1 = cost1a + cost1b + cost1cd\n\n # The output size for the QROM for the data to prepare the equal\n # superposition on the second register, as given in Eq. (C29).\n bo = nxi + nLxi + br + 1\n\n # This is step 2. This is the cost of outputting the data to prepare the\n # equal superposition on the second register. We will assume it is not\n # uncomputed, because we want to keep the offset for applying the QROM for\n # outputting the rotations.\n cost2 = QR(L + 1, bo)[1] + QI(L + 1)[1]\n\n # The number of bits for rotating the ancilla for the second preparation.\n # We are just entering this manually because it is a typical value.\n br = 7\n\n # The cost of preparing an equal superposition over the second register in\n # a controlled way. 
We pay this cost 4 times.\n cost3a = 4 * (7 * nxi + 2 * br - 6)\n\n # The cost of the offset to apply the QROM for state preparation on the\n # second register.\n cost3b = 4 * (nLxi - 1)\n\n bp2 = nxi + chi + 2\n\n # The cost of the QROMs and inverse QROMs for the state preparation, where\n # in the first one we need + n/2 to account for the one-electron terms.\n cost3c = QR(Lxi + n // 2, bp2)[1] + QI(Lxi + n // 2)[1] + QR(\n Lxi, bp2)[1] + QI(Lxi)[1]\n\n # The inequality test and state preparations.\n cost3d = 4 * (nxi + chi)\n\n # The total costs for state preparations on register 2.\n cost3 = cost3a + cost3b + cost3c + cost3d\n\n # The cost of adding offsets in steps 4 (a) and (h).\n cost4ah = 4 * (nLxi - 1)\n\n # The costs of the QROMs and their inverses in steps 4 (b) and (g).\n cost4bg = QR(Lxi + n // 2, n * beta // 2)[1] + QI(Lxi + n // 2)[1] + QR(\n Lxi, n * beta // 2)[1] + QI(Lxi)[1]\n\n # The cost of the controlled swaps based on the spin qubit in steps 4c and f\n cost4cf = 2 * n\n\n # The controlled rotations in steps 4 (d) and (f).\n cost4df = 4 * n * (beta - 2)\n\n # The controlled Z operations in the middle for step 4 (e).\n cost4e = 3\n\n # This is the cost of the controlled rotations for step 4.\n cost4 = cost4ah + cost4bg + cost4cf + cost4df + cost4e\n\n # This is the cost of the reflection on the second register from step 6.\n cost6 = nxi + chi + 2\n\n # The cost of the final reflection req'd to construct the step of the\n # quantum walk from step 9.\n cost9 = nL + nxi + chi + 1\n\n # The extra two qubits for unary iteration and making the rflxn controlled.\n cost10 = 2\n\n # The Toffoli cost for a single step\n cost = cost1 + cost2 + cost3 + cost4 + cost6 + cost9 + cost10\n\n # The number of steps needed\n iters = np.ceil(np.pi * lam / (2 * dE))\n\n # Now the number of qubits from the list on page 54.\n\n k1 = np.power(2, QR(Lxi + n // 2, n * beta // 2)[0])\n\n # The control register for phase estimation and iteration on it.\n ac1 = np.ceil(np.log2(iters + 1)) * 2 - 1\n\n # The system qubits\n ac2 = n\n\n # The first register prepared, a rotated qubit and a flag qubit.\n ac3 = nL + 2\n\n # The output of the QROM, the equal superposition state and a flag qubit.\n ac4 = nL + chi * 2 + 1\n\n # The data used for preparing the equal superposition state on the 2nd reg\n ac5 = bo\n\n # The second register, a rotated qubit and a flag qubit.\n ac6 = nxi + 2\n\n # The second preparation QROM output.\n ac8 = bp2\n\n # The equal superposition state and the result of the inequality test.\n ac9 = chi + 1\n\n # The angles for rotations.\n ac10 = k1 * n * beta // 2\n\n # The phase gradient state.\n ac11 = beta\n\n # A control qubit for the spin.\n ac12 = 1\n\n # A T state.\n ac13 = 1\n\n if verbose:\n print(\"[*] Top of routine\")\n print(\" [+] nxi = \", nxi)\n print(\" [+] nLxi = \", nLxi)\n print(\" [+] nL = \", nL)\n print(\" [+] eta = \", eta)\n print(\" [+] cost3 = \", cost3)\n print(\" [+] cost4 = \", cost4)\n print(\" [+] cost = \", cost)\n print(\" [+] iters = \", iters)\n\n ancilla_cost = ac1 + ac2 + ac3 + ac4 + ac5 + ac6 + ac8 + ac9 + ac10 + ac11\\\n + ac12 + ac13\n\n # Sanity checks before returning as int\n assert cost.is_integer()\n assert iters.is_integer()\n assert ancilla_cost.is_integer()\n\n step_cost = int(cost)\n total_cost = int(cost * iters)\n ancilla_cost = int(ancilla_cost)\n\n return step_cost, total_cost, ancilla_cost", "def compute_cost(features, values, theta):\r\n \r\n # your code here\r\n error = (values - features.dot(theta))\r\n cost = error.dot(error) 
\r\n return cost", "def compute_cost(features, values, theta):\r\n m = len(values)\r\n sum_of_square_errors = numpy.square(numpy.dot(features, theta) - values).sum()\r\n cost = sum_of_square_errors / (2*m)\r\n\r\n return cost", "def costFun(self, x):\n\ttmp = x.reshape(self.inp_shape)\n\tc = np.float64(self.calcCost(np.asarray(tmp,dtype=np.float32))) + self.alpha * np.dot(x.T, x)\n\treturn c", "def cost(h, y):\n\tm = y.shape[0]\n\tcost = (-1/m) * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h))\n\treturn cost", "def initial_cost(self):\n return self.algorithm_results[0].initial_cost", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def cost_derivative(self, output_activations, y):\n\t\treturn (output_activations - y)", "def subgradient(self):\n \n UB_full = self.total_cost\n ufull = np.copy(self.u)\n\n # Update core: possible bottleneck\n (a_csr, a_csc) = self.update_core()\n mrows = a_csr.shape[0]\n ncols = a_csr.shape[1]\n u_this = self.u[~self.f_covered]\n # np.einsum is 20% faster than np.sum ...\n UB_fixed = self.fixed_cost\n UB = UB_full - UB_fixed\n cost = self.c[~self.f]\n\n # save nsteps calculations (Lagrangian multipliers and lower bounds)\n u_sequence = np.zeros((mrows, self._subg_nsteps)) \n Lu_sequence = np.zeros(self._subg_nsteps)\n # update u\n x = np.zeros(ncols, dtype=bool)\n niters_max = self._subg_maxiters\n maxfracchange = self._subg_maxfracchange\n maxabschange = self._subg_maxabschange\n\n # initialization\n f_change = _largenumber\n a_change = _largenumber\n niters = 0\n Lu_max0 = 0\n while ((f_change>maxfracchange) or (a_change>maxabschange)) and (niters<niters_max):\n u_this = (1.0+(np.random.rand(mrows)*2.-1)*self._u_perturb)*u_this\n u_sequence[:,0] = u_this\n cost_u = cost - a_csc.dot(u_sequence[:,0]) # Lagrangian cost\n # next lower bound of the Lagrangian subproblem\n Lu_sequence[0] = np.einsum('i->', cost_u[cost_u<0])+np.einsum('i->', u_sequence[:,0]) \n\n for i in np.arange(self._subg_nsteps-1):\n # current solution to the Lagrangian subproblem\n x[0:] = False\n x[cost_u<0] = True\n\n # subgradient; for two boolean arrays, multiplication seems to be the best way \n # (equivalent to logical_and)\n s_u = 1. 
- a_csr.dot(x.astype(int)) \n s_u_norm = np.einsum('i,i',s_u,s_u) # subgradient's norm squared\n\n # Update\n # next Lagrangian multiplier\n u_temp = u_sequence[:,i]+self._stepsize*(UB - Lu_sequence[i])/s_u_norm*s_u \n u_temp[u_temp<0] = 0\n\n u_sequence[:,i+1] = u_temp\n cost_u = cost - a_csc.dot(u_sequence[:,i+1]) # Lagrangian cost\n # next lower bound of the Lagrangian subproblem\n Lu_sequence[i+1] = np.einsum('i->', cost_u[cost_u<0])+np.einsum('i->', u_sequence[:,i+1]) \n \n #print(UB_full, UB, Lu_sequence[i+1])\n # Check the last nadaptive steps and see if the step size needs to be adapted\n if (np.mod(i+1,self._subg_nadaptive)==0):\n Lu_max_adapt = np.amax(Lu_sequence[i+1-self._subg_nadaptive:i+1])\n Lu_min_adapt = np.amin(Lu_sequence[i+1-self._subg_nadaptive:i+1])\n if (Lu_max_adapt <= 0.):\n Lu_max_adapt = _smallnumber\n f_change_adapt = (Lu_max_adapt-Lu_min_adapt)/np.fabs(Lu_max_adapt)\n if f_change_adapt > self._max_adapt:\n self._stepsize = self._stepsize*0.5\n elif (f_change_adapt < self._min_adapt) and (self._stepsize<1.5):\n self._stepsize = self._stepsize*1.5\n # swap the last multiplier with the optimal one\n i_optimal = np.argmax(Lu_sequence[i+1-self._subg_nadaptive:i+1])\n if (i_optimal != (self._subg_nadaptive-1)):\n u_temp = u_sequence[:,i]\n u_sequence[:,i] = u_sequence[:,i+1-self._subg_nadaptive+i_optimal]\n u_sequence[:,i+1-self._subg_nadaptive+i_optimal] = u_temp\n Lu_sequence[i+1-self._subg_nadaptive+i_optimal] = Lu_sequence[i]\n Lu_sequence[i] = Lu_max_adapt\n\n i_optimal = np.argmax(Lu_sequence)\n Lu_max = Lu_sequence[i_optimal]\n u_this = u_sequence[:,i_optimal]\n niters = niters + 1\n a_change = Lu_max - Lu_max0\n f_change = a_change/np.fabs(Lu_max)\n Lu_max0 = Lu_max # Just a copy. Not the reference (It's a number object)\n # save current u_this???\n\n if (niters == niters_max): \n warnings.warn(\"Iteration in subgradient reaches maximum = {0}\".format(niters))\n\n # update multipliers\n self.u[~self.f_covered] = u_this\n\n # return the last nsteps multipliers\n # save nsteps calculations (Lagrangian multipliers and lower bounds)\n u_sequence_full = np.zeros((self.mrows, self._subg_nsteps)) \n Lu_sequence_full = np.zeros(self._subg_nsteps)\n u_sequence_full[self.f_covered,:] = self.u[self.f_covered][:, np.newaxis]\n u_sequence_full[~self.f_covered,:] = u_sequence\n\n Lu_sequence_full = Lu_sequence + self.fixed_cost\n\n return (u_sequence_full, Lu_sequence_full)", "def lrCostFunction(theta, X, y, lambda_):\n if X.ndim == 1:\n X = X.reshape(1, -1)\n\n if y.dtype == bool:\n y = y.astype(int)\n\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost of a particular choice of theta.\n # You should set J to the cost.\n #\n # Hint: The computation of the cost function and gradients can be\n # efficiently vectorized. For example, consider the computation\n #\n # sigmoid(X * theta)\n #\n # Each row of the resulting matrix will contain the value of the\n # prediction for that example. 
You can make use of this to vectorize\n # the cost function and gradient computations.\n #\n\n z = X @ theta\n h = sigmoid(z)\n\n theta_ = np.r_[0, theta[1:]]\n\n J = (-y @ np.log(h) - (1 - y) @ np.log(1 - h)) / m\n J += lambda_ * sum(theta_**2) / (2 * m)\n\n grad = (h - y) @ X / m\n grad += lambda_ * theta_ / m\n\n # =============================================================\n\n return J, grad", "def ComputeCost(Y, W, P, my_lambda):\n l = [np.log(P[i][np.argmax(Y[i])]) for i in range(len(Y))]\n l = -np.mean(l)\n J = l\n for w in W:\n J += my_lambda * (w**2).sum()\n return J, l", "def cost(self, Y, A):\n m = Y.shape[1]\n cost = - (1 / m) * np.sum(\n np.multiply(\n Y, np.log(A)) + np.multiply(\n 1 - Y, np.log(1.0000001 - A)))\n return cost", "def compute_cost(features, values, theta):\n m = len(values)\n sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()\n cost = sum_of_square_errors / (2 * m)\n\n return cost", "def __cost_to_goal(self, goal_state):\n cost = 0\n for i in range(len(goal_state) * len(goal_state[0])):\n if(i != 0):\n pos_goal = self.__get_position(i, goal_state)\n pos_current = self.__get_position(i, self.puzzle)\n cost += self.__manhattan(pos_current[0], pos_current[1], pos_goal[0], pos_goal[1])\n return cost", "def grad_loss_wrt_b(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1=y.reshape((N,1))\n dr = (1+np.exp(1*y1*k1))\n nr = -y1\n c2=0\n c1 = nr/dr\n for i in range(N):\n c2 +=c1[i][0]\n l_b = c2 / N\n #b2 = np.copy(self.b)\n #b1 = np.zeros((10,1))\n #b1[0] = b2\n #for i in range(1,10):\n #b1[i] = b1[i-1] - self.lr*l_b\n\n\n\n return l_b\n\n\n #raise NotImplementedError", "def get_cost_updates(self):\n\n y = self.get_hidden_values()\n z = self.get_reconstructed_input(y)\n\n L = T.sum((self.x-z)**2, axis=1)\n\n cost = T.mean(L)\n\n return cost", "def step(self, closure=None):\n loss = self.optimizer.step(closure)\n self._la_step += 1\n\n if self._la_step >= self._total_la_steps:\n self._la_step = 0\n # Lookahead and cache the current optimizer parameters\n for group in self.optimizer.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n p.data.mul_(self.la_alpha).add_(1.0 - self.la_alpha, param_state['cached_params']) # crucial line\n param_state['cached_params'].copy_(p.data)\n if self.pullback_momentum == \"pullback\":\n internal_momentum = self.optimizer.state[p][\"momentum_buffer\"]\n self.optimizer.state[p][\"momentum_buffer\"] = internal_momentum.mul_(self.la_alpha).add_(\n 1.0 - self.la_alpha, param_state[\"cached_mom\"])\n param_state[\"cached_mom\"] = self.optimizer.state[p][\"momentum_buffer\"]\n elif self.pullback_momentum == \"reset\":\n self.optimizer.state[p][\"momentum_buffer\"] = torch.zeros_like(p.data)\n\n return loss", "def plot_cost(self):\n steps = np.arange(len(self.cost_values))\n plt.plot(steps, self.cost_values, '-o')\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Cost value\")\n plt.title(\"Cost value per step using Gradient Descent\")\n plt.show()", "def reward_function(self):\r\n def R(state, decision, nodes):\r\n return -1.0/1000*nodes['G'].get_preds_value(state)*(decision['G:R_1']+decision['G:L'])\r\n \r\n return R", "def get_expected_cost(self):" ]
[ "0.6927716", "0.6886076", "0.6690253", "0.65850383", "0.6506424", "0.6447081", "0.6429702", "0.63852924", "0.63443947", "0.6304937", "0.626403", "0.62328404", "0.62026113", "0.61965936", "0.618344", "0.6173027", "0.6151762", "0.61476064", "0.6139949", "0.61124724", "0.60989594", "0.60971797", "0.6091175", "0.6090517", "0.6073226", "0.60602736", "0.60602736", "0.60482615", "0.6029008", "0.60255885", "0.6024652", "0.60119414", "0.60103065", "0.60005707", "0.59995407", "0.59988755", "0.5984755", "0.59808695", "0.59696245", "0.59693706", "0.59637034", "0.5957065", "0.5944189", "0.5930167", "0.592984", "0.59170574", "0.5912684", "0.59047204", "0.5904213", "0.59001815", "0.58973473", "0.5895699", "0.58943063", "0.58840746", "0.5883526", "0.5877553", "0.5866521", "0.58637273", "0.5855959", "0.58532876", "0.5852019", "0.5851969", "0.58509505", "0.5848465", "0.58468586", "0.5844711", "0.5838295", "0.58375496", "0.5833877", "0.58280784", "0.5816435", "0.5810965", "0.5810705", "0.5809569", "0.5806003", "0.58014065", "0.5788239", "0.57870543", "0.57853097", "0.5785189", "0.5784252", "0.57784456", "0.577662", "0.5775026", "0.5775017", "0.57731885", "0.57669", "0.57548547", "0.5743188", "0.5741631", "0.5736287", "0.5725896", "0.57203686", "0.5719092", "0.57155687", "0.57092243", "0.5709099", "0.57032317", "0.5703038", "0.56992036", "0.569898" ]
0.0
-1
Save data with games to file
async def save(self, res: dict):
    self.___cache_data(res['games'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, game):\n try:\n with open(self.filename, mode='w+') as file:\n # First char in the file is the next player\n file.write(game.next_player)\n # Then the board as a string of 64 characters\n file.write(str(game.board))\n\n except IOError as err:\n print(f\"Error saving file: {err}\")", "def save(self, p):\n pickle.dump(p, open('save.dat', 'wb'))\n print(\"Game Saved!\")", "def save_game(player, data):\n\n data = {\n \"rooms\": data[\"rooms\"],\n \"maze\": data[\"maze\"],\n }\n\n file_name = f\"./dork/saves/{player}.yml\"\n with open(file_name, \"w\") as save_file:\n yaml.safe_dump(\n data, save_file,\n indent=4, width=80,\n )\n\n return f\"Your game was successfully saved as {player}.yml!\"", "def save_game(partie):\n fichier= open(\"save_game.json\",\"w\")\n json.dump(partie,fichier)\n fichier.close()", "def save(self, dir_name=None):\n root_dir = os.getcwd()\n cur_datetime = str(datetime.datetime.now()).split(\".\")[0] # remove fractional seconds\n if not dir_name:\n dir_name = \"game_\" + cur_datetime + \"/\"\n save_dir = root_dir + \"/saved_games/\" + dir_name + \"_\" + cur_datetime + \"/\"\n\n # Check if the filepath already exists\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # Game\n game_dir = save_dir + \"game/\"\n os.makedirs(game_dir)\n with open(game_dir + \"game.json\", \"w\") as file_handle:\n game_dict = self.to_json_dict()\n json.dump(game_dict, file_handle)\n\n # Player\n player_dir = save_dir + \"player/\"\n os.makedirs(player_dir)\n with open(player_dir + \"player.json\", \"w\") as file_handle:\n player_dict = self.player.to_json_dict()\n json.dump(player_dict, file_handle)\n\n # Items\n items_dir = save_dir + \"items/\"\n os.makedirs(items_dir)\n for i in self.items:\n with open(items_dir + i.get_name() + \"_\" + str(i.get_id()) + \".json\", \"w\") as file_handle:\n item_dict = i.to_json_dict()\n json.dump(item_dict, file_handle)\n\n # Characters\n characters_dir = save_dir + \"characters/\"\n os.makedirs(characters_dir)\n for c in self.characters:\n with open(characters_dir + c.get_name() + \"_\" + str(c.get_id()) + \".json\", \"w\") as file_handle:\n character_dict = c.to_json_dict()\n json.dump(character_dict, file_handle)\n\n # Spaces\n spaces_dir = save_dir + \"spaces/\"\n os.makedirs(spaces_dir)\n for s in self.spaces:\n with open(spaces_dir + s.get_name() + \"_\" + str(s.get_id()) + \".json\", \"w\") as file_handle:\n spaces_dict = s.to_json_dict()\n json.dump(spaces_dict, file_handle)\n\n # Exits\n exits_dir = save_dir + \"exits/\"\n os.makedirs(exits_dir)\n for e in self.exits:\n with open(exits_dir + e.get_name() + \"_\" + str(e.get_id()) + \".json\", \"w\") as file_handle:\n exits_dict = e.to_json_dict()\n json.dump(exits_dict, file_handle)", "def save(cls):\n playerdata = getAttributes(cls)\n Data.object_dump(playerdata, \"savedata.dat\")\n del playerdata", "def save(self) -> None:\n path = os.path.join(os.getcwd(), 'mancalaGame.gg')\n with open(path, 'wb') as handle: pickle.dump({\n \"state\": self.currentNode.gameState,\n \"playerType\": 1 if self.currentNode.playerType is MaxMinPlayer.MAX_PLAYER else 0\n },\n handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"game saved\")", "def save_game(player_location, filename):\n\tsave_data = {\"location\": player_location}\n\twith open(os.path.normpath(\"save_files/\" + filename + \".txt\"), \"w\") as outfile:\n\t\tjson.dump(save_data, outfile)\n\tprint(\"\\nYour cluster and player data has been saved.\")", "def save(self):\n file_name = common.RANK_FILE % (self.week.season.name, 
self.week.num)\n with open(file_name, 'w') as rank_file:\n for team, score in self.score.items():\n rank_file.write('%s,%s\\n' % (team, score))", "def save_game_file(self, game_file_name):\r\n SlTrace.lg(f\"save_game_file {game_file_name}\")\r\n with open(game_file_name, \"w\") as fout:\r\n print(f\"# {game_file_name}\", file=fout)\r\n today = date.today()\r\n d2 = today.strftime(\"%B %d, %Y\")\r\n print(f\"# On: {d2}\\n\", file=fout)\r\n print(f\"from dots_commands import *\", file=fout)\r\n print(f\"\", file=fout)\r\n players = self.get_players()\r\n playing_labels = [player.label for player in players]\r\n playing_str = \",\".join(playing_labels)\r\n print(f\"\"\"set_playing(\"{playing_str}\")\"\"\", file=fout)\r\n max_line = 60\r\n indent_str = \" \"\r\n for player in players:\r\n self.print_set_play(player, max_line=max_line, file=fout,\r\n indent_str=indent_str)\r\n print(f\"start_game()\", file=fout) # Required for any game playing commands\r\n move_type_d = {\r\n PlayMove.MARK_EDGE : \"mark\",\r\n PlayMove.SELECT_EDGE : \"select\",\r\n PlayMove.UNDO_MOVE : \"undo\",\r\n PlayMove.REDO_MOVE : \"redo\",\r\n PlayMove.PLAY_MOVE : \"play_move\",\r\n PlayMove.PLAY_MOVE_TILL : \"play_move_till\",\r\n PlayMove.SET_PLAYING : \"set_playing\",\r\n PlayMove.GAME_CHECK : \"game_check\",\r\n PlayMove.SET_PLAY : \"set_play\"\r\n }\r\n for pm in self.play_moves:\r\n if pm.removed: # Skip removed moves\r\n continue\r\n \r\n if pm.move_type not in move_type_d:\r\n raise SelectError(f\"save_file move type: {pm.move_type} uninplemented\")\r\n gfun = move_type_d[pm.move_type]\r\n hv_str = '\"h\"' if pm.hv == PlayMove.HV_H else '\"v\"'\r\n if pm.move_type == PlayMove.MARK_EDGE:\r\n line_str = f\"{gfun}({hv_str}, {pm.row}, {pm.col})\"\r\n elif pm.move_type == PlayMove.SELECT_EDGE:\r\n line_str = f\"{gfun}({hv_str}, {pm.row}, {pm.col})\"\r\n elif pm.move_type == PlayMove.UNDO_MOVE:\r\n line_str = f\"{gfun}()\"\r\n elif pm.move_type == PlayMove.REDO_MOVE:\r\n line_str = f\"{gfun}()\"\r\n elif pm.move_type == PlayMove.SET_PLAY:\r\n if pm.pre_comment is not None:\r\n print(pm.pre_comment, file=fout)\r\n temp_player = SelectPlayer(self, id=0) # Not real\r\n for field in pm.kwargs:\r\n val = pm.kwargs[field]\r\n setattr(temp_player, field, val)\r\n self.print_set_play(temp_player, file=fout)\r\n if pm.line_comment is not None:\r\n print(pm.line_comment, file=fout)\r\n continue # Done with this move\r\n \r\n elif pm.move_type == PlayMove.SET_PLAYING:\r\n playing_str = \"\" if pm.playing is None else f'\"{pm.playing}\"'\r\n line_str = f\"{gfun}({playing_str})\" # Do we drop this ???\r\n elif pm.move_type == PlayMove.GAME_CHECK:\r\n line_str = f\"{gfun}(\"\r\n if pm.mode is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += f'\"{pm.mode}\"'\r\n if pm.row is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += str(pm.row)\r\n if pm.col is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += str(pm.col)\r\n if pm.is_set is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += f\"is_set={pm.is_set}\"\r\n if pm.show_fail is not None:\r\n if not line_str.endswith(\"(\"):\r\n line_str += \", \"\r\n line_str += f\"show_fail={pm.show_fail}\"\r\n line_str += \")\" \r\n elif pm.move_type == PlayMove.PLAY_MOVE:\r\n line_str = f\"{gfun}()\"\r\n elif pm.move_type == PlayMove.PLAY_MOVE_TILL:\r\n line_str = f\"{gfun}()\"\r\n else:\r\n raise SelectError(f\"save_file move type:\"\r\n f\" 
{pm.move_type} uninplemented\")\r\n pre_comment = pm.pre_comment\r\n if pre_comment is not None:\r\n print(pre_comment, file=fout)\r\n line_comment = pm.line_comment\r\n if line_comment is not None:\r\n line_str += line_comment\r\n print(line_str, file=fout)\r\n \r\n return True", "def save(self, file):\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()", "def save_game(filename, player_x, player_y, player_symbol, inventory, werewolf):\n global width, height, dungeon_map # DO NOT REMOVE\n try:\n with open(filename, 'w') as file_handler:\n # TODO: Write the game data to the file with the given filename.\n # - the first row contain's the dungeon width and height, followed by player data, followed by werewolf data\n # - then write the dungeon map to the file\n # - then write the player's inventory to the map\n pass\n except:\n return False\n return True", "def save_game(partie):\n fichier = open(\"save/save_game.json\",\"w\")\n json.dump(partie, fichier, indent = 4)\n fichier.close()\n print(\"Votre partie a été sauvegardé.\")", "def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)", "def save_game_encours(partie):\n fichier= open(\"contgame.json\",\"w\")\n json.dump(partie,fichier)\n fichier.close()", "def save(self):\n \n fileName=self.characterName+\"_\"+self.race+\"_\"+self.classType+\"_lvl_\"+str(self.level)\n new_file = open(str(fileName)+\".txt\",\"w\")\n new_file.write(\"~~~~~~~~~~~ \"+self.characterName+\" the \"+self.race+\" \"+self.classType+\" ~~~~~~~~~~~\\n\\n\")\n new_file.write(\"Level: \"+str(self.level)+\" HP: \"+str(self.hp)+\" XP: \"+str(self.xp)+\" Hit Dice: \"+str(self.level)+str(self.hit_dice[self.classType])+\"\\n\")\n new_file.write(str(self.abilityScores()))\n new_file.write(\"\\n\\n~~~~~~~~~ Skills ~~~~~~~~~\\n\")\n for i in self.skills:\n new_file.write(\"\\n\"+i+\" \"+\"(\"+skills[i.lower()].upper()+\")\")\n new_file.write(\"\\n\\n~~~~~~~~~ Traits ~~~~~~~~~\\n\")\n for i in self.traits:\n new_file.write(\"\\n ~~\"+i+\"~~\\n \"+str(self.traits[i])+\"\\n\")\n new_file.write(\"\\n\\n~~~~~~~~~ Specialty: \"+self.specialty+\" ~~~~~~~~\\n\")\n new_file.write(\"\\n \"+self.specialtyStory+\"\\n\")\n new_file.write(\"\\n ~~~~ Feats ~~~~\\n\")\n for i in range(1,self.level+1):\n if i == 1 or i%3 == 0:\n new_file.write(\"\\n Level \"+str(i)+\": \"+self.feats[i]['name']+' '\\\n \"(\"+self.feats[i]['type']+\")\\n\"\\\n ' \"'+self.feats[i]['description']+'\"\\n\\n')\n if 'prereq' in 
self.feats[i]:\n new_file.write(\" Prerequisite: \"+self.feats[i]['prereq']+\"\\n\")\n if 'benefit' in self.feats[i]:\n new_file.write(\" Benefit: \"+self.feats[i]['benefit']+\"\\n\")\n if 'effect' in self.feats[i]:\n new_file(\" Effect: \"+self.feats[i]['effect']+\"\\n\")\n \n new_file.write(\"\\n\\n~~~~~~~~~ Background: \"+self.background+\" ~~~~~~~~\\n\")\n if self.backgroundProfession == '':\n pass\n else:\n new_file.write(\"Profession: \"+self.backgroundProfession)\n new_file.write(\"\\n \"+self.backgroundStory)\n \n new_file.close()\n print \"File \"+str(fileName)+\".txt saved.\"", "def save(self):\n\n # TODO:Find place to save data, write logic to save images(Filter out video?)", "def save(self):\n os.rename(self.scores_filename, '%s-%s' % (self.scores_filename, str(time.time())))\n scores_file = codecs.open(self.scores_filename, 'w', encoding='utf-8')\n for each_chan in self.scores_dict:\n for each_nick in self.scores_dict[each_chan]:\n line = '{0},{1},{2},{3}\\n'.format(each_chan, each_nick, self.scores_dict[each_chan][each_nick][0], self.scores_dict[each_chan][each_nick][1])\n scores_file.write(uc.decode(line))\n scores_file.close()", "def save_active_games(self, filename):\n with open(filename, \"w\") as json_file:\n json.dump(self.active_games, json_file)", "def saveGameToCache(self, theKey, theGame):\n if theGame == None:\n return\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n try:\n theGameFile.createNewFile()\n pw.print_(theGame.serializeToJSON())\n pw.flush()\n pw.close()\n gOut.close()\n fOut.close()\n except Exception as e:\n e.printStackTrace()", "def savePlayerInfo(self):\n if self.__filename == \"\":\n self.__setPlayerFilename()\n try:\n #f = open(self.__filename, \"w\")\n pickle.dump(self, open(self.__filename, \"w\"))\n return True\n #f.close()\n except IOError:\n raise PlayerIOError(\"Unable to write player info to file.\")", "def save():", "def save(self, output, data):", "def saveGame(self) -> None:\n self.state[\"phase\"] = self._phase\n\n state_as_string = json.dumps(self.state)\n with open(self.save_location, \"w\") as File:\n File.write(state_as_string)", "def save_to_file(self, name, data):\n if os.path.isdir(\"saved_data\"):\n with open(f'saved_data/{name}.txt', 'wb') as file:\n pickle.dump(data, file)\n else:\n os.mkdir(\"saved_data\")\n self.save_to_file(name, data)", "def saveTS(tournament, fileName):\n fd = open(fileName)\n pickle.dump(tournament, fd)\n TournamentSystem._logger.debug(\"Dumped game state to %s\", fileName)", "def test_005_write_file(self):\n __test = chess_storage.ChessStorage()\n __test_data = list(range(consts.TEST_LIST_LENGHT))\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __dir_game_saves = os.path.join(__dir_game_saves, consts.TEST_FILENAME)\n # pylint: disable = protected-access\n __save_test = __test._ChessStorage__write_file(__dir_game_saves, __test_data)\n # pylint: enable = protected-access\n self.assertEqual(__save_test, consts.ERROR_CODES[\"SUCCESSFULL\"])", "def save_data_file(self):\n with open(self.files['data'], 'w') as outfile:\n outfile.write(self.to_json())\n outfile.close()", "def save(self, save_dir):\n ProfileManager.save_data_to_disk(self.player_data, path.join(save_dir, self.player_name + '.yaml'))", "def save(self, data):\n self.write(data)", "def save_game(handler: input_handlers.BaseEventHandler, filename: str)->None:\n if isinstance(handler, input_handlers.BaseEventHandler):\n handler.engine.save_as(filename)\n print(\"Game 
saved.\")", "def save_game(self, path):\n try:\n file = open(path, \"wb\")\n for i in self.state_stack.states:\n i.on_save()\n pic.dump(self.state_stack, file)\n for i in self.state_stack.states:\n i.on_load()\n except IOError or pic.PicklingError as e:\n print(\"Game save error: {}\".format(e))", "def save(self, split: dataset_split.DatasetSplit, directory: str, save_entire_dataset: bool = True):\n directory: str = os.path.join(directory, str(split))\n if not os.path.exists(directory):\n os.mkdir(directory)\n with util.get_progressbar('Saving dataset', len(self._train_games)) as pbar:\n for game_idx, (game_id, game) in enumerate(self._train_games.items()):\n with open(os.path.join(directory, game_id + '.pkl'), 'wb') as ofile:\n pickle.dump(game, ofile)\n pbar.update(game_idx)\n with open(os.path.join(directory, 'args.pkl'), 'wb') as ofile:\n pickle.dump(self._args, ofile)\n if save_entire_dataset:\n with open(os.path.join(directory, 'dataset.pkl'), 'wb') as ofile:\n pickle.dump(self, ofile)", "def save_battle(battle, battle_name):\n path = './data_reader/data/battles/' + battle_name\n\n with open(path, 'wb') as outfile:\n pickle.dump(battle, outfile, -1)", "def save_game(handler: input_handlers.BaseEventHandler, filename: str) -> None:\n if isinstance(handler, input_handlers.EventHandler):\n handler.engine.save_as(filename)\n print(\"Game saved.\")", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def save_to_file(self, data):\n\t\tif self.data_file.write(data):\n\t\t\tprint(\"Data successfully added to file\")\n\t\telse:\n\t\t\tPrint(\"Problem occured during adding to file\")", "def save_games(season, logging_level=logging.INFO):\n logger.info('Starting the download of games...')\n\n if season.season == get_current_season():\n current_game_events_ids = season.get_current_game_events_ids()\n game_ids_list = list(current_game_events_ids.values())\n else:\n game_ids_list = season.get_game_ids()\n\n\n n_checkpoints = 4\n checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]\n for i in range(len(game_ids_list)):\n game_id = int(game_ids_list[i]) % 1000\n url2 = BASE_URL + \"/fichas/LACB{}.php\".format(game_ids_list[i])\n filename = os.path.join(season.GAMES_PATH, str(game_id)+\"-\" +str(game_ids_list[i]) + '.html')\n\n open_or_download(file_path=filename, url=url2)\n if i in checkpoints:\n logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))\n\n logger.info('Download finished! 
(new {} games in {})\\n'.format(len(game_ids_list), season.GAMES_PATH))", "def write_data():", "def save(self):\n memento = self.create_memento()\n import datetime\n f = open(str(datetime.datetime.now()).replace(' ','_')+'.saved_story','w')\n cPickle.dump(memento,f)\n f.close()\n zcanvas.message(\"Saved!\")", "def dump(self):\n if self.data_path.exists():\n raise ValueError(f'Invalid path - it must not exist: {self.data_path}')\n self.data_path.parent.mkdir(parents=True, exist_ok=True)\n\n import json\n\n data = {path.name: is_played for movie, path, is_played in self.iter_movies()}\n log.info(f'{self.lp.save} played data for {len(data)} movies to {self.data_path.as_posix()}')\n if not self.dry_run:\n with self.data_path.open('w') as f:\n json.dump(data, f, indent=4, sort_keys=True)", "def dump_data_file(game, data, file_basename):\n\n ts = time.strftime(\"%Y%m%d-%s\", time.gmtime())\n\n if os.path.exists(STATS_DIR):\n stats_dir = STATS_DIR\n else:\n stats_dir = \".\"\n\n np.set_printoptions(precision=1, linewidth=240, suppress=True, threshold=np.inf)\n\n data_str = np.array2string(data.astype(np.int64), separator=\",\")\n\n with open(stats_dir + '/' + file_basename + \"-\" + ts + \"-bot-\" + str(game.me.id) + \".log\", \"w\") as f:\n f.write(data_str)", "def save_map(self, filename):\n with open(filename, 'wb') as file:\n pickle.dump(self.current_obstacles, file)\n pickle.dump(self.current_goal, file)\n pickle.dump(getstate(), file)", "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def SaveListInFile(games):\n\twhile True:\n\t\tfileName = input('What file should we write to, Professor? ')\n\t\ttry:\n\t\t\twith open(fileName, 'a') as file:\n\t\t\t\tfor game in games:\n\t\t\t\t\tfor move in game:\n\t\t\t\t\t\tfile.write(move + '\\n')\n\t\t\tbreak\n\t\texcept OSError as error:\n\t\t\tprint('{}, try again.'.format(error))\n\treturn", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def save(data, filename):\r\n with open(filename, 'wb') as fp:\r\n pickle.dump(data, fp)", "def save(self):\n # TODO: save the file", "def save(self):\n data = \"\"\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n data += self.blocks[y][x]\n data += '\\n'\n print data\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Save level'}\n # filename = tkFileDialog.asksaveasfile(**options)\n filename = asksaveasfilename(**options)\n if filename:\n with open(filename, \"w\") as level:\n level.write(data)", "def save_data(data, file_name):\r\n file = open(file_name, \"w\")\r\n file.write(data + \"\\n\")\r\n file.close()", "def save(self, filename):\n pass", "def save(self, output, data):\n pass", "def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)", "def dump_stats(game, data, key = \"all\"):\n if key == \"all\":\n keys = data.keys()\n else:\n keys = [key]\n\n ts = time.strftime(\"%Y%m%d-%s\", time.gmtime())\n\n if os.path.exists(STATS_DIR):\n stats_dir = STATS_DIR\n else:\n stats_dir = \".\"\n\n for k in keys:\n with open(stats_dir + '/' + k + \"-\" + ts + \"-bot-\" + str(game.me.id) + \".log\", \"w\") as f:\n for line in data[k]:\n f.write(str(line) + \"\\n\")", "def save(self, 
path_to_save):\n for item in self.data_array:\n item.save(path_to_save+item.file_name)", "def write_data_to_file(pos, fps, data_file):\n xs = []\n for x,y in pos:\n xs.append(x)\n with open(data_file,'wb') as f:\n np.save(f,pos)\n np.save(f,xs)\n np.save(f,fps)", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def write_savefile(state: PhysicsState, file: Path):\n if file.suffix.lower() != '.json':\n # Ensure a .json suffix.\n file = file.parent / (file.name + '.json')\n log.info(f'Saving to savefile {file.resolve()}')\n\n savefile_json_dict = google.protobuf.json_format.MessageToDict(\n state.as_proto(),\n including_default_value_fields=False,\n preserving_proto_field_name=True,\n use_integers_for_enums=False,\n )\n\n for i, component in enumerate(savefile_json_dict['engineering']['components']):\n component['name'] = strings.COMPONENT_NAMES[i]\n\n with open(file, 'w') as outfile:\n json.dump(savefile_json_dict, outfile, indent=2)\n\n return file", "def save(self):\r\n # os.mkdirs(DATADIR, exist_ok=True)\r\n savefile = os.path.join(wg.DATADIR, str(self.guild.id) + \".json\")\r\n\r\n savedata = {\r\n 'userchars': {id:self.usercharacters[id].to_data() for id in self.usercharacters},\r\n 'guildid': self.guild.id,\r\n 'last_known_name': self.guild.name,\r\n }\r\n\r\n with tempfile.NamedTemporaryFile(mode=\"w\", dir=wg.DATADIR) as outf:\r\n json.dump(savedata, outf, indent=1)\r\n if os.path.exists(savefile):\r\n os.unlink(savefile)\r\n os.link(outf.name, savefile)\r\n\r\n wg.log.info(f'Guild {debug_id(guild=self.guild)} saved. 
'\r\n f'{len(self.usercharacters)} user chars and {len(self.npcs)} npcs.')\r\n\r\n pass", "def save(self, data, outpath):\n with open(path, \"wt\") as open_file:\n json.dump(data, open_file, indent=4)", "def save(self):\n\n # make a clone to preserve the original in case it's still needed\n clone = {}\n\n for machine in self.activity.keys():\n data = self.activity[machine].copy()\n data[\"filtered activity\"] = np.array(data[\"filtered activity\"], dtype=np.float)\n data[\"raw activity\"] = np.array(data[\"raw activity\"], dtype=np.float)\n data[\"time\"] = np.array(data[\"time\"], dtype=np.float)\n clone[machine] = data\n\n out = open(self.filename, \"wb\")\n pickle.dump(clone, out)\n out.close()", "def saveData(data, file, path='./data/'):\n\twith open(\"{}{}.yml\".format(path, file), 'w') as out:\n\t\tyaml.dump(data, out)", "def save(self, projectData, filename):\n f = open(filename, 'wb')\n pickle.dump(projectData, f, protocol=1)\n f.close()", "def save_data(self):\n pass", "def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')", "def save_data(data, filename=None):\n filename = filename or OUTPUT_FILE\n common.open_and_write_file(filename, data, as_json=True)\n print(\"Saved file: {}.\".format(filename))", "def save_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.saveEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def export_data(self):\n folder = os.path.dirname(self.filename[0])\n filename_ext = os.path.basename(self.filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()", "def saveData(self):\n pass", "def save_to_file(self, filename):\n outMap = {}\n for kanal in self._datastoreMap:\n # svaki pojedini Datastore se zna zapakirati u mapu (dictionary)\n outMap[kanal] = self._datastoreMap[kanal].store2dict()\n # serialize u binary string\n binstr = pickle.dumps(outMap)\n # zapis u file\n with open(filename, 'wb') as f:\n f.write(binstr)", "def save_data(app):\n\n prepared_data = (\n app.block_chain.to_list(),\n app.open_txs.to_list(),\n app.network.to_list(),\n app.wallet.to_dict()\n )\n\n try:\n with open(\n file=r'./app/data/app-{}.dat'.format(app.port),\n mode='w',\n encoding='utf-8'\n ) as f:\n for data in prepared_data:\n f.write(json.dumps(data))\n f.write('\\n')\n \n ConsoleLogger.write_log(\n 'info',\n __name__,\n 'save_data',\n 'Data saving is done successfully.'\n )\n\n return True\n except IOError:\n ConsoleLogger.write_log(\n 'error',\n __name__,\n 'save_data',\n 'Data saving is failed.'\n )\n\n return False", "def save_game(univers, fname=None):\n # TODO use try/except\n if not fname:\n fname = 'savegame'\n with shelve.open(fname, 'n') as savefile:\n savefile['univers'] = univers\n savefile.close()", "def save(fname, data):\r\n with open(fname, 'wb') as f:\r\n pickle.dump(data, f)", "def write_data(self, file_path, success_cutoff):\n agg_df = pd.DataFrame(columns=tf.Move)\n for game in self.game_list:\n agg_df = agg_df.add(game, fill_value = 0)\n 
agg_df.to_csv(file_path)\n pass", "def writing_get_game(file_name, title):\n result = str(reports.get_game(file_name, title))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)", "def save(self):\n # Sanity checks\n assert len(self.actions) == len(self.rewards)\n assert len(self.actions) == len(self.episode_starts)\n assert len(self.actions) == len(self.images_path)\n assert len(self.actions) == len(self.ground_truth_states)\n assert len(self.target_positions) == self.episode_idx + 1\n\n data = {\n 'rewards': np.array(self.rewards),\n 'actions': np.array(self.actions),\n 'episode_starts': np.array(self.episode_starts)\n }\n\n ground_truth = {\n 'target_positions': np.array(self.target_positions),\n 'ground_truth_states': np.array(self.ground_truth_states),\n 'images_path': np.array(self.images_path)\n }\n print(\"Saving preprocessed data...\")\n np.savez('{}/preprocessed_data.npz'.format(self.data_folder), **data)\n np.savez('{}/ground_truth.npz'.format(self.data_folder), **ground_truth)", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()", "def saveOnFile(self, path, data):\n with open(path, \"w\") as f:\n f.write(data)", "def _save(self, data: np.ndarray) -> None:\n ...", "def save_ships_to_file(json_data):\n filtered_data = list(filter(lambda a: a['cost_in_credits'] != 'unknown', json_data))\n sorted_by_cost = sorted(filtered_data, key=lambda a: float(a['cost_in_credits']), reverse=True)\n sentences = [ship['name'] + ' kosztuje ' + ship['cost_in_credits'] + ' credits' for ship in sorted_by_cost]\n for sentence in sentences:\n with open('sorted_ships.txt', 'a') as file:\n file.write(sentence + \"\\n\")", "def _save(self):\n # TODO: Use local.punny dump (when written)\n with open(filename, 'w') as f:\n pickle = Pickler(f)\n pickle.dump(self.pungen.puns)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def save(item,name):\n\n file = open(name,'wb')\n dump(item,file)\n file.close()", "def save_predictions(battle_name: str, data: str, predictions: List):\n path = './data_reader/data/predictions/' + data + '.' 
+ battle_name\n with open(path, 'w') as outfile:\n for prediction in predictions:\n outfile.write(str(prediction) + '\\n')", "def _save_data(data, file):\n with jsonlines.open(file, mode='w') as writer:\n for conversation in data:\n writer.write(conversation.to_json())", "def save(self):\n print(\"Clicked S(ave)\")\n saved_tiles = []\n for tile in self.tiles.sprites():\n # Append tiles pos to correct list if tile is occupied\n if not tile.is_available:\n tiles_attr = {\"type\": tile.tile_type, \"pos\": tile.rect.topleft}\n saved_tiles.append(tiles_attr)\n save_tiles(saved_tiles, lvl=\"02\")\n print(saved_tiles)\n # Flash white screen when level is saved\n self.surface.fill(s.WHITE)\n pygame.display.flip()\n pygame.time.wait(100)\n print(\"Saved\")", "def save(self, data):\n try:\n with open(self.__filepath, 'w') as file:\n text = jsonpickle.encode(data)\n file.write(text)\n except IOError as e:\n print(e)", "def save_data(data, filename='data.txt'):\n with open(filename, 'w', encoding=\"utf-8\") as file:\n for item in data:\n print(item, file=file)", "def make_data(data,save_as):\n #Log(self.__name).info(\"Making Data: %s\",save_as)\n dkeys=config.MOVIE_DB_KEYS\n if data:\n try:\n f=codecs.open(save_as, \"w+\", \"utf-8\")\n for i in dkeys:\n if data.has_key(i):\n f.write(i+\": \"+ data[i]+\"\\n\")\n f.write(\"\\n\")\n f.write(u\"SoftwareName: \"+unicode(config.APP_NAME)+u\"\\n\")\n f.write(u\"SoftwareVersion: \"+unicode(config.APP_VERSION)+u\"\\n\")\n f.write(u\"SoftwareDeveloper: \"+unicode(config.APP_DEVELOPER)+u\"\\n\")\n f.write(u\"SoftwareHomepage: \"+unicode(config.APP_WEB)+u\"\\n\")\n f.write(u\"CompanyHomepage: \"+unicode(\"http://www.cliodin.com\")+u\"\\n\")\n f.write(u\"FacebookHomepage: \"+unicode(\"https://www.facebook.com/cliodin\")+u\"\\n\") \n f.close()\n return True\n except Exception:return False\n return False", "def test_save_to_file(self):\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r1, r2])\n\n with open(\"Rectangle.json\", \"r\") as file:\n txt = file.read()", "def savefile(filename, data):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n output = dumps(data, ensure_ascii=False, sort_keys=True, indent=2)\n file.write(output)", "def save_memory(self, filename):\n \n\n with open(filename + '/obses.npy', 'wb') as f:\n np.save(f, self.obses)\n \n with open(filename + '/actions.npy', 'wb') as f:\n np.save(f, self.actions)\n\n with open(filename + '/next_obses.npy', 'wb') as f:\n np.save(f, self.next_obses)\n \n with open(filename + '/rewards.npy', 'wb') as f:\n np.save(f, self.rewards)\n \n with open(filename + '/not_dones.npy', 'wb') as f:\n np.save(f, self.not_dones)\n \n with open(filename + '/not_dones_no_max.npy', 'wb') as f:\n np.save(f, self.not_dones_no_max)\n\n with open(filename + '/index.txt', 'w') as f:\n f.write(\"{}\".format(self.idx))\n\n print(\"save buffer to {}\".format(filename))", "def save_scores(self):\n\n with open('.scores.pickle', 'wb') as f:\n pickle.dump(self.scores, f)", "def save():\n pass", "def save_data(self, filename):\n with open(settings.DIR_PATH + '/' + filename, 'w', encoding='utf-8') as f:\n json.dump(self.data, f, indent=4)", "def store(self, filename):", "def save(self):\n #test output\n pywikibot.output('PICKLING %s records at %s' % (len(self.historyDict),datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n with open(self.datfilename, 'wb') as f:\n pickle.dump(self.historyDict, f, protocol=config.pickle_protocol)", "def saveWorld(self, filename):\n worldFile = open( filename, 'w' 
);\n worldFile.write( \"WIDTH {0}\\nHEIGHT {1}\\n\".format( self.mWidth, self.mHeight ) );\n\n for space in self.mSpaces:\n if isinstance(space, Circle):\n worldFile.write( \"SPACE circle {0} {1} {2}\\n\".format( space.X, space.Y, space.Radius ) );\n elif isinstance(space, Rect):\n worldFile.write( \"SPACE rect {0} {1} {2} {3}\\n\".format( space.X,space.Y,space.Width,space.Height ) );\n\n for obst in self.mObstMgr.mObstacles:\n if isinstance(obst,Circle):\n worldFile.write( \"OBSTACLE circle {0} {1} {2}\\n\".format( obst.X, obst.Y, obst.Radius ) );\n elif isinstance(obst,Rect):\n worldFile.write( \"OBSTACLE rect {0} {1} {2} {3}\\n\".format( obst.X,obst.Y,obst.Width,obst.Height ) );\n worldFile.close();\n pass;", "def save(self):\n fname = self.dir_saving+str(self.folder)+'/colours.txt'\n if not os.path.isfile(fname):\n self.file_com = open(fname, 'w')\n else:\n print 'warning this person has an objects file in its dir, I will rewrite it.'\n self.file_com = open(fname, 'w')\n\n self.file_com.write(self.all_objects['upper']+','+self.all_objects['lower'])\n # self.all_objects = {}\n self.first_click = 1\n self.file_com.close()\n self.NextVideo()\n # count = 1\n # for im_name in self.onlyfiles:\n # img = cv2.imread(self.dir2+im_name)\n # cv2.rectangle(img,(0,0),(250,50),(255,255,255),-1)\n # cv2.putText(img,'frame : '+str(count),(10,30), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,0),2)\n # img = self.add_objects(img)\n # cv2.imwrite(self.dir_saving+str(self.folder)+'/obj_images/'+im_name,img)\n # count+=1\n self.clear" ]
[ "0.76108927", "0.7576153", "0.73735857", "0.73119766", "0.71675503", "0.7154071", "0.7152303", "0.71430117", "0.7106383", "0.7105386", "0.7067481", "0.699197", "0.6965784", "0.69141096", "0.6870099", "0.68450946", "0.67772365", "0.6770521", "0.6744814", "0.6721609", "0.67079896", "0.66946095", "0.66597843", "0.663747", "0.6626393", "0.66134363", "0.6587089", "0.65810454", "0.6565009", "0.65554816", "0.65419024", "0.65381", "0.6529081", "0.6503306", "0.6485702", "0.648033", "0.6479217", "0.6450025", "0.64455324", "0.6443449", "0.64433074", "0.64340603", "0.64106226", "0.63965434", "0.63894284", "0.63877493", "0.63820237", "0.63807994", "0.63794684", "0.6370213", "0.63605875", "0.6348694", "0.63486266", "0.63351417", "0.63306105", "0.63292515", "0.63290626", "0.6325957", "0.6321957", "0.6314885", "0.6311562", "0.6299839", "0.6292817", "0.62890977", "0.62860346", "0.6282104", "0.62803906", "0.6267854", "0.6259282", "0.623461", "0.6233477", "0.6230346", "0.62283415", "0.6228132", "0.622771", "0.6227532", "0.6224663", "0.6222314", "0.6217714", "0.6206871", "0.62033033", "0.62015694", "0.61998487", "0.6198085", "0.6182995", "0.61804885", "0.6179376", "0.6174231", "0.61720574", "0.6164299", "0.6159362", "0.6153908", "0.6150039", "0.6137229", "0.61324733", "0.6131513", "0.6130414", "0.61284804", "0.6120126", "0.6117175" ]
0.63036275
61
Get data from cached file and filter it
def __get_data(self, filters): if not os.path.exists(CACHE_FILE): raise DataNotScrappedError() df = pd.read_csv(CACHE_FILE) if not filters: return list(df.T.to_dict().values()) filtered_df = df[df['name'] == filters][['category', 'name']] return list(filtered_df.T.to_dict().values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _retrieveCachedData(self):", "def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)", "def getData(self, local_cache):", "def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def use_cached_files(self, cache_key):\r\n pass", "def files():\n return get_cached(\"files.json\")", "def __read_cache(self, fileName):\n if self.__log:\n self.__logger.info(f\"Cache hit - {fileName}\")\n # Cache hit\n with open(fileName, \"rb\") as f:\n content = self.__handle_decompression(f.read())\n variables = pickle.loads(content)\n\n # Move node to front\n node = os.path.relpath(fileName, \"cache\")\n self.__shift_node(node)\n\n return variables", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def _read_cache_file(self) -> bytes:\n with open(self.cache_file, 'rb') as file:\n return file.read()", "def load_cache():\n return {}", "def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data", "def notify_cache(filename: str) -> Dict:\n\n cache = {}\n\n try:\n with open(filename) as f:\n for line in f:\n if line:\n cache[line.strip()] = True\n except FileNotFoundError:\n warning(\n \"Cache file {} not found, will be created if necessary\".format(filename)\n )\n\n return cache", "def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)", "def list_cached():\n for json_name in cached_files():\n source_name = get_source_file_name(json_name)\n yield (json_name, source_name)", "def get_json_from_cache(file_name):\n result = None\n path = clean_path(file_name)\n cached_file_name = get_cached_file_name(path)\n if os.path.exists(cached_file_name):\n time = os.path.getmtime(path)\n cached_time = os.path.getmtime(cached_file_name)\n if cached_time > time:\n try:\n source = open(cached_file_name, \"r\")\n try:\n result = json.load(source)\n except ValueError:\n pass\n source.close()\n except OSError:\n # Includes IOError\n pass\n return result", "def reload_cache(self):\n self.data = self.read_data_cache()", "def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' 
% m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j", "def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(_(\"Reloading cached file %s\") % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']", "def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(\"Reloading cached file %s\" % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']", "def load_cache(self, filename):\n output_df = cudf.read_hdf(filename, key=self.uid)\n return output_df", "def get_output_from_cache(name, filename):\n cache_filename = _get_cache_filename(name, filename)\n if (os.path.exists(cache_filename) and\n os.path.getmtime(filename) < os.path.getmtime(cache_filename)):\n with io.open(cache_filename) as f:\n return f.read()\n\n return None", "def get_cache(feed_key):\n\n cache_file = CACHE_DIR / (feed_key + \".json\")\n\n with open(cache_file, \"r\") as file:\n entries = json.load(file)\n\n return entries", "def read_cached_file(self, path):\n if self.config.get('do_caching', False):\n ext = path.split('.')[-1]\n\n if ext == 'cache':\n with open(path, 'r') as fd:\n try:\n return fd.read()\n except UnicodeDecodeError as e:\n self.logger.warning(str(e))\n else:\n raise Exception('\"{}\" is a invalid cache file.'.format(path))", "def read_filter_cache_scratch(cache_dir):\n # Load up the cache file with the most keys (precomputed filter matrices).\n cache = {}\n cache_files = glob.glob(cache_dir + '/*.filter_cache')\n # loop through cache files, load them.\n # If there are new keys, add them to internal cache.\n # If not, delete the reference matrices from memory.\n for cache_file in cache_files:\n cfile = open(cache_file, 'rb')\n cache_t = pickle.load(cfile)\n for key in cache_t:\n if key not in cache:\n cache[key] = cache_t[key]\n return cache", "def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return", "def cache(self):\n if self._cache is None:\n with open(self.cache_path, 'r') as cache_file:\n self._cache = json.load(cache_file)\n return self._cache", "def test_cache_retrieved(self):\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n data = read.data.decode()\n self.assertIn(\n '<span class=\"expan\">et </span>', data,\n \"Text content should be transformed\"\n )\n self.assertIn(\n 'Facsimilaire', data,\n \"Other content should be added\"\n )\n\n cached = self.cache.get(\"urn:cts:froLit:jns915.jns1856.ciham-fro1:1\").decode()\n self.assertIn('<aside class=\"text-left\">', cached, \"Assert cache is made\")\n\n with mock.patch(\"nemo_xslttwo_plugin.shell\") as shell:\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n cached_response = read.data.decode()\n self.assertEqual(\n cached_response, data,\n \"Text content should the same in cache\"\n )\n self.assertEqual(\n shell.call_count, 0,\n \"Shell should not be called because we use cache\"\n )", "def cache_body(self):\n with open(self.path, \"rb\") as fh:\n fh.seek(fh.tell(), os.SEEK_END)\n fh.seek(max(0, 
fh.tell()-LEN_CACHE_BYTES), os.SEEK_SET)\n return fh.read(LEN_CACHE_BYTES).decode('utf-8') #.split(\"\\n\")", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def get_from_cache(self, subject_id):\n with self.cache.open_for_read(subject_id) as cache_file:\n chunks = utils.chunkiter(cache_file)\n for chunk in chunks:\n yield chunk", "def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)", "def load_cache(base_url, path=\"logs/\"):\n\n # Convert URL to filename and read contents\n url_filename = url_to_filename(base_url)\n\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"r\")\n data_cache = f.read()\n\n data_cache = \" \".join(data_cache.split()) # Remove all whitespaces\n\n return data_cache", "def cache(self):\n import hxl.filters\n return hxl.filters.CacheFilter(self)", "def get_data(self, path):\n\n if path == self.original_path:\n cache = self._2to3_cache_path(path)\n data = self._load_cached_2to3(path, cache)\n if data is None:\n output, encoding = self._refactor_2to3(path)\n data = bytearray(output, encoding or sys.getdefaultencoding())\n self.set_data(cache, data)\n return data\n\n else:\n return super().get_data(path)", "def get_cache_file_data(year: int, day: int, session: str) -> str:\n server_action = importlib.import_module(\".server_action\")\n server_action.download_input(year, day, session)\n cache_file = _join_path(year, day, session, file_type=\"input_file\")\n with open(cache_file) as opened_file:\n input_data = opened_file.read()\n return input_data", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def get_content_from_cache(self):\n\n rss_feed = []\n news_to_show = 0\n\n try:\n self.print_if_verbose(\n f\"Method 'get_content_from_cache' is working: \\n\"\n f\"Trying to get content from cache...\"\n )\n os.chdir(\"cache\")\n except Exception as error:\n print(f\"{error}: cache does not exists!\")\n return\n\n try:\n os.chdir(\"image_cache\")\n self.full_path_to_image_cache = os.getcwd()\n os.chdir(\"..\")\n except:\n pass\n\n try:\n with open(\"rss_reader_cache.json\", \"r\", encoding=\"utf-8\") as cache_file:\n data_from_cache = json.load(cache_file)\n self.print_if_verbose(f\"Content from cache has been received successfully. \\n\")\n except Exception as error:\n self.print_if_verbose(f\"{error}: cache file does not exist! 
\\n\")\n return\n\n if self.source:\n for feed in data_from_cache:\n if self.source in feed.keys():\n for news in feed[self.source]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n else:\n for channel in data_from_cache:\n for feed_link in channel:\n for news in channel[feed_link]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n\n os.chdir(\"..\")\n\n self.news_amount = len(rss_feed)\n\n if self.news_amount == 0:\n print(f\"There is no news in cache for specified date. \\n\")\n else:\n self.print_if_verbose(f\"There is {self.news_amount} news in cache for specified date. \\n\")\n\n self.print_if_verbose(f\"Method 'get_content_from_cache' is finished. \\n\")\n\n return rss_feed", "def DataFromFileCache(self,FilePath):\n # dont want to throw an error if the high res doesnt have a separation\n return BinaryHDF5Io.ReadWaveIntoWaveGroup(FilePath,ErrorOnNoSep=False)", "def read_cache(cc):\n \n out_file = os.path.join(cc.scene_dir, 'output', cc.scene_id+'_pickle')\n if cc.atmo_src == 'narr':\n out_file += '_narr'\n elif cc.atmo_src == 'merra':\n out_file += '_merra'\n \n if not os.path.isfile(out_file):\n raise OSError('pickle_file is not in expected location %s' % out_file) \n\n with open(out_file, 'rb') as f:\n x = pickle.load(f)\n return x", "def load(cache_file: Path, *, mode: str = None, unsafe: bool = False):\n if mode == 'binary':\n return cache_file.read_bytes()\n\n content = cache_file.read_text()\n if mode == 'json':\n content = json.loads(content)\n\n return content", "def get(self, path):\n\t\treturn self.cache.get(path)", "def get_from_cache(cls, file_name):\n random.shuffle(cls.CACHE_BACKENDS)\n fname = None\n for cb in cls.CACHE_BACKENDS:\n if not cb.health_check():\n continue\n fname = cb.get_from_cache(file_name)\n if fname:\n break\n return fname", "def cache_data(name, data):\n cache_path = get_cachefile('%s.cache' % name)\n with open(cache_path, 'wb') as f:\n pickle.dump(data, f)", "def cache(file_name, load_func, *func_args, **func_kwargs):\n if path.exists(file_name):\n with open(file_name, 'rb') as f:\n return pickle.load(f)\n else:\n data = load_func(*func_args, **func_kwargs)\n with open(file_name, 'wb') as f:\n pickle.dump(data, f)\n return data", "def read_cache():\n try:\n cache_file = open(CACHE_FILENAME, 'r', encoding=\"utf-8\")\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n return cache_dict\n except:\n cache_dict = {}\n return cache_dict", "def load_csv_cached(filename='../apps/naive_c_stats.csv', cache={}):\n if filename in cache:\n return cache[filename]\n if not os.path.exists(filename):\n ans = None\n else:\n ans = numpy.recfromcsv(filename)\n cache[filename] = ans\n return ans", "def retrieve_cached_records(self):\r\n return u.load_cached_data(self.records_cache_path)", "def json_from_cache(file_name: str) -> Optional[Dict]:\n\n json_path = os.path.join(CACHE_DIR, file_name)\n\n try:\n with open(json_path, \"r\") as cache_file:\n return json.load(cache_file)\n except IOError:\n log.notice(f\"Could not read JSON from {json_path}\")\n return None", "def pre_cache(self, scope, name):\n item = {'did': '%s:%s' % (scope, name),\n 'base_dir': self.cache_path,\n 'no_subdir': self.no_subdir,\n 'transfer_timeout': 
self.transfer_timeout}\n if self.rse:\n item['rse'] = self.rse\n\n client = Client()\n all_files = client.list_files(scope, name)\n\n download_client = DownloadClient(client=client)\n downloaded_files = download_client.download_dids([item], num_threads=self.num_threads)\n\n self.logger.info('Downloaded files: %s' % downloaded_files)\n\n ret_files = []\n for file in all_files:\n downloaded_file = None\n for d_file in downloaded_files:\n if d_file['scope'] == file['scope'] and d_file['name'] == file['name']:\n downloaded_file = d_file\n break\n\n ret_file = {'scope': file['scope'],\n 'name': file['name'],\n 'min_id': 1,\n 'max_id': file['events'],\n 'status': ContentStatus.PRECACHED if downloaded_file and downloaded_file['clientState'] == 'ALREADY_DONE' else ContentStatus.NEW,\n 'size': file['bytes'],\n 'md5': downloaded_file['md5'] if downloaded_file else None,\n 'adler32': downloaded_file['adler32'] if downloaded_file else None,\n 'pfn': downloaded_file['dest_file_path'] if downloaded_file else None\n\n }\n \"\"\"\n if ret_file['status'] == ContentStatus.AVAILABLE:\n if req.granularity_type == GranularityType.PARTIAL:\n ret_file['status'] = ContentStatus.PRECACHED\n elif req.granularity_type == GranularityType.FILE:\n ret_file['status'] = ContentStatus.TOSTAGEDOUT\n \"\"\"\n ret_files.append(ret_file)\n return ret_files", "async def get() -> list:\n if _cache is None:\n await _update()\n return _cache", "def retrieve_cached_decisions(self):\r\n return u.load_cached_data(self.decisions_cache_path)", "def _csv_get(page):\n cache_key = reverse('timetable.views.display')\n\n ret = cache.get(cache_key)\n if ret is not None:\n print 'hola'\n return ret\n else:\n print 'ciao'\n ret = _csv_download(page)\n cache.set(cache_key, ret, timeout=15) # cache lasts 15 seconds\n return ret", "def prefill_cache():\n print(\"Prefilling cache.\")\n print(\"\\rListing ATS files...\", end=\"\")\n file_names = list(ats_files())\n print(\"\\rListing ATS files: done.\")\n index = 0\n files_count = len(file_names)\n cached_count = 0\n for file_name in file_names:\n index += 1\n print(\"\\rHandling ATS file #%i of %i\" % (index, files_count), end=\"\")\n if get_json(file_name) is not None:\n cached_count += 1\n print(\"\\nDone: %i file(s) cached.\" % cached_count)", "def get_cache(self, key):\n return self.r.get(key)", "def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache", "def process(self, data):\n file = self.get_cache_file(data)\n loaded = False\n if self.check_cache_exists(data):\n if self.force:\n log.info(\"Item found in cache but force=True\")\n else:\n try:\n log.info(\"Found in cache, skipping chain\")\n with open(file, 'rb') as f:\n # https://stackoverflow.com/questions/2766685/how-can-i-speed-up-unpickling-large-objects-if-i-have-plenty-of-ram/36699998#36699998\n # disable garbage collector for speedup unpickling\n gc.disable()\n cache = pickle.load(f)\n\n # enable garbage collector again\n gc.enable()\n\n retrieved_data = cache['data']\n stop = cache[\"stopped\"]\n if stop:\n raise StopIteration()\n self._check_time_consistency(cache['chain_mtime'],\n self.chain_info['chain_mtime'])\n for key, value in retrieved_data.items():\n data[key] = value\n loaded = True\n except EOFError:\n log.warning(\n \"Failed to load cache item {} (corrupted file will be deleted)\".format(file))\n os.unlink(file)\n if not loaded:\n log.debug(\"Not found in cache, 
processing chain\")\n cache, stop = self._process(data, {})\n cache = cache[self.chain_info['chain_hash']]\n if self.save_cache:\n with open(file, 'wb') as f:\n try:\n pickle.dump(cache, f, protocol=HIGHEST_PROTOCOL)\n except:\n pickle.dump(cache, f)\n\n # Try to set some more flexible access rights\n try:\n os.chmod(file, RWRWRW)\n except OSError:\n pass", "def load_restaurants():\n try:\n with open(CACHE_FILE) as infile:\n print(\"Cache found, loading from file {}\".format(CACHE_FILE))\n restaurants = json.load(infile)\n except Exception:\n print(\"No cache found, loading from API\")\n restaurants = get_restaurants()\n with open(CACHE_FILE, 'w+') as outfile:\n json.dump(restaurants, outfile)\n return restaurants\n return restaurants", "def initCacheFile(self):\n self.cacheData = {\"data\": []}\n for i in range(int(self.frameCount)):\n self.cacheData[\"data\"].append({\"isLoaded\": False,\n \"faces\": []})\n self.saveCacheFile()", "def loadGameFromCache(self, theKey):\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n theLine = None\n try:\n theLine = br.readLine()\n br.close()\n ir.close()\n gIn.close()\n fIn.close()\n except Exception as e:\n if theLine == None:\n return None\n return Game.loadFromJSON(theLine)", "def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return self._data", "def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']", "def load_cached(cache_path, in_dir):\n\n print(\"Creating dataset from the files in: \" + in_dir)\n\n # If the object-instance for DataSet(in_dir=data_dir) already\n # exists in the cache-file then reload it, otherwise create\n # an object instance and save it to the cache-file for next time.\n\n cache=Cache()\n dataset = cache.cache_data(cache_path=cache_path,\n fn=Dataset, in_dir=in_dir)\n\n return dataset", "def get_cached(self, keyword, search_engine, scrapemode, page_number):\n if self.config.get('do_caching', False):\n file_name = self.cached_file_name(\n keyword,\n search_engine,\n scrapemode,\n page_number\n )\n cache_dir = self.config.get('cachedir', self.CACHEDIR)\n if file_name in os.listdir(cache_dir):\n try:\n modtime = os.path.getmtime(\n os.path.join(cache_dir, file_name)\n )\n except FileNotFoundError:\n return False\n modtime = (time.time() - modtime) / 60 / 60\n if (modtime > int(self.config('clean_cache_after', 48))):\n return False\n path = os.path.join(cache_dir, file_name)\n return self.read_cached_file(path)\n else:\n return False", "def load_local_cache(self):\n folder = os.path.dirname(__file__)\n path = os.path.join(folder, 'local_document_cache.dat')\n path = os.path.normpath(path)\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n with open(path, mode='rb') as the_file:\n try:\n mapa = pickle.load(the_file)\n self.komponente = mapa['komponente']\n self.analitickeMetode= mapa['metode']\n self.dilucijskeJedinice = mapa['dilucije']\n self.generatoriCistogZraka = mapa['generatori']\n self.uredjaji = mapa['uredjaji']\n self.postaje = mapa['postaje']\n except Exception as err:\n logging.error(str(err), exc_info=True)\n mes = '\\n'.join(['Ucitavanje REST cache nije uspjelo.', str(err)])\n QtGui.QApplication.restoreOverrideCursor()\n 
QtGui.QMessageBox.warning(QtGui.QApplication, 'Problem', mes)\n QtGui.QApplication.restoreOverrideCursor()", "def cache_matrio_data(filename):\n prefix = \"https://data.matr.io/3/api/v1/file\"\n key = MATRIO_DATA_KEYS[filename]\n if not os.path.isfile(filename):\n cache_download(\"{}/{}/download\".format(prefix, key), filename)", "def get_response_from_cache(responsefile):\n global __response_cache\n\n if responsefile not in __response_cache:\n return\n\n if not goodfile(responsefile):\n try:\n del __response_cache[responsefile]\n except KeyError: # pragma: no cover\n pass\n return\n\n modtime = str(os.path.getmtime(responsefile))\n if modtime not in __response_cache.get(responsefile, {}):\n return\n\n log.debug(\"Retrieving data from response file (%s) in cache\" %\n responsefile)\n return __response_cache.get(responsefile, {}).get(modtime)", "def get_inventory_from_cache(self):\n cache = open(self.cache_path_cache, 'r')\n json_inventory = cache.read()\n return json_inventory", "def cache(self, file_name, content):\n self.files_loaded[file_name] = content", "def getCacheContents(self):\n return self._cache", "def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None", "def getCache(self, path, prefix, verbose = True):\n\n fpath = path + prefix + \"_cache\"\n\n if not os.path.isfile(fpath):\n if verbose: print \"Cache file not found - please check!\"\n return False\n\n with open(fpath, \"rb\") as f:\n s = np.fromfile(f, count=4, dtype=np.int32)\n NFILT, NTEMP, NZ, NOBJ = s[0], s[1], s[2], s[3]\n\n tempfilt = np.fromfile(f, count=NFILT*NTEMP*NZ, dtype=np.double).reshape((NZ,NTEMP,NFILT))\n lc = np.fromfile(f, count=NFILT, dtype=np.double)\n zgrid = np.fromfile(f, count=NZ, dtype=np.double)\n fnu = np.fromfile(f, count=NFILT*NOBJ, dtype=np.double).reshape((NOBJ,NFILT))\n efnu = np.fromfile(f, count=NFILT*NOBJ, dtype=np.double).reshape((NOBJ,NFILT))\n\n keys = ['NFILT','NTEMP','NZ','NOBJ','tempfilt','lc','zgrid','fnu','efnu']\n values = [NFILT, NTEMP, NZ, NOBJ, tempfilt, lc, zgrid, fnu, efnu]\n\n if verbose: print \".cache file found and read in correctly!\"\n return dict(zip(keys, values))", "def __read_cache_file_if_exists(self) -> None:\n if os.path.exists(self.__cache_file):\n self.__config.open_file(self.__cache_file, \"r\", self.__process_cache)", "def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES):\n cache_file = pathlib.Path(cache_file)\n\n if cache_file is not None and cache_file.exists():\n data = import_dict_from_cached(cache_file, filenames)\n else:\n data = import_data(ashrae_dir)\n _cache_data(data, cache_file)\n\n # Sanity check: the set of building ids should be the same in the train and test sets.\n assert set(data['train'].building_id) == set(data['test'].building_id)\n\n return data", "def get(self, key):\n return self.cache_data.get(key)", "def get_cache(self):\n self._topo.create_cache()\n with open('/run/geopm-service/geopm-topo-cache') as fid:\n result = fid.read()\n return result", "def get(self):\n CACHE_KEY = 'sources'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n feeds = Feed.all().order('name')\n feed_list = []\n for feed in feeds:\n feed_list.append(feed.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(feed_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))", "def 
cached_files():\n for (dir_path, _dir_names, file_names) in os.walk(CACHE):\n for file_name in file_names:\n if is_json_file(file_name):\n yield os.path.join(dir_path, file_name)", "def cached_json_get(url):\n return requests.get(url).json()", "def read(self, source):\n _source = self._source_prefix+source\n return self.cache[_source]", "def retrieve():\n # type: () -> list\n with Cache(CACHE_URI) as c:\n data = c.get(SAVED_SEARCH)\n return json.loads(data[\"blob\"]) if data else []", "def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))", "def get_cache(self):\n return self.cache", "def cached(cache_path, generator):\n if path.exists(cache_path):\n with open(cache_path, 'rb') as f:\n return pickle.load(f)\n output = generator()\n with open(cache_path, 'wb+') as f:\n pickle.dump(output, f)\n return output", "def getResults(self, nocache=False, filter=None):\n if 'mysql_cache' in self.config:\n if not nocache and path.isfile(self.config['mysql_cache']):\n file = open(self.config['mysql_cache'], \"r\")\n results = json.load(file)\n else:\n results = self.getDBResults()\n file = open(self.config['mysql_cache'], \"w\")\n file.write(json.dumps(results, indent=4, sort_keys=True))\n shuffle(results)\n return results", "def get_cache_path(self):", "def get_cache_path(self):", "def _read_buckets_cache_file(cache_file):\n\n log.debug(\"Reading buckets cache file\")\n\n with salt.utils.files.fopen(cache_file, \"rb\") as fp_:\n try:\n data = pickle.load(fp_)\n except (\n pickle.UnpicklingError,\n AttributeError,\n EOFError,\n ImportError,\n IndexError,\n KeyError,\n ValueError,\n ) as exc:\n log.debug(\"Exception reading buckets cache file: '%s'\", exc)\n data = None\n\n return data", "async def check_cache(self, **kwargs) -> Optional[T]:\n assert set(kwargs.keys()) == set(\n self._unique_attribues\n ), \"Invalid attributes used to check cache\"\n path = self._build_cache_path(**kwargs)\n if not path.is_file():\n return None\n async with aiofiles.open(path) as file:\n raw = await file.read()\n return self._cacheable_class.from_json(raw)", "def get_datfile(filename):\n if ARGV.get(DEBUG_OPT):\n err_print('Getting datfile from \"{}\"'.format(filename))\n\n try:\n with open(filename, 'rb') as pickle_file:\n try:\n (cache, readlist) = pickle.load(pickle_file)\n pickle_file.close()\n except (EOFError, ValueError):\n (cache, readlist) = ({\"feed\": None, \"max-age\": None, \"last-request\": None}, [])\n except (FileNotFoundError, PermissionError):\n (cache, readlist) = ({\"feed\": None, \"max-age\": None, \"last-request\": None}, [])\n return (cache, readlist)", "def loadcache(self, cachepath):\n loadfunc = json.load if self.serializer == 'json' else pickle.load\n try:\n # check for recency\n if self.expiration > 0:\n elapsed = time.time() - os.stat(cachepath).st_mtime\n #print >>sys.stderr, '%s exp, %s elapsed' % (self.expiration, elapsed)\n if elapsed > self.expiration:\n if self.expirepolicy == 'archive':\n os.rename(cachepath, self.archivepath(cachepath))\n raise IOError\n return loadfunc(open(cachepath))\n except Exception, e:\n #print >>sys.stderr, 'Could not load cache file %s: %s' % (cachepath, e)\n raise IOError('Could not load cache file %s: %s' % (cachepath, e))", "def 
get_data_not_yet_ready_file(self):\n pass", "def dump():\n global CACHE\n return CACHE", "def get_headers_and_data(self):\n\n if self.config.flag_usecache:\n fpath, fhdr, dirpath = self.get_url_store_paths()\n\n fpath_f = os.path.isfile(fpath)\n fhdr_f = os.path.isfile(fhdr)\n \n if fpath_f and fhdr_f:\n try:\n content = zlib.decompress(open(fpath).read())\n headers = eval(zlib.decompress(open(fhdr).read()))\n\n if self.make_head_request(headers):\n # Update URL from cache\n self.url = self.headers.get('url', self.url)\n \n log.info(self.url, \"==> URL is up-to-date, returning data from cache\")\n\n self.content = content\n self.headers = headers\n\n self.content_type = urlhelper.get_content_type(self.url, self.headers)\n \n eventr = crawlerbase.CrawlerEventRegistry.getInstance() \n # Raise the event for retrieving URL from cache\n eventr.publish(self, 'download_cache',\n message='URL has been retrieved from cache',\n code=304,\n event_key=self.url, \n params=self.__dict__) \n\n return True\n except Exception, e:\n log.error(\"Error in getting URL headers & data for URL\",self.url)\n log.error(\"\\t\",str(e))\n else:\n if not fpath_f:\n log.debug(\"Data file [%s] not present =>\" % fpath, self.url)\n if not fhdr_f:\n log.debug(\"Header file [%s] not present =>\" % fhdr, self.url) \n\n return False", "def test_products_get_cache(data, mocker):\n mocker.patch(\"sps.cache.load\", autospec=True)\n cache.load.return_value = {\"product\": data[\"data\"]}\n assert products.get(None, __file__, False, False) == data[\"data\"]", "def _get_data_reference_list_from_cache_by_data_asset_name(\n self, data_asset_name: str\n ) -> List[Any]:\n raise NotImplementedError", "async def _cache_patterns(self) -> None:\n results: [asyncpg.Record] = await db_fetch(\n self.bot.db_conn,\n \"SELECT * FROM filter\"\n )\n for result in results:\n re_compiled = re.compile(result[\"filter_pattern\"])\n self._filter_cache[result[\"server_id\"]][re_compiled] = [result[\"filter_identifier\"]]" ]
[ "0.7370039", "0.72422016", "0.71613204", "0.690746", "0.68249017", "0.67976505", "0.6747037", "0.66864574", "0.6593007", "0.6577908", "0.6532192", "0.6532192", "0.65105075", "0.6455156", "0.64536875", "0.64248145", "0.64161235", "0.6378454", "0.63769585", "0.6357792", "0.63189375", "0.6316802", "0.6307072", "0.6275134", "0.62722045", "0.62580717", "0.6255759", "0.62394935", "0.6204416", "0.61977565", "0.6191567", "0.6143484", "0.6136635", "0.6134081", "0.61319387", "0.61082685", "0.6106395", "0.6097055", "0.6095996", "0.6092914", "0.6091083", "0.6087861", "0.6082811", "0.6058318", "0.60508245", "0.6046634", "0.6034646", "0.6028815", "0.60201967", "0.6019069", "0.5998318", "0.59943104", "0.59854823", "0.59836274", "0.5977367", "0.5956368", "0.5939407", "0.59304637", "0.59195095", "0.59178466", "0.5917795", "0.5915173", "0.5913957", "0.5911724", "0.59115946", "0.5904808", "0.5903751", "0.5888189", "0.5881904", "0.58660716", "0.5864611", "0.5855923", "0.585006", "0.5845861", "0.5845306", "0.58375597", "0.58216316", "0.58108956", "0.581074", "0.581033", "0.58034664", "0.5793572", "0.5787963", "0.5783834", "0.57781315", "0.5777591", "0.5777265", "0.57617366", "0.5742032", "0.5742032", "0.57328874", "0.57290244", "0.56992096", "0.56960493", "0.5681918", "0.5671972", "0.56690925", "0.5655013", "0.56516135", "0.56497186" ]
0.60076046
50
Get data from cached file
async def get(self, filters: dict = None): res = self.__get_data(filters) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _retrieveCachedData(self):", "def getData(self, local_cache):", "def read_data_cache_file(self):\n with open(self.cache_filename, 'r') as json_data:\n return json.load(json_data)", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def GetFromCache(self, filename):\n return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))", "def read_data_cache(self):\n if os.path.exists(self.cache_filename):\n return self.read_data_cache_file()\n else:\n data = self._empty_data()\n self.write_data_cache(data)\n return data", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def _load_cached_2to3(self, path, cache):\n try:\n cache_stats = os.stat(cache)\n source_stats = os.stat(path)\n except OSError as e:\n if e.errno == errno.ENOENT: # FileNotFoundError\n self.logger.debug('Cache miss: %s' % cache)\n return None\n else:\n raise\n\n if cache_stats.st_mtime <= source_stats.st_mtime:\n self.logger.debug('Cache miss (stale): %s' % cache)\n return None\n\n self.logger.debug(\"Cache hit: %s\" % cache)\n return super().get_data(cache)", "def _read_cache_file(self) -> bytes:\n with open(self.cache_file, 'rb') as file:\n return file.read()", "def get(self, path):\n\t\treturn self.cache.get(path)", "def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(_(\"Reloading cached file %s\") % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']", "def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(\"Reloading cached file %s\" % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']", "def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result", "def _read_cache(url):\n\n j = None\n m = hashlib.md5()\n m.update(url)\n if os.path.exists('.cache.%s' % m.hexdigest()):\n with open('.cache.%s' % m.hexdigest(), 'rb') as infile:\n j = json.load(infile)\n\n return j", "def get_cached(factory, cache_file_name, **kwargs):\n if os.path.exists(cache_file_name):\n _logger.info('Loading {}'.format(cache_file_name))\n cached = deserialize(cache_file_name)\n return cached\n\n _logger.info('Creating {}'.format(cache_file_name))\n data = factory()\n serialize(cache_file_name, data, **kwargs)\n return data", "def __read_cache(self, fileName):\n if self.__log:\n self.__logger.info(f\"Cache hit - {fileName}\")\n # Cache hit\n with open(fileName, \"rb\") as f:\n content = self.__handle_decompression(f.read())\n variables = pickle.loads(content)\n\n # Move node to front\n node = os.path.relpath(fileName, \"cache\")\n self.__shift_node(node)\n\n return variables", "def get(self):\n if path.exists(self.cachefile):\n self.invalidion()\n full_cache = self._get_all()\n return full_cache\n else:\n return []", "def _read_cache(self, path):\n if self._cache:\n cache_path = os.path.join(self._cache, path)\n\n if os.path.exists(cache_path):\n with io.open(cache_path, encoding='utf-8') as f:\n text = f.read()\n\n return text\n\n msg = ('Unable to 
download remote file \"{0}\" and local cache is not '\n 'available.').format(path)\n raise RuntimeError(msg)", "def get_cache(self, key):\n return self.r.get(key)", "def get_data(self, path):\n\n if path == self.original_path:\n cache = self._2to3_cache_path(path)\n data = self._load_cached_2to3(path, cache)\n if data is None:\n output, encoding = self._refactor_2to3(path)\n data = bytearray(output, encoding or sys.getdefaultencoding())\n self.set_data(cache, data)\n return data\n\n else:\n return super().get_data(path)", "def read_cached_file(self, path):\n if self.config.get('do_caching', False):\n ext = path.split('.')[-1]\n\n if ext == 'cache':\n with open(path, 'r') as fd:\n try:\n return fd.read()\n except UnicodeDecodeError as e:\n self.logger.warning(str(e))\n else:\n raise Exception('\"{}\" is a invalid cache file.'.format(path))", "def get(self, key):\n return self.cache_data.get(key)", "def load_cache(base_url, path=\"logs/\"):\n\n # Convert URL to filename and read contents\n url_filename = url_to_filename(base_url)\n\n filename = f\"{path}CACHE-{url_filename}.html\"\n f = open(filename, \"r\")\n data_cache = f.read()\n\n data_cache = \" \".join(data_cache.split()) # Remove all whitespaces\n\n return data_cache", "def loadCacheFile(self):\n if not os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)", "def get_json_from_cache(file_name):\n result = None\n path = clean_path(file_name)\n cached_file_name = get_cached_file_name(path)\n if os.path.exists(cached_file_name):\n time = os.path.getmtime(path)\n cached_time = os.path.getmtime(cached_file_name)\n if cached_time > time:\n try:\n source = open(cached_file_name, \"r\")\n try:\n result = json.load(source)\n except ValueError:\n pass\n source.close()\n except OSError:\n # Includes IOError\n pass\n return result", "def get_output_from_cache(name, filename):\n cache_filename = _get_cache_filename(name, filename)\n if (os.path.exists(cache_filename) and\n os.path.getmtime(filename) < os.path.getmtime(cache_filename)):\n with io.open(cache_filename) as f:\n return f.read()\n\n return None", "def data(self):\n if self._data is None:\n try:\n with open(self.storage_path, 'r') as cache_file:\n self._data = json.load(cache_file)\n except FileNotFoundError:\n self._data = {}\n return self._data", "def _load_data(self):\n path = os.path.join(self._cache_path, '%s.data' % self._name)\n\n if not os.path.exists(path):\n raise IOError('Data cache missing at %s' % path)\n\n f = bz2.BZ2File(path)\n data = pickle.loads(f.read())\n f.close()\n\n return data", "def cache(self):\n if self._cache is None:\n with open(self.cache_path, 'r') as cache_file:\n self._cache = json.load(cache_file)\n return self._cache", "def load_cache():\n return {}", "def get(key):\n return Cache.cache_connector.get(key)", "def files():\n return get_cached(\"files.json\")", "def load_cache(self, filename):\n output_df = cudf.read_hdf(filename, key=self.uid)\n return output_df", "def read_cache():\n try:\n cache_file = open(CACHE_FILENAME, 'r', encoding=\"utf-8\")\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n return cache_dict\n except:\n cache_dict = {}\n return cache_dict", "def cache_get(item: str) -> object:\n\titem = str(item)\n\tcache = cache_find(item)\n\n\t# cache_find() will return none if the cache does not exist\n\t# the returned location is guaranteed to exist, so no point checking again.\n\n\tif cache 
is not None:\n\t\ttry:\n\t\t\tcached = pickle.load(open(cache, \"rb\"))\n\t\texcept EOFError as ex:\n\t\t\t# Cache file is corrupted, so print an error and act like it does\n\t\t\t# not exist. We do not delete the cache file incase the user wants\n\t\t\t# to recover the file.\n\t\t\tuux.show_error(\"Error when loading file from cache: \" + str(ex))\n\t\t\treturn None\n\t\texcept Exception as ex:\n\t\t\traise ex\n\t\tuux.show_debug(\"Cache hit for \" + item)\n\t\treturn cached\n\n\treturn None", "def get(self, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n if os.path.isfile(cache_file_path):\n with open(cache_file_path, 'rb') as fp:\n result = pickle.load(fp)\n return result\n\n return None", "def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get_data_from_file(self, path):\n data = None\n if not path:\n raise RedisUpdateException(\"Failed to provide a path\")\n\n try:\n with open(path, 'r') as _file:\n data = _file.read()\n print(\"Retrieved data from {}\".format(path))\n except Exception as e:\n print(str(e))\n raise RedisUpdateException(\n \"Failed to open file in location {}\".format(path))\n return data", "def cache_body(self):\n with open(self.path, \"rb\") as fh:\n fh.seek(fh.tell(), os.SEEK_END)\n fh.seek(max(0, fh.tell()-LEN_CACHE_BYTES), os.SEEK_SET)\n return fh.read(LEN_CACHE_BYTES).decode('utf-8') #.split(\"\\n\")", "def load(cache_file: Path, *, mode: str = None, unsafe: bool = False):\n if mode == 'binary':\n return cache_file.read_bytes()\n\n content = cache_file.read_text()\n if mode == 'json':\n content = json.loads(content)\n\n return content", "def get_datfile(filename):\n if ARGV.get(DEBUG_OPT):\n err_print('Getting datfile from \"{}\"'.format(filename))\n\n try:\n with open(filename, 'rb') as pickle_file:\n try:\n (cache, readlist) = pickle.load(pickle_file)\n pickle_file.close()\n except (EOFError, ValueError):\n (cache, readlist) = ({\"feed\": None, \"max-age\": None, \"last-request\": None}, [])\n except (FileNotFoundError, PermissionError):\n (cache, readlist) = ({\"feed\": None, \"max-age\": None, \"last-request\": None}, [])\n return (cache, readlist)", "def use_cached_files(self, cache_key):\r\n pass", "def get_cache_path(self):", "def get_cache_path(self):", "def get_cache_file_data(year: int, day: int, session: str) -> str:\n server_action = importlib.import_module(\".server_action\")\n server_action.download_input(year, day, session)\n cache_file = _join_path(year, day, session, file_type=\"input_file\")\n with open(cache_file) as opened_file:\n input_data = opened_file.read()\n return input_data", "def cache(file_name, load_func, *func_args, **func_kwargs):\n if path.exists(file_name):\n with open(file_name, 'rb') as f:\n return pickle.load(f)\n else:\n data = load_func(*func_args, **func_kwargs)\n with open(file_name, 'wb') as f:\n pickle.dump(data, f)\n return data", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with 
file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path", "def get_from_cache(cls, file_name):\n random.shuffle(cls.CACHE_BACKENDS)\n fname = None\n for cb in cls.CACHE_BACKENDS:\n if not cb.health_check():\n continue\n fname = cb.get_from_cache(file_name)\n if fname:\n break\n return fname", "def cache_matrio_data(filename):\n prefix = \"https://data.matr.io/3/api/v1/file\"\n key = MATRIO_DATA_KEYS[filename]\n if not os.path.isfile(filename):\n cache_download(\"{}/{}/download\".format(prefix, key), filename)", "def get(self, key):\n if key:\n return self.cache_data.get(key)\n else:\n return None", "def get_response_from_cache(responsefile):\n global __response_cache\n\n if responsefile not in __response_cache:\n return\n\n if not goodfile(responsefile):\n try:\n del __response_cache[responsefile]\n except KeyError: # pragma: no cover\n pass\n return\n\n modtime = str(os.path.getmtime(responsefile))\n if modtime not in __response_cache.get(responsefile, {}):\n return\n\n log.debug(\"Retrieving data from response file (%s) in cache\" %\n responsefile)\n return __response_cache.get(responsefile, {}).get(modtime)", "def getCache(self, key):\n return self._cache.get(key, None)", "def get_cache(feed_key):\n\n cache_file = CACHE_DIR / (feed_key + \".json\")\n\n with open(cache_file, \"r\") as file:\n entries = json.load(file)\n\n return entries", "def read_cache(cc):\n \n out_file = os.path.join(cc.scene_dir, 'output', cc.scene_id+'_pickle')\n if cc.atmo_src == 'narr':\n out_file += '_narr'\n elif cc.atmo_src == 'merra':\n out_file += '_merra'\n \n if not os.path.isfile(out_file):\n raise OSError('pickle_file is not in expected location %s' % out_file) \n\n with open(out_file, 'rb') as f:\n x = pickle.load(f)\n return x", "def get_cache(self):\n self._topo.create_cache()\n with open('/run/geopm-service/geopm-topo-cache') as fid:\n result = fid.read()\n return result", "def test_cache_retrieved(self):\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n data = read.data.decode()\n self.assertIn(\n '<span class=\"expan\">et </span>', data,\n \"Text content should be transformed\"\n )\n self.assertIn(\n 'Facsimilaire', data,\n \"Other content should be added\"\n )\n\n cached = self.cache.get(\"urn:cts:froLit:jns915.jns1856.ciham-fro1:1\").decode()\n self.assertIn('<aside class=\"text-left\">', cached, \"Assert cache is made\")\n\n with mock.patch(\"nemo_xslttwo_plugin.shell\") as shell:\n read = self.client.get(\"/read/froLit/jns915/jns1856/ciham-fro1/1\")\n cached_response = read.data.decode()\n self.assertEqual(\n cached_response, data,\n \"Text content should the same in cache\"\n )\n self.assertEqual(\n shell.call_count, 0,\n \"Shell should not be called because we use cache\"\n )", "def retrieve_cached_records(self):\r\n return u.load_cached_data(self.records_cache_path)", "def get_cache(self):\n return self.cache", "def _csv_get(page):\n cache_key = reverse('timetable.views.display')\n\n ret = cache.get(cache_key)\n if ret is not None:\n print 'hola'\n return ret\n else:\n print 'ciao'\n ret = _csv_download(page)\n cache.set(cache_key, ret, timeout=15) # cache lasts 15 seconds\n return ret", "def cache_get(self, key: str) -> Optional[bytes]:\n if self.cache is not None:\n return self.cache.get(key)\n return None", "async def get() -> 
list:\n if _cache is None:\n await _update()\n return _cache", "def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None", "def loadGameFromCache(self, theKey):\n theGameFile = File(self.theCacheDirectory, theKey + \".zip\")\n theLine = None\n try:\n theLine = br.readLine()\n br.close()\n ir.close()\n gIn.close()\n fIn.close()\n except Exception as e:\n if theLine == None:\n return None\n return Game.loadFromJSON(theLine)", "def cached_json_get(url):\n return requests.get(url).json()", "def json_from_cache(file_name: str) -> Optional[Dict]:\n\n json_path = os.path.join(CACHE_DIR, file_name)\n\n try:\n with open(json_path, \"r\") as cache_file:\n return json.load(cache_file)\n except IOError:\n log.notice(f\"Could not read JSON from {json_path}\")\n return None", "def get(self, key):\n if key is None:\n return None\n return self.cache_data.get(key, None)", "def getCacheContents(self):\n return self._cache", "def loadcache(self, cachepath):\n loadfunc = json.load if self.serializer == 'json' else pickle.load\n try:\n # check for recency\n if self.expiration > 0:\n elapsed = time.time() - os.stat(cachepath).st_mtime\n #print >>sys.stderr, '%s exp, %s elapsed' % (self.expiration, elapsed)\n if elapsed > self.expiration:\n if self.expirepolicy == 'archive':\n os.rename(cachepath, self.archivepath(cachepath))\n raise IOError\n return loadfunc(open(cachepath))\n except Exception, e:\n #print >>sys.stderr, 'Could not load cache file %s: %s' % (cachepath, e)\n raise IOError('Could not load cache file %s: %s' % (cachepath, e))", "def get(self, key):\n # Initialize key variables\n result = self.cache.get(key)\n\n # Return\n return result", "def get(self, key):\n if key and key in self.cache_data:\n return self.cache_data[key]\n return None", "def cached_load(filepath: str) -> io.BytesIO:\n with open(filepath, 'rb') as f:\n return io.BytesIO(f.read())", "def get_data(self, name):\n assert name, \"Must input a valid dataset name.\"\n try:\n return self.data[\"dataset\"][name]\n except KeyError:\n raise KeyError(\"The dataset \\'{}\\' does not exist in the cache.\".format(name))", "def load_cache(self, URL):\n\n cache_file = self.get_cache_file_path(URL)\n\n with open(cache_file, 'rb') as f:\n PAGE = f.read()\n\n return PAGE", "def read(self, source):\n _source = self._source_prefix+source\n return self.cache[_source]", "def getCurrentCacheData(self):\n return self.getCacheData(int(self.currentFrameNumber - 1))", "def get_cache(name):\n\n return get_component(CachingPackage.COMPONENT_NAME).get_cache(name)", "def reload_cache(self):\n self.data = self.read_data_cache()", "def read(self, file, path):\n pos, = struct.unpack('<Q', file.read(8))\n if pos == 0:\n raise VergeMLError(\"Invalid cache file: {}\".format(path))\n file.seek(pos)\n self.index, self.meta, self.info = pickle.load(file)", "def get_from_cache(self, subject_id):\n with self.cache.open_for_read(subject_id) as cache_file:\n chunks = utils.chunkiter(cache_file)\n for chunk in chunks:\n yield chunk", "def cache_data(name, data):\n cache_path = get_cachefile('%s.cache' % name)\n with open(cache_path, 'wb') as f:\n pickle.dump(data, f)", "def load_local_cache(self):\n folder = os.path.dirname(__file__)\n path = os.path.join(folder, 'local_document_cache.dat')\n path = os.path.normpath(path)\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n with open(path, mode='rb') as the_file:\n try:\n mapa = pickle.load(the_file)\n 
self.komponente = mapa['komponente']\n self.analitickeMetode= mapa['metode']\n self.dilucijskeJedinice = mapa['dilucije']\n self.generatoriCistogZraka = mapa['generatori']\n self.uredjaji = mapa['uredjaji']\n self.postaje = mapa['postaje']\n except Exception as err:\n logging.error(str(err), exc_info=True)\n mes = '\\n'.join(['Ucitavanje REST cache nije uspjelo.', str(err)])\n QtGui.QApplication.restoreOverrideCursor()\n QtGui.QMessageBox.warning(QtGui.QApplication, 'Problem', mes)\n QtGui.QApplication.restoreOverrideCursor()", "def load_cached(cache_path, in_dir):\n\n print(\"Creating dataset from the files in: \" + in_dir)\n\n # If the object-instance for DataSet(in_dir=data_dir) already\n # exists in the cache-file then reload it, otherwise create\n # an object instance and save it to the cache-file for next time.\n\n cache=Cache()\n dataset = cache.cache_data(cache_path=cache_path,\n fn=Dataset, in_dir=in_dir)\n\n return dataset", "def cache_file(cache_key):\n\n return MASTOOLS_DIR / f\"{cache_key}_cache.json\"", "def cache_path(self):", "def cache_path(self):", "def get_from_cache(self, url):\n cache_key, cache_lookup = self.get_cacheable_info(url)\n\n cache_timeout = self.cache_timeouts.get(cache_key,\n self.default_cache_timeout)\n\n data, access_time = MEM_CACHE[cache_key].get(cache_lookup, (None, 0))\n if data and time.time() - access_time < cache_timeout:\n return data\n return False", "def get_inventory_from_cache(self):\n cache = open(self.cache_path_cache, 'r')\n json_inventory = cache.read()\n return json_inventory", "def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache", "def _cache_get(self, metric_name):\n pass", "def get(self, key):\n if key in self.cache:\n return self.cache[key]\n valueat,valuelen = self.keys[key]\n valuedump = self.file.readp(valueat, valuelen)\n value = pickle.loads(valuedump)\n self.cache[key] = value\n return value", "def _read_buckets_cache_file(cache_file):\n\n log.debug(\"Reading buckets cache file\")\n\n with salt.utils.files.fopen(cache_file, \"rb\") as fp_:\n try:\n data = pickle.load(fp_)\n except (\n pickle.UnpicklingError,\n AttributeError,\n EOFError,\n ImportError,\n IndexError,\n KeyError,\n ValueError,\n ) as exc:\n log.debug(\"Exception reading buckets cache file: '%s'\", exc)\n data = None\n\n return data", "def get_cache(self):\n return self._instance._cache[self.name]", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def get_content_from_cache(self):\n\n rss_feed = []\n news_to_show = 0\n\n try:\n self.print_if_verbose(\n f\"Method 'get_content_from_cache' is working: \\n\"\n f\"Trying to get content from cache...\"\n )\n os.chdir(\"cache\")\n except Exception as error:\n print(f\"{error}: cache does not exists!\")\n return\n\n try:\n os.chdir(\"image_cache\")\n self.full_path_to_image_cache = os.getcwd()\n os.chdir(\"..\")\n except:\n pass\n\n try:\n with open(\"rss_reader_cache.json\", \"r\", encoding=\"utf-8\") as cache_file:\n data_from_cache = json.load(cache_file)\n self.print_if_verbose(f\"Content from cache has been received successfully. \\n\")\n except Exception as error:\n self.print_if_verbose(f\"{error}: cache file does not exist! 
\\n\")\n return\n\n if self.source:\n for feed in data_from_cache:\n if self.source in feed.keys():\n for news in feed[self.source]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n else:\n for channel in data_from_cache:\n for feed_link in channel:\n for news in channel[feed_link]:\n if news[\"PubDate\"] == str(self.date):\n rss_feed.append(news)\n news_to_show += 1\n if self.limit and news_to_show == self.limit:\n break\n if self.limit and news_to_show == self.limit:\n break\n\n os.chdir(\"..\")\n\n self.news_amount = len(rss_feed)\n\n if self.news_amount == 0:\n print(f\"There is no news in cache for specified date. \\n\")\n else:\n self.print_if_verbose(f\"There is {self.news_amount} news in cache for specified date. \\n\")\n\n self.print_if_verbose(f\"Method 'get_content_from_cache' is finished. \\n\")\n\n return rss_feed", "def retrieve_cached_decisions(self):\r\n return u.load_cached_data(self.decisions_cache_path)", "def request_data(url): \n requests_cache.install_cache('data_cache')\n while True:\n data = requests.get(url)\n if not data.status_code == 200 or \"try again later\" in data.text:\n continue\n else:\n break\n return data.text", "def get(self):\n CACHE_KEY = 'sources'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n feeds = Feed.all().order('name')\n feed_list = []\n for feed in feeds:\n feed_list.append(feed.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(feed_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))", "def cached(cache_path, generator):\n if path.exists(cache_path):\n with open(cache_path, 'rb') as f:\n return pickle.load(f)\n output = generator()\n with open(cache_path, 'wb+') as f:\n pickle.dump(output, f)\n return output", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n\n import requests\n from hashlib import md5\n from pathlib import Path\n\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok=True)\n file_path = data_dir/Path(file)\n # If the file already exists and we want to force a download then\n # delete the file first so that the creation date is correct.\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n resp = requests.get(data_url, stream=True)\n file_size = int(resp.headers.get('content-length', 0))\n step = 40\n chunk_size = file_size//step\n with file_path.open('wb') as f:\n for chunk in resp.iter_content(chunk_size): # write file in chunks\n f.write(chunk)\n step -= 1\n print('[' + '#'*(41 - step) + (step)*' ' + ']\\r', end='')\n print(f\"\\nDownloaded {data_url.split('/')[-1]}!\")\n else:\n import time\n time_downloaded = time.ctime(file_path.stat().st_ctime)\n print(\"Using version already downloaded:\", time_downloaded)\n # Compute and print md5 hash of file, whether newly downloaded or not\n m5 = md5()\n m5.update(file_path.read_bytes())\n print(f\"MD5 hash of file: {m5.hexdigest()}\")\n return file_path" ]
[ "0.8146995", "0.8129194", "0.79291904", "0.77733576", "0.77733576", "0.77026176", "0.7592949", "0.73793447", "0.73505074", "0.7287", "0.7261666", "0.7246066", "0.7216164", "0.71735895", "0.71685135", "0.71626484", "0.7114864", "0.7036473", "0.7034909", "0.70220995", "0.7006369", "0.6975311", "0.69622076", "0.6943401", "0.69353586", "0.6920103", "0.6919028", "0.6896948", "0.68959576", "0.6871039", "0.6867054", "0.68519145", "0.6847267", "0.68235826", "0.67484635", "0.67352676", "0.67338187", "0.6733026", "0.6733026", "0.67288", "0.67274654", "0.6726811", "0.6714318", "0.67114407", "0.6703592", "0.6703592", "0.6701789", "0.6695161", "0.6691642", "0.66765565", "0.66747165", "0.667183", "0.6661721", "0.66529596", "0.6651134", "0.6641018", "0.66391027", "0.6639079", "0.66384447", "0.6637422", "0.663631", "0.66056776", "0.6592226", "0.65884316", "0.6582975", "0.65813714", "0.6565181", "0.65468854", "0.6531947", "0.65151364", "0.65042657", "0.65017456", "0.64978594", "0.6481052", "0.64697903", "0.6462119", "0.64553225", "0.64503926", "0.6433405", "0.6429722", "0.642417", "0.64205563", "0.6418205", "0.64140874", "0.6410683", "0.6402098", "0.6402098", "0.64002734", "0.64000225", "0.63616955", "0.6346805", "0.6339783", "0.6336766", "0.6329984", "0.6317512", "0.63083047", "0.63082385", "0.630411", "0.63017666", "0.62969184", "0.6274771" ]
0.0
-1
Writes objects in text format to the Indices file.
def gravarArquivoIndices(indices):
    arq = open("arquivoIndices.txt", "w")
    for i in indices.indices:
        linha = i.codigo + "," + str(i.indice) + "," + str(i.excluido) + "\n"
        arq.write(linha)
    arq.close()
    return
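# Minimal usage sketch, assuming "indices" is a container whose .indices list holds
# objects exposing codigo, indice and excluido attributes; the Indice/Indices classes
# below are hypothetical illustrations, not part of the source record.
class Indice:
    def __init__(self, codigo, indice, excluido):
        self.codigo = codigo      # string code, written as-is
        self.indice = indice      # numeric index, converted with str()
        self.excluido = excluido  # deletion flag, converted with str()

class Indices:
    def __init__(self):
        self.indices = []

indices = Indices()
indices.indices.append(Indice("A001", 0, False))
indices.indices.append(Indice("A002", 1, True))
gravarArquivoIndices(indices)  # writes "A001,0,False" and "A002,1,True" to arquivoIndices.txt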
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obj_index(self) -> str:\n return str(self._data[\"index\"])", "def WriteIndexContent(indexList, formatindex, fpindex):#{{{\n if formatindex == FORMAT_TEXT:\n numRecord = len(indexList[0])\n idList = indexList[0]\n v1 = indexList[1]\n v2 = indexList[2]\n v3 = indexList[3]\n for i in range(numRecord):\n print(idList[i], v1[i], v2[i], v3[i], file=fpindex)\n else:\n maxOffset = max(indexList[2])\n\n numRecord = len(indexList[0])\n\n idList = indexList[0]\n v1 = indexList[1]\n v3 = indexList[3]\n if maxOffset > LargeFileThresholdSize:\n v2 = indexList[2]\n else: #'I'\n v2 = array('I', [x for x in indexList[2]])\n\n dumpedliststr = '\\n'.join(s for s in idList)\n\n vI=array('I')\n vI.append(len(dumpedliststr))\n vI.tofile(fpindex)\n fpindex.write(dumpedliststr)\n\n vI=array('I')\n vI.append(numRecord)\n vI.tofile(fpindex)\n\n v1.tofile(fpindex)\n v2.tofile(fpindex)\n v3.tofile(fpindex)", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def __getitem__(self, idx):\n\n text, label = self.data[idx]\n ids = self.get_ids(text)\n\n return {\"ids\": ids, \"label\": label}", "def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta \n #elif ( file_format.upper() == 'COMPACT' ):\n # read_func = read_compact\n #elif ( file_format.upper() == 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n self.colcount = 0\n for name, seq in read_func(file_obj):\n cseq, l = self.get_alignment_seq_object(seq)\n self[name] = cseq\n self.colcount = max(l, self.colcount)", "def make_iob(txt, ents, etypes):\r\n index = 0\r\n for i in ents:\r\n start = txt.index(i, index) #get the start of the entity\r\n tmp1, tmp2 = txt[:start], txt[start:]\r\n tmp1 += \" eeeeeeeeeeeeeeeeeeee \"\r\n txt = ' '.join([tmp1, tmp2])\r\n index = start + len(i) + len(\" eeeeeeeeeeeeeeeeeeee \")\r\n \r\n line_tokens = word_tokenize(txt)#tokenize the text\r\n \r\n #get the starting positions of the entities\r\n starts = []\r\n try: #in order to handle the last case where list.index doesnt finds anything\r\n while line_tokens.index(\"eeeeeeeeeeeeeeeeeeee\") > -1:\r\n tmp = line_tokens.index(\"eeeeeeeeeeeeeeeeeeee\")\r\n starts.append(tmp)\r\n del line_tokens[tmp]\r\n except ValueError:\r\n pass\r\n \r\n line_iob = ['O'] * len(line_tokens)# the iob tags of the whole text\r\n \r\n for i in range(0, len(ents)):\r\n #tokenize the entities\r\n entity_tokens = word_tokenize(ents[i])\r\n tmp = 'I-'+etypes[i]\r\n entity_iob = [tmp] * len(entity_tokens)\r\n entity_iob[0] = \"B-\" + etypes[i]\r\n \r\n #make changes to the iob tags to match the entities\r\n for j in range(0, len(entity_iob)):\r\n line_iob[starts[i] + j] = entity_iob[j]\r\n \r\n #the format is: token IOB-etypes\r\n for i in range(0, len(line_tokens)):\r\n output.write(\"{}\\t{}\\n\".format(line_tokens[i], line_iob[i]))\r\n output.write('\\n')#new document\r", "def _get_objects(self,label_fh):\n objects = []\n for line in label_fh.readlines():\n try:\n object = {}\n line = line.replace(u'\\ufeff', '')\n if line != '':\n x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n p1 = (x1, y1)\n p2 = (x2, y2)\n p3 = (x3, y3)\n p4 = (x4, y4)\n object['polygon'] = [p1,p2,p3,p4]\n objects.append(object)\n except:\n pass\n return objects", "def loadOBJModel(file_name):\n file_text = open(file_name)\n text = file_text.readlines()\n vertex = []\n 
normals = []\n uv = []\n faces_vertex = []\n faces_normal = []\n faces_uv = []\n for line in text:\n info = line.split(\" \")\n if info[0] == \"v\":\n vertex.append(\n (float(info[1]), float(info[2]) - 0.1, float(info[3])))\n elif info[0] == \"vn\":\n normals.append((float(info[1]), float(info[2]), float(info[3])))\n elif info[0] == \"vt\":\n uv.append((float(info[1]), float(info[2])))\n elif info[0] == \"f\":\n p1 = info[1].split(\"/\")\n p2 = info[2].split(\"/\")\n p3 = info[3].split(\"/\")\n faces_vertex.append((int(p1[0]), int(p2[0]), int(p3[0])))\n faces_uv.append((int(p1[1]), int(p2[1]), int(p3[1])))\n faces_normal.append((int(p1[2]), int(p2[2]), int(p3[2])))\n return vertex, normals, uv, faces_vertex, faces_normal, faces_uv", "def ReadIndex_text(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n idList = []\n v1 = array('B') # dbfile index\n v2 = array('L') # offset\n v3 = array('I') # block size\n apd1 = idList.append\n apd2 = v1.append\n apd3 = v2.append\n apd4 = v3.append\n indexFileHeaderText = []\n origdbname=\"\"\n origversion=\"\"\n origext=\"\"\n origprefix=\"\"\n try:\n\n hdl = mybase.ReadLineByBlock(indexfile)\n lines = hdl.readlines()\n while lines != None:\n for line in lines:\n if not line or line[0] == \"#\":\n continue\n strs = line.split()\n if strs[0] == \"DEF_DBNAME\":\n if len(strs)>=2:\n origdbname=strs[1]\n elif strs[0] == \"DEF_VERSION\":\n if len(strs)>=2:\n origversion=strs[1]\n elif strs[0] == \"DEF_EXTENSION\":\n if len(strs)>=2:\n origext=strs[1]\n elif strs[0] == \"DEF_PREFIX\":\n if len(strs)>=2:\n origprefix = strs[1]\n else:\n apd1(strs[0])\n apd2(int(strs[1]))\n apd3(int(strs[2]))\n apd4(int(strs[3]))\n lines = hdl.readlines()\n\n indexList.append(idList)\n indexList.append(v1)\n indexList.append(v2)\n indexList.append(v3)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n\n numRecord = len(idList)\n lastDBFileIndex = v1[numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n if isPrintWarning:\n if origversion == \"\":\n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. \"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0],indexfile,\n origversion, version), file=sys.stderr)\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def get_formatted_data(line, indices=None):\n\tfile_data = str.strip(line).split(' ')\n\tif indices is None:\n\t\tdata = list(range(len(file_data)))\n\telse:\n\t\tdata = list(indices)\n\t\t\n\tfor i, file_column in enumerate(data):\n\t\tif file_column is not None:\n\t\t\tdatum = file_data[file_column]\n\t\telse:\n\t\t\tdatum = ' '\n\t\tif '.' 
in datum:\n\t\t\ttry:\n\t\t\t\tdatum = float(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\telse:\n\t\t\ttry:\n\t\t\t\tdatum = int(datum)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdata[i] = datum\n\treturn data", "def load_annotation_at_index(self, index):\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n if not self.cfg['use_diff']:\n # Exclude the samples labeled as difficult\n non_diff_objs = [\n obj for obj in objs if int(obj.find('difficult').text) == 0]\n # if len(non_diff_objs) != len(objs):\n # print 'Removed {} difficult objects'.format(\n # len(objs) - len(non_diff_objs))\n objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n #overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n #seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n\n\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n cls = self._class_to_num[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n #overlaps[ix, cls] = 1.0\n #seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n #overlaps = scipy.sparse.csr_matrix(overlaps)\n return {'boxes' : boxes,\n 'gt_classes': gt_classes}", "def index_object(idxs=None):", "def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return", "def __getInterProIndex(self, filePath):\n\n interProD = {}\n encodingD = {\"encoding\": \"ascii\"} if sys.version_info[0] < 3 else {}\n rowL = self.__mU.doImport(filePath, fmt=\"tdd\", rowFormat=\"list\", **encodingD)\n for row in rowL:\n try:\n interProId = row[0].strip().upper()\n interProType = row[1].strip()\n descr = row[2].strip()\n interProD[interProId] = {\"description\": descr, \"type\": interProType}\n except Exception:\n pass\n #\n return interProD", "def parse_rec(json_dataset, index):\n info = voc_info(json_dataset)\n data_path = info['data_path']\n image_file = os.path.join(data_path, 'images', index + '.jpg')\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\n\n height, width = cv2.imread(image_file).shape[:2]\n annopath = os.path.join(data_path, 'annotations', '{:s}.txt')\n filename = annopath.format(index)\n rotate = 0\n objects = []\n with open(filename) as f:\n line = f.readline()\n while line:\n parts = line.split()\n if parts[0] == 'rotate':\n rotate = int(parts[1])\n assert rotate == 0\n else:\n obj_struct = {'name': parts[0]}\n x1 = min(max(int(parts[1]), 0), width - 1)\n y1 = min(max(int(parts[2]), 0), height - 1)\n x2 = min(max(int(parts[3]), 0), width - 1)\n y2 = min(max(int(parts[4]), 0), height - 1)\n obj_struct['bbox'] = [x1, y1, x2, y2]\n 
obj_struct['truncated'] = int(parts[5])\n obj_struct['difficult'] = 0\n objects.append(obj_struct)\n line = f.readline()\n\n return objects", "def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def get_index_repr(self):\r\n return \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.trf_id,\r\n self.trf_period,\r\n self.trf_array_length,\r\n self.trf_array_gc,\r\n self.trf_pvar,\r\n self.trf_gi,\r\n self.trf_l_ind,\r\n self.trf_r_ind,\r\n self.trf_chr)", "def indirectobject(self, index, io):\n if self.indices != '':\n self.indices += ' '\n self.indices += '%d %d' % (index, len(self.ios))\n self.ios += io\n self.objects.append(index)", "def index_to_algebraic(pos: tuple[int, int]) -> str:\r\n return INDEX_TO_FILE[pos[1]] + INDEX_TO_RANK[pos[0]]", "def IndexFileToPrefixInfo(index_fp):\n\n IX_FH = open(index_fp, \"r\")\n\n header_line = IX_FH.readline()\n\n c_line = \"placeholder\"\n\n # prefix is an important list that holds [[nLeading i, indexseq s, name s],...]\n # nLeading is number of n's before index \n prefix = []\n line_num = 0\n\n while c_line != \"\":\n c_line = IX_FH.readline().rstrip()\n line_num += 1\n\n line_split = c_line.split('\\t')\n\n if len(line_split) > 2:\n raise Exception(\"In indexfile, found a line that has more than \"\\\n + \"2 tsvs.\\n Filename: {} Line Number: {}\".format(\n index_fp, line_num))\n #Note name & index are in form H1, ATCACGAG\n name, index = line_split \n\n # What does this account for?\n if (re.search(r'name', name ,re.IGNORECASE)):\n continue\n\n nLeading = None\n indexseq = None\n\n match = re.search(r'^([nN]*)([ACGT]+)$',index)\n if not match:\n raise Exception(\"Invalid index sequence {}\".format(index))\n else:\n nLeading = len(match[0])\n indexseq = match[1]\n\n if (nLeading == None ) or (indexseq == None) or (name == ''):\n raise Exception(line)\n prefix.append([nLeading, indexseq, name])\n\n IX_FH.close()\n\n report_str = \"Read {} indices from {}\\n\".format(len(prefix),index_fp)\n prefixNames = [x[2] for x in prefix]\n\n \n return {\n \"report_str\": report_str,\n \"prefixNames\": prefixNames,\n \"prefix\": prefix\n }", "def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index", "def _construct_index(self, row) -> str:\n chrom = row[\"CHROM\"]\n pos = row[\"POS\"]\n ref = row[\"REF\"]\n alt = row[\"ALT\"]\n\n return f\"{chrom}_{pos}_{ref}>{alt}\"", "def __getitem__(self, index: int):\n path, label = self.paths[index], self.labels[index]\n data = self._read_input_file(path)\n data = self._apply_transform(data)\n\n return {\"data\": data, \"target\": label}", "def dump(self):\n res = []\n #res.append(\"Submeshes: %d\" % len(self.submeshes))\n #res.append(\"IdxBuf: 0x%04X bytes\" % len(self.idx_buf))\n #res.append(\"PrimFmt: 0x%04X (%s)\" % (\n # self.prim_fmt_id, self.prim_fmt))\n #res.append(\"IdxType: 0x%02X (%s)\" % (\n # self.header['idx_type'], self.idx_fmt,\n #))\n #res.append(\"IdxCnt: %d\" % self.header['idx_cnt'])\n #res.append(\"VisGrp: %d\" % self.header['visibility_group'])\n #res.append(\"Unknown: 0x%08X 0x%08X 0x%08X\" % (\n # self.header['unk08'],\n # 
self.header['unk10'],\n # self.header['unk34'],\n #))\n #return '\\n'.join(res).replace('\\n', '\\n ')\n\n return \"%4d│%04X│%04X %-24s│%02X %s│%5d│%5d│%08X│%08X│%08X\" %(\n len(self.submeshes),\n len(self.idx_buf),\n self.prim_fmt_id, self.prim_fmt,\n self.header['idx_type'], self.idx_fmt,\n self.header['idx_cnt'],\n self.header['visibility_group'],\n self.header['unk08'], self.header['unk10'],\n self.header['unk34'],\n )", "def index(self):\n path = self.path.format('index')\n \n with open(path, 'r', newline='') as file:\n l = list(csv.reader(file))\n \n index = [v for _ in l for v in _]\n index = dict((v, i) for (i, v) in enumerate(index))\n \n return index", "def to_iob(self) -> List[List[List[str]]]:\n iobs: List[List[List[str]]] = [\n [[\"O\"] * (len(tokens) - 1) for _ in self.attributes]\n for tokens in self.word_alignments\n ]\n for ne in self.nes:\n start_line: int = ne.token_offset.start.line_id\n start_offset: int = ne.token_offset.start.offset\n end_line: int = ne.token_offset.end.line_id\n end_offset: int = ne.token_offset.end.offset\n\n # 文を跨いだentityは除外\n if start_line != end_line:\n continue\n\n # 正解となるsubwordを含むwordまでタグ付\n attr_idx: int = self.attr2idx[ne.attribute]\n ne_start: int = self.sub2word[start_line][start_offset]\n ne_end: int = self.sub2word[end_line][end_offset - 1] + 1\n\n for idx in range(ne_start, ne_end):\n iobs[start_line][attr_idx][idx] = \"B\" if idx == ne_start else \"I\"\n\n return iobs", "def sentences_2_idxs(self):\n fo_pos = open(self.config.parsed_train_file_pos, 'w')\n fo_neg = open(self.config.parsed_train_file_neg, 'w')\n self.load_dicts()\n labels = pd.read_csv(self.config.train_file, usecols=[\"target\"])\n\n labels = list(labels.values[:, 0])\n questions = pd.read_csv(self.config.train_file,\n usecols=[\"question_text\"], index_col=False)\n unk_idx = self.word2idx.get(self.config.unknown_token)\n\n for label, quest in zip(labels, questions.question_text):\n tokens = preprocess_text(quest)\n\n if self.config.include_unknown:\n idxs = [self.word2idx.get(token, unk_idx) for token in\n tokens]\n else:\n idxs = [self.word2idx.get(token) for token in tokens]\n idxs = [idx for idx in idxs if idx]\n out_line = (str(\" \".join(str(num) for num in idxs)) + \"\\n\")\n if label == 1:\n fo_pos.write(out_line)\n else:\n fo_neg.write(out_line)", "def WriteIndexHeader(indexFileHeaderText, formatindex, fpindex):#{{{\n if formatindex == FORMAT_TEXT:\n for s in indexFileHeaderText:\n print(s, file=fpindex)\n else:\n dumpedtext='\\n'.join(s for s in indexFileHeaderText)\n vI = array('I')\n vI.append(len(dumpedtext))\n vI.tofile(fpindex)\n fpindex.write(dumpedtext)", "def OBJ(filename, pos=(0,0,0),\r\n rotation=(0,0,0), colorize=(1,1,1,1)):\r\n view.require_init()\r\n svertices = []\r\n snormals = []\r\n stexcoords = []\r\n sfaces = []\r\n\r\n material = None\r\n smtl = None\r\n for line in open(filename, \"r\"):\r\n if line.startswith('#'): continue\r\n values = line.split()\r\n if not values: continue\r\n if values[0] == 'v':\r\n v = list(map(float, values[1:4]))\r\n svertices.append(v)\r\n elif values[0] == 'vn':\r\n v = list(map(float, values[1:4]))\r\n snormals.append(v)\r\n elif values[0] == 'vt':\r\n stexcoords.append(list(map(float, values[1:3])))\r\n elif values[0] in ('usemtl', 'usemat'):\r\n material = values[1]\r\n elif values[0] == 'mtllib':\r\n path = os.path.split(filename)[0]\r\n smtl = {}\r\n mtl = None\r\n for line in open(os.path.join(path, values[1]), \"r\"):\r\n if line.startswith('#'): continue\r\n values = line.split()\r\n if not values: 
continue\r\n if values[0] == 'newmtl':\r\n smtl[values[1]] = None\r\n mtl = values[1]\r\n elif mtl is None:\r\n raise ValueError(\"mtl file doesn't start with newmtl stmt\")\r\n elif values[0] == 'map_Kd':\r\n tex = data.Texture(os.path.join(path, values[1]))\r\n smtl[mtl] = tex\r\n elif values[0]==\"Kd\":\r\n tex = data.BlankTexture(color=list(map(float, values[1:])))\r\n smtl[mtl] = tex\r\n elif values[0] == 'f':\r\n face = []\r\n texcoords = []\r\n norms = []\r\n for v in values[1:]:\r\n w = v.split('/')\r\n face.append(int(w[0]))\r\n if len(w) >= 2 and len(w[1]) > 0:\r\n texcoords.append(int(w[1]))\r\n else:\r\n texcoords.append(0)\r\n if len(w) >= 3 and len(w[2]) > 0:\r\n norms.append(int(w[2]))\r\n else:\r\n norms.append(0)\r\n sfaces.append((face, norms, texcoords, material))\r\n\r\n\r\n faces_ordered_by_material = {}\r\n for face in sfaces:\r\n v, n, t, m = face\r\n if m in faces_ordered_by_material:\r\n faces_ordered_by_material[m].append(face)\r\n else:\r\n faces_ordered_by_material[m] = [face]\r\n\r\n lists = []\r\n for i in faces_ordered_by_material:\r\n sfaces = faces_ordered_by_material[i]\r\n\r\n material = smtl[i]\r\n\r\n gl_list = data.DisplayList()\r\n gl_list.begin()\r\n current_tex = None\r\n for face in sfaces:\r\n vertices, normals, texture_coords, _m = face\r\n glBegin(GL_POLYGON)\r\n for i in range(len(vertices)):\r\n if normals[i] > 0:\r\n glNormal3fv(snormals[normals[i] - 1])\r\n if texture_coords[i] > 0:\r\n glTexCoord2fv(stexcoords[texture_coords[i] - 1])\r\n glVertex3fv(svertices[vertices[i] - 1])\r\n glEnd()\r\n gl_list.end()\r\n\r\n lists.append([gl_list, material])\r\n\r\n verts = []\r\n for i in sfaces:\r\n for x in i[0]:\r\n verts.append(svertices[x-1])\r\n\r\n return BasicMesh(lists, pos, rotation, verts, 1, colorize)", "def load_word2index(self):\n word2index = {}\n with open(self.nodes_file, 'r') as reader:\n for index, line in enumerate(reader):\n node = line.strip()\n word2index[node] = index\n\n return word2index", "def indirectobject(self, index, version, io):\n self.appendString(\"\\n\")\n self.indirectObjects[index] = self.filesize()\n self.appendString(\"%d %d obj\\n%s\\nendobj\\n\" % (index, version, io))", "def __getitem__(self, index):\n return self.parses[index]", "def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")", "def CsvToJson(nomfichierJson):\n with open(\"save/save.csv\",'r') as f:\n liste_cube = list()\n liste_robot = list()\n \"\"\"deux listes vides pour contenir les objets charges\"\"\"\n for line in f:\n ligne=line.split(\";\")\n if ligne[0] == 'Arene':\n \"\"\"On cree une nouvelle arene avec les parametres trouves sur la ligne, separes par des ';' \"\"\"\n arene = Arene(int(ligne[1]),int(ligne[2]),int(ligne[3]),liste_cube,liste_robot)\n arene.liste_robot=liste_robot\n elif ligne[0] == 'Cube':\n \"\"\"On ajoute le cube a la liste de cube de l'arene, avec parametres trouves sur la ligne\"\"\"\n arene.liste_cube.append(Cube(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Mur':\n arene.liste_cube.append(Mur(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Sol':\n arene.liste_cube.append(Sol(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5])))\n elif ligne[0] == 
'Robot':\n (x,y,z)=literal_eval(ligne[1])\n ((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy))=literal_eval(ligne[2])\n (a,b,c)=literal_eval(ligne[3])\n (lo,la,ha)=literal_eval(ligne[4])\n vitesse=literal_eval(ligne[5])\n arene.liste_robot.append(Robot((x,y,z),((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy)),(a,b,c),(lo,la,ha),vitesse))\n saveFic(arene,nomfichierJson)", "def extract_word2vec(fichier, words_indices): \n \n word2vec={} #\n \n #\n with open(fichier,\"r\",encoding=\"utf-8\") as file:\n for line in file:\n line = line.replace(\" \\n\",\"\").split(\" \")\n # Lecture des informations du fichier\n # nombre de mots presents et nombre de features\n if len(line)==2 :\n nb_words=int(line[0])\n nb_feats=int(line[1])\n \n #\n else:\n if line[0] in words_indices:\n word, vec = line[0],np.array(line[1:])\n word2vec[word]=vec\n\n print(\"{} embbedings de taille {} pertinent parmi les {} du fichier\".format(len(word2vec), nb_feats, nb_words))\n\n return word2vec, nb_feats", "def int_to_text(self, labels):\n string = []\n for i in labels:\n string.append(self.index_map[i])\n return ''.join(string).replace('', ' ')", "def dump_traza(self, fichero='traza.txt'):\n fichero = open(fichero, 'w', encoding=\"utf-8\")\n for punto in self.trazado:\n fichero.write(\"{},{}\\n\".format(punto.x, punto.y))\n fichero.close()", "def get_index_content(obj, **kw):\n path = obj\n\n # NOTE: It's already a read content list\n if isinstance(obj, list):\n return obj\n elif isinstance(obj, dict):\n # NOTE: It's a resource with content inside...\n if 'content' in obj:\n return obj['content']\n # NOTE: It's a resource with a path that should be read.\n elif 'path' in obj:\n path = obj['path']\n # NOTE: It's just a path, read it\n return utils._read(path, **kw)", "def __init__(self, text, idx):\n self.text = text\n self.idx = idx", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] 
= [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def export(self, buffer: IO[str], ind: str = '') -> None:\n buffer.write(ind + 'camera\\n')\n buffer.write(ind + '{\\n')\n buffer.write(f'{ind}\\t\"position\" \"[{self.pos}]\"\\n')\n buffer.write(f'{ind}\\t\"look\" \"[{self.target}]\"\\n')\n buffer.write(ind + '}\\n')", "def load_data(self):\n \n # only loader implemented so far !\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep='')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0]\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n pass # try another format\n\n try:\n _ascii_array = Utilities.load_ascii(filename=self.filename, sep=',')\n start_row = TOF._first_line_number_with_real_data(_ascii_array[0, 0])\n\n _tof_column = _ascii_array[start_row:, 0] # first row must be excluded in this format\n\n if not TOF._is_this_numeric(_tof_column[0]):\n start_row += 1\n\n _tof_column = _ascii_array[start_row:, 0]\n _counts_column = _ascii_array[start_row:, 1]\n\n self.tof_array = _tof_column\n self.counts_array = _counts_column\n return\n\n except IndexError:\n raise IndexError(\"Format not implemented!\")", "def create_labels(filename, class_indices):\n \n _logger.debug(\"Mapping labels\")\n label={}\n label['category']=[]\n for key in class_indices:\n label['category'].append({\n 'name' : key,\n 'index' : class_indices[key]\n })\n label_path = os.path.join(config.TRAINED_MODELS_DATA, filename)\n with open(os.path.join(label_path, 'labels.txt'), 'w') as outfile:\n json.dump(label, outfile)\n return label_path", "def get_family_id_to_index():\n \n family_ids = open(\n resource_filename('contextual_lenses.resources', 'pfam_family_ids.txt'),\n 'r').readlines()\n family_id_to_index = {}\n for i, family_id in enumerate(family_ids):\n family_id_to_index[family_id.replace('\\n', '')] = i\n\n return family_id_to_index", "def __getitem__(self, index):\n ind = self.id[index]\n X_text = self.X[index]\n 
if(self.y is None): y = None\n else: y = self.y[index]\n if(self.hier): \n if self.elmo_pre is not None:\n f = lambda l, d: itemgetter(*l)(d) # get Tuple(values) with List[keys]\n X_1,X_2,X_3 = self.X[index][0], self.X[index][1], self.X[index][2]\n return (*f([X_1.lower(),X_2.lower(),X_3.lower()], self.elmo_pre), y, ind, X_text)\n X_1,X_2,X_3 = self.preprocess(self.X[index])\n return X_1, X_2, X_3, y, ind, X_text\n else: \n X = self.preprocess(self.X[index])\n return X, y, ind, X_text", "def get_text(adm, obj):\n return adm['data'][slice(*extent(obj))]", "def load_annos(self, anno_path):\n\n if os.path.exists(anno_path) is False or os.path.isfile(anno_path) is False or anno_path.endswith('txt') is False:\n print(\"Wrong path: not exist or not a txt file: %s\" % anno_path)\n return None, None\n\n list_file_id, list_anno_id = [], []\n list_x, list_y, list_w, list_h = [], [], [], []\n list_blur, list_expr, list_illum, list_occ, list_pose, list_inval = [], [], [], [], [], []\n anno_id = 0\n\n list_id = []\n list_filename = []\n file_id = 0\n\n num_annos_total = 0\n\n with open(anno_path) as afile:\n line = \"begin\"\n while line != \"\":\n line = afile.readline()\n\n if line.rstrip().endswith('jpg'): # it is a file\n file_name = line.strip()\n list_id.append(file_id)\n list_filename.append(file_name)\n\n num_annos = int(afile.readline().strip())\n\n for i in range(num_annos):\n px, py, pw, ph, blur, expr, illum, inval, occ, pose = afile.readline().strip().split(' ')\n px, py, pw, ph = int(px), int(py), int(pw), int(ph)\n\n if pw == 0 or ph == 0: # ignore invalid faces (0 width or height)\n continue\n\n if pw < 0:\n px = px+pw\n pw = abs(pw)\n if ph < 0:\n py = py+ph\n ph = abs(ph)\n\n list_file_id.append(file_id)\n list_anno_id.append(anno_id)\n list_x.append(px)\n list_y.append(py)\n list_w.append(pw)\n list_h.append(ph)\n list_blur.append(int(blur))\n list_expr.append(int(expr))\n list_illum.append(int(illum))\n list_occ.append(int(occ))\n list_pose.append(int(pose))\n list_inval.append(int(inval))\n anno_id = anno_id + 1\n\n file_id = file_id + 1\n num_annos_total += num_annos\n\n files = {'id': np.array(list_id), 'filename': list_filename }\n annos = {'file_id': np.array(list_file_id), 'anno_id': np.array(list_anno_id), \\\n 'x': np.array(list_x), 'y': np.array(list_y), \\\n 'w': np.array(list_w), 'h': np.array(list_h), \\\n 'blur': np.array(list_blur), 'expression': np.array(list_expr), \\\n 'illumination': np.array(list_illum), 'occlusion': np.array(list_occ), \\\n 'pose': np.array(list_pose), 'invalid': np.array(list_inval) }\n\n assert (len(list_id) == len(list_filename)), \\\n \"file_id and filename lists should have the same length\"\n\n self._num_annos = num_annos_total\n self._num_images = file_id\n\n return files, annos", "def __getitem__(self, index):\n item = {}\n item[\"input_txt\"] = self.src[index]\n item[\"target_txt\"] = self.trg[index]\n item[\"cand_txt\"] = self.cands[index]\n item[\"cand_index\"] = []\n for c in self.cands[index]:\n item[\"cand_index\"].append(self.preprocess(c, is_list_of_str=True))\n item[\"persona_txt\"] = self.persona[index]\n\n item[\"input_batch\"] = self.preprocess(self.src[index])\n item[\"target_batch\"] = self.preprocess(\n self.trg[index], is_list_of_str=True)\n if config.pointer_gen:\n item[\"input_ext_vocab_batch\"], item[\"article_oovs\"] = \\\n self.process_input(item[\"input_txt\"])\n item[\"target_ext_vocab_batch\"] = self.process_target(\n item[\"target_txt\"], item[\"article_oovs\"])\n return item", "def export(self, buffer: IO[str], 
ind: str = '') -> None:\n buffer.write(ind + 'cordon\\n')\n buffer.write(ind + '{\\n')\n buffer.write(ind + '\\t\"name\" \"' + self.name + '\"\\n')\n buffer.write(ind + '\\t\"active\" \"' +\n srctools.bool_as_int(self.active) +\n '\"\\n')\n buffer.write(ind + '\\tbox\\n')\n buffer.write(ind + '\\t{\\n')\n buffer.write(ind + '\\t\\t\"mins\" \"(' +\n self.bounds_min.join(' ') +\n ')\"\\n')\n buffer.write(ind + '\\t\\t\"maxs\" \"(' +\n self.bounds_max.join(' ') +\n ')\"\\n')\n buffer.write(ind + '\\t}\\n')\n buffer.write(ind + '}\\n')", "def __getitem__(self, index):\n x = self.preprocess(self.data[index]['text'])\n y = self.data[index]['label']\n return x, y", "def _wr_3fmt_goeaobj(goea_results, goeaobj, wr_params, log):\n # List of all fields, printable or not, available from GOEnrichmentRecord\n log.write(\"\\nGOEnrichmentRecord FIELDS: {F}\\n\".format(F=\" \".join(goea_results[0].get_prtflds_all())))\n # Use the subset of namedtuple fields_names that are listed in the format string:\n # Same format: print to screen and print to file:\n goeaobj.prt_txt(log, goea_results, **wr_params) # Print to screen\n goeaobj.wr_txt(\"nbt3102_subset_obj.txt\", goea_results, **wr_params)\n # Print to Excel Spreadsheet\n title=\"Print subset of fields from GOEnrichmentRecord\"\n goeaobj.wr_xlsx(\"nbt3102_subset_obj.xlsx\", goea_results, title=title, **wr_params)\n # Print to tab-separated file\n goeaobj.wr_tsv(\"nbt3102_subset_obj.tsv\", goea_results, **wr_params)", "def compute_index(self, filename, tri):\n self.index = {'name': filename, \"bi\": {}}\n if tri:\n self.index[\"tri\"] = {}\n fichero = open(filename, 'r').read()\n fichero = fichero.replace(\";\",\".\")\n fichero = fichero.replace(\"\\n\\n\",\".\")\n fichero = fichero.replace(\",\",\".\")\n fichero = fichero.replace(\"?\",\".\")\n fichero = fichero.replace(\"!\",\".\")\n fichero = fichero.lower()\n\n for frase in fichero.split('.'):\n frase = self.r2.sub(\" \", frase)\n frase = \"$ \" + frase + \" $\"\n Monkey.index_sentence(self, frase, tri)\n\n #sort_index(self, self.index['bi'])\n if tri:\n sort_index(self, self.index['tri'])\n\n extension = filename.find('.')\n aux = filename[:extension] \n new_filename = aux + 'index'\n\n with open(new_filename, 'w') as fh:\n #print(self.index['bi'].items())\n for nombre, valor in self.index['bi'].items():\n fh.write(\"%s %s\\n\" %(nombre, valor))", "def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # read_func = read_nexus\n #elif ( file_format.upper() == 'PHYLIP' ):\n # read_func = read_phylip\n #elif ( file_format.upper() == 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq", "def save_byte_index(index, fp):\n encoded_index = dict()\n for key, offset in index.items():\n encoded_index[key.decode(\"utf8\")] = offset\n json.dump(encoded_index, fp)\n return fp", "def ReadIndex_binary(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n indexFileHeaderText = []\n size_indexfile = os.path.getsize(indexfile)\n cntReadByte = 0\n try:\n fpin=open(indexfile, \"rb\")\n vI = array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n dumpedtext = fpin.read(vI[0])\n cntReadByte += vI[0]\n\n strs = dumpedtext.split(\"\\n\")\n origdbname = \"\"\n origversion = \"\"\n origext = \"\"\n 
origprefix = \"\"\n for line in strs:\n if not line or line[0] == \"#\":\n continue\n ss=line.split()\n if ss[0] == \"DEF_DBNAME\":\n if len(ss)>=2:\n origdbname=ss[1]\n elif ss[0] == \"DEF_VERSION\":\n if len(ss)>=2:\n origversion=ss[1]\n elif ss[0] == \"DEF_EXTENSION\":\n if len(ss)>=2:\n origext=ss[1]\n elif ss[0] == \"DEF_PREFIX\":\n if len(ss)>=2:\n origprefix=ss[1]\n if isPrintWarning:\n if origversion == \"\": \n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. \"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0], indexfile,\n origversion, version), file=sys.stderr)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n #read in other information\n vI = array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n\n dumpedidlist=fpin.read(vI[0])\n cntReadByte += vI[0]\n\n idlist = dumpedidlist.split(\"\\n\")\n vI=array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n\n numRecord = vI[0]\n if numRecord != len(idlist):\n msg = \"{}: numID ({}) != numRecord ({}) for indexfile {} \"\n print(msg.format(sys.argv[0], len(idlist),\n numRecord, indexfile), file=sys.stderr)\n\n sizeRecord_I = (array('B').itemsize + array('I').itemsize +\n array('I').itemsize)\n sizeRecord_L = (array('B').itemsize + array('L').itemsize +\n array('I').itemsize)\n sizeRecord = int(mybase.FloatDivision(size_indexfile - cntReadByte, numRecord))\n if abs(sizeRecord - sizeRecord_I) < abs(sizeRecord - sizeRecord_L):\n vIarray=[array('B'), array('I'), array('I')]\n else:\n vIarray=[array('B'), array('L'), array('I')]\n for i in range(3):\n vIarray[i].fromfile(fpin,numRecord)\n\n lastDBFileIndex = vIarray[0][numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n indexList.append(idlist)\n for i in range(3):\n indexList.append(vIarray[i])\n fpin.close()\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def readInstance(self):\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()", "def _load_nimble_annotation(self, index):\n filename = os.path.join(self._data_path, 'Annotations_Python', index + '.json')\n #currently only one bbox is considered.\n assert os.path.exists(cache_file),'Annotation {} has to be here'.format(filename)\n \n num_objs = 1\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n f = open(filename,'r')\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n cls = 1 \n gtboxes_1[ix, :] = obj.bbox\n gtboxes_2[ix,:] = obj.gtbbox\n gt_classes_1[ix] = cls\n overlaps_1[ix, cls] = 1.0\n seg_areas_1[ix] = 0\n gt_classes_1[ix] = cls\n overlaps_1[ix, cls] = 1.0\n seg_areas_1[ix] = 0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'donor_file' : donor_file,\n 'boxes_1' : gtboxes_1,\n 'boxes_2' : gtboxes_2,\n 'gt_classes_1': gt_classes_1,\n 'gt_overlaps_1' : overlaps_1,\n 'gt_classes_2': 
gt_classes_2,\n 'gt_overlaps_2' : overlaps_2,\n 'flipped' : False,\n 'seg_areas_1' : seg_areas_1,\n 'seg_areas_2' : seg_areas_2}", "def build_from_file(path):\n with open(path) as obj:\n raw_file = obj.read()\n file_lines = [line.split(\" \") for line in raw_file.split(\"\\n\")]\n\n vertices = {}\n faces = []\n for number, line in enumerate(file_lines):\n if line[0] == \"v\":\n vertices[number + 1] = tuple(map(float, line[1:]))\n if line[0] == \"f\":\n face = []\n for index in line[1:]:\n face.append(vertices[int(index)])\n face.append(vertices[int(line[1])])\n faces.append(face)\n return Object(points=faces)", "def filtraFileDiAnn(fileInput, geneNames):\n\n\t#---------------------\n\t# Creazione di una lista dove ogni elemento e' una riga del file \n\t# Ogni elem e' una lista di informazioni divise per colonne \n\t#\n\t# formato di un elemento di lines:\n\t#\n\t#\tPOSIZIONE \t\t\tCONTENUTO\n\t#\t\t0\t\t\t\t\tcromosoma\n\t#\t\t3\t\t\t\t\tstart\n\t#\t\t4\t\t\t\t\tend\n\t#\t\t6\t\t\t\t\tstrand\n\t#\t\t8\t\t\t\t\tgene_id\n\t#\t\t9\t\t\t\t\ttranscript_id\n\t#\t\t10\t\t\t\t\texon_number\n\t#\t\t11\t\t\t\t\tgene_name\n\t#\t\t12\t\t\t\t\ttranscript_name\t\n\t#\n\n\n\tstringa \t= '\\texon\\t'\n\tlines \t\t= []\n\tdictGeneChr = {}\n\t\n\t# Indici per il file di annotazione\n\t#\n\tidx_cromosoma = 0\n\tidx_geneName = 11\n\tidx_start = 3\n\tidx_end = 4\n\t\n\tfor x in open(fileInput):\n\t\triga = x.strip(';\\n').replace('; ','\\t').split('\\t')\n\n\t\tif not geneNames.has_key(riga[idx_geneName]):\n\t\t\tcontinue\n\t\t\t\t\n\t\t# Creazione del dizionario dei gene_name per ogni cromosoma\n\t\t#\n\t\tkey_geneChr = riga[idx_geneName] + '\\t' + riga[idx_cromosoma]\n\t\tif not dictGeneChr.has_key(key_geneChr):\n\t\t\tdictGeneChr[key_geneChr] = [riga[idx_start], riga[idx_end]]\n\t\telse:\n\t\t\t\n\t\t\t# Si aggiona il valore dello start del gene se si trova un \n\t\t\t# valore piu' piccolo\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][0]) > int(riga[idx_start]):\n\t\t\t\tdictGeneChr[key_geneChr][0] = riga[idx_start]\n\t\t\t\t\n\t\t\t# Si aggiorna il valore dell'end del gene se si trova un\n\t\t\t# valore piu' grande\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][1]) < int(riga[idx_end]):\t\n\t\t\t\tdictGeneChr[key_geneChr][1] = riga[idx_end]\n\t\t\n\t\t# Si filtra il file considerando solamente le regioni di tipo \"exon\"\n\t\t#\n\t\tif stringa in x:\n\t\t\tlines.append(riga)\n\n\treturn [lines, dictGeneChr]", "def save2txt(obj, file: str):\n with open(file, \"w\") as f:\n print(obj, file=f)", "def export(self, buffer: IO[str], ind: str) -> None:\n for fixup in sorted(self._fixup.values(), key=operator.attrgetter('id')):\n # When exporting, pad the index with zeros if needed\n buffer.write(\n f'{ind}\\t\"replace{fixup.id:02}\" \"${fixup.var} {fixup.value}\"\\n'\n )", "def make_documents(f, index: str) -> typing.Iterator[dict]:\n\n while True:\n line = f.readline()\n if not line:\n break\n idx = int(line.strip())\n line = f.readline()\n doc = {\n '_index': index,\n '_type': \"_doc\",\n '_source': line.strip(),\n '_id': idx,\n }\n yield doc", "def loadOBJ(fileName):\n\tvertices = []\n\tnormals = []\n\ttexcoords = []\n\tfaces = []\n\n\tmaterial = None\n\tfor line in open(fileName, \"r\"):\n\t\tif line.startswith('#'): continue\n\t\tvalues = line.split()\n\t\tif not values: continue\n\t\tif values[0] == 'v':\n\t\t\tv = list(map(float, values[1:4]))\n\t\t\tvertices.append(v)\n\t\telif values[0] == 'vn':\n\t\t\tv = list(map(float, values[1:4]))\n\t\t\tnormals.append(v)\n\t\telif values[0] == 
'vt':\n\t\t\ttexcoords.append((map(float, values[1:3])))\n\t\telif values[0] == 'f':\n\t\t\tface = []\n\t\t\ttexcoords = []\n\t\t\tnorms = []\n\t\t\tfor v in values[1:]:\n\t\t\t\tif '//' in v:\n\t\t\t\t\tglue = '//'\n\t\t\t\telse:\n\t\t\t\t\tglue = '/'\n\t\t\t\tw = v.split(glue)\n\t\t\t\tface.append(int(w[0]) - 1)\n\t\t\t\tif len(w) >= 2 and len(w[1]) > 0:\n\t\t\t\t\ttexcoords.append(int(w[1]))\n\t\t\t\telse:\n\t\t\t\t\ttexcoords.append(0)\n\t\t\t\t\tif len(w) >= 3 and len(w[2]) > 0:\n\t\t\t\t\t\tnorms.append(int(w[2]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tnorms.append(0)\n\t\t\tfaces.append(face)\n\treturn np.asarray(vertices), np.asarray(faces), np.asarray(normals)", "def get_data_as_indices(self, file_name):\n X, Y = [],[]\n org_X, org_Y = [], []\n\n for (words, tags) in read_conll_file(file_name):\n word_indices, word_char_indices = self.get_features(words)\n tag_indices = [self.tag2idx.get(tag) for tag in tags]\n X.append((word_indices,word_char_indices))\n Y.append(tag_indices)\n org_X.append(words)\n org_Y.append(tags)\n return X, Y #, org_X, org_Y - for now don't use", "def __str__(self):\r\n return self.afficherOBJ()", "def get_index(self):\n with open(self.index_path, \"r\") as f:\n return json.load(f)", "def load_Bietenholz(path=data_path+\"Table1_complete_ascii.txt\"):\n\n res = {}\n num_of_SN = 0\n ex = \"\"\n\n with open(path, 'r') as f:\n for i in range(30):\n next(f)\n for line in f:\n words = line.split()\n current_SN_name = words[0]\n # determine if it's a new SN\n if current_SN_name != ex:\n if num_of_SN > 0:\n res[ex] = SN # save previous SN\n SN = SuperNova()\n num_of_SN += 1\n ex = words[0]\n\n SN.name = words[0]\n if ('L' in line[10]):\n SN.is_limit = np.append(SN.is_limit, True)\n else:\n SN.is_limit = np.append(SN.is_limit, False)\n SN.year = np.append(SN.year, int(line[12:16]))\n SN.month = np.append(SN.month, int(line[17:19]))\n SN.day = np.append(SN.day, float(line[20:25]))\n SN.telescope = np.append(SN.telescope, line[26:33])\n SN.freq = np.append(SN.freq, float(line[35:40]))\n SN.flux = np.append(SN.flux, float(line[41:49]))\n SN.dflux = np.append(SN.dflux, float(line[50:56]))\n SN.comment = np.append(SN.comment, line[57:63])\n res[words[0]] = SN\n return res", "def index_fobj(fobj):\n doc = fileobject_to_dict(fobj)\n if doc is not None:\n #print doc\n SOLR.add(doc)\n else:\n pass", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def get_index_text(self, name):\n raise NotImplementedError('must be implemented in subclasses')", "def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.indices})\"", "def convert(file):\n outfile = file.replace(\".json\",\"_pos.txt\")\n essays_string = open(\"temp/\" + file,encoding='utf-8', errors='ignore').read()\n essays = json.loads(essays_string)\n\n for essay in essays:\n with open(\"temp/\" + str(essay['id']) + \"_pos.txt\", 'w', encoding='utf-8') as posfile, \\\n open(\"temp/\" + str(essay['id']) + \"_raw.txt\", 'w', encoding='utf-8') as txtfile:\n sentID = 0\n for paragraph in essay['utterances']:\n for sentence in paragraph['sentences']:\n sentID += 1\n wordID = 0\n for discourse_unit in sentence['discourseUnits']:\n for word in 
discourse_unit['words']:\n wordID += 1\n if word['wordform'] is not ' ':\n posfile.write(str(sentID) + \".\" + str(wordID) + \":\" + word['wordform'] + \"_\" + word['posTagLabel'] + \" \")\n txtfile.write(word['wordform'])\n posfile.write(\"\\n\")\n txtfile.write(\"\\n\")\n posfile.write(\"\\n\")\n txtfile.write(\"\\n\")", "def format(self, obj):\n pass", "def format(self, obj):\n pass", "def show(self,\r\n index,\r\n shortform=False,\r\n length=None,\r\n yestags=True,\r\n highlight=None,\r\n show_date=True,\r\n most_recent=False,\r\n curtail=0,\r\n deepest=None):\r\n\r\n\r\n if not self.notebook_contains(index):\r\n display.noteprint((alerts.ATTENTION,'INDEX NOT FOUND'))\r\n return [set(),EMPTYCHAR]\r\n if not deepest:\r\n deepest = self.deepest(is_string=True,abridged=True)\r\n deepest += 3\r\n if not length:\r\n length = self.defaults.get('texttrim')\r\n d_index = str(index)\r\n if len(d_index) > 10:\r\n d_index = index_reduce(d_index) # to display long indexes in compact form\r\n if highlight is None:\r\n highlight = set()\r\n l_temp = []\r\n if show_date:\r\n date_insert = VERTLINE + \\\r\n self.get_note(index).date(short=True,\r\n most_recent=most_recent,\r\n convert=False)\\\r\n + BLANK\r\n else:\r\n date_insert = EMPTYCHAR\r\n\r\n\r\n if str(index) not in self.indexes():\r\n return [EMPTYCHAR, EMPTYCHAR]\r\n\r\n keyset_temp = self.get_keys_from_note(index) #fetches keyset\r\n\r\n keyset_temp = self.keypurger.purge(keyset_temp,projects=set(self.default_dict['projects']\r\n .get_all_projects()))\r\n seq_keys = set()\r\n if self.defaults.get('sequences_in_text') and not shortform:\r\n oldkeys = set(keyset_temp)\r\n seq_keys = set()\r\n keyset_temp = set()\r\n seq_keys = {x_temp for x_temp in oldkeys if ATSIGN in x_temp}\r\n keyset_temp = oldkeys - seq_keys\r\n\r\n kl = self.abridged_str_from_list(remove_tags(\r\n self.return_least_keys(transpose_keys(keyset_temp,\r\n notebook=notebook),\r\n override=not self.defaults.get('orderkeys'),\r\n add_number=True,no_allcaps=False), override=yestags),\r\n override=not shortform)\r\n seq_text = EMPTYCHAR\r\n\r\n if seq_keys:\r\n proj_seq = []\r\n main_seq = []\r\n other_seq = []\r\n\r\n for kx_temp in seq_keys:\r\n ident_temp= kx_temp.split(ATSIGN)[0]\r\n value_temp = kx_temp.split(ATSIGN)[1]\r\n if ident_temp in self.default_dict['projects'].get_all_projects():\r\n proj_seq.append(kx_temp)\r\n elif ident_temp in self.default_dict['main_sequences']:\r\n main_seq.append(kx_temp)\r\n else:\r\n other_seq.append(kx_temp)\r\n proj_seq.sort()\r\n main_seq.sort()\r\n other_seq.sort()\r\n\r\n if proj_seq:\r\n seq_text = 'PROJECTS: ' + ', '.join(proj_seq) \\\r\n + self.defaults.get('seqform1')\r\n if main_seq:\r\n for kx_temp in main_seq:\r\n ident_temp= kx_temp.split(ATSIGN)[0]\r\n value_temp = kx_temp.split(ATSIGN)[1]\r\n seq_text += ident_temp + ':' + value_temp \\\r\n + self.defaults.get('seqform1')\r\n if other_seq:\r\n seq_text += EOL\r\n for kx_temp in other_seq:\r\n ident_temp= kx_temp.split(ATSIGN)[0]\r\n value_temp = kx_temp.split(ATSIGN)[1]\r\n seq_text += ident_temp + ':' + value_temp \\\r\n + self.defaults.get('seqform1')\r\n if seq_text:\r\n seq_text += EOL + self.defaults.get('seqform2')\r\n\r\n seq_text = seq_text.replace(BLANK+EOL,EOL)\r\n\r\n if COMMA + EOL in seq_text or COLON +EOL \\\r\n in seq_text or SEMICOLON + EOL in seq_text:\r\n seq_text = seq_text\\\r\n .replace(COMMA+EOL,EOL)\\\r\n .replace(COLON+EOL,EOL)\\\r\n .replace(SEMICOLON+EOL,EOL)\r\n\r\n\r\n\r\n for char in string.whitespace[1:]:\r\n kl = kl.replace(char, 
EMPTYCHAR)\r\n\r\n kl = kl.replace(UNDERLINE, BLANK)\r\n\r\n\r\n if not shortform:\r\n\r\n tex_temp = self.get_text_from_note(index).replace(TAB,BLANK*4).replace('/T',BLANK*4)\r\n\r\n for rep_temp in range(0,tex_temp.count('}}')):\r\n if '{{' in tex_temp and '}}' in tex_temp:\r\n n_temp = tex_temp.split('{{')[1].split('}}')[0]\r\n\r\n\r\n if n_temp and n_temp[0] in [ATSIGN, STAR]:\r\n pass\r\n if self.show_text:\r\n folder_temp = {ATSIGN:'/textfiles',\r\n STAR:'/attachments'}[n_temp[0]]\r\n n_temp = n_temp[1:]\r\n try:\r\n textfile = file_access.get_text_file(n_temp,\r\n folder=folder_temp)\r\n tex_temp = tex_temp.replace('{{'+ATSIGN+n_temp+'}}',\r\n textfile)\r\n except:\r\n display.noteprint((alerts.ATTENTION,\r\n labels.FILE_ERROR))\r\n elif n_temp and n_temp[0] in ['^']:\r\n if self.show_images:\r\n folder_temp = '/pictures'\r\n directoryname = os.getcwd()+folder_temp\r\n picture = Image.open(directoryname\r\n +'/'+n_temp[1:]\r\n +'.jpg')\r\n picture.show()\r\n\r\n\r\n\r\n suffix = EMPTYCHAR\r\n if self.no_flash: #To disable flash card mode\r\n tex_temp = tex_temp.replace('/FC/','\\n /BREAK/ \\n')\r\n if '/FC/' in tex_temp: #For a flash card\r\n sides_temp = tex_temp.split('/FC/')\r\n if self.flexflip:\r\n self.sides = len(sides_temp)\r\n if self.last_sides != self.sides:\r\n self.side=0\r\n self.last_sides = self.sides\r\n tex_temp = sides_temp[self.side%len(sides_temp)]\r\n suffix = '[' + str(self.side%len(sides_temp)+1) + ']'\r\n\r\n\r\n\r\n\r\n if curtail != 0 and len(tex_temp) > curtail:\r\n tex_temp = tex_temp[0:curtail]\r\n # Adds the first and second element on the list\r\n l_temp.append(d_index+self.mark(index)+suffix\r\n +BLANK+VERTLINE+BLANK\r\n +self.field(index)\r\n +date_insert\r\n +BLANK+VERTLINE+BLANK+kl\r\n +BLANK+VERTLINE)\r\n l_temp.append(seq_text + nformat.encase(tex_temp,\r\n highlight))\r\n\r\n if len(l_temp) > 1:\r\n if self.defaults.get('curtail'):\r\n l_temp[1] = l_temp[1].strip(EOL)\r\n l_temp[1] = EOL * self.defaults.get('header') \\\r\n + l_temp[1] + EOL \\\r\n * self.defaults.get('footer')\r\n\r\n else:\r\n\r\n t_temp = self.get_text_from_note(index)\r\n t_temp = t_temp[0 : min([len(t_temp), length])]\r\n t_temp = nformat\\\r\n .purgeformatting(t_temp)\\\r\n .replace(EOL,EMPTYCHAR)\\\r\n .replace(TAB,EMPTYCHAR)\\\r\n .replace(VERTLINE,EMPTYCHAR)\\\r\n .replace(UNDERLINE,EMPTYCHAR)\r\n\r\n t_temp = nformat.encase(t_temp,highlight)\r\n\r\n\r\n\r\n l_temp.append(d_index+self.mark(index)\r\n +max([deepest-(len(d_index+self.mark(index))),0])\r\n *BLANK+BLANK+VERTLINE+BLANK\r\n +self.field(index)\r\n +max([self.field_length()\r\n -(len(self.field(index))), 0])*BLANK+BLANK\r\n +date_insert\r\n +BLANK\r\n +VERTLINE+BLANK+kl\r\n +(self.defaults.get('keytrim')-len(kl))*BLANK\\\r\n +BLANK+VERTLINE\r\n +BLANK+t_temp)\r\n\r\n return l_temp", "def format_data(file):\r\n \r\n \r\n data = pd.read_csv(file)\r\n data.index = list(data.iloc[:,0])\r\n data = data.iloc[:,1:]\r\n \r\n return data", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def __loadIndex( self ):\n\n assert self.mCreateMode == False, \"asked to read from database opened for writing\"\n\n if self.mMethod == \"uncompressed\":\n self.mDatabaseFile = open( self.mDbname, \"r\" )\n elif self.mMethod == \"dictzip\":\n import 
dictzip\n self.mDatabaseFile = dictzip.GzipFile( self.mNameDb)\n elif self.mMethod == \"lzo\":\n import lzo\n self.mDatabaseFile = Uncompressor( self.mNameDb, lzo.decompress )\n elif self.mMethod == \"gzip\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, gzip_demangler )\n elif self.mMethod == \"zlib\":\n self.mDatabaseFile = Uncompressor( self.mNameDb, zlib.decompress )\n elif eslf.mMethod == \"bz2\":\n self.mDatabaseFile = bz2.BZ2File( self.mNameDb )\n elif self.mMethod == \"debug\":\n self.mDatabaseFile = Uncompressor( self.mDbname + \".debug\", lambda x: x ) \n\n self.mIndex = {}\n\n for line in open(self.mNameIndex, \"r\"):\n\n if line.startswith(\"#\"): continue\n data = line[:-1].split(\"\\t\")\n\n # index with random access points\n if len(data) > 4:\n (identifier, pos_id, block_size, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n points = map(int, data[3:-1])\n self.mIndex[int(identifier)] = (pos_id, block_size, lsequence, points)\n else:\n (identifier, pos_id, pos_seq, lsequence) = bytes(data[0]), int(data[1]), int(data[2]), int(data[-1])\n self.mIndex[int(identifier)] = (pos_id, pos_seq, lsequence) \n \n self.mIsLoaded = True", "def __str__(self):\r\n information = self.get_dna_fragment()\r\n\r\n return \"protein indices = \" + str(self._indices) + \": \\n\" + \\\r\n \"t~ strand = \" + str(information[0][0]) + \"\\n\" + \\\r\n \" \" + str(information[0][1]) + \"\\n\" + \\\r\n \"c~ strand = \" + str(information[1][0]) + \"\\n\" + \\\r\n \" \" + str(information[1][1]) + \"\\n\"", "def _do_index_fields(self, doc, generator, obj, obj_weight):\n for field in self.fields + self.tags:\n # Trying to resolve field value or skip it\n # Отладочка:\n # print(field, field.resolve(obj))\n try:\n value = field.resolve(obj)\n if value is None:\n continue\n except AttributeError:\n continue\n if field.prefix:\n fvalue = field.convert(value)\n doc.add_value(field.number, fvalue)\n prefix = smart_text(field.get_tag())\n value = smart_text(value)\n generator.index_text_without_positions(value, field.weight*obj_weight, prefix)\n if prefix: # if prefixed then also index without prefix\n generator.index_text_without_positions(value, field.weight*obj_weight)", "def xrefobjAndTrailer(self, index, version, root):\n maximumIndexValue = max(index, max(self.indirectObjects.keys()))\n dObjects = {}\n for objstm in self.objstms:\n for indexIter in objstm.objects:\n dObjects[indexIter] = objstm\n maximumIndexValue = max(maximumIndexValue, max(dObjects.keys()))\n\n self.appendString('\\n')\n self.indirectObjects[index] = self.filesize()\n\n xrefFormat = '>BII'\n xrefStream = ''\n for iter in range(maximumIndexValue + 1):\n if iter in self.indirectObjects.keys():\n xrefStream += struct.pack(xrefFormat, 1, self.indirectObjects[iter], 0)\n elif iter in dObjects.keys():\n xrefStream += struct.pack(xrefFormat, 2, dObjects[iter].index, dObjects[iter].objects.index(iter))\n else:\n xrefStream += struct.pack(xrefFormat, 0, 0, 0)\n\n formatSizes = ' '.join([str(size) for size in map(struct.calcsize, [c for c in xrefFormat]) if size != 0])\n self.appendString(('%d %d obj\\n<< /Type /XRef /Length %d /W [%s] /Root %s /Size %d >>\\nstream\\n') % (index, version, len(xrefStream), formatSizes, root, maximumIndexValue + 1))\n self.appendBinary(xrefStream)\n self.appendString('\\nendstream\\nendobj\\n')\n\n self.appendString('\\nstartxref\\n%d\\n%%%%EOF\\n' % self.indirectObjects[index])", "def WriteOBJ(self, filename, write_texture=False, write_normal=False):\n\n 
self.__do_essential_memebers_exist__()\n\n mesh = deepcopy(self)\n p = self.InferPolynomialDegree()\n\n if p > 1:\n mesh = self.GetLinearMesh(remap=True)\n\n edim = mesh.InferElementalDimension()\n\n if edim == 2:\n elements = np.copy(mesh.elements).astype(np.int64)\n elif edim == 3:\n elements = np.copy(mesh.faces).astype(np.int64)\n else:\n raise RuntimeError(\"Writing obj file for {} elements not supported\".format(mesh.element_type))\n\n points = mesh.points[np.unique(elements),:]\n if points.shape[1] == 2:\n points = np.hstack((points,np.zeros((points.shape[0],1))))\n\n points_repr = np.zeros((points.shape[0],points.shape[1]+1), dtype=object)\n points_repr[:,0] = \"v\"\n points_repr[:,1:] = points\n\n elements_repr = np.zeros((elements.shape[0],elements.shape[1]+1), dtype=object)\n elements_repr[:,0] = \"f\"\n elements_repr[:,1:] = elements + 1\n\n if write_texture:\n textures = mesh.textures[np.unique(elements),:]\n\n textures_repr = np.zeros((textures.shape[0],textures.shape[1]+1), dtype=object)\n textures_repr[:,0] = \"vt\"\n textures_repr[:,1:] = textures\n\n elements_repr = np.zeros((mesh.telements.shape[0],mesh.telements.shape[1]+1), dtype=object)\n elements_repr[:,0] = \"f\"\n # elements_repr[:,1:] = telements + 1\n counter = 0\n for i, j in zip(elements,mesh.telements):\n curr_row = [str(ii+1)+\"/\"+str(jj+1) for ii,jj in zip(i,j)]\n elements_repr[counter,1:] = curr_row\n counter += 1\n\n with open(filename, \"w\") as f:\n # f.write(\"# \"+ str(mesh.nnode))\n # f.write('\\n')\n # f.write(\"# \"+ str(mesh.nelem))\n # f.write('\\n')\n\n np.savetxt(f, points_repr, fmt=\"%s\")\n if write_texture:\n np.savetxt(f, textures_repr, fmt=\"%s\")\n\n if write_normal:\n if self.normals is None:\n enormals = self.Normals()\n els = self.GetNodeCommonality()[0]\n self.normals = np.zeros((self.nnode, 3))\n for counter, el in enumerate(els):\n self.normals[counter] = np.sum(enormals[el], axis=0) / enormals[el].shape[0]\n\n normals_repr = np.zeros((self.normals.shape[0], self.normals.shape[1]+1), dtype=object)\n normals_repr[:,0] = \"vn\"\n normals_repr[:,1:] = self.normals\n np.savetxt(f, normals_repr, fmt=\"%s\")\n\n f.write('\\n')\n np.savetxt(f, elements_repr, fmt=\"%s\")", "def tmpl_to_str(self, template_idx, o1_id, o2_id):\r\n template_str = self.template_generator.templates[template_idx]\r\n holes = template_str.count('OBJ')\r\n assert holes <= 2\r\n if holes <= 0:\r\n return template_str\r\n elif holes == 1:\r\n return template_str.replace('OBJ', self.vocab_act[o1_id])\r\n else:\r\n return template_str.replace('OBJ', self.vocab_act[o1_id], 1)\\\r\n .replace('OBJ', self.vocab_act[o2_id], 1)", "def convert_txt_to_data():\n pass", "def read_file_object(self, file_obj, file_format='FASTA'):\n if file_format.upper() == 'FASTA':\n read_func = read_fasta\n# elif (file_format.upper() == 'NEXUS'):\n# read_func = read_nexus\n# elif (file_format.upper() == 'PHYLIP'):\n# read_func = read_phylip\n else:\n raise NotImplementedError(\n \"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq.upper()\n return self", "def __repr__(self):\n return str(self.index)", "def test_create_index_swift(self):\n\n indexfile = tempfile.mktemp()\n self.addCleanup(os.unlink, indexfile)\n\n index = TroveSwiftIndexBuilder(\"short.dat\", out=indexfile)\n\n # read the index file that was created\n with open(indexfile, 'r+b') as fd:\n indextext = fd.read()\n indexlines = indextext.split('\\n')\n\n # 11 lines includes on blank line at the end\n 
self.assertEquals(11, len(indexlines))\n del indexlines[10]\n\n # check the first character of each line\n docs = [line[0] for line in indexlines]\n self.assertEquals(['1', '2', '3', '4', '5', '6', '7', '8', '9', '1'], docs)\n\n # check some lines from the index\n ref = \"1, 0, 31, short.dat\"\n self.assertEqual(ref, indexlines[0])\n ref = \"10, 279, 32, short.dat\"\n self.assertEqual(ref, indexlines[9])", "def convert_single_example(ex_index, example, label_list, max_seq_length,tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n text_a=example.text_a\n labels_a=[]\n text_a=re.split(\"(<[a-zA-Z]+>[^<>]+</[a-zA-Z]+>)\",text_a)\n tokens_a=[]\n for sub_text in text_a:\n if len(sub_text.strip())<1:\n continue\n elif re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text):\n re_res=re.search('<([a-zA-Z]+)>([^<>]+)<[/a-zA-Z]+>',sub_text)\n slot_name=re_res.group(1)\n slot_value=re_res.group(2)\n slot_value=tokenizer.tokenize(slot_value)\n slot_labels=[]\n for i,s in enumerate(slot_value):\n if i==0:\n slot_labels.append(\"B_\"+slot_name)\n elif re.search(\"^##\",s):\n slot_labels.append(\"x\")\n else:\n slot_labels.append(\"M_\"+slot_name)\n tokens_a.extend(slot_value)\n labels_a.extend(slot_labels)\n else:\n sub_text=tokenizer.tokenize(sub_text)\n sub_labels=['x' if re.search(\"^##\",i) else 'o' for i in sub_text]\n tokens_a.extend(sub_text)\n labels_a.extend(sub_labels)\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n labels=[example.label]\n for label in labels_a:\n labels.append(label)\n labels.append('o')\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n output_mask=[1 if i!='x' else 0 for i in labels]\n label_ids=[label_map[i] for i in labels]\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n segment_ids.append(0)\n input_mask.append(0)\n output_mask.append(0)\n label_ids.append(label_map['<PAD>'])\n assert len(input_ids)==max_seq_length\n assert len(segment_ids)==max_seq_length\n assert len(label_ids)==max_seq_length\n assert len(input_mask)==max_seq_length\n assert len(output_mask)==max_seq_length\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(tokens))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"labels: %s\" % \" \".join([str(x) for x in labels]))\n tf.logging.info(\"label_ids: %s\" % \" \".join([str(x) for x in label_ids]))\n tf.logging.info(\"output_mask: %s\" % \" \".join([str(x) for x in output_mask]))\n feature = InputFeatures(\n input_ids=input_ids,\n segment_ids=segment_ids,\n label_ids=label_ids,\n input_mask=input_mask,\n output_mask=output_mask)\n return feature", "def serialize_index(index):\n writer = faiss.VectorIOWriter()\n faiss.write_index(index, writer)\n return faiss.vector_to_array(writer.data)", "def creer_labyrinthe_depuis_chaine(self, chaine):\n labyLoad = {}\n y = 0\n x = 0\n for obj in chaine:\n if obj == \"\\n\":\n labyLoad[x, y] = obj\n y += 1\n x = 0\n else:\n labyLoad[x, y] = obj\n x += 1\n return labyLoad", "def write(self, text):\n text = open(text, 'w')\n text.write('File type = 
\"ooTextFile\"\\n')\n text.write('Object class = \"TextGrid\"\\n\\n')\n text.write('xmin = %f\\n' % self.__xmin)\n text.write('xmax = %f\\n' % self.__xmax)\n text.write('tiers? <exists>\\n')\n text.write('size = %d\\n' % self.__n)\n text.write('item []:\\n')\n for (tier, n) in zip(self.__tiers, range(1, self.__n + 1)):\n text.write('\\titem [%d]:\\n' % n)\n if tier.__class__ == IntervalTier: \n text.write('\\t\\tclass = \"IntervalTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tintervals: size = %d\\n' % len(tier))\n for (interval, o) in zip(tier, range(1, len(tier) + 1)): \n text.write('\\t\\t\\tintervals [%d]:\\n' % o)\n text.write('\\t\\t\\t\\txmin = %f\\n' % interval.xmin())\n text.write('\\t\\t\\t\\txmax = %f\\n' % interval.xmax())\n text.write('\\t\\t\\t\\ttext = \"%s\"\\n' % interval.mark())\n else: # PointTier\n text.write('\\t\\tclass = \"TextTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tpoints: size = %d\\n' % len(tier))\n for (point, o) in zip(tier, range(1, len(tier) + 1)):\n text.write('\\t\\t\\tpoints [%d]:\\n' % o)\n text.write('\\t\\t\\t\\ttime = %f\\n' % point.time())\n text.write('\\t\\t\\t\\tmark = \"%s\"\\n' % point.mark())\n text.close()", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def __str__(self):\n line = ''\n for linea in self.lista:\n for atributo in linea:\n if linea[atributo] != \"\":\n line += atributo + \"=\" + \"'\" + linea[atributo] + \"'\" + '\\t'\n line += '\\n'\n return line", "def crearIndices(self):\n l = self.encontrarCaracteres()\n i=0\n for c in l:\n self.indices[c] = i\n i+=1", "def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())", "def read_example(self, index):\n if index < 0 or index >= len(self._data):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n\n name = self._data[index][0]\n (X, header) = self._read_timeseries(name)\n\n return {\"X\": X,\n \"t\": self._data[index][1],\n \"ihm\": self._data[index][2],\n \"los\": self._data[index][3],\n \"pheno\": self._data[index][4],\n \"decomp\": self._data[index][5],\n \"header\": header,\n \"name\": name}", "def _read_annot_ctab_old_format(fobj, n_entries):\n assert hasattr(fobj, 'read')\n\n dt = _ANNOT_DT\n # orig_tab string length + string\n length = 
np.fromfile(fobj, dt, 1)[0]\n orig_tab = np.fromfile(fobj, '>c', length)\n orig_tab = orig_tab[:-1]\n names = list()\n ctab = np.zeros((n_entries, 5), dt)\n for i in xrange(n_entries):\n # structure name length + string\n name_length = np.fromfile(fobj, dt, 1)[0]\n name = np.fromfile(fobj, \"|S%d\" % name_length, 1)[0]\n names.append(name)\n # read RGBT for this entry\n ctab[i, :4] = np.fromfile(fobj, dt, 4)\n\n return ctab, names", "def get_index(path):\n with open(path,'r') as f:\n zz = f.readlines()\n return [index.split(\"\\n\")[0] for index in zz]", "def convert(label, tags, categories, projects, view, featured):\n\n filename = f\"content/publication/{label}/index.md\"\n content = readfile(filename)\n if featured: \n content = content.replace(\"featured: false\", f'featured: true') \n if tags: \n content = content.replace(\"tags: []\", f'tags: [\"{tags}\"]') \n if categories: \n content = content.replace(\"categories: []\", f'categories: [\"{categories}\"]') \n if projects: \n content = content.replace(\"projects: []\", f'projects: [\"{projects}\"]')\n writefile(filename, content)\n if view:\n print(content)" ]
[ "0.56216776", "0.5584734", "0.55089396", "0.5442182", "0.53701806", "0.53672534", "0.5365042", "0.5347537", "0.53458637", "0.5334542", "0.5304593", "0.52780014", "0.52368265", "0.52331924", "0.5230119", "0.52129966", "0.51751065", "0.51676047", "0.5156888", "0.5112448", "0.510058", "0.5071025", "0.50695306", "0.5068904", "0.50651175", "0.5054637", "0.505233", "0.5043695", "0.5034985", "0.5022904", "0.49966505", "0.4993721", "0.49897358", "0.49824104", "0.49766526", "0.49680832", "0.49501157", "0.49340755", "0.49339792", "0.49209937", "0.4915881", "0.4913899", "0.4911967", "0.49095032", "0.49089187", "0.49013704", "0.48999867", "0.4895156", "0.48905092", "0.4888779", "0.4881569", "0.4879992", "0.4871842", "0.48551336", "0.48526376", "0.48394248", "0.48379475", "0.48320243", "0.48206788", "0.48203367", "0.4809762", "0.4808033", "0.48056468", "0.4804782", "0.47975564", "0.47877628", "0.47849014", "0.478362", "0.47816095", "0.47814795", "0.4779242", "0.47767663", "0.47705525", "0.47669077", "0.47669077", "0.4762166", "0.47596687", "0.47581652", "0.4751725", "0.47443464", "0.47436914", "0.47433156", "0.47426906", "0.4742472", "0.47389776", "0.4734997", "0.47330788", "0.4731502", "0.4730924", "0.47285667", "0.47264546", "0.47190404", "0.471703", "0.47158885", "0.47075993", "0.47075614", "0.47072643", "0.47015396", "0.4699663", "0.46996018" ]
0.6343497
0
Saves objects in text format to the Extencao file
def gravarArquivoExtencao(chave, nomeLivro, nomeAutor, mes, ano, extencao):
    arq = open("arquivoExtencao.txt", "w")
    linha = chave + nomeLivro + nomeAutor + mes + ano + "\n"
    arq.seek(extencao, 0)
    arq.write(linha)
    arq.close()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save2txt(obj, file: str):\n with open(file, \"w\") as f:\n print(obj, file=f)", "def convert_txt_to_data():\n pass", "def __str__(self):\n line = ''\n for linea in self.lista:\n for atributo in linea:\n if linea[atributo] != \"\":\n line += atributo + \"=\" + \"'\" + linea[atributo] + \"'\" + '\\t'\n line += '\\n'\n return line", "def __str__(self):\r\n return self.afficherOBJ()", "def format(self, data):", "def format(self, obj):\n pass", "def format(self, obj):\n pass", "def afficherOBJ(self):\r\n str_abr = self.abr.afficher()## appel de l'affichage d'un ABR\r\n return str(self.debut)+':'+str(self.fin)+';'+str_abr ##concaténation du resultat\r", "def format_data(self, data):", "def format_item(self,obj):\n return unicode(obj)", "def _txt_record(self, name, content):\n\n return {\n \"name\": name,\n \"type\": \"TXT\",\n \"aux\": None,\n \"ttl\": MetanameApiClient.minimum_ttl,\n \"data\": content,\n }", "def xephemFormat(self):\n line = []\n #Field 1: names\n names = [self.getName()]\n identifiers = self.getIdentifiers()\n if identifiers[0] is not None:\n names.append(identifiers[0])\n for i in range(1,4):\n if identifiers[i] is not None:\n names.extend(identifiers[i])\n line.append(\"|\".join(names))\n\n #Field 2: type designation\n objType = self.getType()\n if objType in (\"Galaxy Pair\", \"Galaxy Triplet\", \"Group of galaxies\"):\n line.append(\"f|A\")\n elif objType == \"Globular Cluster\":\n line.append(\"f|C\")\n elif objType == \"Double star\":\n line.append(\"f|D\")\n elif objType in (\"HII Ionized region\", \"Nebula\"):\n line.append(\"f|F\")\n elif objType == \"Galaxy\":\n if self.getHubble().startswith(\"S\"):\n line.append(\"f|G\")\n else:\n line.append(\"f|H\")\n elif objType == \"Dark Nebula\":\n line.append(\"f|K\")\n elif objType in (\"Emission Nebula\", \"Reflection Nebula\"):\n line.append(\"f|N\")\n elif objType in (\"Association of stars\", \"Open Cluster\"):\n line.append(\"f|O\")\n elif objType == \"Planetary Nebula\":\n line.append(\"f|P\")\n elif objType == \"Supernova remnant\":\n line.append(\"f|R\")\n elif objType == \"Star\":\n line.append(\"f|S\")\n elif objType == \"Star cluster + Nebula\":\n line.append(\"f|U\")\n else:\n line.append(\"f\")\n\n #Field 3: Right Ascension\n line.append(self.getRA())\n\n #Field 4: Declination\n line.append(self.getDec())\n\n #Field 5: Magnitude\n #We use the first available magnitude in the sequence b,v,j,h,k\n for mag in self.getMagnitudes():\n if mag is not None:\n line.append(str(mag))\n break\n\n #Field 6: optional Epoch, we let it empty\n line.append(\"\")\n\n #Field 7: Dimensions\n dimensions = []\n #Xephem format wants axes espressed in arcsec, we have arcmin\n for value in (self.getDimensions()[0],self.getDimensions()[1]):\n if value is not None:\n dimensions.append(str(value*60))\n else:\n dimensions.append(\"\")\n if self.getDimensions()[2] is not None:\n dimensions.append(str(value))\n else:\n dimensions.append(\"\")\n line.append(\"|\".join(dimensions))\n\n return \",\".join(line)", "def archivoXlFormateado(archivo):\r\n return ow(archivo, formatting_info=True)", "def extension_to_format(self, extension):", "def __repr__(self):\n return '%s.from_text(%r)' % (self.__class__.__name__, self.to_text())", "def __str__(self) -> str:\n obj_dict: Dict[str, Any] = {}\n obj_dict[\"doc\"] = self.doc\n obj_dict[\"type\"] = self.type\n obj_dict[\"name\"] = self.name\n\n line_range = self.line_range()\n obj_dict[\"start_line\"] = line_range[0]\n obj_dict[\"end_line\"] = line_range[1]\n\n obj_dict[\"children\"] = []\n\n 
for child in self.children.values():\n obj_dict[\"children\"].append(json.loads(str(child)))\n\n return json.dumps(obj_dict)", "def __str__(self):\n\t\treturn str(self.dato)", "def stringReco(obj):\n name = obj.get_name()\n name = obj._pid if (name is None) else name\n return (\"pdg: \" + name + \" E: \" + str(obj._E)\n + \" px: \" + str(obj._px) + \" py: \" + str(obj._py)\n + \" pz: \"+ str(obj._pz) + \" mass: \" + str(obj._m))", "def CsvToJson(nomfichierJson):\n with open(\"save/save.csv\",'r') as f:\n liste_cube = list()\n liste_robot = list()\n \"\"\"deux listes vides pour contenir les objets charges\"\"\"\n for line in f:\n ligne=line.split(\";\")\n if ligne[0] == 'Arene':\n \"\"\"On cree une nouvelle arene avec les parametres trouves sur la ligne, separes par des ';' \"\"\"\n arene = Arene(int(ligne[1]),int(ligne[2]),int(ligne[3]),liste_cube,liste_robot)\n arene.liste_robot=liste_robot\n elif ligne[0] == 'Cube':\n \"\"\"On ajoute le cube a la liste de cube de l'arene, avec parametres trouves sur la ligne\"\"\"\n arene.liste_cube.append(Cube(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Mur':\n arene.liste_cube.append(Mur(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Sol':\n arene.liste_cube.append(Sol(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5])))\n elif ligne[0] == 'Robot':\n (x,y,z)=literal_eval(ligne[1])\n ((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy))=literal_eval(ligne[2])\n (a,b,c)=literal_eval(ligne[3])\n (lo,la,ha)=literal_eval(ligne[4])\n vitesse=literal_eval(ligne[5])\n arene.liste_robot.append(Robot((x,y,z),((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy)),(a,b,c),(lo,la,ha),vitesse))\n saveFic(arene,nomfichierJson)", "def obj_ext(self):\n return \".o\"", "def txt_loader(fileobj):\n if isinstance(fileobj, bytes):\n data = fileobj.decode('utf-8')\n elif isinstance(fileobj, six.string_types):\n with open(fileobj, 'rb') as f:\n data = f.read().decode('utf-8')\n elif hasattr(fileobj, 'read'):\n data = fileobj.read().decode('utf-8')\n else:\n raise ValueError('fileobj is not a filename or a file object')\n return data", "def _get_objects(self,label_fh):\n objects = []\n for line in label_fh.readlines():\n try:\n object = {}\n line = line.replace(u'\\ufeff', '')\n if line != '':\n x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n p1 = (x1, y1)\n p2 = (x2, y2)\n p3 = (x3, y3)\n p4 = (x4, y4)\n object['polygon'] = [p1,p2,p3,p4]\n objects.append(object)\n except:\n pass\n return objects", "def generar_Informe(self):\n\t\t\"\"\"Crea archivo a partir de un modelo\"\"\"\n\t\twith open(\"./Reportes/Reporte-%s.txt\" % str(self.ID), \"w\") as fw, open(\"./Reportes/Reporte\",\"r\") as fr:\n\t\t\tfw.writelines(l for l in fr if \"\" in l)\n\t\tfw.close()\n\t\tfr.close()\n\t\t\"\"\"Modifica el archivo creado con los datos del paciente \"\"\"\n\t\tfileDir = os.path.dirname(os.path.realpath(\"/media/leandro/Datos/PROYECTO FINAL/SOFTWARE/IntegracionIUHW/Reportes/\"))\n\t\tprint fileDir\n\t\twith ModificarArchivo(os.path.join(fileDir,\"Reportes/Reporte-\"+str(self.ID)+\".txt\")) as fe: \n\t\t\tfe.writeline(\"Paciente: \" + self.Apellido + \" \"+ self.Nombre, 5)\n\t\t\tfe.writeline(\"Edad: \" + self.Edad, 6)\n\t\t\tfe.write(\"Sexo: \" + self.Sexo+\"\\n\", 7)\n\t\t\tfe.write(\"Copa: \" + self.Posicion, 8)\n\t\t\tfe.write(self.Test+ \"\t\t\t\" + self.Resultado+ \"\t\t\t\t\"+ \"2,8 - 10 mg/mL\")\n\t\t\"\"\"Abre el archivo\"\"\"\n\t\treporte = 
open(\"./Reportes/Reporte-\"+str(self.ID)+\".txt\",\"r\")\n\t\trep = reporte.read()\n\t\treporte.close()\n\t\treturn rep", "def format(self):\n ...", "def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s", "def sext(self, typ):", "def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }", "def format_to_extension(self, format):", "def format_result(self,obj):\n return unicode(obj)", "def readObject(f):\n name = f.readline().rstrip()\n if name == \"\":\n name = f.readline().rstrip()\n if name == \"\":\n return None\n description = f.readline().rstrip()\n location = f.readline().rstrip()\n return AdvObject(name, description, location )", "def getText(self):", "def _wr_3fmt_goeaobj(goea_results, goeaobj, wr_params, log):\n # List of all fields, printable or not, available from GOEnrichmentRecord\n log.write(\"\\nGOEnrichmentRecord FIELDS: {F}\\n\".format(F=\" \".join(goea_results[0].get_prtflds_all())))\n # Use the subset of namedtuple fields_names that are listed in the format string:\n # Same format: print to screen and print to file:\n goeaobj.prt_txt(log, goea_results, **wr_params) # Print to screen\n goeaobj.wr_txt(\"nbt3102_subset_obj.txt\", goea_results, **wr_params)\n # Print to Excel Spreadsheet\n title=\"Print subset of fields from GOEnrichmentRecord\"\n goeaobj.wr_xlsx(\"nbt3102_subset_obj.xlsx\", goea_results, title=title, **wr_params)\n # Print to tab-separated file\n goeaobj.wr_tsv(\"nbt3102_subset_obj.tsv\", goea_results, **wr_params)", "def object_to_text_line(frame, object_):\n\n bit_size = 32\n track_id = object_.track_id.int >> bit_size\n\n # String class label\n if object_.class_id == 1:\n class_ = \"Pedestrian\"\n elif object_.class_id == 2:\n class_ = \"Car\"\n elif object_.class_id == 3:\n class_ = \"Cyclist\"\n else:\n class_ = \"DontCare\"\n\n # Form list\n label = [frame,\n track_id,\n class_,\n -1,\n -1,\n -10,\n object_.bound_box2d.u1,\n object_.bound_box2d.v1,\n object_.bound_box2d.u2,\n object_.bound_box2d.v2,\n object_.bound_box3d.y_dim,\n object_.bound_box3d.z_dim,\n object_.bound_box3d.x_dim,\n object_.bound_box3d.x,\n object_.bound_box3d.y,\n object_.bound_box3d.z,\n object_.bound_box3d.theta,\n object_.score]\n\n # Covert list to a line\n line = \"{0:d} {1:d} {2} {3:d} {4:d} {5:d} {6:.6f} {7:.6f} {8:.6f} {9:.6f} {10:.6f} {11:.6f} {12:.6f} {13:.6f} {14:.6f} {15:.6f} {16:.6f} {17:.6f}\\n\".format(\n *label)\n return line", "def save_to_text(self, file_name, data):\n\n valid_data = ''\n for item in data:\n valid_data = valid_data + item.get_data()+'\\n'\n\n file_save = open(file_name, 'w')\n file_save.write(valid_data)\n file_save.close()", "def get_data_from_nonformat_text():\n pass", "def text(self) -> str:\n return self.load().open().read().decode('utf-8')", "def dump2txt(cls, argv):\n try:\n for filename in argv[1:]:\n parser = cls(filename)\n 
invalid_str = r\"[\\\\/:*?\\\"<>|]\" # Not allowed to use filename\n # Remove invalid text\n subject = re.sub(invalid_str, \"\", parser.subject)\n # Remove local time \"+09:00\", \"-\"\n title_date = parser.date[:-len(\"+09:00\")].replace(\"-\", \"\")\n # Remove invalid strings\n date = re.sub(invalid_str, \"\", title_date)\n result = parser.get_attr_data()\n # Overwrite same date+subject eml\n with open(f'{date}_{subject}.txt', 'w',\n encoding='utf-8') as _f:\n _f.write(result)\n except BaseException as e:\n with open('eml2ext_error.txt', 'w', encoding='utf-8') as _f:\n print(f'error {e}')\n # _f.write(e)", "def loadOBJModel(file_name):\n file_text = open(file_name)\n text = file_text.readlines()\n vertex = []\n normals = []\n uv = []\n faces_vertex = []\n faces_normal = []\n faces_uv = []\n for line in text:\n info = line.split(\" \")\n if info[0] == \"v\":\n vertex.append(\n (float(info[1]), float(info[2]) - 0.1, float(info[3])))\n elif info[0] == \"vn\":\n normals.append((float(info[1]), float(info[2]), float(info[3])))\n elif info[0] == \"vt\":\n uv.append((float(info[1]), float(info[2])))\n elif info[0] == \"f\":\n p1 = info[1].split(\"/\")\n p2 = info[2].split(\"/\")\n p3 = info[3].split(\"/\")\n faces_vertex.append((int(p1[0]), int(p2[0]), int(p3[0])))\n faces_uv.append((int(p1[1]), int(p2[1]), int(p3[1])))\n faces_normal.append((int(p1[2]), int(p2[2]), int(p3[2])))\n return vertex, normals, uv, faces_vertex, faces_normal, faces_uv", "def __str__(self):\n txt = (self.name, self.description)\n return txt", "def __str__(self):\n return '\\n'.join(self.contents)", "def to_text(self):\n co = self.to_config()\n return co.write()", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def get_text(adm, obj):\n return adm['data'][slice(*extent(obj))]", "def get_texte(name):\r\n #with open(name, 'r', encoding='utf-8') as myfile:\r\n with open(name, 'r', encoding='utf-8') as myfile:\r\n data=myfile.read()\r\n return data", "def asformat(self, format):", "def __str__(self):\n\t\treturn self.text", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas 
y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = 
self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string", "def parse_obj(lt_objs,content):\n\n # loop over the object list\n\n\n for obj in lt_objs:\n\n # if it's a textbox, print text and location\n if isinstance(obj, pdfminer.layout.LTRect):\n content[0].append(int(obj.x0))\n content[0].append(int(obj.x1))\n content[1].append(int(obj.y1))\n content[1].append(int(obj.y0))", "def __str__(self):\n return self.descricao", "def __str__(self):\n return ''.join(self.contents)", "def creer_labyrinthe_depuis_chaine(self, chaine):\n labyLoad = {}\n y = 0\n x = 0\n for obj in chaine:\n if obj == \"\\n\":\n labyLoad[x, y] = obj\n y += 1\n x = 0\n else:\n labyLoad[x, y] = obj\n x += 1\n return labyLoad", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def __str__(self):\n s = \"Ext Object (Type: 0x%02x, Data: \" % self.type\n s += \" \".join([\"0x%02x\" % ord(self.data[i:i + 1])\n for i in xrange(min(len(self.data), 8))])\n if len(self.data) > 8:\n s += \" ...\"\n s += \")\"\n return s", "def __str__(self) -> str:\n return \"\\n\".join(str(x) for x in self.content)", "def createObject(dirPath,gSettings,ICs):\n \n with open(os.path.join('../in','object.txt')) as f:\n objFile = f.readlines()\n \n objFile[3] = \"{:+22.15E}; MJD [TT]\\n\".format(ICs[0])\n objFile[4] = \"{:+22.15E}; SMA [km]\\n\".format(ICs[1])\n objFile[5] = \"{:+22.15E}; ECC [-]\\n\".format(ICs[2])\n objFile[6] = \"{:+22.15E}; INC [deg]\\n\".format(ICs[3])\n objFile[7] = \"{:+22.15E}; RAAN [deg]\\n\".format(ICs[4])\n objFile[8] = \"{:+22.15E}; AOP [deg]\\n\".format(ICs[5])\n objFile[9] = \"{:+22.15E}; M [deg]\\n\".format(ICs[6])\n\n SCraft = gSettings[\"Spacecraft\"]\n objFile[11] = \"{:+22.15E}; Mass [kg]\\n\".format(SCraft[\"Mass\"])\n objFile[12] = \"{:+22.15E}; Area (drag) [m^2]\\n\".format(SCraft[\"Drag area\"])\n objFile[13] = \"{:+22.15E}; Area (SRP) [m^2]\\n\".format(SCraft[\"SRP area\"])\n objFile[14] = \"{:+22.15E}; CD [-]\\n\".format(SCraft[\"CD\"])\n objFile[15] = \"{:+22.15E}; CR [-]\\n\".format(SCraft[\"CR\"])\n\n with open(os.path.join(dirPath,'object.txt'),'w') as f:\n f.writelines(objFile)", "def parse(self,obj,padding=0):\n data = ''\n\n if issubclass(obj.__class__,(BinData,)):\n data += self._parse_bindata(obj,padding)\n elif issubclass(obj.__class__,(Entry,)):\n data += self._parse_entry(obj,padding)\n elif issubclass(obj.__class__,(EntryList,)):\n data += self._parse_entry_list(obj,padding)\n elif issubclass(obj.__class__,(EntryTable,)):\n data += '\\n' + self._parse_entry_table(obj)\n else:\n raise Exception('Invalid class for Parser: %s' % obj.__class__)\n\n return data", "def __dxf__(self):\n return tags2str(self)", "def get_text(self):", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n string = ''\n contentsstring = ''\n if 
self.contents == []:\n contentsstring = 'Empty'\n else:\n for i in self.contents:\n contentsstring +='\\n '\n contentsstring += str(i)\n string += 'Name: ' + str(self.name) + '\\nColor: ' + str(self.color) + '\\nSize: ' + str(len(self.contents)) + '\\nMax Size: ' + str(self.max_size) + '\\nContents: ' + contentsstring\n return string", "def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def __str__(self):\n return self.getBooksString() + \"\\n\" + self.getPatronsString()", "def __str__(self):\n return '{} {}'.format(self.nombre, self.apellido)", "def text(self) -> str:", "def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return", "def format_item_display(self, obj):\n return u\"%s - %s\" % (escape(obj.nombre),obj.rfc)", "def __str__(self):\t\t\n\t\tcadena = []\n\t\tactual = self.prim\t\t\n\t\twhile actual:\n\t\t\tif type(actual.dato) == str:\n\t\t\t\tcadena.append(\"'\" + str(actual.dato) + \"'\")\n\t\t\telse:\t\n\t\t\t\tcadena.append(str(actual.dato))\n\t\t\tactual = actual.prox\n\t\treturn \"[\" + \", \".join(cadena) + \"]\"", "def format_content(text):\n coords = get_coordinates(text)\n if coords is None:\n raise Exception(\"Unable to determine coordinates\")\n\n content = {\n \"name\": get_name(text),\n \"cp\": get_cp(text),\n \"level\": get_level(text),\n \"latitude\": coords[\"lat\"],\n \"longitude\": coords[\"lon\"]\n }\n return content", "def format(self):\n return \"dok\"", "def to_content(cls, data: Mapping) -> str:", "def __str__(self):\n return self.data.__str__()", "def __str__(self):\n return self.data.__str__()", "def serialize(self):", "def __str__(self) -> str:\n respuesta: str = f\"\"\"\n{self.documento} rev {self.revision:02d} ({self.f_actualizacion}))\n\"\"\"\n return respuesta", "def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # read_func = read_nexus\n #elif ( file_format.upper() == 'PHYLIP' ):\n # read_func = read_phylip\n #elif ( file_format.upper() == 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq", "def read_file_object(self, file_obj, file_format='FASTA'):\n if file_format.upper() == 'FASTA':\n read_func = read_fasta\n# elif (file_format.upper() == 'NEXUS'):\n# read_func = read_nexus\n# elif (file_format.upper() == 'PHYLIP'):\n# read_func = read_phylip\n else:\n raise NotImplementedError(\n \"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq.upper()\n return self", "def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True", "def 
__str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def format(self) -> str:", "def get_name_from_txt (txtpath):\r\n f= open(txtpath,\"r\")\r\n contents = json.load(f)\r\n #f.close \r\n return contents", "def imprimirObjeto(self):\n print( \"Fin:\"+str(self.fin)+\" Estado:\"+str(self.estado)+\"\\nProgreso:\\n\"+\"\\n\".join(self.progeso))", "def __str__(self):\n table = 'objects'.join(self.galcat.__str__().split('objects')[1:])\n return self.__repr__()+'\\n'+table", "def __str__(self):\n return repr(self.content)", "def json_to_python_object(file):\n with open(f\"objects/{file}\") as f:\n json_file = json.load(f) # Load json file\n json_objects = json_file['objects'] # Parse all data into python file\n print(f'Number of objects: {len(json_objects)}') # To print number of objects\n for i in range(0, len(json_objects)): # Make sure that we do this for all objects on file\n points = extract_points(json_objects[i])\n occluded = extract_occluded(json_objects[i])\n attributes = extract_attributes(json_objects[i])\n label = extract_label(json_objects[i])\n print(\"================================\") # Simple object separator (Not necessary, only for visualization)\n print(f'Object number {i + 1}: ')\n print(f'Label: {label} \\n' # Get label from object\n f'Points: {points} \\n' # Get point from object\n f'Occlusion: {occluded} \\n' # Get occlusion value from object\n f'Attributes: {attributes}') # Get attribute list from object", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def __str__(self):\n return '\\tNo readable data representation.'" ]
[ "0.62898755", "0.60060185", "0.58115673", "0.5791528", "0.5781442", "0.57075506", "0.57075506", "0.5631964", "0.56186527", "0.55803376", "0.5567039", "0.55542934", "0.5503587", "0.54971385", "0.5476744", "0.53883576", "0.5385696", "0.53556794", "0.5348875", "0.5341528", "0.53351194", "0.532535", "0.5313231", "0.5282667", "0.5278984", "0.52627033", "0.5259863", "0.5249858", "0.5246681", "0.52270967", "0.5224375", "0.5220823", "0.520789", "0.52039814", "0.520134", "0.51917726", "0.51908165", "0.51715416", "0.51539373", "0.515017", "0.5145998", "0.51352525", "0.51250875", "0.5124249", "0.5110179", "0.5107371", "0.50979376", "0.5093765", "0.5080454", "0.5072547", "0.5068578", "0.5066614", "0.5066602", "0.5054674", "0.5053796", "0.50469065", "0.50424755", "0.5041946", "0.50303483", "0.50299835", "0.50299835", "0.50299835", "0.50299835", "0.50299835", "0.5022136", "0.50175035", "0.501643", "0.501643", "0.501643", "0.501643", "0.501643", "0.4999511", "0.49964365", "0.49888462", "0.49859488", "0.49857205", "0.49738097", "0.49665886", "0.49618822", "0.49594772", "0.4954628", "0.4954628", "0.49476635", "0.494307", "0.49401486", "0.49319875", "0.4929591", "0.49257755", "0.49257755", "0.49257755", "0.49257755", "0.49257755", "0.492448", "0.49237484", "0.49226606", "0.4921682", "0.49176863", "0.4917112", "0.49124768", "0.49052978", "0.48981598" ]
0.0
-1
Saves objects in text format to the Extencao file
def getArquivoExtencao(chave):
    arq = open("arquivoExtencao.txt", "r")
    linha = arq.readline()
    achou = False
    while linha and not achou:
        if linha[0:7] == chave:
            achou = True
        linha = arq.readline()
    arq.close()
    return linha
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save2txt(obj, file: str):\n with open(file, \"w\") as f:\n print(obj, file=f)", "def convert_txt_to_data():\n pass", "def __str__(self):\n line = ''\n for linea in self.lista:\n for atributo in linea:\n if linea[atributo] != \"\":\n line += atributo + \"=\" + \"'\" + linea[atributo] + \"'\" + '\\t'\n line += '\\n'\n return line", "def __str__(self):\r\n return self.afficherOBJ()", "def format(self, data):", "def format(self, obj):\n pass", "def format(self, obj):\n pass", "def afficherOBJ(self):\r\n str_abr = self.abr.afficher()## appel de l'affichage d'un ABR\r\n return str(self.debut)+':'+str(self.fin)+';'+str_abr ##concaténation du resultat\r", "def format_data(self, data):", "def format_item(self,obj):\n return unicode(obj)", "def _txt_record(self, name, content):\n\n return {\n \"name\": name,\n \"type\": \"TXT\",\n \"aux\": None,\n \"ttl\": MetanameApiClient.minimum_ttl,\n \"data\": content,\n }", "def xephemFormat(self):\n line = []\n #Field 1: names\n names = [self.getName()]\n identifiers = self.getIdentifiers()\n if identifiers[0] is not None:\n names.append(identifiers[0])\n for i in range(1,4):\n if identifiers[i] is not None:\n names.extend(identifiers[i])\n line.append(\"|\".join(names))\n\n #Field 2: type designation\n objType = self.getType()\n if objType in (\"Galaxy Pair\", \"Galaxy Triplet\", \"Group of galaxies\"):\n line.append(\"f|A\")\n elif objType == \"Globular Cluster\":\n line.append(\"f|C\")\n elif objType == \"Double star\":\n line.append(\"f|D\")\n elif objType in (\"HII Ionized region\", \"Nebula\"):\n line.append(\"f|F\")\n elif objType == \"Galaxy\":\n if self.getHubble().startswith(\"S\"):\n line.append(\"f|G\")\n else:\n line.append(\"f|H\")\n elif objType == \"Dark Nebula\":\n line.append(\"f|K\")\n elif objType in (\"Emission Nebula\", \"Reflection Nebula\"):\n line.append(\"f|N\")\n elif objType in (\"Association of stars\", \"Open Cluster\"):\n line.append(\"f|O\")\n elif objType == \"Planetary Nebula\":\n line.append(\"f|P\")\n elif objType == \"Supernova remnant\":\n line.append(\"f|R\")\n elif objType == \"Star\":\n line.append(\"f|S\")\n elif objType == \"Star cluster + Nebula\":\n line.append(\"f|U\")\n else:\n line.append(\"f\")\n\n #Field 3: Right Ascension\n line.append(self.getRA())\n\n #Field 4: Declination\n line.append(self.getDec())\n\n #Field 5: Magnitude\n #We use the first available magnitude in the sequence b,v,j,h,k\n for mag in self.getMagnitudes():\n if mag is not None:\n line.append(str(mag))\n break\n\n #Field 6: optional Epoch, we let it empty\n line.append(\"\")\n\n #Field 7: Dimensions\n dimensions = []\n #Xephem format wants axes espressed in arcsec, we have arcmin\n for value in (self.getDimensions()[0],self.getDimensions()[1]):\n if value is not None:\n dimensions.append(str(value*60))\n else:\n dimensions.append(\"\")\n if self.getDimensions()[2] is not None:\n dimensions.append(str(value))\n else:\n dimensions.append(\"\")\n line.append(\"|\".join(dimensions))\n\n return \",\".join(line)", "def archivoXlFormateado(archivo):\r\n return ow(archivo, formatting_info=True)", "def extension_to_format(self, extension):", "def __repr__(self):\n return '%s.from_text(%r)' % (self.__class__.__name__, self.to_text())", "def __str__(self) -> str:\n obj_dict: Dict[str, Any] = {}\n obj_dict[\"doc\"] = self.doc\n obj_dict[\"type\"] = self.type\n obj_dict[\"name\"] = self.name\n\n line_range = self.line_range()\n obj_dict[\"start_line\"] = line_range[0]\n obj_dict[\"end_line\"] = line_range[1]\n\n obj_dict[\"children\"] = []\n\n 
for child in self.children.values():\n obj_dict[\"children\"].append(json.loads(str(child)))\n\n return json.dumps(obj_dict)", "def __str__(self):\n\t\treturn str(self.dato)", "def stringReco(obj):\n name = obj.get_name()\n name = obj._pid if (name is None) else name\n return (\"pdg: \" + name + \" E: \" + str(obj._E)\n + \" px: \" + str(obj._px) + \" py: \" + str(obj._py)\n + \" pz: \"+ str(obj._pz) + \" mass: \" + str(obj._m))", "def CsvToJson(nomfichierJson):\n with open(\"save/save.csv\",'r') as f:\n liste_cube = list()\n liste_robot = list()\n \"\"\"deux listes vides pour contenir les objets charges\"\"\"\n for line in f:\n ligne=line.split(\";\")\n if ligne[0] == 'Arene':\n \"\"\"On cree une nouvelle arene avec les parametres trouves sur la ligne, separes par des ';' \"\"\"\n arene = Arene(int(ligne[1]),int(ligne[2]),int(ligne[3]),liste_cube,liste_robot)\n arene.liste_robot=liste_robot\n elif ligne[0] == 'Cube':\n \"\"\"On ajoute le cube a la liste de cube de l'arene, avec parametres trouves sur la ligne\"\"\"\n arene.liste_cube.append(Cube(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Mur':\n arene.liste_cube.append(Mur(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5]),int(ligne[6])))\n elif ligne[0] == 'Sol':\n arene.liste_cube.append(Sol(int(ligne[1]),int(ligne[2]),int(ligne[3]),int(ligne[4]),int(ligne[5])))\n elif ligne[0] == 'Robot':\n (x,y,z)=literal_eval(ligne[1])\n ((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy))=literal_eval(ligne[2])\n (a,b,c)=literal_eval(ligne[3])\n (lo,la,ha)=literal_eval(ligne[4])\n vitesse=literal_eval(ligne[5])\n arene.liste_robot.append(Robot((x,y,z),((cax,cay),(cbx,cby),(ccx,ccy),(cdx,cdy)),(a,b,c),(lo,la,ha),vitesse))\n saveFic(arene,nomfichierJson)", "def obj_ext(self):\n return \".o\"", "def txt_loader(fileobj):\n if isinstance(fileobj, bytes):\n data = fileobj.decode('utf-8')\n elif isinstance(fileobj, six.string_types):\n with open(fileobj, 'rb') as f:\n data = f.read().decode('utf-8')\n elif hasattr(fileobj, 'read'):\n data = fileobj.read().decode('utf-8')\n else:\n raise ValueError('fileobj is not a filename or a file object')\n return data", "def _get_objects(self,label_fh):\n objects = []\n for line in label_fh.readlines():\n try:\n object = {}\n line = line.replace(u'\\ufeff', '')\n if line != '':\n x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n p1 = (x1, y1)\n p2 = (x2, y2)\n p3 = (x3, y3)\n p4 = (x4, y4)\n object['polygon'] = [p1,p2,p3,p4]\n objects.append(object)\n except:\n pass\n return objects", "def generar_Informe(self):\n\t\t\"\"\"Crea archivo a partir de un modelo\"\"\"\n\t\twith open(\"./Reportes/Reporte-%s.txt\" % str(self.ID), \"w\") as fw, open(\"./Reportes/Reporte\",\"r\") as fr:\n\t\t\tfw.writelines(l for l in fr if \"\" in l)\n\t\tfw.close()\n\t\tfr.close()\n\t\t\"\"\"Modifica el archivo creado con los datos del paciente \"\"\"\n\t\tfileDir = os.path.dirname(os.path.realpath(\"/media/leandro/Datos/PROYECTO FINAL/SOFTWARE/IntegracionIUHW/Reportes/\"))\n\t\tprint fileDir\n\t\twith ModificarArchivo(os.path.join(fileDir,\"Reportes/Reporte-\"+str(self.ID)+\".txt\")) as fe: \n\t\t\tfe.writeline(\"Paciente: \" + self.Apellido + \" \"+ self.Nombre, 5)\n\t\t\tfe.writeline(\"Edad: \" + self.Edad, 6)\n\t\t\tfe.write(\"Sexo: \" + self.Sexo+\"\\n\", 7)\n\t\t\tfe.write(\"Copa: \" + self.Posicion, 8)\n\t\t\tfe.write(self.Test+ \"\t\t\t\" + self.Resultado+ \"\t\t\t\t\"+ \"2,8 - 10 mg/mL\")\n\t\t\"\"\"Abre el archivo\"\"\"\n\t\treporte = 
open(\"./Reportes/Reporte-\"+str(self.ID)+\".txt\",\"r\")\n\t\trep = reporte.read()\n\t\treporte.close()\n\t\treturn rep", "def format(self):\n ...", "def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s", "def sext(self, typ):", "def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }", "def format_to_extension(self, format):", "def format_result(self,obj):\n return unicode(obj)", "def readObject(f):\n name = f.readline().rstrip()\n if name == \"\":\n name = f.readline().rstrip()\n if name == \"\":\n return None\n description = f.readline().rstrip()\n location = f.readline().rstrip()\n return AdvObject(name, description, location )", "def getText(self):", "def _wr_3fmt_goeaobj(goea_results, goeaobj, wr_params, log):\n # List of all fields, printable or not, available from GOEnrichmentRecord\n log.write(\"\\nGOEnrichmentRecord FIELDS: {F}\\n\".format(F=\" \".join(goea_results[0].get_prtflds_all())))\n # Use the subset of namedtuple fields_names that are listed in the format string:\n # Same format: print to screen and print to file:\n goeaobj.prt_txt(log, goea_results, **wr_params) # Print to screen\n goeaobj.wr_txt(\"nbt3102_subset_obj.txt\", goea_results, **wr_params)\n # Print to Excel Spreadsheet\n title=\"Print subset of fields from GOEnrichmentRecord\"\n goeaobj.wr_xlsx(\"nbt3102_subset_obj.xlsx\", goea_results, title=title, **wr_params)\n # Print to tab-separated file\n goeaobj.wr_tsv(\"nbt3102_subset_obj.tsv\", goea_results, **wr_params)", "def object_to_text_line(frame, object_):\n\n bit_size = 32\n track_id = object_.track_id.int >> bit_size\n\n # String class label\n if object_.class_id == 1:\n class_ = \"Pedestrian\"\n elif object_.class_id == 2:\n class_ = \"Car\"\n elif object_.class_id == 3:\n class_ = \"Cyclist\"\n else:\n class_ = \"DontCare\"\n\n # Form list\n label = [frame,\n track_id,\n class_,\n -1,\n -1,\n -10,\n object_.bound_box2d.u1,\n object_.bound_box2d.v1,\n object_.bound_box2d.u2,\n object_.bound_box2d.v2,\n object_.bound_box3d.y_dim,\n object_.bound_box3d.z_dim,\n object_.bound_box3d.x_dim,\n object_.bound_box3d.x,\n object_.bound_box3d.y,\n object_.bound_box3d.z,\n object_.bound_box3d.theta,\n object_.score]\n\n # Covert list to a line\n line = \"{0:d} {1:d} {2} {3:d} {4:d} {5:d} {6:.6f} {7:.6f} {8:.6f} {9:.6f} {10:.6f} {11:.6f} {12:.6f} {13:.6f} {14:.6f} {15:.6f} {16:.6f} {17:.6f}\\n\".format(\n *label)\n return line", "def save_to_text(self, file_name, data):\n\n valid_data = ''\n for item in data:\n valid_data = valid_data + item.get_data()+'\\n'\n\n file_save = open(file_name, 'w')\n file_save.write(valid_data)\n file_save.close()", "def get_data_from_nonformat_text():\n pass", "def dump2txt(cls, argv):\n try:\n for filename in argv[1:]:\n parser = cls(filename)\n invalid_str = r\"[\\\\/:*?\\\"<>|]\" # Not allowed to use filename\n # Remove invalid 
text\n subject = re.sub(invalid_str, \"\", parser.subject)\n # Remove local time \"+09:00\", \"-\"\n title_date = parser.date[:-len(\"+09:00\")].replace(\"-\", \"\")\n # Remove invalid strings\n date = re.sub(invalid_str, \"\", title_date)\n result = parser.get_attr_data()\n # Overwrite same date+subject eml\n with open(f'{date}_{subject}.txt', 'w',\n encoding='utf-8') as _f:\n _f.write(result)\n except BaseException as e:\n with open('eml2ext_error.txt', 'w', encoding='utf-8') as _f:\n print(f'error {e}')\n # _f.write(e)", "def text(self) -> str:\n return self.load().open().read().decode('utf-8')", "def loadOBJModel(file_name):\n file_text = open(file_name)\n text = file_text.readlines()\n vertex = []\n normals = []\n uv = []\n faces_vertex = []\n faces_normal = []\n faces_uv = []\n for line in text:\n info = line.split(\" \")\n if info[0] == \"v\":\n vertex.append(\n (float(info[1]), float(info[2]) - 0.1, float(info[3])))\n elif info[0] == \"vn\":\n normals.append((float(info[1]), float(info[2]), float(info[3])))\n elif info[0] == \"vt\":\n uv.append((float(info[1]), float(info[2])))\n elif info[0] == \"f\":\n p1 = info[1].split(\"/\")\n p2 = info[2].split(\"/\")\n p3 = info[3].split(\"/\")\n faces_vertex.append((int(p1[0]), int(p2[0]), int(p3[0])))\n faces_uv.append((int(p1[1]), int(p2[1]), int(p3[1])))\n faces_normal.append((int(p1[2]), int(p2[2]), int(p3[2])))\n return vertex, normals, uv, faces_vertex, faces_normal, faces_uv", "def __str__(self):\n txt = (self.name, self.description)\n return txt", "def __str__(self):\n return '\\n'.join(self.contents)", "def to_text(self):\n co = self.to_config()\n return co.write()", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def get_text(adm, obj):\n return adm['data'][slice(*extent(obj))]", "def get_texte(name):\r\n #with open(name, 'r', encoding='utf-8') as myfile:\r\n with open(name, 'r', encoding='utf-8') as myfile:\r\n data=myfile.read()\r\n return data", "def asformat(self, format):", "def __str__(self):\n\t\treturn self.text", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y 
tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = 
self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string", "def parse_obj(lt_objs,content):\n\n # loop over the object list\n\n\n for obj in lt_objs:\n\n # if it's a textbox, print text and location\n if isinstance(obj, pdfminer.layout.LTRect):\n content[0].append(int(obj.x0))\n content[0].append(int(obj.x1))\n content[1].append(int(obj.y1))\n content[1].append(int(obj.y0))", "def __str__(self):\n return self.descricao", "def __str__(self):\n return ''.join(self.contents)", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def creer_labyrinthe_depuis_chaine(self, chaine):\n labyLoad = {}\n y = 0\n x = 0\n for obj in chaine:\n if obj == \"\\n\":\n labyLoad[x, y] = obj\n y += 1\n x = 0\n else:\n labyLoad[x, y] = obj\n x += 1\n return labyLoad", "def __str__(self):\n s = \"Ext Object (Type: 0x%02x, Data: \" % self.type\n s += \" \".join([\"0x%02x\" % ord(self.data[i:i + 1])\n for i in xrange(min(len(self.data), 8))])\n if len(self.data) > 8:\n s += \" ...\"\n s += \")\"\n return s", "def __str__(self) -> str:\n return \"\\n\".join(str(x) for x in self.content)", "def createObject(dirPath,gSettings,ICs):\n \n with open(os.path.join('../in','object.txt')) as f:\n objFile = f.readlines()\n \n objFile[3] = \"{:+22.15E}; MJD [TT]\\n\".format(ICs[0])\n objFile[4] = \"{:+22.15E}; SMA [km]\\n\".format(ICs[1])\n objFile[5] = \"{:+22.15E}; ECC [-]\\n\".format(ICs[2])\n objFile[6] = \"{:+22.15E}; INC [deg]\\n\".format(ICs[3])\n objFile[7] = \"{:+22.15E}; RAAN [deg]\\n\".format(ICs[4])\n objFile[8] = \"{:+22.15E}; AOP [deg]\\n\".format(ICs[5])\n objFile[9] = \"{:+22.15E}; M [deg]\\n\".format(ICs[6])\n\n SCraft = gSettings[\"Spacecraft\"]\n objFile[11] = \"{:+22.15E}; Mass [kg]\\n\".format(SCraft[\"Mass\"])\n objFile[12] = \"{:+22.15E}; Area (drag) [m^2]\\n\".format(SCraft[\"Drag area\"])\n objFile[13] = \"{:+22.15E}; Area (SRP) [m^2]\\n\".format(SCraft[\"SRP area\"])\n objFile[14] = \"{:+22.15E}; CD [-]\\n\".format(SCraft[\"CD\"])\n objFile[15] = \"{:+22.15E}; CR [-]\\n\".format(SCraft[\"CR\"])\n\n with open(os.path.join(dirPath,'object.txt'),'w') as f:\n f.writelines(objFile)", "def parse(self,obj,padding=0):\n data = ''\n\n if issubclass(obj.__class__,(BinData,)):\n data += self._parse_bindata(obj,padding)\n elif issubclass(obj.__class__,(Entry,)):\n data += self._parse_entry(obj,padding)\n elif issubclass(obj.__class__,(EntryList,)):\n data += self._parse_entry_list(obj,padding)\n elif issubclass(obj.__class__,(EntryTable,)):\n data += '\\n' + self._parse_entry_table(obj)\n else:\n raise Exception('Invalid class for Parser: %s' % obj.__class__)\n\n return data", "def __dxf__(self):\n return tags2str(self)", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def get_text(self):", "def __str__(self):\n string = ''\n contentsstring = ''\n if 
self.contents == []:\n contentsstring = 'Empty'\n else:\n for i in self.contents:\n contentsstring +='\\n '\n contentsstring += str(i)\n string += 'Name: ' + str(self.name) + '\\nColor: ' + str(self.color) + '\\nSize: ' + str(len(self.contents)) + '\\nMax Size: ' + str(self.max_size) + '\\nContents: ' + contentsstring\n return string", "def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def __str__(self):\n return self.getBooksString() + \"\\n\" + self.getPatronsString()", "def __str__(self):\n return '{} {}'.format(self.nombre, self.apellido)", "def text(self) -> str:", "def format_item_display(self, obj):\n return u\"%s - %s\" % (escape(obj.nombre),obj.rfc)", "def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return", "def __str__(self):\t\t\n\t\tcadena = []\n\t\tactual = self.prim\t\t\n\t\twhile actual:\n\t\t\tif type(actual.dato) == str:\n\t\t\t\tcadena.append(\"'\" + str(actual.dato) + \"'\")\n\t\t\telse:\t\n\t\t\t\tcadena.append(str(actual.dato))\n\t\t\tactual = actual.prox\n\t\treturn \"[\" + \", \".join(cadena) + \"]\"", "def format_content(text):\n coords = get_coordinates(text)\n if coords is None:\n raise Exception(\"Unable to determine coordinates\")\n\n content = {\n \"name\": get_name(text),\n \"cp\": get_cp(text),\n \"level\": get_level(text),\n \"latitude\": coords[\"lat\"],\n \"longitude\": coords[\"lon\"]\n }\n return content", "def format(self):\n return \"dok\"", "def to_content(cls, data: Mapping) -> str:", "def __str__(self):\n return self.data.__str__()", "def __str__(self):\n return self.data.__str__()", "def serialize(self):", "def __str__(self) -> str:\n respuesta: str = f\"\"\"\n{self.documento} rev {self.revision:02d} ({self.f_actualizacion}))\n\"\"\"\n return respuesta", "def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # read_func = read_nexus\n #elif ( file_format.upper() == 'PHYLIP' ):\n # read_func = read_phylip\n #elif ( file_format.upper() == 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq", "def read_file_object(self, file_obj, file_format='FASTA'):\n if file_format.upper() == 'FASTA':\n read_func = read_fasta\n# elif (file_format.upper() == 'NEXUS'):\n# read_func = read_nexus\n# elif (file_format.upper() == 'PHYLIP'):\n# read_func = read_phylip\n else:\n raise NotImplementedError(\n \"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq.upper()\n return self", "def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True", "def 
__str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def __str__(self):\n return str(self.data)", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def format(self) -> str:", "def imprimirObjeto(self):\n print( \"Fin:\"+str(self.fin)+\" Estado:\"+str(self.estado)+\"\\nProgreso:\\n\"+\"\\n\".join(self.progeso))", "def get_name_from_txt (txtpath):\r\n f= open(txtpath,\"r\")\r\n contents = json.load(f)\r\n #f.close \r\n return contents", "def __str__(self):\n table = 'objects'.join(self.galcat.__str__().split('objects')[1:])\n return self.__repr__()+'\\n'+table", "def __str__(self):\n return repr(self.content)", "def json_to_python_object(file):\n with open(f\"objects/{file}\") as f:\n json_file = json.load(f) # Load json file\n json_objects = json_file['objects'] # Parse all data into python file\n print(f'Number of objects: {len(json_objects)}') # To print number of objects\n for i in range(0, len(json_objects)): # Make sure that we do this for all objects on file\n points = extract_points(json_objects[i])\n occluded = extract_occluded(json_objects[i])\n attributes = extract_attributes(json_objects[i])\n label = extract_label(json_objects[i])\n print(\"================================\") # Simple object separator (Not necessary, only for visualization)\n print(f'Object number {i + 1}: ')\n print(f'Label: {label} \\n' # Get label from object\n f'Points: {points} \\n' # Get point from object\n f'Occlusion: {occluded} \\n' # Get occlusion value from object\n f'Attributes: {attributes}') # Get attribute list from object", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def __str__(self):\n return '\\tNo readable data representation.'" ]
[ "0.628988", "0.60054374", "0.58121145", "0.5792043", "0.57810664", "0.57079685", "0.57079685", "0.5631726", "0.5618268", "0.5581328", "0.5566677", "0.55541193", "0.55033374", "0.54966205", "0.5477309", "0.53887737", "0.538603", "0.5356381", "0.5347529", "0.53421474", "0.5334693", "0.53248066", "0.5311795", "0.5281728", "0.5279604", "0.52635", "0.52603996", "0.5249381", "0.5247826", "0.52267206", "0.52243024", "0.5221243", "0.52081746", "0.52040815", "0.5201337", "0.5191185", "0.51910836", "0.5170601", "0.5154332", "0.51508373", "0.5145746", "0.5134054", "0.5125695", "0.51232105", "0.5109686", "0.5107999", "0.50972366", "0.50931096", "0.5081452", "0.50728405", "0.5069353", "0.5067434", "0.50666964", "0.5055538", "0.5054905", "0.50454414", "0.50432503", "0.5042118", "0.5030632", "0.5030632", "0.5030632", "0.5030632", "0.5030632", "0.50301534", "0.50222033", "0.50181776", "0.5016427", "0.5016427", "0.5016427", "0.5016427", "0.5016427", "0.5000036", "0.499686", "0.49888405", "0.4986739", "0.49846157", "0.49745968", "0.4965972", "0.49618265", "0.4960488", "0.49549603", "0.49549603", "0.49471965", "0.49435365", "0.49393234", "0.49312645", "0.4929448", "0.4926179", "0.4926179", "0.4926179", "0.4926179", "0.4926179", "0.4923788", "0.49231485", "0.49221689", "0.49214143", "0.49183506", "0.49179658", "0.4912045", "0.490449", "0.48985308" ]
0.0
-1
Accepts any number of Pattern instances.
def __init__(self, *patterns):
    # Validate.
    for pattern in patterns:
        if type(pattern) != models.patterns.Pattern:
            raise TypeException("Session only initialized with Pattern instances.")

    self.patterns = patterns
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_patterns(self, patterns: Iterable[AttributeRulerPatternType]) -> None:\n for p in patterns:\n self.add(**p) # type: ignore[arg-type]", "def make_pattern_set(self):\n \n _pattern = []\n for x in range(1,9):\n _pattern.append(self.make_pattern())\n \n self.pattern = _pattern", "def __init__(self, pattern1, pattern2):\n self.pattern1 = pattern1\n self.pattern2 = pattern2", "def add_patterns(self, patterns: Iterable[Dict[str, Any]],) -> None:\n # disable the nlp components after this one\n # in case they haven't been initialized / deserialised yet\n try:\n current_index = self.nlp.pipe_names.index(self.name)\n subsequent_pipes = [\n pipe for pipe in self.nlp.pipe_names[current_index + 1 :]\n ]\n except ValueError:\n subsequent_pipes = []\n\n with self.nlp.disable_pipes(subsequent_pipes):\n fuzzy_pattern_labels = []\n fuzzy_pattern_texts = []\n fuzzy_pattern_kwargs = []\n fuzzy_pattern_ids = []\n regex_pattern_labels = []\n regex_pattern_texts = []\n regex_pattern_kwargs = []\n regex_pattern_ids = []\n\n for entry in patterns:\n try:\n if isinstance(entry, dict):\n if entry[\"type\"] == \"fuzzy\":\n fuzzy_pattern_labels.append(entry[\"label\"])\n fuzzy_pattern_texts.append(entry[\"pattern\"])\n fuzzy_pattern_kwargs.append(entry.get(\"kwargs\", {}))\n fuzzy_pattern_ids.append(entry.get(\"id\"))\n elif entry[\"type\"] == \"regex\":\n regex_pattern_labels.append(entry[\"label\"])\n regex_pattern_texts.append(entry[\"pattern\"])\n regex_pattern_kwargs.append(entry.get(\"kwargs\", {}))\n regex_pattern_ids.append(entry.get(\"id\"))\n else:\n warnings.warn(\n f\"\"\"Spaczz pattern \"type\" must be \"fuzzy\" or \"regex\",\\n\n not {entry[\"label\"]}. Skipping this pattern.\"\"\",\n PatternTypeWarning,\n )\n else:\n raise TypeError((\"Patterns must be an iterable of dicts.\"))\n except KeyError:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str), type (str),\",\n \"optional kwargs (Dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n fuzzy_patterns = []\n for label, pattern, kwargs, ent_id in zip(\n fuzzy_pattern_labels,\n self.nlp.pipe(fuzzy_pattern_texts),\n fuzzy_pattern_kwargs,\n fuzzy_pattern_ids,\n ):\n fuzzy_pattern = {\n \"label\": label,\n \"pattern\": pattern,\n \"kwargs\": kwargs,\n \"type\": \"fuzzy\",\n }\n if ent_id:\n fuzzy_pattern[\"id\"] = ent_id\n fuzzy_patterns.append(fuzzy_pattern)\n\n regex_patterns = []\n for label, pattern, kwargs, ent_id in zip(\n regex_pattern_labels,\n regex_pattern_texts,\n regex_pattern_kwargs,\n regex_pattern_ids,\n ):\n regex_pattern = {\n \"label\": label,\n \"pattern\": pattern,\n \"kwargs\": kwargs,\n \"type\": \"regex\",\n }\n if ent_id:\n regex_pattern[\"id\"] = ent_id\n regex_patterns.append(regex_pattern)\n\n self._add_patterns(fuzzy_patterns, regex_patterns)", "def __init__(self, *regexes): #todo: maybe design a container for regexes (because of precedence)\n self._regexes: list = regexes\n for regex in self._regexes:\n try:\n assert type(regex) is rgx.RegEx\n except AssertionError as e:\n print(type(regex), e)\n self._ignored = set()", "def gen_matches(self, subseq, startpos):\n \n raise TypeError, \"PatternBase is an abstract base class\"", "def by_regex(cls, *patterns):\n return cls(*(to_matcher(RegexMatcher, p) for p in patterns))", "def listenForPatterns(self, patterns):\n self._patterns = patterns\n for pattern in self._patterns:\n if len(pattern) > self._patternLimit:\n self._patternLimit = len(pattern)\n \n if self._enabled:\n 
self.disable()\n self.enable()", "def __init__(self, pattern):\r\n self.pattern = pattern", "def __init__(self, patterns=None):\n Container.__init__(self, patterns)", "def _add_patterns(\n self, fuzzy_patterns: List[Dict[str, Any]], regex_patterns: List[Dict[str, Any]]\n ) -> None:\n for entry in fuzzy_patterns + regex_patterns:\n label = entry[\"label\"]\n if \"id\" in entry:\n ent_label = label\n label = self._create_label(label, entry[\"id\"])\n self._ent_ids[label] = (ent_label, entry[\"id\"])\n pattern = entry[\"pattern\"]\n kwargs = entry[\"kwargs\"]\n if isinstance(pattern, Doc):\n self.fuzzy_patterns[label][\"patterns\"].append(pattern)\n self.fuzzy_patterns[label][\"kwargs\"].append(kwargs)\n elif isinstance(pattern, str):\n self.regex_patterns[label][\"patterns\"].append(pattern)\n self.regex_patterns[label][\"kwargs\"].append(kwargs)\n else:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str), type (str),\",\n \"optional kwargs (Dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n for label, pattern in self.fuzzy_patterns.items():\n self.fuzzy_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])\n for label, pattern in self.regex_patterns.items():\n self.regex_matcher.add(label, pattern[\"patterns\"], pattern[\"kwargs\"])", "def test_add_patterns_with_other_pipeline_components(\n patterns: List[Dict[str, Any]]\n) -> None:\n nlp = spacy.blank(\"en\")\n if spacy.__version__ < \"3.0.0\":\n nlp.add_pipe(nlp.create_pipe(\"ner\"))\n ruler = SpaczzRuler(nlp)\n nlp.add_pipe(ruler, first=True)\n else:\n _ = nlp.add_pipe(\"ner\")\n ruler = nlp.add_pipe(\"spaczz_ruler\", first=True)\n ruler.add_patterns(patterns)\n assert len(nlp.get_pipe(\"spaczz_ruler\")) == len(patterns)", "def test(self, patterns):\n for p in patterns:\n print(p[1], '->', self.feedForward(p[0]))", "def toClean(self, *patterns):\n self.cleanables.extend([*patterns])", "def createAttrPatterns(*args, patternDefinition: AnyStr=\"\", patternFile: AnyStr=\"\",\n patternType: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def test_add_patterns(ruler: SpaczzRuler, patterns: List[Dict[str, Any]]) -> None:\n assert len(ruler) == len(patterns)", "def __init__(self,pattern):\n\t\tself.__type__ = 'pol'\n\t\tif type(pattern)!=list and type(pattern)!=tuple :\n\t\t\traise InvalidArgumentException(\"No puedo construir un polinomio con este argumento\")", "def all(cls, *patterns: \"JsonPattern\") -> \"JsonPattern\":\n return jsii.sinvoke(cls, \"all\", [*patterns])", "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "def site_patterns(*args):\n pattern_list = args\n return [SiteRegexURLResolver('', pattern_list)]", "def add_pattern(self, pattern):\n self.patterns.append(pattern)", "def _make_patterns(patterns):\n field_registry = display_fields.FieldRegistry()\n\n pattern_list = display_pattern.ScreenPatternList(\n field_registry=field_registry,\n )\n for pattern in patterns:\n pattern_list.add(pattern.split('\\n'))\n return pattern_list", "def patterns(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"patterns\")", "def __init__(self, pattern):\n self._pattern = re.compile(pattern)", "def add_pattern(self, name, pattern=None):\n assert isinstance(name, str) and len(name) < 32 and name.find(' ') == -1, \"name must be a string with less than 32 characters and contain no spaces\"\n assert isinstance(pattern, (list, np.ndarray, Pattern)), \"pattern must be a list or Pattern\"\n \n 
if not isinstance(pattern, Pattern):\n pattern = Pattern(name, multipliers=pattern, time_options=self._options.time) \n else: #elif pattern.time_options is None:\n pattern.time_options = self._options.time\n if pattern.name in self._data.keys():\n raise ValueError('Pattern name already exists')\n self[name] = pattern", "def phony(*patterns: Any) -> Any: # type: ignore\n strings: List[str] = []\n for pattern in each_string(*patterns):\n if not isinstance(pattern, AnnotatedStr):\n pattern = AnnotatedStr(pattern)\n pattern.phony = True\n strings.append(pattern)\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(strings) == 1\n return strings[0]\n return strings", "def any(cls, *patterns: \"JsonPattern\") -> \"JsonPattern\":\n return jsii.sinvoke(cls, \"any\", [*patterns])", "def set_patterns(self, strings):\n\t\tself.patterns = []\n\n\t\tfor string in strings:\n\t\t\tif isinstance(string, bytes):\n\t\t\t\tstring = string.decode('utf-8')\n\n\t\t\tself.patterns.append(string)", "def setPatterns(self, value):\n return self._set(patterns=value)", "def do_pattern(l, pattern, repeat=1):\n command = create_pattern_command(pattern, repeat)\n l.write(command)", "def pattern_factory(self):\n\t\treturn self.args[1]", "def _apply_pattern_rules(flags, input_tensors, output_tensors, tensor_list, tensor_map):\n matched_pattern = OpPatterns.OPAQUE_PATTERN\n for rule, target_pattern in OP_PATTERN_RULES.items():\n if matched_pattern != OpPatterns.OPAQUE_PATTERN:\n break\n # One rule for multiple patterns\n if isinstance(target_pattern, tuple):\n for pattern in target_pattern:\n if rule(flags, pattern, SIMPLE_MAPPING, input_tensors,\n output_tensors, tensor_list, tensor_map):\n matched_pattern = pattern\n break\n elif rule(flags, input_tensors, output_tensors,\n tensor_list, tensor_map) and isinstance(target_pattern, OpPatterns):\n # One rule for one pattern\n matched_pattern = OP_PATTERN_RULES[rule]\n break\n elif not isinstance(target_pattern, OpPatterns):\n raise ValueError(\"Wrong Subpattern rule dictionary format: \" +\n \"Pattern expected but received \" + str(type(target_pattern)))\n return matched_pattern", "def test_patterns(ruler: SpaczzRuler, patterns: List[Dict[str, Any]]) -> None:\n assert all([pattern in ruler.patterns for pattern in patterns])", "def compile_patterns(patterns: List[str], anchor: Optional[str]):\n start = ending = ''\n if anchor == 'start':\n patterns = [pattern[1:] for pattern in patterns]\n start = '^'\n elif anchor == 'end':\n patterns = [pattern[:-1] for pattern in patterns]\n ending = '$'\n\n if patterns:\n core = '|'.join(patterns)\n else:\n core = CompanyCleaner.MATCH_NOTHING # If iter is empty, return regex that can match nothing.\n\n return re.compile(start + '(?:' + core + ')+' + ending)", "def arguments_pattern(arguments):\n pattern = []\n \n # reserved keywords for composite commands\n reserved_keywords = (\"to\", \"with\", \">\", \"<\", \"=\", \"apartment\", \"type\")\n \n # check the type of each argument and create a pattern\n for arg in arguments:\n if arg in reserved_keywords:\n pattern.append(arg)\n continue\n \n arg_type = argument_type(arg)\n \n if arg_type == float:\n pattern.append(\"float\")\n elif arg_type == int:\n pattern.append(\"int\")\n else: \n pattern.append(\"string\")\n \n # remove the keywords from the arguments to be able to handle them\n for reserved in reserved_keywords:\n if reserved in arguments:\n arguments.remove(reserved)\n \n # return the pattern as a string\n return \" \".join(pattern)", "def add(\n self: 
TokenMatcher,\n label: str,\n patterns: List[List[Dict[str, Any]]],\n on_match: TokenCallback = None,\n ) -> None:\n for pattern in patterns:\n if len(pattern) == 0:\n raise ValueError(\"pattern cannot have zero tokens.\")\n if isinstance(pattern, list):\n self._patterns[label].append(list(pattern))\n else:\n raise TypeError(\"Patterns must be lists of dictionaries.\")\n self._callbacks[label] = on_match", "def _matches(o, pattern):\n if not len(o) == len(pattern):\n return False\n comps = zip(o,pattern)\n return all(isinstance(obj,kind) for obj,kind in comps)", "def glob_patterns(separator=os.path.sep, **kwargs):\n terms = [kwargs.pop(field, '*')\n for field in NormalizedSceneId.tuple_type._fields]\n assert not kwargs, 'Unrecognized field names: {}'.format(kwargs)\n\n # terms which are not str are assumed to contain choices (list of str)\n choice_inds = [i for i, val in enumerate(terms) if not isinstance(val, str)]\n val_lists = [terms[i][:] for i in choice_inds]\n\n patterns = []\n for values in it.product(*val_lists):\n for i, ind in enumerate(choice_inds):\n terms[ind] = values[i]\n patterns.append(separator.join(terms))\n return patterns", "def register_patterns(self) -> None:\n\n if (patterns := getattr(self, \"WORDS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(rf\"\\b{k}\\b\", v))\n\n if (patterns := getattr(self, \"PATTERNS\", None)) is not None:\n for k, v in patterns.items():\n self.register_replacement(Replacement(k, v))\n\n if (replacements := getattr(self, \"REPLACEMENTS\", None)) is not None:\n for replacement in replacements:\n self.register_replacement(replacement)", "def __init__(self,\n pattern_vec=None,\n ):\n\n # Initialize members of the class\n self.pattern_vec = pattern_vec", "def add_pattern(self, pattern, callback):\n self.patterns.append((pattern, callback))", "def __init__(self, *urls):\n\n self.urlpatterns = []\n\n try:\n if isinstance(urls[0], str):\n prefix = urls[0]\n urls = urls[1:]\n else:\n prefix = None\n except IndexError:\n prefix = None\n\n for t in urls:\n if isinstance(t, (list, tuple)):\n t = url(*t)\n\n if prefix and hasattr(t, 'add_prefix'):\n t.add_prefix(prefix)\n\n self.urlpatterns.append(t)", "def set_inputs(self, pattern: np.ndarray):\n self.x = np.array(pattern[:self.n_inputs]).reshape((1, self.n_inputs))", "def DiscoverPatterns(parameters, graph):\n patternCount = 0\n # get initial one-edge patterns\n parentPatternList = GetInitialPatterns(graph, parameters.temporal)\n if DEBUGFLAG:\n print(\"Initial patterns (\" + str(len(parentPatternList)) + \"):\")\n for pattern in parentPatternList:\n pattern.print_pattern(' ')\n discoveredPatternList = []\n while ((patternCount < parameters.limit) and parentPatternList):\n print(str(parameters.limit - patternCount) + \" patterns left\")\n childPatternList = []\n # extend each pattern in parent list (***** todo: in parallel)\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if ((len(parentPattern.instances) > 1) and (patternCount < parameters.limit)):\n patternCount += 1\n extendedPatternList = Pattern.ExtendPattern(parentPattern, parameters.temporal)\n while (extendedPatternList):\n extendedPattern = extendedPatternList.pop(0)\n if DEBUGFLAG:\n print(\"Extended Pattern:\")\n extendedPattern.print_pattern(' ')\n if (len(extendedPattern.definition.edges) <= parameters.maxSize):\n # evaluate each extension and add to child list\n extendedPattern.evaluate(graph)\n if ((not parameters.prune) or (extendedPattern.value >= 
parentPattern.value)):\n Pattern.PatternListInsert(extendedPattern, childPatternList, parameters.beamWidth, parameters.valueBased)\n # add parent pattern to final discovered list\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n parentPatternList = childPatternList\n # insert any remaining patterns in parent list on to discovered list\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n return discoveredPatternList", "def input(self, *input):\n for i in input:\n self._parser.feed(i)", "def get_pattern(flags: dict, input_tensors: list,\n output_tensors: list, tensor_list: list, tensor_map: dict):\n # If nothing matches, default pattern would be opaque pattern\n matched_pattern = OpPatternRecognizer._apply_pattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_subpattern = OpPatternRecognizer.apply_subpattern_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n matched_special_op = OpPatternRecognizer.apply_spec_rules(flags,\n input_tensors,\n output_tensors,\n tensor_list,\n tensor_map)\n return matched_pattern, matched_subpattern, matched_special_op", "def patterns(self):\n return self._pattern_reg", "def iter_recipes(self, pattern):\n raise NotImplementedError()", "def num_patterns(self):\n return len(self._pattern_reg)", "def applyAttrPattern(*args, nodeType: AnyStr=\"\", patternName: AnyStr=\"\", **kwargs)->int:\n pass", "def collect(tp, *args):\n\n def decorator(f):\n add_triple_pattern(tp, f, args)\n\n return decorator", "def __call__(self, *patterns):\n\n\n # defines the decorator that adds the patterns to the function lookup\n def decorator(func):\n func_args = inspect.getargs(func.__code__)\n func_name = func.__name__\n\n if len(patterns) != len(func_args.args):\n raise ChainsmokePatternMatchError(\n \"Number of patterns needs to equal number of args in {func_name}\".format(func_name=func_name))\n\n self.funcs[patterns] = func\n\n # define a function that gives a result from the matched function\n def inner(*inner_args):\n if not self.funcs.get((otherwise,)):\n raise ChainsmokePatternMatchError(\n \"Incomplete pattern match for {func_name}; try adding an 'otherwise' case\".format(\n func_name=func_name))\n\n matched_function = self.find_func(inner_args)\n return matched_function(*inner_args)\n\n return inner\n\n return decorator", "def expect_all(\n self, pattern_list: List[str], timeout: float = 10, strict: bool = True\n ) -> None:\n pattern_list = list(pattern_list)\n\n start_time = time.time()\n while pattern_list:\n time_spent = time.time() - start_time\n if time_spent > timeout:\n raise TIMEOUT(timeout)\n if strict:\n idx = self.expect_exact(pattern_list, timeout - time_spent)\n else:\n idx = self.expect(pattern_list, timeout - time_spent)\n pattern_list.pop(idx)", "def check_pattern(pattern, n_qubits):\n\n pattern_flat = []\n for pat in pattern:\n pattern_flat.extend(pat)\n\n if np.max(pattern_flat) >= n_qubits:\n print(\"Invalid pattern. Qubit index in the pattern exceeds the number of qubits.\")\n\n _, uni_counts = np.unique(np.array(pattern_flat), return_counts=True)\n if (uni_counts > 1).any():\n raise ValueError(\"Invalid pattern. 
Duplicate qubit index.\")", "def matched_sub_graph_instances(self, graph: Graph):\n if self.replacement_desc.match_kind == 'points': # instance is specified with lists of start/end nodes\n match = self._match_sub_graph_for_points(graph)\n if match is not None:\n yield match\n elif self.replacement_desc.match_kind == 'scope': # instance is specified with a node name pattern\n for instance in self.replacement_desc.sub_graph_instances():\n match = self._match_sub_graph_for_scope(graph, instance)\n if match is not None:\n yield match\n else:\n raise Error('Unsupported match kind \"{}\". Match kinds \"points\" or \"scope\" are supported only. '.format(\n self.replacement_desc.match_kind) +\n refer_to_faq_msg(35))", "def relate_pattern(a, b, pattern, **kwargs):\n return lib.relate_pattern(a, b, pattern, **kwargs)", "def BuildPatterns(self, entry):\n N = self.N\n for ent in WaveFunction.symmetry(entry, self.options['Ref'], self.options['Rot']):\n index = len(self.patterns)\n\n if self.options['PeriIpt']:\n width, height = len(ent) - 1, len(ent[0]) - 1\n ent = [ent[x][:] + ent[x][:N - 1] for x in range(len(ent))]\n ent = ent[:] + ent[:N - 1]\n else:\n width, height = len(ent) - N + 1, len(ent[0]) - N + 1\n\n matrix = [[None] * height for _ in range(width)]\n for x in range(width):\n for y in range(height):\n # Extract an N*N matrix as a pattern with the upper left corner being (x, y).\n pat = tuple(tuple(ent[x1][y:y + N]) for x1 in range(x, x + N))\n\n # If this pattern already exists, simply increment its weight. Otherwise, records\n # the new pattern and initializes its weight as 1, then increment the pattern index.\n try:\n matrix[x][y] = self.patterns[pat]\n self.weights[matrix[x][y]] += 1\n except KeyError:\n self.patterns[pat] = matrix[x][y] = index\n self.weights.append(1)\n self.rules.append([set() for _ in range(4)])\n index += 1\n self.make_rule((x, y), matrix)", "def add_pattern(self, start, stop, pattern):\n self.coord2pattern[start] = []\n self.coord2pattern[start].append(pattern)", "def patterns(self: TokenMatcher) -> List[Dict[str, Any]]:\n all_patterns = []\n for label, patterns in self._patterns.items():\n for pattern in patterns:\n p = {\"label\": label, \"pattern\": pattern, \"type\": self.type}\n all_patterns.append(p)\n return all_patterns", "def fmt_capture(kwargs: Any, *patterns: Any) -> Any: # type: ignore\n results = [copy_annotations(pattern, _fmt_capture(kwargs, pattern)) for pattern in each_string(*patterns)]\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(results) == 1\n return results[0]\n return results", "def process(patterns, text):\n\n for i, p in enumerate(patterns):\n pattern = _fix_pattern(p)\n\n found = []\n for grammar, replace in pattern:\n\n find_and_replace = create_find_and_replace(grammar, replace)\n results = parse_grammar(find_and_replace, text)\n if not results:\n break\n else:\n found.append(len(results))\n text = _transform_results(results, text)\n\n if found:\n log.info('=> pattern {} found {} time(s) in {} pass(es)'\n .format(i + 1, sum(found), len(found)))\n else:\n log.info('__ pattern {} not found'\n .format(i + 1))\n\n return text", "def precious(*patterns: Any) -> Any: # type: ignore\n strings: List[str] = []\n for pattern in each_string(*patterns):\n if not isinstance(pattern, AnnotatedStr):\n pattern = AnnotatedStr(pattern)\n pattern.precious = True\n strings.append(pattern)\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(strings) == 1\n return strings[0]\n return strings", "def oneof(chars):\n 
return Pattern(\"[\" + re.escape(chars) + \"]\")", "def add_triple_pattern(tp, collector, args):\n tp_parts = [part.strip() for part in tp.strip().split(' ')]\n tp = ' '.join(tp_parts)\n if tp not in __triple_patterns.keys():\n __triple_patterns[tp] = set([])\n if collector is not None:\n __triple_patterns[tp].add((collector, args))", "def test_build_sequence_multiple_values(self):\n # Test basic sequence rule\n r = Rule(schema={'type': 'seq', 'sequence': [{'type': 'str'}, {'type': 'int'}]})\n assert r.type == \"seq\"\n assert r.matching == \"any\"\n assert len(r.sequence) == 2\n assert isinstance(r.sequence, list)\n assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))\n assert r.sequence[0].type == \"str\"\n assert r.sequence[1].type == \"int\"\n\n # Test sequence without explicit type\n r = Rule(schema={'sequence': [{'type': 'str'}, {'type': 'int'}]})\n assert r.type == \"seq\"\n assert r.matching == \"any\"\n assert len(r.sequence) == 2\n assert isinstance(r.sequence, list)\n assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))\n assert r.sequence[0].type == \"str\"\n assert r.sequence[1].type == \"int\"\n\n # Test adding matchin rules", "def add_pattern(self, name, pattern=None):\n self._pattern_reg.add_pattern(name, pattern)", "def _maybe_add_pattern(attr, patterns):\n handler_type = getattr(attr, '_gen_handler', False)\n\n if not handler_type:\n return\n if handler_type not in ['call', 'cast', 'info']:\n raise AttributeError(\"unknown handler type {}\".format(handler_type))\n\n o = attr._gen_order\n p = attr._gen_pattern\n LOG.debug(\"adding {} {} with pattern {}\".format(handler_type,\n attr,\n p))\n patterns[handler_type].append((o, p))", "def relate_pattern(self, other, pattern): # -> bool:\n ...", "def set_pattern_object_references(self):\n if self.inflows.value:\n for obj_inflow in self.inflows.value:\n obj_inflow.baseline_pattern_object = self.patterns.find_item(obj_inflow.baseline_pattern)\n\n if self.aquifers.value:\n for obj_aquifer in self.aquifers.value:\n obj_aquifer.upper_evaporation_pattern_object = \\\n self.patterns.find_item(obj_aquifer.upper_evaporation_pattern)\n\n if self.dwf.value:\n for obj_dwf in self.dwf.value:\n del obj_dwf.time_pattern_objects[:]\n # order of patterns should be kept intact\n for i in range(0, len(obj_dwf.time_patterns)):\n if obj_dwf.time_patterns[i]:\n pat = self.patterns.find_item(obj_dwf.time_patterns[i].strip(\"\\\"\"))\n obj_dwf.time_pattern_objects.append(pat)\n else:\n obj_dwf.time_pattern_objects.append(None)", "def construct(cls, columns: typing.List[str]) -> \"SpaceDelimitedTextPattern\":\n return jsii.sinvoke(cls, \"construct\", [columns])", "def set_pattern(colors=('green', 'blue', 'red')): # (10)\n for i in range(0, int(ceil(float(NUM_LEDS)/float(len(colors))))):\n for color in colors:\n push_color(color)", "def findMatches(sequence, patterns):\n#\n#\n# idGenerator = IdGenerator()\n# root = Edge('', None, idGenerator)\n# i = 0\n# sequence = sequence + '$'\n# print len(sequence)\n# for i in range(len(sequence)):\n# seq = sequence[i:]\n# edge = root\n# while len(seq) > 0:\n# edge = edge.addSequence(seq, i)\n# seq = seq[1:]\n# print i\n # root = buildTrie(generateSequences(sequence))\n matches = [[m.start() for m in re.finditer('(?=' + pattern + ')', sequence)] for pattern in patterns]\n return matches", "def npatterns(self):\n return len(self.patterns)", "def setPattern(self,Apattern,Bpattern,Cpattern):\n self.coeffPattern = [Apattern,Bpattern,Cpattern]\n for i in range(self.m):\n 
self._updateEstimatorSize(i)", "def __init__(self, pattern, flags=0):\n if flags:\n str_flags = hre.decodeflags(flags)\n pattern = r\"(?%s:%s)\"%(str_flags, pattern)\n super(Regex, self).__init__(pattern)", "def listAttrPatterns(*args, patternType: bool=True, verbose: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def __init__(self, in_pattern, out_pattern,\r\n allow_multiple_clients=False,\r\n skip_identities_fn=None, name=None, pdb=False,\r\n tracks=(), get_nodes=None):\r\n self.in_pattern = in_pattern\r\n self.out_pattern = out_pattern\r\n if isinstance(in_pattern, (list, tuple)):\r\n self.op = self.in_pattern[0]\r\n elif isinstance(in_pattern, dict):\r\n self.op = self.in_pattern['pattern'][0]\r\n else:\r\n raise TypeError(\"The pattern to search for must start with \"\r\n \"a specific Op instance.\")\r\n self.__doc__ = (self.__class__.__doc__ +\r\n \"\\n\\nThis instance does: \" +\r\n str(self) + \"\\n\")\r\n self.allow_multiple_clients = allow_multiple_clients\r\n self.skip_identities_fn = skip_identities_fn\r\n if name:\r\n self.__name__ = name\r\n self.pdb = pdb\r\n self._tracks = tracks\r\n self.get_nodes = get_nodes\r\n if tracks != ():\r\n assert get_nodes", "def test_constructPossibleSequenceRegex(self):\n test_cases = [\n ['file03.03.rgb', [r'file(\\d+).03.rgb', r'file03.(\\d+).rgb']],\n ['file3030.030', [r'file(\\d+).030', r'file3030.(\\d+)']],\n ]\n for x, (fileName, regexStrings) in enumerate(test_cases):\n with self.subTest(i=x):\n result = path_core._core.FolderContainer._constructPossibleSequenceRegex(fileName)\n expectedResult = [re.compile(regexString) for regexString in regexStrings]\n self.assertEqual(expectedResult, result)", "def rewrite(self, *args, deep=True, **hints):\n if not args:\n return self\n\n hints.update(deep=deep)\n\n pattern = args[:-1]\n rule = args[-1]\n\n # support old design by _eval_rewrite_as_[...] 
method\n if isinstance(rule, str):\n method = \"_eval_rewrite_as_%s\" % rule\n elif hasattr(rule, \"__name__\"):\n # rule is class or function\n clsname = rule.__name__\n method = \"_eval_rewrite_as_%s\" % clsname\n else:\n # rule is instance\n clsname = rule.__class__.__name__\n method = \"_eval_rewrite_as_%s\" % clsname\n\n if pattern:\n if iterable(pattern[0]):\n pattern = pattern[0]\n pattern = tuple(p for p in pattern if self.has(p))\n if not pattern:\n return self\n # hereafter, empty pattern is interpreted as all pattern.\n\n return self._rewrite(pattern, rule, method, **hints)", "def any_of(*args:List[str]) -> str:\n return group(\"|\".join(args))", "def to_regex(*args:List[str], flags:int=0, compile:bool=True) -> Union[str, re.compile]:\n pattern = \"\".join(args)\n\n if compile:\n return re.compile(pattern, flags=flags)\n else:\n flagstring = re_flags_to_string(flags)\n pattern = f\"{flagstring}{pattern}\"\n return pattern", "def __init__(self, pattern_type, experimental_scenario, pattern):\n self.pattern_type = pattern_type # if pattern_type=1 --> experimental group, otherwise control group\n self.experimental_scenario = experimental_scenario\n self.pattern = pattern", "def regexp(regexp_list):\n def add_attribute(func):\n if not hasattr(func, \"regexp\"):\n func.regexp = []\n func.regexp.append(regexp_list)\n return func\n return add_attribute", "def save_pattern(self, pattern: Pattern):", "def save_pattern(self, pattern: Pattern):", "def patterngenerator(self, corpus, tokensperpattern, **kwargs):\n # Pre-tokenized all corpus documents, for efficiency\n tokenizedcorpus = [self.tokenizer.transform(doc) for doc in corpus]\n for pattern in self._tokenizedpatterngenerator(tokenizedcorpus, tokensperpattern, **kwargs):\n yield pattern", "def buildPattern(nspecies, s, members=None):\n pattern = []\n for x in range(nspecies):\n if x in s:\n if members:\n pattern.append(str(len(members[x])))\n else:\n pattern.append(\"1\")\n else:\n pattern.append(\"0\")\n return pattern", "def repeat(self, min=0, max=None):\n if max is not None and min > max:\n raise RuntimeError(\"min <= max needed\")\n\n # if there is at most one real group in the pattern,\n # then there is no structure so far at all\n # and thus we do not have to group, but just can repeat\n # (mind by .suppress() there may also be zero real groups, which also don't have to be grouped)\n # additionally, there is also no need for a further nesting if the sub group was just repeated\n struct_iter = iter(self.structure)\n firstelem = next(struct_iter)\n try:\n next(struct_iter)\n struct_len_1 = False\n except StopIteration:\n struct_len_1 = True\n\n if struct_len_1 and isinstance(firstelem, Repeated):\n # prevent nested repeatings Repeat(Repeat)\n self.pattern = hre.ensure_grouping(self.pattern)\n\n else:\n # the grouping is done by wrapping into a Leaf,\n # so that we can construct a map function which does all restructuring of the regex output\n self.group(\n wrapper = lambda structure: Repeated(Count(), structure), # creates a complete Structure element\n pseudo = True, # pass everything through\n liftkeys = True, # pass everything through\n silent = False, # this adds a grouping level also in the pattern\n )\n if max is None:\n self.pattern = r\"%s{%s,}\" % (self.pattern, min)\n elif min == max:\n self.pattern = r\"%s{%s}\" % (self.pattern, min)\n else:\n self.pattern = r\"%s{%s,%s}\" % (self.pattern, min, max)\n self._compiled = None", "def any_term_group(cls, *term_groups: typing.List[str]) -> \"IFilterPattern\":\n return 
jsii.sinvoke(cls, \"anyTermGroup\", [*term_groups])", "def test_max_args(self):\n self.assertRaisesRegex(TypeError, '.*takes at most 8 arguments.*', open, 1, 2, 3, 4, 5, 6, 7, 8, 9)", "def Consume(cls, raw):\n assert cls.REGEX is not None, f\"{cls!r} expected to have REGEX attribute not None for {raw!r}\"\n # assert len(cls.CONTENT_RULES) != 0, f\"{cls!r} must have CONTENT_RULES set\"\n\n product = None\n post = None\n\n regexs = [cls.REGEX]if isinstance(cls.REGEX, list) is False else cls.REGEX\n\n for regex in regexs:\n match = regex.search(raw)\n\n if match is not None:\n\n match_start = match.start(0)\n match_end = match.end(0)+1\n groups = match.groupdict()\n if \"content\" in groups:\n del groups['content']\n\n product = cls(match.group(1), match_start, match_end, **groups)\n return raw, product\n else:\n return raw, None", "def target_pattern(lst_tag_types):\n return ''.join([r'\\s{1}\\@(',\n '|'.join(lst_tag_types),\n r')\\(([^\\)]+)\\)'])", "def __extract_pattern_nodes(graph):\n tp_nodes = graph.subjects(RDF.type, AGORA.TriplePattern)\n for tpn in tp_nodes:\n subject = list(graph.objects(tpn, AGORA.subject)).pop()\n predicate = list(graph.objects(tpn, AGORA.predicate)).pop()\n obj = list(graph.objects(tpn, AGORA.object)).pop()\n subject_str = list(graph.objects(subject, RDFS.label)).pop().toPython()\n predicate_str = graph.qname(predicate)\n if (obj, RDF.type, AGORA.Variable) in graph:\n object_str = list(graph.objects(obj, RDFS.label)).pop().toPython()\n else:\n object_str = list(graph.objects(obj, AGORA.value)).pop().toPython()\n __plan_patterns[tpn] = '{} {} {}'.format(subject_str, predicate_str, object_str)", "def pattern_gen():\n pattern = \"\"\n\n return pattern", "def Consume(cls, raw):\n assert cls.REGEX is not None, f\"{cls!r} expected to have REGEX attribute not None for {raw!r}\"\n # assert len(cls.CONTENT_RULES) != 0, f\"{cls!r} must have CONTENT_RULES set\"\n\n product = None\n post = None\n\n regexs = [cls.REGEX]if isinstance(cls.REGEX, list) is False else cls.REGEX\n\n for regex in regexs:\n match = regex.search(raw)\n\n if match is not None:\n\n match_start = match.start()\n match_end = match.end()\n product = cls(match.group(\"content\"), match_start, match_end)\n return raw, product\n else:\n return raw, None", "def has_free(self, *patterns):\n if not patterns:\n return False\n p0 = patterns[0]\n if len(patterns) == 1 and iterable(p0) and not isinstance(p0, Basic):\n # Basic can contain iterables (though not non-Basic, ideally)\n # but don't encourage mixed passing patterns\n raise TypeError(filldedent('''\n Expecting 1 or more Basic args, not a single\n non-Basic iterable. Don't forget to unpack\n iterables: `eq.has_free(*patterns)`'''))\n # try quick test first\n s = set(patterns)\n rv = self.has_xfree(s)\n if rv:\n return rv\n # now try matching through slower _has\n return self._has(iterfreeargs, *patterns)", "def search_by_pattern(self, tl):\n print(\"Search by regex pattern\")\n pattern = input(\"Please enter search pattern: \")\n return tl.findall_pattern(pattern)", "def to_pattern(obj):\n if isinstance(obj, Pattern):\n return obj\n return Glob(str(obj))", "def _repeat_pattern(pattern, width):\n assert width % pattern.width == 0\n return operation.repeat(pattern, width // pattern.width)", "def test_multi_sink(self):\n with self.assertRaises(ValidationError):\n with Graph('g') as graph:\n pike.glob('a', '*')\n pike.glob('b', '*')" ]
[ "0.63439953", "0.630216", "0.6195141", "0.6144406", "0.6016073", "0.5959742", "0.58526075", "0.58162165", "0.5781618", "0.5735671", "0.57103336", "0.566348", "0.5650091", "0.5646195", "0.557469", "0.5488463", "0.5486494", "0.5430798", "0.53853893", "0.536901", "0.5356352", "0.53440046", "0.5335034", "0.5328101", "0.5323031", "0.5319917", "0.53175545", "0.52567315", "0.52468705", "0.52353853", "0.5226557", "0.5223813", "0.52194303", "0.52033186", "0.5188881", "0.51655453", "0.5163878", "0.5144095", "0.5133141", "0.51323473", "0.51225436", "0.510544", "0.509109", "0.50893104", "0.5088611", "0.5084949", "0.50696594", "0.5047363", "0.5008415", "0.49656692", "0.4965528", "0.49573418", "0.49495336", "0.49490812", "0.49385357", "0.49228907", "0.49186134", "0.49173105", "0.49015966", "0.4897781", "0.48976487", "0.4897561", "0.48974594", "0.48871645", "0.48852545", "0.4883432", "0.48715714", "0.48663577", "0.48536676", "0.48534682", "0.48497725", "0.48453927", "0.48372647", "0.4828177", "0.48216525", "0.482049", "0.480717", "0.4806548", "0.48035508", "0.47967082", "0.47965124", "0.47860596", "0.4776379", "0.47753868", "0.47753868", "0.47673136", "0.47648755", "0.4763785", "0.4760403", "0.47584093", "0.47504506", "0.47419697", "0.4725484", "0.47114667", "0.47108778", "0.4710384", "0.47056097", "0.4701549", "0.4693202", "0.46915346" ]
0.6190371
3
Return all row and column groups.
def get_regular_groups(self, grid, min=3):
    row_groups = self._get_row_groups(grid.grid, models.patterns.RowPattern, min)
    col_groups = self._get_row_groups(grid.grid.T, models.patterns.ColumnPattern, min)

    return row_groups + col_groups
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def groups(self, *columns):\n # TODO: This really needs to just use Pandas.MultiIndex, stack(),\n # and pivot(). I just need to rework the FactorExprNode stuff\n # to produce a MultiIndex; then, this DataCube can just pass\n # in self._expr.\n raise NotImplementedError", "def _get_groupings(dist_matrix_header, dist_matrix, groups, within=True,\r\n suppress_symmetry_and_hollowness_check=False):\r\n # Note: Much of this code is taken from Jeremy Widmann's\r\n # distances_by_groups() function, part of make_distance_histograms.py from QIIME 1.8.0.\r\n if not suppress_symmetry_and_hollowness_check:\r\n if not is_symmetric_and_hollow(dist_matrix):\r\n raise ValueError(\"The distance matrix must be symmetric and \"\r\n \"hollow.\")\r\n result = []\r\n group_items = groups.items()\r\n\r\n for i, (row_group, row_ids) in enumerate(group_items):\r\n row_indices = _get_indices(dist_matrix_header, row_ids)\r\n if within:\r\n # Handle the case where indices are the same so we need to omit\r\n # the diagonal.\r\n block = dist_matrix[row_indices][:, row_indices]\r\n\r\n size = len(row_indices)\r\n indices = []\r\n for i in range(size):\r\n for j in range(i, size):\r\n if i != j:\r\n indices.append(block[i][j])\r\n if indices:\r\n result.append((row_group, row_group, indices))\r\n else:\r\n # Handle the case where indices are separate: just return blocks.\r\n for j in range(i + 1, len(groups)):\r\n col_group, col_ids = group_items[j]\r\n col_indices = _get_indices(dist_matrix_header, col_ids)\r\n vals = dist_matrix[row_indices][:, col_indices]\r\n\r\n # Flatten the array into a single-level list.\r\n vals = map(None, vals.flat)\r\n if vals:\r\n result.append((row_group, col_group, vals))\r\n return result", "def get_all_groups(self):\n self.cursor.execute(\"select * from groups\")\n self.connection.commit()\n return self.cursor.fetchall()", "def _get_row_groups(self, array, pattern, min):\n groups = []\n\n for row, col in zip(array, xrange(len(array))):\n start = 0\n\n while start + min <= len(row):\n size = 1\n orb_type = type(row[start])\n\n for cell in xrange(start + 1, len(row)):\n if orb_type != type(row[cell]):\n break\n size += 1\n start += 1\n\n if size >= min:\n groups.append(pattern(\n orb_type, size, (col, start - size + 1)\n ))\n\n start += 1\n\n return groups", "def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)", "def _get_md_row_groups(pieces):\n row_groups = []\n for piece in pieces:\n for rg in range(piece.get_metadata().num_row_groups):\n row_group = piece.get_metadata().row_group(rg)\n for c in range(row_group.num_columns):\n if not row_group.column(c).statistics:\n return []\n row_groups.append(row_group)\n return row_groups", "def row_group_limits():\r\n from pymatgen import Element, periodic_table\r\n \r\n # Get all available elements in periodic table.\r\n rs = [e.row for e in periodic_table.Element]\r\n gs = [e.group for e in periodic_table.Element]\r\n \r\n return (max(rs), max(gs))", "def _get_groups(X, y):\n if SK18:\n X, y = _indexable(X, y)\n return X, y, None", "def get_groups_from_project_multiple(project_ID, characteristics_groups, return_all=False, return_matrix=False):\n needed_characteristic = [\n 'cell type',\n 'developmental stage',\n 'inferred cell type - ontology labels'\n ]\n \n # Read the metadata file using the API\n metadata, matrix, gene_names, cell_names = read_files(project_ID)\n\n # If there is not metadata for this project, return empty lists\n if metadata is None and return_matrix:\n return [], [], None, None\n elif metadata 
is None:\n return [], []\n\n metadata = process_metadata(metadata, cell_names)\n metadata_cells = len(metadata)\n number_genes = len(gene_names)\n \n rows = []\n combinations_subgroups = []\n \n for characteristics in characteristics_groups:\n # Initialitation of parameters\n subgroups = init_subgroups(metadata)\n project_characteristics = metadata.columns\n used_characteristics = []\n\n # Start the subgroup generation using the characteristics\n for characteristic in characteristics:\n # If the characteristic is not in the project, we skip it\n if characteristic not in project_characteristics:\n continue\n\n # For each subgroup created, divide it using the current characteristic\n subgroups_aux = []\n for subgroup in subgroups:\n subgroup_aux = get_subgroups(subgroup, characteristic)\n\n subgroups_aux = subgroups_aux + subgroup_aux\n\n # Check if we have lost cells\n cells_aux = sum([len(x['dataframe']) for x in subgroups_aux])\n if cells_aux < metadata_cells and characteristic not in needed_characteristic:\n continue \n \n # Update parameters\n used_characteristics = used_characteristics + [characteristic]\n subgroups = subgroups_aux\n\n # If there are no subgroups left, stop\n if not subgroups:\n break\n\n row = create_row(project_ID, subgroups, used_characteristics, metadata_cells, number_genes)\n\n # If the combination isnt repeated, save it\n if row not in rows:\n rows.append(row)\n combinations_subgroups.append(subgroups)\n\n # If all the combinations are needed\n if return_all:\n if return_matrix: # If matrix has to be returned\n return rows, combinations_subgroups, matrix, gene_names\n \n return rows, combinations_subgroups\n \n # Get best combination\n row, index = best_subgroup_combination(rows)\n subgroups = combinations_subgroups[index]\n\n if return_matrix: # If matrix has to be returned\n return row, subgroups, matrix, gene_names \n \n return row, subgroups", "def get_groups(self):\n return [self.primary_group] + list(self.secondary_groups)", "def getGroups(self):\n return [g[0] for g in grp.getgrall()]", "def iter_groups(self):\n\t\treturn iter(self._groups)", "def rows(self):\n for row in range(self.min_row, self.max_row+1):\n yield tuple('%s%d' % (get_column_letter(col), row)\n for col in range(self.min_col, self.max_col+1))", "def grids(self):\n x = self.xvalues\n if self.ndim == 1:\n return x\n if self.ndim == 2:\n return x[None, :], x[:, None]\n if self.ndim == 3:\n return x[None, :, None], x[:, None, None], x[None, None, :]", "def get_all_groups(self):\n return self.groups + ['all']", "def column_groups(self) -> pulumi.Output[Optional[Sequence['outputs.DataSetColumnGroup']]]:\n return pulumi.get(self, \"column_groups\")", "def all_groups(self):\n return self._all_groups", "def __iter__(self):\n for g, xs in self._groups.items():\n dtype = dt.Struct(self._item_fields)\n df = ta.Column(dtype).append(\n tuple(\n tuple(\n self._parent._data.child_at(\n self._parent._data.type().get_child_idx(f.name)\n )[x]\n for f in self._item_fields\n )\n for x in xs\n )\n )\n\n yield g, df", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def _iter_groups(self, df, y=None):\n groups = df.groupby(self.groupby).indices\n for key, sub_idx in groups.items():\n sub_df = df.iloc[sub_idx]\n if y is not None:\n # y is either a numpy array or a pd.Series so index accordingly\n sub_y = y.iloc[sub_idx] if type(y) is pd.Series else y[sub_idx]\n else:\n sub_y = None\n yield key, sub_df, sub_y", "def get_group_names(self):\n return [self.frame.columns[i] for i in self.group_cols]", 
"def calc_group(self, row, col):\n return ((row // 3) * 3 + (col // 3))", "def groups(self):\n return []", "def get_grid(self):\n self.fullws = []\n for row in self.word_search_grid:\n rowdata = []\n for column in row:\n rowdata += [column.entry.get()]\n self.fullws += [rowdata]\n self.logic.set_grid(self.fullws)", "def _local_groupby(df_rows, axis=0):\n concat_df = pd.concat(df_rows, axis=axis)\n return concat_df.groupby(concat_df.index)", "def get_variable_groups(all_inputs):\n row_length = len(all_inputs[0])\n for single_input in all_inputs[1:]:\n if len(single_input) != row_length:\n raise ValueError(\n \"Please make sure the length is the same if you want to input multiple values when the type of variables is t_array or t_mapping\")\n\n final_groups = list()\n row_length = len(all_inputs[0])\n col_length = len(all_inputs)\n for i in range(1, row_length):\n temp_list = list()\n for j in range(col_length):\n temp_list.append((all_inputs[j][0], all_inputs[j][i]))\n final_groups.append(temp_list)\n return final_groups", "def read_row_group_arrays(file, rg, columns, categories, schema_helper, cats,\n selfmade=False, assign=None):\n out = assign\n maps = {}\n\n for column in rg.columns:\n if (_is_list_like(schema_helper, column) or\n _is_map_like(schema_helper, column)):\n name = \".\".join(column.meta_data.path_in_schema[:-2])\n else:\n name = \".\".join(column.meta_data.path_in_schema)\n if name not in columns:\n continue\n\n read_col(column, schema_helper, file, use_cat=name+'-catdef' in out,\n selfmade=selfmade, assign=out[name],\n catdef=out.get(name+'-catdef', None))\n\n if _is_map_like(schema_helper, column):\n if name not in maps:\n maps[name] = out[name].copy()\n else:\n if column.meta_data.path_in_schema[0] == 'key':\n key, value = out[name], maps[name]\n else:\n value, key = out[name], maps[name]\n out[name][:] = [dict(zip(k, v)) if k is not None else None\n for k, v in zip(key, value)]", "def get_groups(self):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_get_groups_query+\" ORDER BY $groupname_field$\",{'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: get_groups: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_groupname_field]", "def _possible_grids(self, num_windows):\n if num_windows < 2:\n end = 2\n else:\n end = num_windows // 2 + 1\n for rows in range(1, end):\n cols = int(math.ceil(num_windows / rows))\n yield (rows, cols, ROWCOL)\n if rows != cols:\n # also want the reverse test\n yield (cols, rows, COLROW)", "def _get_subgroups(self):\n groups = [] # array of arrays\n for i in range(self.filter.shape[0]):\n for j in range(i):\n if self.filter[i][j]:\n if len(groups) < 1:\n groups.append([j, i])\n continue\n found = False\n for group_i, _ in enumerate(groups):\n if i in groups[group_i]:\n if j not in groups[group_i]:\n groups[group_i].append(j)\n found = True\n elif j in groups[group_i]:\n if i not in groups[group_i]:\n groups[group_i].append(i)\n found = True\n if not found:\n groups.append([i, j])\n return groups", "def find_groups(self, mesh):\n grps = []\n dim = mesh.give_dim()\n if dim:\n ctypes = self._dct[dim]\n grps = self._exp.find_groups_from_ctypes(mesh, ctypes)\n log.debug(\"GroupExplorator.find_groups for mesh %s returns %s with dim %s and dct %s\", mesh, grps, dim, self._dct)\n return grps", "def 
column_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataSetColumnGroupArgs']]]]:\n return pulumi.get(self, \"column_groups\")", "def get_groups(self, cell: Cell) -> Set[Group]:\n\t\tassert cell in self._cells\n\t\treturn self._cells_to_group_map[cell]", "def get_cells(self):\n return [\n cell for column in self.children for cell in column.get_cells()]", "def find_matrix_roi_groups(self):\n matrix_groups = list()\n\n log.debug(\"Looking for Matrix ROI groups (tiling datasets).\")\n root = self.tree.getroot()\n groups = root.findall(\"matl:group\", self.xmlns)\n for grp in groups:\n grp_type = grp.attrib[self.xsi + \"type\"]\n if grp_type == \"matl:DefineMatrixROI\":\n log.debug(\"Group %s is a Matrix ROI.\", grp.attrib[\"objectId\"])\n matrix_groups.append(grp)\n if grp_type == \"matl:MosaicROI\":\n log.debug(\"Group %s is a Mosaic ROI.\", grp.attrib[\"objectId\"])\n matrix_groups.append(grp)\n\n log.info(\"Found %i Matrix ROIs (tiling datasets).\", len(matrix_groups))\n return matrix_groups", "def info_materials_groups_get():\n session = info_map.Session()\n\n mat = aliased(info_map.Material)\n grp = aliased(info_map.Group)\n\n q = session.query(mat.group_id,grp.name).join(grp).distinct()\n groups = [Group(group=row.group_id,name=row.name) for row in q.all()]\n return groups, 200", "def groupRows(rows, column):\n filteredRows = filterRows(lambda row: row[column] != '', rows)\n if not filteredRows:\n return []\n groups = [[]]\n index = 0\n lastData = filteredRows[0][column]\n for row in filteredRows:\n if lastData != row[column]:\n index += 1\n lastData = row[column]\n groups.append([row])\n else:\n groups[index].append(row)\n return [group for group in groups]", "def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))", "def cells(self):\n return chain.from_iterable(self.cols)", "def makeGroupsFromCutFile(self):\n if self.cutfile == None:\n print \"Cannot make groups without a cuts file\"\n return ([],[])\n else:\n groups = []\n labels = []\n yields = []\n all_cols = self.qie.columns.values\n # For each predefined group\n for grouplist in cut_groups:\n labels.append(grouplist[0])\n g = None\n # For each cut in that group\n for cut in grouplist[1]:\n # Get min and max values for main cuts (TODO: handle marginal cuts)\n cut_min = self.cuts[cut][0]\n cut_max = self.cuts[cut][1]\n # For each df column corresponding to that cut (sometimes more than one measurement)\n for col in all_cols:\n if col.split(\"_\")[0] == cut:\n g_tmp = (self.qie[col] < cut_min) | (self.qie[col] > cut_max)\n if 'NoneType' in str(type(g)) :\n g = g_tmp\n else: \n g = g | g_tmp\n # Make exclusive groups\n if len(groups) > 0:\n g = g & (self.NotGroup(groups))\n groups.append(g)\n yields.append(g.sum())\n # Make final group containing all other chips\n groups.append(self.NotGroup(groups))\n labels.append(\"Good\")\n yields.append(groups[-1].sum())\n self.makeYieldsTable(yields, labels)\n # Add column to data frame containing \"Good\" (1), \"bad\" (0), \"marginal\" (2,..) 
info\n self.qie[\"Sorting\"] = np.where(groups[-1], 1, 0)\n print sum(self.qie[\"Sorting\"])\n #print self.qie\n self.makeSortingFile()\n return (groups, labels)", "def group_by(self, *columns):\n for column in columns:\n self.groups.append(column)\n\n return self", "def grid(plots):\n ydim, xdim = plots.shape\n w = mean_width(plots)\n columns = []\n for i in plots.identifiers:\n r = plots.region_by_identifier(i)\n r.identifier = i\n if len(columns) == 0:\n c = Column(w, xdim)\n c.append(r)\n columns.append(c)\n continue\n\n included = False\n for c in columns:\n if c.in_column(r):\n c.append(r)\n included = True\n break\n\n if not included:\n c = Column(w, xdim)\n c.append(r)\n columns.append(c)\n\n # Sort the columns left to right.\n columns.sort(key=lambda i: i.x_mean)\n\n for c in columns:\n # Sort the rows top to bottom.\n c.sort(key=lambda i: i.centroid[0])\n\n return columns", "def collect_rows():\n return ((x, y) for x in range(80) for y in range(x + 1, 9 + (x//9)*9))", "def data_group():\n ...", "def get_cells(self, row, col):\r\n surrounding_cells = self.get_surrounding_cells(row, col)\r\n closed_cells = self.filter_cells(surrounding_cells, '?')\r\n mine_cells = self.filter_cells(surrounding_cells, 'x')\r\n numbered_cells = list(set(surrounding_cells).difference(closed_cells))\r\n numbered_cells = list(set(numbered_cells).difference(mine_cells))\r\n return surrounding_cells, closed_cells, mine_cells, numbered_cells", "def getPrintGrid(self):\n print_grid_ = []\n for row in self.grid:\n print_row = []\n for stone in row:\n if not stone: print_char = NO_STONE_CHAR\n else: print_char = stone.print_char\n print_row += [ print_char ]\n print_grid_ += [ print_row ]\n return print_grid_", "def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)", "def get_item_groups(dataset):\n return dataset.groupby(\"name\", as_index=False, sort=False).groups", "def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])", "def get_group_array(self):\n raise NotImplementedError", "def get_rows(self):\n\t\trows = [self.header]\n\t\tfor d in self.data:\n\t\t\trow = []\n\t\t\tfor h in self.header:\n\t\t\t\ttry:\n\t\t\t\t\trow.append(d[h])\n\t\t\t\texcept KeyError:\n\t\t\t\t\trow.append(empty)\n\t\t\trows.append(row)\n\t\treturn rows", "def UngroupRows():\n\n from globals import Matrix\n if(Matrix.SideAxis.Groups.Count < 2):\n return #nothing to do\n\n if(Matrix.TopAxis.Groups.Count > 1):\n raise Exception(\"The table cannot have nesting or concatenation on the top\")\n # replicate the existing top group\n masterTopGroup = Matrix.TopAxis.Groups[0]\n\n \n masterSideGroup = Matrix.SideAxis.Groups[0]\n for iGrp in range(1,Matrix.SideAxis.Groups.Count):\n #activeGroup means the one we are transferring from side to top\n activeGroup = Matrix.SideAxis.Groups[iGrp]\n\n\n #topGroup is the new group we are creating for the top\n topGroup = Matrix.TopAxis.Groups.AddNew(None,activeGroup.Name + \"_top\",activeGroup.Label)\n for masterMember in masterTopGroup:\n newMember = topGroup.AddNewMember(masterMember.Name,masterMember.Label,masterMember.IsVisible,masterMember.IsSummaryScore)\n Matrix.TopAxis.DataMembers.Add(newMember)\n\n # transfer any values over\n for member in activeGroup:\n sourceRow = Matrix[member]\n #find the target row in the first group\n for targetMember 
in masterSideGroup:\n if targetMember.Label == member.Label:\n targetRow = Matrix[targetMember]\n \n for intColOffsetId in range(masterTopGroup.Count):\n sourceCol = masterTopGroup[intColOffsetId]\n targetCol = topGroup[intColOffsetId]\n for val in sourceRow[sourceCol]:\n targetRow[targetCol].AddValue(val)\n \n masterTopGroup.Label = Matrix.SideAxis.Groups[0].Label\n \n while Matrix.Count > masterSideGroup.Count:\n Matrix.DeleteRow(masterSideGroup.Count)", "def get_contribution_dataframe_groups(self):\n pargrp_dict = {}\n par = self.pst.parameter_data\n groups = par.groupby(\"pargp\").groups\n for grp,idxs in groups.items():\n pargrp_dict[grp] = list(par.loc[idxs,\"parnme\"])\n return self.get_contribution_dataframe(pargrp_dict)", "def group_by(self, columns):\n\n return self._get(\"group\", columns, Table)", "def data_grouping(self):\n group_container, film_container, plank_container = [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)], \\\n [[] for a in range(self.tot_conditions)]\n\n for i in self.data_labels:\n group = int(i[:-1])\n group_container[group - 1].append(i)\n film_container[group - 1].append(self.film_count[self.data_labels.index(i)])\n plank_container[group - 1].append(self.plank_count[self.data_labels.index(i)])\n\n return group_container, film_container, plank_container", "def group_list(self, array):\n\n if array.ndim == 1:\n return [np.array(array[self.row_indices[k]])\n for k in self.group_labels]\n else:\n return [np.array(array[self.row_indices[k], :])\n for k in self.group_labels]", "def generate_groups():\n groups = group_elements(\n generate_examples(file_name),\n cfg.tfrecord_size)\n\n # pairing groups to unique numbers and \n # filtering nulls from zip_longest\n groups = (\n list(filter(is_not_none, group))\n for group in groups\n )\n\n yield from groups", "def get_line_groups(self, fb_brw):\n group_max = self.get_group_size(fb_brw)\n lines_height = self.get_lines_height(fb_brw)\n res = []\n line_subset = OrderedDict()\n group_height = 0\n first_page = group_max - 1\n other_page = group_max - 2\n page = 1\n\n page_max = first_page\n for (line, line_height) in lines_height.iteritems():\n if line_height + group_height <= page_max:\n line_subset.update([(line, line_height)])\n group_height += line_height\n else:\n # save group\n res.append(self.get_group(line_subset, group_height,\n page, page_max))\n # init new group\n line_subset = OrderedDict([(line, line_height)])\n group_height = line_height\n page_max = other_page\n page += 1\n res.append(self.get_group(line_subset, group_height, page, page_max))\n return res", "def get_rows(self):\n raise NotImplementedError('Subclass this to make the rows')", "def build_index_groups(train):\n nz_row, nz_col = train.nonzero()\n nz_train = list(zip(nz_row, nz_col))\n\n grouped_nz_train_byrow = group_by(nz_train, index=0)\n nz_row_colindices = [(g, np.array([v[1] for v in value]))\n for g, value in grouped_nz_train_byrow]\n\n grouped_nz_train_bycol = group_by(nz_train, index=1)\n nz_col_rowindices = [(g, np.array([v[0] for v in value]))\n for g, value in grouped_nz_train_bycol]\n return nz_train, nz_row_colindices, nz_col_rowindices", "def groupby(self):\n try:\n return plist([x.groupby() for x in self])\n except Exception:\n groups = collections.OrderedDict()\n for i, x in enumerate(self):\n if x not in groups:\n groups[x] = plist()\n groups[x].append(self.__root__[i])\n return plist(groups.values())", "def _parse_groupped_data(self):\n for i, val in enumerate(self.values.keys()):\n xy = 
self.values[val]\n self._set_and_get(\"x_\", val, xy[:, 0])\n self._set_and_get(\"y_\", val, xy[:, 1])", "def groups(self):\n yield self\n for member_group in self._groups():\n yield member_group", "def get_grid_list (self):\n return [\n 'area', 'ald', 'poi', 'ice', 'lake pond',\n 'drainage', 'degree-day','climate event'\n ]", "def test_get_groupings_within_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=True), [])", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def group_data(self):\n groups = []\n bug_map = self.bug_map()\n union_map = self.union_map(list(bug_map.keys()))\n # test_id grouping\n for union_id in set(union_map.values()):\n group = []\n for k, v in union_map.items():\n if v == union_id:\n group.extend(bug_map[k])\n if len(group) > 1:\n groups.append(group)\n return groups", "def group(self):\n self.column = self.column.apply(lambda value: parse_float(value))\n group_dframe = self.dframe[self.groups].join(self.column)\n indices = group_dframe.reset_index().set_index(\n self.groups + [self.name])\n\n def max_index_for_row(row):\n groups = row[self.groups]\n value = row[self.name]\n\n xsection = indices.xs(groups, level=self.groups)\n\n if isnan(value):\n return minint()\n\n max_index = xsection.get_value(value, 'index')\n\n if isinstance(max_index, Series):\n max_index = max_index.max()\n\n return max_index\n\n groupby_max = self._groupby().max().reset_index()\n column = groupby_max.apply(max_index_for_row, axis=1).apply(int)\n column.name = self.name\n\n return DataFrame(column).join(groupby_max[self.groups])", "def test_get_groupings_between_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=False), [])", "def splitColumns(self, numColumns):\n if numColumns < 2:\n return [self]\n if len(self) <= numColumns:\n return [OutputGroup([item]) for item in self]\n groupList = []\n numEach = len(self) // numColumns\n for colNum in range(numColumns - 1):\n groupList.append(self[colNum * numEach : (colNum+1) * numEach])\n groupList.append(self[(numColumns-1) * numEach : ])\n numChanges = 1\n while numChanges:\n numChanges = 0\n for colNum in range(numColumns - 1):\n if groupList[colNum].totalHeight() > groupList[colNum+1].\\\n totalHeight() + groupList[colNum][-1].height:\n groupList[colNum+1].insert(0, groupList[colNum][-1])\n groupList[colNum] = groupList[colNum][:-1]\n numChanges += 1\n if groupList[colNum].totalHeight() + groupList[colNum+1][0].\\\n height <= groupList[colNum+1].totalHeight():\n groupList[colNum].append(groupList[colNum+1][0])\n groupList[colNum+1] = groupList[colNum+1][1:]\n numChanges += 1\n return groupList", "def iterrows(self):\n return (self.Row(*row_vals) for row_vals in izip(*self.columns))", "def potential_groups(self, player) -> Set[Group]:\n directions = [\n (-1, 1), # up-right diagonal\n (0, 1), # horizontal\n (1, 1), # down-right diagonal\n (1, 0), # vertical\n ]\n groups = set()\n\n for row in range(len(self.state[0])):\n for col in range(len(self.state[0][0])):\n for row_diff, col_diff in directions:\n if self.is_potential_group(player, row, col, row_diff, col_diff):\n groups.add(Group(\n player,\n start=Square(row, col),\n end=Square(row + 3 * row_diff, col + 3 * col_diff),\n ))\n\n return groups", "def get_row_dict(self) -> HeaderToWells:\n return self._grid.rows", "def grid(self) -> dict:\n raise NotImplementedError", 
"def rows(self):\n return self.Rows(self)", "def rows(self):\r\n raise NotImplementedError", "def calib_groups(self):\n return None if self.calib_bitmask is None else self.calib_bitmask.keys()", "def find_groups_from_ctypes(self, mesh, ctypes):\n raise NotImplementedError", "def rows(self):\r\n return Rows(self)", "def grid(self, (z, x, y)):\n # sources.py -> MapnikRenderer -> grid\n content = self.reader.grid(z, x, y, self.grid_fields, self.grid_layer)\n return content", "def getGroups():\r\n return Group.getGroups()", "def T(self):\n self = +self\n ret = Matr()\n for hcolp in range(len(self.header)):\n ret.append(Matr())\n for row in range(len(self)):\n ret[hcolp].append(self[row,hcolp])\n return ret", "def find_frame_calib_groups(self, row):\n return self.calib_bitmask.flagged_bits(self['calibbit'][row])", "def groups(self):\n return self.get_data(\"groups\")", "def rows(self):\r\n raise NotImplementedError()", "def elements_by_row(self):\n if not hasattr(self, '_rows'):\n self._rows = [[] for i in range(self.dims[0])]\n for element in self.active_elements:\n col, row = self.axis_position(element)\n self._rows[row].append(element)\n \n return self._rows", "def _cells(self):\n\n # Matrix size\n size = self.specification['DIMENSION']\n # Edge weight format\n edge_format = self.specification['EDGE_WEIGHT_FORMAT'].split('_')\n # Matrix type: FULL, UPPER, LOWER\n matrix = edge_format[0]\n\n # Data direction: ROW, COL; diagonal offset\n offset = 0\n if matrix != 'FULL':\n if edge_format[1] in ['ROW', 'COL']:\n direction = edge_format[1]\n # No diagonal entries, matrix needs to be offseted by 1\n offset = 1\n else:\n direction = edge_format[2]\n\n # Initial position in the matrix\n row = col = -1\n\n # Increments `b`. If `b` is larger than `bound` increments `a` and\n # sets `b` to `ret`.\n def calc(a, b, bound, ret):\n a = max(a, 0)\n b += 1\n if b > bound:\n a += 1\n b = ret\n return a, max(b, ret)\n\n # Generate the coordinates\n while True:\n if matrix == 'FULL':\n row, col = calc(row, col, size - 1, 0)\n elif matrix == 'UPPER' and direction == 'ROW':\n row, col = calc(row, col, size - 1, row + 1 + offset)\n elif matrix == 'LOWER' and direction == 'COL':\n col, row = calc(col, row, size - 1, col + 1 + offset)\n elif matrix == 'LOWER' and direction == 'ROW':\n row, col = calc(\n row, col, row - offset if offset else max(row, 0), 0)\n elif matrix == 'UPPER' and direction == 'COL':\n col, row = calc(\n col, row, col - offset if offset else max(col, 0), 0)\n\n # End when all coordinates are generated\n if row >= size or col >= size:\n break\n\n yield row, col", "def get_chunks(self, data, scale=1):\r\n x_chunks, y_chunks = [(0, self.rows)], [(0, self.cols)]\r\n if data.shape[0] > self.rows:\r\n x_chunks = self.perform_chunking(data.shape[0], self.rows)\r\n else:\r\n x_chunks = [(0, data.shape[0])]\r\n if data.shape[1] > self.cols:\r\n y_chunks = self.perform_chunking(data.shape[1], self.cols)\r\n else:\r\n y_chunks = [(0, data.shape[1])]\r\n return x_chunks, y_chunks", "def grouped_bins(self):\n # Load the vector version #\n df = self.grouped_vectors\n # Empty data frame to contain result #\n result = pandas.DataFrame()\n # Iterate #\n for i, row in df.iterrows():\n # Compute a data frame containing the recreated bins #\n current = binner(row[self.sum_col], self.sum_col, self.bin_width)\n # Keep the current values of the group columns as an index #\n col_values = [row[col] for col in self.group_cols]\n current = current.assign(**dict(zip(self.group_cols, col_values)))\n current = 
current.set_index(self.group_cols)\n # Append #\n result = result.append(current)\n # Return #\n return result", "def get_row_group_density(mt, rg_limits):\r\n \r\n fraction_matrix = zeros(rg_limits)\r\n \r\n composition = Composition(mt['pretty_formula'])\r\n \r\n for element in composition:\r\n fraction = composition.get_atomic_fraction(element)\r\n elem = Element(element)\r\n row = elem.row\r\n group = elem.group\r\n fraction_matrix[row-1][group-1] = fraction\r\n \r\n return fraction_matrix", "def iterall(self):\r\n return (column for name, column in self.iteritems())", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def all(self):\n for group in self.groups():\n yield group\n for env in self.envs():\n yield env", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def rows(self):\n return list(self)" ]
[ "0.61957633", "0.6163433", "0.6040524", "0.5894949", "0.58843005", "0.58239186", "0.58054775", "0.5755542", "0.56944263", "0.56619304", "0.56394035", "0.56309", "0.56161165", "0.56100893", "0.55997294", "0.55897653", "0.5586745", "0.5579677", "0.55577207", "0.55577207", "0.5531093", "0.5509804", "0.54825634", "0.5482236", "0.5477484", "0.54531723", "0.544685", "0.5445219", "0.5429757", "0.5429742", "0.54239976", "0.5398363", "0.53931314", "0.5352524", "0.5331918", "0.53305954", "0.5314105", "0.53106624", "0.53028077", "0.52872753", "0.5284663", "0.5274596", "0.5274148", "0.5258787", "0.5253935", "0.5242868", "0.5240866", "0.5240607", "0.5239786", "0.5234623", "0.52316684", "0.52166265", "0.5212956", "0.5208941", "0.52084035", "0.5207467", "0.5206175", "0.5202371", "0.52003676", "0.5199205", "0.5184338", "0.5175566", "0.51712286", "0.51693666", "0.5162511", "0.5161141", "0.5132679", "0.5132608", "0.5131336", "0.5128639", "0.5124106", "0.51231223", "0.5121013", "0.51204836", "0.5120471", "0.51182836", "0.510652", "0.5093688", "0.5091127", "0.5085601", "0.5080629", "0.5067324", "0.50528", "0.50414425", "0.5039739", "0.5034935", "0.5034908", "0.50320804", "0.5027849", "0.50256604", "0.5023893", "0.50139976", "0.5003433", "0.5001996", "0.5001996", "0.5001996", "0.49932805", "0.49916652", "0.49916652", "0.49873924" ]
0.68432385
0
Return groups with a min size that exist with grid on xaxis.
def _get_row_groups(self, array, pattern, min):
    groups = []
    for row, col in zip(array, xrange(len(array))):
        start = 0
        while start + min <= len(row):
            size = 1
            orb_type = type(row[start])
            for cell in xrange(start + 1, len(row)):
                if orb_type != type(row[cell]):
                    break
                size += 1
                start += 1
            if size >= min:
                groups.append(pattern(
                    orb_type,
                    size,
                    (col, start - size + 1)
                ))
            start += 1
    return groups
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_regular_groups(self, grid, min=3):\n row_groups = self._get_row_groups(grid.grid, models.patterns.RowPattern, min)\n col_groups = self._get_row_groups(grid.grid.T, models.patterns.ColumnPattern, min)\n return row_groups + col_groups", "def remove_small_boxes(boxlist, min_size):\n # TODO maybe add an API for querying the ws / hs\n xywh_boxes = boxlist.convert(\"xywh\").bbox\n _, _, ws, hs = xywh_boxes.unbind(dim=1)\n keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1)\n return boxlist[keep]", "def get_mines(self):\n\t\treturn ((x, y) for x in range(self.width)\n\t\t for y in range(self.height) if self.mines[x][y])", "def filter_instances_by_size(self, im, unique_instances, min_building_size):\n # create array to store building instances to ignore\n ignored_instances = np.array([])\n # if min_building_size is negative, error\n if min_building_size < 0:\n raise ValueError(\"Building size filter cannot be a negative number\")\n # return list of instances to check and list of instances to ignore\n # if min_building_size is 0, return original array of instances, ignored_instances is empty\n if min_building_size == 0:\n return unique_instances, ignored_instances\n else:\n for i in range(len(unique_instances)):\n _, current_building_size = self.get_current_building_mask(im, unique_instances[i])\n if current_building_size < min_building_size:\n ignored_instances = np.append(ignored_instances, i)\n return np.setdiff1d(unique_instances, ignored_instances), ignored_instances", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n return keep", "def min_dist_grid(self, list_of_grids, self_pos):\n\t\t\n\t\tdistance = []\n\t\tcp_list_of_grids = list(list_of_grids)\n\t\t\n\n\t\tfor grid in cp_list_of_grids:\n\t\t\tdistance.append((dist(grid, self_pos), cp_list_of_grids.index(grid)))\n\t\tgrid_point = min(distance)\n\t\tidx = grid_point[1]\n\t\tpoint = cp_list_of_grids[idx]\n \t\n \t\tself_pos[0] = point[0]\n \t\tself_pos[1] = point[1]\n\n \t\tself.Bubble_last_pos = [point[0], point[1]]", "def row_group_limits():\r\n from pymatgen import Element, periodic_table\r\n \r\n # Get all available elements in periodic table.\r\n rs = [e.row for e in periodic_table.Element]\r\n gs = [e.group for e in periodic_table.Element]\r\n \r\n return (max(rs), max(gs))", "def _query_min_max_size(self):\n\n # Collect contributions of child widgets\n mima1 = [0, 1e9, 0, 0]\n for child in self.children:\n mima2 = child._size_limits\n mima1[0] = max(mima1[0], mima2[0])\n mima1[1] = min(mima1[1], mima2[1])\n mima1[2] += mima2[2]\n mima1[3] += mima2[3]\n\n # Dont forget padding and spacing\n extra_padding = 2\n extra_spacing = 2\n for i in range(4):\n mima1[i] += extra_padding\n mima1[2] += extra_spacing\n mima1[3] += extra_spacing\n\n # Own limits\n mima3 = super()._query_min_max_size()\n\n # Combine own limits with limits of children\n return [max(mima1[0], mima3[0]),\n min(mima1[1], mima3[1]),\n max(mima1[2], mima3[2]),\n min(mima1[3], mima3[3])]", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in 
enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def remove_small_boxes(boxes, min_size):\r\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\r\n keep = (ws >= min_size) & (hs >= min_size)\r\n keep = np.where(keep)[0]\r\n return keep", "def get_mines(self):\n mines = []\n for i in range(self.rows):\n for j in range(self.cols):\n if self.board[i][j].category == Tiles.mine:\n mines.append((i, j))\n return mines", "def _filter_boxes(boxes, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n keep = np.where((ws >= min_size) & (hs >= min_size))[0]\n\n return keep", "def calculate_min_max_tiles(self):", "def known_mines(self):\n if len(self.cells) <= self.count:\n return self.cells\n return set([])", "def test_min_root_gb_filter(self):\n filters = dict(min_root_gb=80)\n expected = [\n 'cg1.2xlarge',\n 'cg1.4xlarge',\n 'cg1.large',\n 'cg1.xlarge',\n 'm1.large',\n 'm1.xlarge',\n 'sh1.16xlarge',\n 'sh1.2xlarge',\n 'sh1.32xlarge',\n 'sh1.4xlarge',\n 'sh1.8xlarge',\n 'sh1.large',\n 'sh1.xlarge',\n 'tp64.8x8']\n self.assertFilterResults(filters, expected)", "def _filter_boxes(self, boxes, min_size, im_info):\n # Scale min_size to match image scale\n min_size *= im_info[2]\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n x_ctr = boxes[:, 0] + ws / 2.\n y_ctr = boxes[:, 1] + hs / 2.\n keep = np.where((ws >= min_size) & (hs >= min_size) &\n (x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]\n return keep", "def _filter_imgs(self, min_size=32):\n\n valid_inds = []\n for i, img_info in enumerate(self.data_infos):\n if min(img_info[\"width\"], img_info[\"height\"]) < min_size:\n continue\n if self.filter_empty_gt and len(img_info[\"ann\"][\"bboxes\"]) > 0:\n valid_inds.append(i)\n else:\n valid_inds.append(i)\n\n return valid_inds", "def CalcMin(self):\n calced = wx.Size(0, 0)\n for item in self.GetChildren():\n calced.IncTo(item.CalcMin())\n\n return calced", "def xminmax ( self ) :\n return self.xvar.minmax()", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n for i, img_info in enumerate(self.img_infos):\n # Filter out empty images\n if img_info['ann']['bboxes'].shape[0] > 0:\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 10)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def getGridSquareMinXY(gridSquaresShp):\n\n dataSource = ogr.Open(gridSquaresShp)\n layer = dataSource.GetLayer()\n gridSquares = {}\n for feature in layer:\n gridSq = feature.GetField(\"GRIDSQ\")\n geom = feature.GetGeometryRef()\n minX, maxX, minY, maxY = geom.GetEnvelope()\n gridSquares[gridSq] = (minX, minY)\n\n return gridSquares", "def MIN(*args):\n return _group_function(min, *args)", "def get_minnpix(self, pixel_size, radius):\n\n npix = latticepoints(radius,pixel_size)\n return npix", "def _filter_imgs(self, min_size=32):\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if 
min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds", "def test_group_small_cols(self):\n taxa = DataTableFactory(PACKET_DIR).taxonomy()\n taxa = group_small_cols(taxa, top=2)\n self.assertEqual(taxa.shape[1], 3)", "def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax", "def min_inst(df, n=1):\n classes = df.groupby('class_label')\n counts = classes.inst.transform('count')\n sel_classes = df[counts > n]\n return sel_classes", "def test_clusterFilterMinSize(self):\n # settings - all clusters visible\n settings = clusterFilter.ClusterFilterSettings()\n settings.updateSetting(\"neighbourRadius\", 2.1)\n settings.updateSetting(\"minClusterSize\", 2)\n settings.updateSetting(\"maxClusterSize\", -1)\n \n # set PBC\n self.lattice.PBC[:] = 1\n \n # filter input\n filterInput = base.FilterInput()\n filterInput.inputState = self.lattice\n visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)\n filterInput.visibleAtoms = visibleAtoms\n filterInput.NScalars = 0\n filterInput.fullScalars = np.empty(0, np.float64)\n filterInput.NVectors = 0\n filterInput.fullVectors = np.empty(0, np.float64)\n \n # call filter\n result = self.filter.apply(filterInput, settings)\n self.assertIsInstance(result, base.FilterResult)\n \n # make sure num visible is correct\n self.assertEqual(len(visibleAtoms), len(self.bigClusterIndexes))\n \n # check clusters are correct\n clusterList = result.getClusterList()\n self.assertEqual(len(clusterList), 1)\n cluster = clusterList[0]\n self.assertEqual(len(cluster), len(self.bigClusterIndexes))\n for index in self.bigClusterIndexes:\n self.assertTrue(index in cluster)", "def group_sizes(self):\n return self.g_sizes", "def _filter_boxes2(boxes, max_size, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n if max_size > 0:\n keep = np.where(np.minimum(ws, hs) < max_size)[0]\n elif min_size > 0:\n keep = np.where(np.maximum(ws, hs) > min_size)[0]\n return keep", "def ensure_chunk_size(da: xr.DataArray, **minchunks: int) -> xr.DataArray:\n if not uses_dask(da):\n return da\n\n all_chunks = dict(zip(da.dims, da.chunks))\n chunking = dict()\n for dim, minchunk in minchunks.items():\n chunks = all_chunks[dim]\n if minchunk == -1 and len(chunks) > 1:\n # Rechunk to single chunk only if it's not already one\n chunking[dim] = -1\n\n toosmall = np.array(chunks) < minchunk # Chunks that are too small\n if toosmall.sum() > 1:\n # Many chunks are too small, merge them by groups\n fac = np.ceil(minchunk / min(chunks)).astype(int)\n chunking[dim] = tuple(\n sum(chunks[i : i + fac]) for i in range(0, len(chunks), fac)\n )\n # Reset counter is case the last chunks are still too small\n chunks = chunking[dim]\n toosmall = np.array(chunks) < minchunk\n if toosmall.sum() == 1:\n # Only one, merge it with adjacent chunk\n ind = np.where(toosmall)[0][0]\n new_chunks = list(chunks)\n sml = new_chunks.pop(ind)\n new_chunks[max(ind - 1, 0)] += sml\n chunking[dim] = tuple(new_chunks)\n\n if chunking:\n return da.chunk(chunks=chunking)\n return da", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def 
_filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def MinX(*args, **kwargs):\n return _gdi_.DC_MinX(*args, **kwargs)", "def get_grid_complement_missing_threshold(x, d_list, missing_threshold_complement_mode=False):\n if missing_threshold_complement_mode == True:\n min_threshold = min(d_list['missing_threshold'])\n output = missing_stats(x, min_threshold)\n if list(output['missing_fraction']) == []:\n d_list['missing_threshold'] = [0]\n else:\n d_list['missing_threshold'] = list(output['missing_fraction'])\n grid = list(ParameterGrid(d_list))\n return grid", "def filter_x_per_y(df, at_least, x, per):\n return df.groupby(per, as_index=False, sort=False).filter(\n lambda g: g[x].nunique() >= at_least\n )", "def known_mines(self):\n if len(self.cells)==self.count:\n return self.cells\n return set()\n #raise NotImplementedError", "def known_mines(self):\n \n if len(self.cells) == self.count:\n return self.cells", "def minimumAbove(requestContext, seriesList, n):\n results = []\n for series in seriesList:\n if min(series) > n:\n results.append(series)\n return results", "def filter_toofew_toolong(df, min_each_group, max_length):\n df = df[~(df.question.apply(lambda x : len(x)) > max_length)]\n\n counts = df[\"index\"].value_counts()\n idxs = np.array(counts.index)\n \n # index numbers of groups with count >= mineachgroup\n list_idx = [i for i, c in zip(idxs, counts) if c >= min_each_group]\n\n # filter out data with \"index\" in list_idx \n df = df[df[\"index\"].isin(list_idx)]\n return df", "def ensure_minimum(cls, specimen_id, views, minimum=100, start_nkp=None):\n nkp = start_nkp or FeaturesView.nkp\n\n def _make_fv(kp):\n feature_views = [FeaturesView.from_view(v, kp).tidy() for v in views]\n min_kp = min([len(v.keypoints) for v in feature_views])\n feature_views = [v.tidy(n=min_kp) for v in feature_views]\n return cls(specimen_id, feature_views)\n\n while nkp < FeaturesView.nkp * 2:\n fv = _make_fv(nkp)\n if len(fv.global_matches) > minimum:\n break\n nkp += 1000\n\n return fv", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def _default_sampling_xrange(self):\n from scipy.stats import rv_continuous\n dataset = self.rvdist.rvs(1000) if rv_continuous in self.rvdist.__class__.__mro__ \\\n else self.rvdist.dataset\n scale = np.nanmax(dataset) - np.nanmin(dataset)\n return [np.nanmin(dataset) - scale*0.05, np.nanmax(dataset) + scale*0.05]", "def test_sizergrid():\n regular_grid(8, 3)\n mpl.show()", "def _grid_hint_size(self) -> int:", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def truncateInWindows(x,delta_x):\n g = np.zeros(len(x))\n group = 0\n x0 = x[0]\n for k in range(len(x)):\n g[k] = group\n if x[k]-x0>delta_x:\n 
x0 = x[k]\n group+=1\n return g", "def return_extents(self):\n\n return [qm.tree.mins, qm.tree.maxs]", "def getSpatialGrid(self, scaled=True):\n if scaled:\n return np.meshgrid(self.x_axis_scaled, self.x_axis_scaled)\n else:\n return np.meshgrid(self.x_axis_unscaled, self.x_axis_unscaled)", "def hasGroupsSizeX(self, deck):\n return reduce(lambda a, b: gcd(a, b), Counter(deck).values()) > 1", "def getNewGrid(self, _grid_size):\n grid_ = []\n for _ in range(_grid_size[0]):\n grid_ += [[ None for _ in range(_grid_size[1]) ]]\n return grid_", "def min_blocks(self):\n return self._min_blocks", "def get_grid_allow_missing(x, d_list):\n del d_list['impute_category_strategy']\n del d_list['impute_numerical_strategy']\n grid = list(ParameterGrid(d_list))\n return grid", "def used_xvals(self):\n return [x for x in self.xvals() if any([len(self.get_plaquette(x, y)) > 0\n for y in self.yvals()])]", "def get_start_grid(cols=4, rows=4):\n grid = [[0]*cols for i in range(rows)]\n for i in range(2):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return grid", "def create_grid(size_x, size_y, default=None):\n return [[default for _x in range(size_y)] for _y in range(size_x)]", "def xlim(self):\r\n lim = [ax.get_xlim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim", "def isXSnappedToGrid( self ):\n return self._xSnapToGrid", "def test_min_memory_mb_filter(self):\n filters = dict(min_memory_mb=513)\n expected = [\n 'cg1.2xlarge',\n 'cg1.4xlarge',\n 'cg1.large',\n 'cg1.medium',\n 'cg1.small',\n 'cg1.xlarge',\n 'm1.large',\n 'm1.medium',\n 'm1.small',\n 'm1.xlarge',\n 'sh1.16xlarge',\n 'sh1.2xlarge',\n 'sh1.32xlarge',\n 'sh1.4xlarge',\n 'sh1.8xlarge',\n 'sh1.large',\n 'sh1.medium',\n 'sh1.small',\n 'sh1.xlarge',\n 'tp64.8x8']\n self.assertFilterResults(filters, expected)", "def get_xbins(xcolname):\n\n xmin, xmax = (float(\"inf\"), -float(\"inf\"))\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n xmin = min(xmin, xdata.min())\n xmax = max(xmax, xdata.max())\n\n return {\"size\" : (xmax - xmin) / 1000}", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def thin_xticks(ax, n):\n ax.xaxis.set_major_locator(MaxNLocator(n + 1))", "def test_get_groupings_within_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=True), [])", "def get_min_max(self, groups, key):\n group = groups.get_group(key)\n min = group.loc[group[\"dif\"].idxmin()]\n max = group.loc[group[\"dif\"].idxmax()]\n minmax = {\"min\": min, \"max\": max}\n return minmax", "def min(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.min, **kwargs)", "def __init__(self, x=None, mbar=10, eq=True, to_plot=False, mbar_min=1, xg=None, beyond_domain=None):\n logger.debug('Initializing inducing grid.')\n k = mbar; del mbar # mbar is an alias\n k_min = mbar_min; del mbar_min # mbar_min is an alias\n if xg is None: # then generate a grid from the scattered points x\n # deal with inputs\n assert isinstance(x,np.ndarray)\n assert x.ndim == 2\n self.eq = eq\n if not 
isinstance(k,(tuple,list,np.ndarray)):\n k = (k,)*x.shape[1]\n\n # get some statistics and counts (just assuming 1d along each dimension)\n (n_train, self.grid_dim) = x.shape # number of training points, number of grid dimensions\n self.grid_sub_dim = np.ones(self.grid_dim, dtype=int) # number of sub dimensions along each grid dim\n self.input_dim = np.sum(self.grid_sub_dim) # total number of dimensions\n self.grid_shape = np.zeros(self.grid_dim, dtype=int); # number of points along each sub dimension\n x_rng = np.vstack((np.amin(x,axis=0), np.amax(x,axis=0), np.ptp(x,axis=0))).T\n n_unq = np.array([np.unique(x[:,i]).size for i in range(self.grid_dim)])\n if not np.all(n_unq >= 2):\n logger.debug('some dimension have < 2 unique points')\n for i,ki in enumerate(k):\n if ki <= 1:\n self.grid_shape[i] = np.int32(np.maximum(np.ceil(ki*n_unq[i]),k_min));\n else:\n assert np.mod(ki,1) == 0, \"if k > 1 then it must be an integer\"\n # don't allow the number of points to be greater than n_unq\n self.grid_shape[i] = np.int32(np.maximum(np.minimum(ki, n_unq[i]), k_min));\n self.num_data = np.prod(np.float64(self.grid_shape)) # total number of points on the full grid\n\n # check if bounds are to be added, in which case I want to call recursively\n if beyond_domain is not None:\n assert np.all(self.grid_shape >= 2), \"bounds need at least 2 points per dim\"\n # get the grid with no bounds but 2 less points per dimension\n xg = InducingGrid(x=x, k=self.grid_shape-2, eq=eq, to_plot=False, k_min=0, xg=None, beyond_domain=None).xg\n for i in range(x.shape[1]):\n xg[i] = np.vstack((x_rng[i,0]-beyond_domain*x_rng[i,2], xg[i], x_rng[i,1]+beyond_domain*x_rng[i,2])) # add the points that go beyond domain\n # since xg is now specified, it will be added to the grid below\n else:\n #figure out if the grid should be on unique points\n on_unique = self.grid_shape == n_unq # whether or not the grid is exactly on unique values\n\n # create the grid\n # self.xg is a list of length n_dims which specifies the grid along each dimension.\n self.xg = np.empty(self.grid_dim, dtype=object)\n for i_d in range(self.grid_dim):\n if on_unique[i_d]: # then place the grid on the unique values\n self.xg[i_d] = np.unique(x[:,i_d]).reshape((-1,1))\n elif self.eq: # equally spaced grid points\n self.xg[i_d] = np.linspace(x_rng[i_d,0],x_rng[i_d,1],num=self.grid_shape[i_d]).reshape((-1,1))\n elif self.grid_shape[i_d] == 2: # then just place on the ends\n self.xg[i_d] = x_rng[i_d,:2].reshape((-1,1))\n else: # non equally spaced grid points\n \"\"\"\n do a two-pronged kmeans clustering strategy where you find clusters of clusters:\n 1) indentify clusters in the data, I don't want to reconsider points in the same cluster twice\n 1.5) filter any clusters which are close together\n 2) rerun kmeans using the cluster centers to get the grid points\n 2.5) filter any nodes which are close together\n This makes clusters which aren't too close together and also encourages spread throughout the space\n \"\"\"\n # TODO: it seems that it's actually important to bound clusters, not just have them nearby\n # I can try to implement this maybe\n\n node_tol = x_rng[i_d,2]/(3*self.grid_shape[i_d])\n # 1) identify clusters in x. 
Use more than the final number of grid points\n x_clusters = MiniBatchKMeans( # will be faster for large problems\n n_clusters=np.minimum(3*self.grid_shape[i_d],n_unq[i_d]), n_init=1, max_iter=100, tol=0.001,\n ).fit(np.unique(x[:,i_d]).reshape((-1,1)) # I don't want to recount duplicates more than once\n ).cluster_centers_.reshape((-1,1))\n\n # 1.5) remove clusters which are close together\n x_clusters = uniquetol(x_clusters.squeeze(),\n tol=node_tol/2, # set a loose tol here\n ).reshape((-1,1))\n self.grid_shape[i_d] = np.minimum(x_clusters.size, self.grid_shape[i_d])\n\n if self.grid_shape[i_d] == x_clusters.size: # then place the nodes on the clusters\n self.xg[i_d] = x_clusters\n elif self.grid_shape[i_d] > 2: # perform the second kmeans clustering\n # 2) get the final grid points\n self.xg[i_d] = KMeans(\n n_clusters=self.grid_shape[i_d]-2, n_init=1, max_iter=100, tol=0.001, verbose=False,\n ).fit(np.vstack((x_rng[i_d,0], x_clusters, x_rng[i_d,1])) # add the extreme values back to bias the nodes\n ).cluster_centers_.reshape((-1,1))\n\n # 2.5) remove nodes which are close together\n self.xg[i_d] = uniquetol(self.xg[i_d].squeeze(), tol=node_tol).reshape((-1,1))\n else: # initiaze empty grid, extreme values will be added later\n self.xg[i_d] = np.zeros((0,1))\n\n # sort the inducing points and place nodes at the extreme values\n self.xg[i_d].sort(axis=0)\n self.xg[i_d] = np.vstack((x_rng[i_d,0],self.xg[i_d],x_rng[i_d,1]))\n if np.abs(self.xg[i_d][1,0] - self.xg[i_d][0,0]) < node_tol: #check if too close together at ends\n self.xg[i_d] = np.delete(self.xg[i_d],1,axis=0)\n if np.abs(self.xg[i_d][-1,0] - self.xg[i_d][-2,0]) < node_tol: #check if too close together at ends\n self.xg[i_d] = np.delete(self.xg[i_d],-2,axis=0)\n assert x_rng[i_d,0] == self.xg[i_d][0,0] and x_rng[i_d,1] == self.xg[i_d][-1,0], \"extremum values didn't make it into set\"\n self.grid_shape[i_d] = self.xg[i_d].size\n if xg is not None: # a grid has already been specified so use this instead\n self.xg = np.asarray(xg)\n self.grid_dim = self.xg.shape[0] # number of grid dimensions\n self.grid_shape = np.zeros(self.grid_dim, dtype=int) # number of points along each sub dimension\n self.grid_sub_dim = np.zeros(self.grid_dim, dtype=int) # number of sub dimensions along each grid dim\n for i,X in enumerate(self.xg): # loop over grid dimensions\n assert X.ndim == 2, \"each element in xg must be a 2d array\"\n self.grid_sub_dim[i] = X.shape[1]\n self.grid_shape[i] = X.shape[0]\n self.input_dim = np.sum(self.grid_sub_dim) # total number of dimensions\n self.num_data = np.prod(np.float64(self.grid_shape)) # total number of points on the full grid\n self.eq = None\n\n # plot the grid\n if to_plot is True:\n self.plot(x)\n elif isinstance(to_plot, str):\n self.plot(x, fname=to_plot)", "def get_smallest_complexes(self) -> List[Tuple[KappaComplex, int]]:\n min_known_size = min(self._known_sizes)\n return self.get_complexes_of_size(min_known_size)", "def min_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min_nodes\")", "def test_min_memory_mb_AND_root_gb_filter(self):\n filters = dict(min_memory_mb=16384, min_root_gb=80)\n expected = [\n 'cg1.2xlarge',\n 'cg1.4xlarge',\n 'cg1.xlarge',\n 'm1.xlarge',\n 'sh1.16xlarge',\n 'sh1.2xlarge',\n 'sh1.32xlarge',\n 'sh1.4xlarge',\n 'sh1.8xlarge',\n 'sh1.xlarge',\n 'tp64.8x8']\n self.assertFilterResults(filters, expected)", "def OpenXmin(self, *args):\n return _Bnd.Bnd_Box_OpenXmin(self, *args)", "def min(self):\n return self._reduce_for_stat_function(F.min, 
only_numeric=False)", "def get_start_grid(cols=4, rows=4):\n\tgrid = [[\"\"]*cols for i in range(rows)]\n\tfor i in range(2):\n\t\tempties = get_empty_cells(grid)\n\t\ty,x = random.choice(empties)\n\t\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn grid", "def quickMinMax(self, targetSize=1e6):\n data = self.image\n if targetSize < 2: # keep at least two pixels\n targetSize = 2\n while True:\n h, w = data.shape[:2]\n if h * w <= targetSize: break\n if h > w:\n data = data[::2, ::] # downsample first axis\n else:\n data = data[::, ::2] # downsample second axis\n return self._xp.nanmin(data), self._xp.nanmax(data)", "def localMin(eccMap, binSize):\r\n\r\n eccMap2 = np.array(eccMap)\r\n cutStep = np.arange(np.nanmin(eccMap2[:]) - binSize,\r\n np.nanmax(eccMap2[:]) + binSize * 2,\r\n binSize)\r\n NumOfMin = 0\r\n i = 0\r\n while (NumOfMin <= 1) and (i < len(cutStep)):\r\n currThr = cutStep[i]\r\n marker = np.zeros(eccMap.shape, dtype=np.int)\r\n marker[eccMap2 <= (currThr)] = 1\r\n marker, NumOfMin = ni.measurements.label(marker)\r\n i = i + 1\r\n\r\n # if NumOfMin == 1:\r\n # print 'Only one local minumum was found!!!'\r\n # elif NumOfMin == 0:\r\n # print 'No local minumum was found!!!'\r\n # else:\r\n # print str(NumOfMin) + ' local minuma were found!!!'\r\n #\r\n # if NumOfMin > 1:\r\n # plt.figure()\r\n # plt.imshow(marker,vmin=np.amin(marker), vmax=np.amax(marker),cmap='jet',interpolation='nearest')\r\n # plt.colorbar()\r\n # plt.title('marker from local min')\r\n\r\n return marker", "def makeStartingGrid(self):\n return util.make2DArray(self.xN, self.yN, False)", "def vis_grid(Xs):\n (N, H, W, C) = Xs.shape\n A = int(ceil(sqrt(N)))\n G = np.ones((A * H + A, A * W + A, C), Xs.dtype)\n G *= np.min(Xs)\n n = 0\n for y in range(A):\n for x in range(A):\n if n < N:\n G[y * H + y:(y + 1) * H + y, x * W + x:(x + 1) * W + x, :] = Xs[n, :, :, :]\n n += 1\n # normalize to [0,1]\n maxg = G.max()\n ming = G.min()\n G = (G - ming) / (maxg - ming)\n return G", "def lowest_x(self, n, x, description):\n before = self.item_count()\n self.filter(np.argsort(x)[:n])\n after = self.item_count()\n with msg(f'Using {n} with lowest {description}: {after} of {before}', done=False, enabled=self.output):pass", "def with_group_size(self, lower: int = 1, upper: int = 1) -> Creature:\n result = self.clone()\n result.group_size = Range(lower, upper)\n return result", "def min_facet_width(self, min_facet_width):\n\n self._min_facet_width = min_facet_width", "def group_top(group: pygame.sprite.Group, rect: pygame.Rect):\r\n same_centerx_lower_centery = filter(lambda sprt: sprt.rect.centerx == rect.centerx\r\n and sprt.rect.centery >= rect.centery, group)\r\n return min(map(lambda sprt: sprt.rect.top, same_centerx_lower_centery), default=RESOLUTION[1])", "def test_get_groupings_between_tiny_dataset(self):\r\n self.assertEqual(_get_groupings(self.tiny_dist_matrix_header,\r\n self.tiny_dist_matrix, self.tiny_groups, within=False), [])", "def GetMinSize(self):\r\n\r\n return self.min_size", "def find_smaller_box(boxes):\n\n min_area = rect_area(boxes[0])\n smaller_box = boxes[0]\n for box in boxes[1:]:\n area = rect_area(box)\n if area < min_area:\n min_area = area\n smaller_box = box\n\n return smaller_box", "def coarse_groups(self):\n return self._num_coarse_groups( )", "def least_constraining_values(self, cell):\r\n vals = {}\r\n for val in cell.domain:\r\n vals[val] = 0\r\n for i, j in cell.neighbors:\r\n if val in self.grid[i][j].domain:\r\n vals[val] += 1\r\n x = sorted(vals.items(), key=lambda i: 
i[1])\r\n res = []\r\n for i in x:\r\n res.append(i[0])\r\n return res", "def groups(self):\n\n\t\tprint \"completed minimization\"\n\t\tcopy(self.rootdir+'counterions-minimized.gro',self.rootdir+'system.gro')\n\t\tcopy(self.rootdir+'counterions.top',self.rootdir+'system.top')\n\t\tif self.simscale == 'aamd': grouptype = 'standard'\n\t\tif self.simscale == 'cgmd': grouptype = 'cgmd_water'\n\t\tself.grouping(grouptype=grouptype)", "def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass", "def get_grid_width(self):\n # replace with your code\n return 0", "def _get_seasons_grid(start_season, start_season_type):\n if start_season not in list(range(config.START_SEASON, config.CURRENT_SEASON + 1)):\n raise ValueError(f\"Start season of {start_season} not valid.\")\n\n if start_season_type not in config.SEASON_TYPES:\n raise ValueError(f\"Start season type {start_season_type} not valid.\")\n\n initial_season_types = [\n s for s in config.SEASON_TYPES\n if config.SEASON_TYPES_ORDER[s] >= config.SEASON_TYPES_ORDER[start_season_type]\n ]\n grid = [(start_season, season_type) for season_type in initial_season_types]\n\n if start_season < config.CURRENT_SEASON:\n for season in range(start_season + 1, config.CURRENT_SEASON + 1):\n grid += [(season, season_type) for season_type in config.SEASON_TYPES]\n\n return grid", "def labelmask_filter_objsize(labelmask, size_min, size_max):\n # Count pixels in each object.\n (labels, counts) = np.unique(labelmask, return_counts=True)\n # Select objects in desired size range, update filtered mask.\n labels_selected = labels[(counts >= size_min) & (counts <= size_max)]\n labelmask_filtered = np.where(np.isin(labelmask, labels_selected), \n labelmask, 0)\n return labelmask_filtered", "def kill_small_conts(contours,hierarchy,pxl_size):\n conts = []\n hier = []\n min_pxl_area = const.MIN_LEAF_AREA/pxl_size**2 \n for i,c in enumerate(contours):\n if (cv2.contourArea(c) > min_pxl_area):\n conts.append(c)\n # hier.append(hierarchy[i])\n \n return (conts,hier)", "def initial_clusters(self, points):\n groups = {}\n d = int(256 / (self.initial_k))\n for i in range(self.initial_k):\n j = i * d\n groups[(j, j, j)] = []\n for i, p in enumerate(points):\n # if i%100000 == 0:\n # print('processing pixel:', i)\n go = min(groups.keys(), key=lambda c: euclidean_distance(p, c)) \n groups[go].append(p)\n return [g for g in groups.values() if len(g) > 0]", "def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))", "def near_split(x, num_bins=None, size_bins=None):\n if num_bins:\n quotient, remainder = divmod(x, num_bins)\n return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)\n elif size_bins:\n return near_split(x, num_bins=int(np.ceil(x / size_bins)))" ]
[ "0.5814698", "0.56901836", "0.56703305", "0.5670049", "0.55432373", "0.55418265", "0.55418265", "0.5527169", "0.5518961", "0.5517546", "0.55114937", "0.55114937", "0.5501184", "0.5484425", "0.54797846", "0.5464368", "0.54571414", "0.53910476", "0.5375301", "0.5354567", "0.5345678", "0.52965134", "0.5288231", "0.52749205", "0.52707756", "0.5241883", "0.52254266", "0.52058136", "0.52029324", "0.51689345", "0.5167767", "0.51659954", "0.5161934", "0.51588875", "0.5157694", "0.5150462", "0.5150462", "0.5150462", "0.5147852", "0.51408404", "0.5138222", "0.513589", "0.5131451", "0.51093304", "0.5104224", "0.5092803", "0.5088618", "0.5086254", "0.50795555", "0.5065124", "0.5032085", "0.50315356", "0.5016148", "0.5006064", "0.5003779", "0.4994373", "0.49902213", "0.49823427", "0.49789315", "0.496697", "0.49652037", "0.49648842", "0.4961326", "0.49560735", "0.49392644", "0.49335462", "0.49335462", "0.49313122", "0.49197835", "0.49161872", "0.49159536", "0.49102172", "0.49079132", "0.49070984", "0.49046114", "0.4897581", "0.48949543", "0.48946756", "0.48912016", "0.48848167", "0.48835322", "0.48762625", "0.4875497", "0.4873631", "0.4869989", "0.48520797", "0.4841534", "0.48296958", "0.48295194", "0.48294252", "0.4827004", "0.4825927", "0.48246166", "0.48218098", "0.48216167", "0.48092142", "0.480686", "0.4802934", "0.4801718", "0.4801718" ]
0.48264712
91
Sends message to destination
async def connection_made(self, transport: asyncio.transports.BaseTransport) -> None:
    self.transport = transport
    transport.write(self.message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send(msg, dest=None):", "def send_message(message, destination):\n\n #Your code here\n pass", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def sendMessage(self,data,destination):\r\n \r\n self.log('Signal','Sending command to networked node \"%s\"' % destination,'sendMessage')\r\n \r\n if destination in self.nodes:\r\n\r\n self.log('Signal','Sent command to %s cmd: %s' % (destination,data),'sendMessage')\r\n \r\n self.server.sendMessage(data,destination)", "def send(self, message):\n pass", "def send_message(self, message):\n pass", "def send(self, message):\n\t\tmessage_string = self.send_address + \" \" + message + \" /\"\n\t\tself.add_to_queue(message_string)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def sendMessage(self, destinationUUID, message):\n with self.lock:\n packet = {}\n packet[destinationUUID] = message\n # print '\\tService.sendMessage():', message\n self.outbox.put(packet)", "def send(self, service_id, destination_address, data):\n self.send_from(service_id, destination_address, self.address, data)", "def send(self,header,dest,msg):\n message = self.create_message(header,dest,msg)\n\n if message == None:\n print(\"Not a valid Message\")\n else:\n message = json.dumps(message) # turns message dictionary into json string\n message = message.encode(FORMAT) # encodes message w/ UTF-8\n msg_len = len(message) # gets message length\n send_length = str(msg_len).encode(FORMAT) #encodes message length w/ UTF-8\n send_length += b' ' * (PREFIX-len(send_length)) #pads send length up to 64 bits\n\n conn = self.connections[dest][\"CONN\"]\n conn.send(send_length)\n sleep(0.1)\n conn.send(message)", "def send_message(self):\n self.preprocess_text()\n message_text = self.create_message_text()\n \n telnyx.Message.create(\n from_=configs.source_number,\n to=self.destination_number,\n text=message_text,\n )", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send(self, msg, label=\"\"):\n self.remoter.tx(msg) # send to remote\n log.debug(\"%s sent %s:\\n%s\\n\\n\", self.remoter, label, bytes(msg))", "def send(self, msg):\n self.house.PLM.send_queue.put( msg )", "def send_message(self, data):\n self.transport.write(data)", "def sendto(self, name, msg):\n self.send(\"send/{}/{}:{}\".format(self.msg_id, name, msg))\n self.msg_id += 1", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send(self, msg, destination) -> None:\n self.numsends.next()\n if self._force_drop(msg):\n _MTTRACE(\"SEND_FORCEDROP\\n[%s]\", msg)\n self.numforcedrops.next()\n return # **************** EARLY RETURN *****\n\n if self._nonforce_drop():\n _MTTRACE(\"SEND_RANDDROP\\n[%s]\", msg)\n self.numrandomdrops.next()\n return # **************** EARLY RETURN *****\n\n _MTTRACE(\"SEND_SENDING\\n[%s] to %s\", msg, destination)\n\n if self.closed:\n self.numforcedrops.next()\n return\n if self.no_delays():\n self._receive_data(msg)\n else:\n delay = random.random() * self.maxdelay\n def timertask():\n if not self.closed:\n # Delayed send\n self._receive_data(msg)\n else:\n self.numforcedrops.next()\n\n 
self.scheduler.schedule(delay, timertask)", "def send_message(self, end_point):\n self.message_controller.send_message(end_point, self)", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def send(self, msg):\n self.message('Me', msg)", "async def send(self, message):", "def send(self, message) -> None:\n raise NotImplementedError", "def send_destination(self):\n\n print('send the target to the robot')\n move_base_action_goal=MoveBaseActionGoal()\n move_base_action_goal.goal.target_pose.header.frame_id=\"map\"\n move_base_action_goal.goal.target_pose.pose.orientation.w=1\n move_base_action_goal.goal.target_pose.pose.position.x=self.x_des\n move_base_action_goal.goal.target_pose.pose.position.y=self.y_des\n print('des_x='+str(self.x_des))\n print('des_y='+str(self.y_des))\n self.des_pub.publish(move_base_action_goal)", "def msg(self, target, message):\n self.server.message_queue.put(('tests!tests@tes.t', target, message))", "def send_message(self, msg: dict):\n txrx_debug('{} sending {} msg to {}'.format(msg['src'], msg['type'], msg['dst']))\n self.sock.send(dumps(msg).encode('utf-8'))", "def send(self, message):\n self.sock.send(message)", "def message_routed(self, message):\n \n # Send it through the transport\n self.send_message(message = message)", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def send(self, msg, destination, *args, **kwargs):\n self.logger.debug(\"sending message to {d}: {m}\".format(d=destination, m=msg))\n self.connection.send(body=self.security.encode(msg, b64=self.use_b64),\n destination=destination,\n **kwargs)\n return self", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def send(self, event, message):\n pass", "def outgoing (self, message):\n pass", "def outgoing (self, message):\n pass", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "def sendMessage(self, name, message):\n time.sleep(int(self.getOwnName()) * 0.05)\n self.getSocket(name).send_pyobj(message)", "def send(self, msg: object, target_id: str = None, sender_id: str = None):\n return self._dispatcher.dispatch(\n msg, self.name, target_id or self.id, sender_id)", "def send_message(self, message, direction = 'forward'):\n if direction == 'forward':\n if self.connected_as_client:\n self.queue_lock.acquire()\n time.sleep(self.delay)\n self.message_queue.put(message)\n self.queue_lock.release()\n else:\n print \"no node to send to\"\n else:\n if self.connected_as_server:\n self.queue_lock.acquire()\n time.sleep(self.delay)\n self.response_queue.put(message)\n self.queue_lock.release()\n else:\n print \"no node to respond to\"", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "async def async_send_message(self, message=\"\", **kwargs):\n if \"target\" in kwargs:\n await self._async_send_to_device(message, kwargs[\"target\"])\n else:\n await 
self._async_send_to_all_devices(message)", "def _send_message(self, path, arg_lst):\n self._client.send_message(path, arg_lst)", "def sendMessage(self,message):\n if message is not None: self.controlProtocol.sendMessage(message)", "def send(self, message_body: str, target: str):\n\t\tif target == 'local':\n\t\t\tself.client_process(message_body)\n\t\telse:\n\t\t\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n\t\t\t\ttry:\n\t\t\t\t\tsock.settimeout(1)\n\t\t\t\t\tsock.connect((target, self.channel_port))\n\t\t\t\t\tsock.send(message_body.encode())\n\t\t\t\texcept socket.timeout:\n\t\t\t\t\tself.registry.delete_ip(target)", "def send(self, msg, adb_info):\n with self._transport_lock:\n self._send(msg, adb_info)", "def send_message(self, message):\n self.client.queue.put(message)", "def send(self, msg, receiver):\n raise NotImplementedError", "def send(self, msg):\r\n\r\n # don't need to handle barrier messages\r\n if not hasattr(msg, 'command'):\r\n return\r\n\r\n subcmd = OvsSender.subcmds[msg.command]\r\n \r\n\r\n # TODO: this is different for remote switches (ie, on physical network)\r\n dest = msg.switch.name\r\n\r\n params = []\r\n if msg.match.nw_src is not None:\r\n params.append(\"nw_src={0}\".format(msg.match.nw_src))\r\n if msg.match.nw_dst is not None:\r\n params.append(\"nw_dst={0}\".format(msg.match.nw_dst))\r\n if msg.match.dl_src is not None:\r\n params.append(\"dl_src={0}\".format(msg.match.dl_src))\r\n if msg.match.dl_dst is not None:\r\n params.append(\"dl_dst={0}\".format(msg.match.dl_dst))\r\n if msg.match.dl_type is not None:\r\n params.append(\"dl_type={0}\".format(msg.match.dl_type))\r\n\r\n params.append(\"priority={0}\".format(msg.priority))\r\n actions = [\"flood\" if a == OFPP_FLOOD else str(a) for a in msg.actions]\r\n\r\n if msg.command == OFPFC_ADD:\r\n params.append(\"action=output:\" + \",\".join(actions))\r\n\r\n paramstr = \",\".join(params)\r\n cmd = \"{0} {1} {2} {3}\".format(OvsSender.command,\r\n subcmd,\r\n dest,\r\n paramstr)\r\n ret = os.system(cmd)\r\n return ret", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send_message(self, message):\n self.send_message_queue.put(message)", "def fsend(var, wrapper, message):\n wrapper.source.client.send(message)", "def passing_message(target, message):\n \n shortest_path = strong_peer_graph.findShortestPath(STRONG_PEER_ID,target)\n next_node = shortest_path[1]\n\n send_message(neighbor_strong_peer_sockets[next_node],'', message)", "def send(self, msg):\n self.__sock.send(msg)", "def send_to(self, target, msg):\n\t\tif self.cid is None:\n\t\t\traise UsageError(\"Not in a group!\")\n\t\tidb, payload = msg[0], msg[1:]\n\t\tself.sendMessage(idb + chr(target) + payload, True)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send (self, data):\n return self.sending.send(data)", "def send(self, dest=None, tag=None, comm=None):\n\n comm.send(self, dest=dest, tag=tag)\n return None", "def action(self, message, destination=None):\n if destination is None:\n destination = self.default_destination\n\n self._bot.action(message, destination)", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", 
"def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def sendto(self, data, addr):\n asyncio.ensure_future(self.__inner_protocol.send_data(data, addr))", "def send_message(self, data):\n self.agent_msg_queue.put(data)\n self._send_counter += 1", "def send(self, send_to, subject):\n self.server.send_message(self, send_to, subject)", "def _send_msg(self, msg):\n self._kernel.comm.send(msg)", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def send2(self, message):\n\n self.send(message)\n self.sync(message)", "def _send_frame(self, dest, data):\n self._log.debug(\"write {} to {}\".format(len(data), dest)) \n # send to endpoint\n self._conn.sendto(data, (dest,0))", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send_to_room(self, message, room_name):\r\n room = self.get_room(room_name)\r\n\r\n if room is not None:\r\n room.send_message(message)", "def sendMessage(self):\n print(\"sendMessage\")\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def sendMessage(self):\n print('sendMessage')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def sendmessage(self):\n \n self.message.parentItem = self.rxtxcontroller.transmittable.rootItem\n self.message.can_id = self.idInput.toPlainText()\n self.message.dlc = self.lengthInput.value()\n self.message.cycle_time = self.cycleInput.toPlainText()\n self.message.time = int(round(time.time() * 1000))\n self.message.rxtx = \"TX\"\n self.message.count = 1\n self.message.data = self.dataInput.toPlainText()\n self.accept()", "def send(event):\n\n\tid = get_hostname()\n\n\tmessage = str(id) + \"|\" + str(event)\n\n\tif mq is None: # if no mq exists\n\t\tprint \"mq is None\"\n\n\telse: # if mq exists\n\t\ttry:\n\n\t\t\tmq.send(message)\n\t\t\tprint 'completed sending message'\n\n\t\texcept Exception as e:\n\n\t\t\tprint 'failed to send message: {}'.format(e)", "def send_message(self, serial_message):\n #print(\"Sending message: %s\" % serial_message)\n self.sendString(serial_message)", "def send(self, to, from_, body):\n raise NotImplementedError", "def send(self, msg):\n\n self.sock.sendto(msg, (self.UDP_IP, self.UDP_PORT))", "def send_message(self, message):\n if self.connected:\n self.send(\n json.dumps(message.request))", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def write_message(self, message):\r\n logging.debug(\"Sending message {mes} to {usr}\".format(mes=message, usr=self.id))\r\n self.handler.write_message(message)", "def send(self, msg):\n self._mailbox.put(msg)", "def handle_send_to(self, api, command):\n content = command['content']\n to_addr = command['to_addr']\n endpoint = 
command.get('endpoint', 'default')\n d = self.app_worker.send_to(to_addr, content, endpoint=endpoint)\n d.addCallback(lambda r: self.reply(command, success=True))\n return d", "def send_message(\n self,\n message: str,\n onboarding_step: int = None,\n done: bool = False,\n delay_time: int = 0,\n ):\n task_data = dict()\n if onboarding_step:\n task_data['on_boarding_step'] = onboarding_step\n act = {\n 'id': constants.ONBOARDING_AGENT,\n 'text': message,\n 'episode_done': done,\n 'task_data': task_data,\n }\n if delay_time > 0:\n time.sleep(delay_time)\n self.agent.observe(act)", "def send_message(self, message, raw = None):\n if not self.opened:\n logger.error(\"%s: Cannot send while closed!\" % \\\n self.__class__.__name__)\n raise utils.TransportError (\"Cannot send while closed!\")\n \n # Add copy to link buffer\n logger.debug(\"%s: Loop-back port write started...\" % \\\n self.__class__.__name__)\n if message != None:\n raw = message.raw_string()\n elif not isinstance(raw,str):\n raw = utils.to_ascii(raw)\n self._link.put(raw)\n logger.debug(\"%s: ...loop-back port write complete.\" % \\\n self.__class__.__name__)\n logger.info(\"%s: Wrote %d bytes:\\n%s\" % \\\n (self.__class__.__name__, len(raw),\n ' '.join(map(utils.hex,map(ord,raw)))))", "def send(self, *args, **kwargs):\n self._dispatcher.send(*args, **kwargs)", "def send_message(self, to, message):\n\t\tmessage_dict = {\n\t\t\tACTION: MESSAGE,\n\t\t\tSENDER: self.username,\n\t\t\tDESTINATION: to,\n\t\t\tTIME: time.time(),\n\t\t\tMESSAGE_TEXT: message\n\t\t}\n\t\tclient_log.debug(f'Сформирован словарь сообщения: {message_dict}')\n\t\t# Необходимо дождаться освобождения сокета для отправки сообщения\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, message_dict)\n\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\tclient_log.info(f'Отправлено сообщение для пользователя {to}')", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def send(self):\r\n return self.sendRaw(self.message)", "def message_handler(self, dest, source, message):\n pass", "def send_message(self, cmd_id, message_type, status, message=None):\n pass" ]
[ "0.7889077", "0.76901394", "0.7472363", "0.7467401", "0.7311258", "0.7185726", "0.7074459", "0.7047595", "0.70432305", "0.70432305", "0.70432305", "0.7038259", "0.70371765", "0.6928427", "0.69204515", "0.6901408", "0.6896311", "0.6889627", "0.6871901", "0.68246526", "0.68081975", "0.6800867", "0.6797413", "0.67799044", "0.6767916", "0.67631847", "0.67624855", "0.6757327", "0.6740111", "0.67379516", "0.6725725", "0.67105013", "0.6705127", "0.67049754", "0.66991746", "0.66983837", "0.66870517", "0.66675186", "0.6664263", "0.66584516", "0.66584516", "0.6655029", "0.6637671", "0.6625072", "0.662274", "0.6618062", "0.6617896", "0.660802", "0.6594367", "0.65851784", "0.6576557", "0.657372", "0.65717036", "0.6568887", "0.65619874", "0.65480083", "0.6540021", "0.6539798", "0.65325207", "0.65320724", "0.6519854", "0.6516467", "0.65036976", "0.65008765", "0.64947796", "0.6481198", "0.6479865", "0.6478987", "0.64728767", "0.6464193", "0.64629817", "0.6453109", "0.64495736", "0.6448298", "0.64413315", "0.64410865", "0.6429567", "0.6429525", "0.64085436", "0.6406157", "0.64052844", "0.6385404", "0.63853", "0.6368905", "0.63665074", "0.63532317", "0.6350631", "0.6346252", "0.63424", "0.6332395", "0.6325812", "0.6319048", "0.63185024", "0.6316344", "0.63097054", "0.63043964", "0.6299855", "0.6295924", "0.62947387", "0.62874734", "0.6286988" ]
0.0
-1
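The long array of candidate snippets a few lines up and the list of floating-point values that follows it appear to run in parallel, one score per snippet, with the bare 0.0 and -1 values closing out the record. A minimal sketch of pairing and ranking such candidates, assuming the row is loaded as a Python dict; only the "negatives" field name is taken from the triplet objective shown in the metadata line, while "negative_scores" and the truncated sample values are placeholders:

```python
# Sketch only: "negative_scores" and the sample values are assumptions about this dump.
record = {
    "negatives": [
        "def send(msg, dest=None):",
        "def send_message(message, destination):\n\n    #Your code here\n    pass",
    ],
    "negative_scores": [0.7889077, 0.76901394],  # parallel to "negatives"
}

# Pair each candidate snippet with its score and list them best-first.
ranked = sorted(
    zip(record["negatives"], record["negative_scores"]),
    key=lambda pair: pair[1],
    reverse=True,
)
for snippet, score in ranked:
    print(f"{score:.4f}  {snippet.splitlines()[0]}")
```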
Parses incoming dns message.
async def data_received(self, data: bytes) -> None: self.response_message.set_result(data)
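This record pairs the query about parsing an incoming DNS message with a short data_received callback that parks the raw reply bytes in a future named response_message. A self-contained sketch of one way such a callback could be wired up, assuming an asyncio stream protocol and DNS over TCP; apart from the data_received and response_message names visible above, everything here is an assumption for illustration:

```python
import asyncio


class _DnsReplyProtocol(asyncio.Protocol):
    """Sketch: resolve a Future with the first chunk of the raw DNS reply."""

    def __init__(self) -> None:
        # Future the caller awaits; mirrors the response_message attribute above.
        self.response_message = asyncio.get_running_loop().create_future()

    def data_received(self, data: bytes) -> None:
        # asyncio invokes this as a plain method, so it is not declared async here.
        if not self.response_message.done():
            self.response_message.set_result(data)


async def query_raw(packet: bytes, server: str, port: int = 53) -> bytes:
    """Send an already-encoded DNS query and return the raw reply bytes."""
    loop = asyncio.get_running_loop()
    transport, protocol = await loop.create_connection(_DnsReplyProtocol, server, port)
    try:
        # DNS over TCP prefixes each message with a two-byte length field;
        # the reply handed to the Future still carries its own prefix.
        transport.write(len(packet).to_bytes(2, "big") + packet)
        return await asyncio.wait_for(protocol.response_message, timeout=5.0)
    finally:
        transport.close()
```

A caller would build the packet bytes with whatever DNS encoder the surrounding project uses; encoding and decoding of the message itself is outside this sketch.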
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dns_entry(self, msg):\n if msg['message'].find('Calling getaddrinfo') > -1:\n match = re.search(r'Calling getaddrinfo for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname not in self.dns:\n self.dns[hostname] = {'start': msg['timestamp']}\n elif msg['message'].find('lookup completed for host') > -1:\n match = re.search(r'lookup completed for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname in self.dns and 'end' not in self.dns[hostname]:\n self.dns[hostname]['end'] = msg['timestamp']", "def parse_body(dns_body):\n\n\tlength = struct.unpack('!B', dns_body[:1])[0]\n\tdns_body = dns_body[1:]\n\n\trequested_domain = ''\n\tquery_length = 0\n\twhile True:\n\t\tquery_length += length + 1\n\t\tpart = struct.unpack('!{}c'.format(length), dns_body[:length])\n\t\tfor ch in part:\n\t\t\trequested_domain += ch.decode()\n\t\tdns_body = dns_body[length:]\n\n\t\tval = struct.unpack('!B', dns_body[:1])[0]\n\t\tif val == 0:\n\t\t\tquery_length += 1\n\t\t\tbreak\n\n\t\trequested_domain += '.'\n\t\tdns_body = dns_body[1:]\n\t\tlength = val\n\n\trecord = struct.unpack('!H', dns_body[1:3])[0]\n\trequested_record = RECORDS[record]\n\n\treturn requested_domain, requested_record, query_length + 4", "def parse(self, log_message):\n\n # try to resolve the IP address\n try:\n ipaddr = log_message[\"SOURCEIP\"].decode(\"utf-8\")\n\n hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ipaddr)\n # print(ipaddr)\n # print(hostname)\n parts = str(hostname).split(\".\")\n name = parts[0]\n # print(name)\n if len(parts) > 1:\n log_message[\"HOST\"] = name\n except:\n return False\n\n # return True, other way message is dropped\n return True", "def _read_dns_(dns, cnt):\r\n \r\n dn_names = None\r\n dn_ids = None\r\n dn_iaps = [None]*10\r\n \r\n for dn in dns.DN:\r\n if dn.ref == 'Name':\r\n dn_names = dn.value\r\n if dn.ref == 'DNId':\r\n dn_ids = dn.value\r\n if dn.ref == 'IAP':\r\n dn_iaps[0] = dn.value\r\n if dn.ref == 'IAP2':\r\n dn_iaps[1] = dn.value\r\n if dn.ref == 'IAP3':\r\n dn_iaps[2] = dn.value\r\n if dn.ref == 'IAP4':\r\n dn_iaps[3] = dn.value\r\n if dn.ref == 'IAP5':\r\n dn_iaps[4] = dn.value\r\n if dn.ref == 'IAP6':\r\n dn_iaps[5] = dn.value\r\n if dn.ref == 'IAP7':\r\n dn_iaps[6] = dn.value\r\n if dn.ref == 'IAP8':\r\n dn_iaps[7] = dn.value\r\n if dn.ref == 'IAP9':\r\n dn_iaps[8] = dn.value\r\n if dn.ref == 'IAP10':\r\n dn_iaps[9] = dn.value\r\n \r\n logger.info('Parsed DN names: %s' % dn_names)\r\n logger.info('Parsed DN ids: %s' % dn_ids)\r\n logger.info('Parsed DN iaps: %s' % dn_iaps)\r\n \r\n for i in range(len(dn_names)):\r\n mydn = Dn()\r\n mydn.set_id(dn_ids[i])\r\n mydn.set_name(dn_names[i])\r\n myiaps = [None]*10\r\n for j in range(10):\r\n myiaps[j] = dn_iaps[j][i]\r\n mydn.set_iaps(myiaps)\r\n cnt.add_dn(mydn)\r\n return cnt", "def datagram_received(self, data, addr):\n message = data.decode()\n sysmatch = SYSLOG_MESSAGE_RE.match(message)\n if sysmatch is None:\n self.log.error('Cannot parse syslog with regex: ' + message)\n return\n\n runner = self.runner\n\n match = DHCPCD_ADD_RE.match(sysmatch.group('msg'))\n if match is not None:\n timestamp = timeparser.parse(sysmatch.group('date'))\n\n runner.loop.create_task(runner.network_added(match.group('intf'),\n match.group('route'), timestamp))\n return\n\n match = INTF_REMOVE_RE.match(sysmatch.group('msg'))\n if match is not None:\n runner.loop.create_task(runner.network_removed(match.group('intf')))\n 
return\n match = WPA_REMOVE_RE.match(sysmatch.group('msg'))\n if match is not None:\n runner.loop.create_task(runner.network_removed(match.group('intf')))\n return\n\n match = WPA_ADD_RE.match(sysmatch.group('msg'))\n if match is None:\n match = KERNEL_ADD_RE.match(sysmatch.group('msg'))\n if match is not None:\n # probably interface with static ip was connected\n timestamp = timeparser.parse(sysmatch.group('date'))\n\n runner.loop.create_task(runner.network_added(match.group('intf'),\n None, timestamp))\n return", "def parse(cls, packet):\n buffer = DNSBuffer(packet)\n try:\n header = DNSHeader.parse(buffer)\n questions = []\n rr = []\n auth = []\n ar = []\n for i in range(header.q):\n questions.append(DNSQuestion.parse(buffer))\n for i in range(header.a):\n rr.append(RR.parse(buffer))\n for i in range(header.auth):\n auth.append(RR.parse(buffer))\n for i in range(header.ar):\n ar.append(RR.parse(buffer))\n return cls(header, questions, rr, auth=auth, ar=ar)\n except DNSError:\n raise\n except (BufferError, BimapError) as e:\n raise DNSError(\"Error unpacking DNSRecord [offset=%d]: %s\" % (\n buffer.offset, e))", "def parse_message(self, message):\n pass", "def run(self):\r\n print \"*Ping* We've got a message!\"\r\n # Handle DNS request\r\n resolver = Resolver(self.caching, self.ttl)\r\n aliasRecords = []\r\n addressRecords = []\r\n # Read and resolve the questions one-by-one\r\n questions = self.request.questions\r\n for question in questions:\r\n hostname = question.qname\r\n (hostname, aliases, addresses) = resolver.gethostbyname(hostname)\r\n \r\n for alias in aliases:\r\n aliasData = dns.resource.RecordData.create(Type.CNAME, alias)\r\n aliasRecord = dns.resource.ResourceRecord(hostname, Type.CNAME, Class.IN, 9001, aliasData) # TODO fix ttl\r\n aliasRecords.append(aliasRecord)\r\n for address in addresses:\r\n addressData = dns.resource.RecordData.create(Type.A, address)\r\n addressRecord = dns.resource.ResourceRecord(hostname, Type.A, Class.IN, 9001, addressData)\r\n addressRecords.append(addressRecord)\r\n \r\n # Crafting of the response\r\n respHeader = self.request.header\r\n respHeader.qr = 1\r\n respHeader.qd_count = 0\r\n respHeader.an_count = 1\r\n \r\n respMessage = dns.message.Message(respHeader, [], addressRecords + aliasRecords, [], [])\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n respMessageByte = respMessage.to_bytes()\r\n sock.sendto(respMessageByte, self.clientAddr)\r\n print \"Ended request: \" + hostname\r\n sock.close()", "def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = 
getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg", "def _ParseResponse(data):\n hostname = None\n ip = None\n port = None\n ptrname = None\n text = None\n\n try:\n dns_response = dpkt.dns.DNS(data)\n except (dpkt.Error, dpkt.NeedData, dpkt.UnpackError):\n # Ignore bad mDNS response.\n return None\n\n for rr in dns_response.an:\n if rr.type == dpkt.dns.DNS_A:\n ip = socket.inet_ntoa(rr.ip)\n elif rr.type == dpkt.dns.DNS_PTR:\n ptrname = rr.ptrname\n elif rr.type == dpkt.dns.DNS_SRV:\n hostname = rr.srvname\n port = rr.port\n elif rr.type == dpkt.dns.DNS_TXT:\n text = dict(entry.split('=', 1) for entry in rr.text)\n service = Service(hostname, ip, port, ptrname, text)\n\n # Ignore incomplete responses.\n if any(x is None for x in service):\n return None\n return service", "def _parse_msg(self, msg):\n try:\n self.received_msg += msg.decode()\n except:\n self.log.warning(\"invalid parse frame '%s'\" % msg)\n\n while True:\n pos = self.received_msg.find('\\r')\n if pos == -1: # no full msg\n break\n m = self.received_msg[:pos].strip()\n if not len(m):\n break\n self.platform.process_received_message(m)\n self.received_msg = self.received_msg[pos + 1:]", "def msgParse(body, msg):\n\tnet = '10.32.23.0'\n\tmask = 24\n\tif body['event_type'] == 'port.create.end':\n\t\t# list from dict with new ips\n\t\taddIps = body['payload']['port']['fixed_ips'] \n\t\tfor i in addIps:\n\t\t\tif checkNet(net,mask, i['ip_address']):\n\t\t\t\tlogging.info('IP to be created in the provison system: ' + i['ip_address'])\n\t\t\t\tmainProv(event='create', portId=body['payload']['port']['id'],\n\t\t\t\t\t\tipAddr=i['ip_address'])\n\n\tif body['event_type'] == 'port.delete.end':\n\t\tlogging.info('Port to be deleted in the provision system: ' + body['payload']['port_id'])\n\t\tmainProv(event='destroy',portId=body['payload']['port_id'])\n\n\tmsg.ack()", "def message_parser(msg):\n # Start a new message\n new_msg = {\n \"messageType\": msg[\"messageType\"],\n \"messageID\": msg[\"messageID\"],\n \"messageURL\": msg[\"messageURL\"],\n \"messageIssueTime\": msg[\"messageIssueTime\"],\n 'messageBody': {}\n }\n # Break down the incoming message's messageBody and save to new message\n sections = msg[\"messageBody\"].split(\"\\n## \")\n for part in sections:\n try:\n header, body = part.split(\":\", 1) # only split on first occurrence of colon, not all occurrences (ie dates)\n header = header.strip(\"##\").replace(\" \", \"_\").lower() # clean up headers\n body = body.lstrip(\" \").replace(\"\\n\", \" \").replace(\"#\", \"\")\n if header:\n new_msg[\"messageBody\"][header] = body\n except ValueError:\n continue\n # Break down notes if present and save to new message\n if 
\"notes\" in new_msg[\"messageBody\"] and new_msg[\"messageBody\"][\"notes\"]:\n try:\n notes_wo_dsc = new_msg[\"messageBody\"][\"notes\"].split(\"Disclaimer\")[0] # First set the important stuff to a var\n new_msg[\"messageBody\"][\"notes\"] = {} # now turn notes into an object\n parent_header, children = notes_wo_dsc.split(\":\", 1)\n parent_header = parent_header.lstrip(\" \")\n new_msg[\"messageBody\"][\"notes\"][parent_header] = {} # make a new object for more children\n child_parts = children.split(\" \")\n child_header = None\n new_body = \"\"\n for part in child_parts:\n if part.endswith(\":\"):\n child_header = part.strip(\":\")\n else:\n new_body += part + \" \"\n if child_header:\n new_msg[\"messageBody\"][\"notes\"][parent_header][child_header] = new_body\n except ValueError:\n pass\n # We don't need the disclaimers taking up memory\n if \"disclaimer\" in new_msg[\"messageBody\"]:\n del new_msg[\"messageBody\"][\"disclaimer\"]\n return new_msg", "def parse(self, message: Message):\n\t\tpass", "def parse_entry(msg):\n values = msg.split(';')\n return {\n 'dt': datetime.strptime(\n values[0], '%Y-%m-%d %H:%M:%S.%f'),\n 'event': values[1]\n }", "def gethostbyname(self, hostname, dnsserv='192.112.36.4'):\n ipaddrlist = []\n cnames = []\n temp = []\n if(self.caching):\n rcache = RecordCache(self.ttl)\n rcord = rcache.lookup(hostname, Type.ANY, Class.IN)\n if(rcord):\n for rec in rcord:\n if rec.type_ == Type.A:\n arec = rec.rdata\n ipaddrlist.append(arec.address)\n elif rec.type_ == Type.CNAME:\n crec = rec.rdata\n cnames.append(crec.cname)\n if ipaddrlist:\n return hostname, cnames, ipaddrlist\n elif cnames:\n return self.gethostbyname(cnames[0], dnsserv)\n \n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(self.timeout)\n\n # Create and send query\n question = Question(Name(str(hostname)), Type.A, Class.IN)\n header = Header(9001, 0, 1, 0, 0, 0)\n header.qr = 0\n header.opcode = 0\n header.rd = 1\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (str(dnsserv), 53))\n\n # Receive response\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n print(\"Number of answers: \" +str(len(response.answers)))\n print(\"Number of authorities: \" + str(len(response.authorities)))\n print(\"Number of additionals: \" + str(len(response.additionals)))\n\n # Get data\n aliaslist = cnames\n ipaddrlist = []\n dnslist = []\n \n while response.answers:\n for answer in response.answers:\n if answer.type_ == Type.A:\n print(\"found A RR\")\n if(self.caching):\n rcache.add_record(answer)\n ipaddrlist.append(answer.rdata.address)\n if answer.type_ == Type.CNAME:\n aliaslist.append(answer.rdata.cname)\n if answer.type_ == Type.NS:\n dnslist.append(answer.rdata.nsdname)\n if ipaddrlist:\n return hostname, aliaslist, ipaddrlist\n elif aliaslist:\n question = Question(Name(aliaslist[0]), Type.A, Class.IN)\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n elif dnslist:\n nsname = dnslist.pop()\n maybe_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_dnsserv:\n dnsserv = maybe_dnsserv\n else:\n pass\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n else:\n break\n\n if response.authorities:\n for authority in response.authorities:\n if authority.type_ != Type.NS:\n pass\n dnslist.append(authority.rdata.nsdname)\n while dnslist:\n nsname = dnslist.pop()\n maybe_next_dnsserv = 
self.getnsaddr(nsname, response.additionals)\n if maybe_next_dnsserv:\n next_dns_serv = maybe_next_dnsserv\n else:\n pass\n (hname, aliasl, ipaddrl) = self.gethostbyname(hostname, nsname)\n if ipaddrl:\n return hname, aliasl, ipaddrl", "def parse_aprs (packet):\n\n print (packet)\n if len(packet) == 0:\n return\n\n chan = ''\n # Split into address and information parts.\n # There could be a leading '[n]' with a channel number.\n m = re.search (r'^(\\[.+\\] *)?([^:]+):(.+)$', packet)\n if m:\n chan = m.group(1)\t# Still enclosed in [].\n addrs = m.group(2)\n info = m.group(3)\n #print ('<>'+addrs+'<>'+info+'<>')\n\n if info[0] == '}':\n # Unwrap third party traffic format\n # Preserve any channel.\n if chan:\n parse_aprs (chan + info[1:])\n else:\n parse_aprs (info[1:])\n elif info[0:3] == '{DE':\n # APRS \"user defined data\" format for EAS.\n #print ('Process \"message\" - ' + info)\n process_eas (chan, info[3:])\n else:\n print ('Not APRS \"user defined data\" format - ' + info)\n else:\n print ('Could not split into address & info parts - ' + packet)", "def handle_dns(bot, ievent):\n if not ievent.args:\n ievent.missing('<host | ip>')\n else:\n is_a = None\n result = None\n # If we support IPv6 ...\n if socket.has_ipv6:\n # ... then check if this is an IPv6 ip\n try:\n socket.inet_pton(socket.AF_INET6, ievent.args[0])\n is_a = 'ipv6'\n except socket.error:\n pass\n # Ah not an IPv6 ip ...\n if not is_a:\n # ... maybe IPv4 ?\n try:\n socket.inet_pton(socket.AF_INET, ievent.args[0])\n is_a = 'ipv4'\n except socket.error:\n pass\n # Not an ip, must be a hostname then\n if not is_a:\n is_a = 'host'\n # If it was an ip ...\n if is_a in ['ipv4', 'ipv6']:\n try:\n # ... try to resolve it\n result = socket.gethostbyaddr(ievent.args[0])\n if result[1]:\n result = 'primary: %s, aliases: %s' % \\\n (result[0], ', '.join(result[1]))\n else:\n result = result[0]\n ievent.reply('%s ip %s resolves to %s' % \\\n (is_a, ievent.args[0], result))\n except Exception, e:\n ievent.reply('could not resolve %s address %s: %s' % \\\n (is_a, ievent.args[0], e[1]))\n # Oh it's a host, lets resolve that\n elif is_a == 'host':\n try:\n result = []\n for info in socket.getaddrinfo(ievent.args[0], None):\n if info[0] in [socket.AF_INET, socket.AF_INET6] and \\\n info[1] == socket.SOCK_STREAM:\n ip = info[4][0]\n if not ip in result:\n result.append(ip)\n if not result:\n ievent.reply('could not resolve hostname %s: not found' % \\\nievent.args[0])\n else:\n ievent.reply('%s resolves to: %s' % (ievent.args[0], \\\n', '.join(result)))\n except Exception, e:\n ievent.reply('could not resolve hostname %s: %s' % \\\n (ievent.args[0], e[1]))\n else:\n ievent.reply('lookup failed, no valid data found')", "def parse(self):\n\t\tsub = self.body.split(' ')\n\t\tif len(sub) == 3:\n\t\t\tself.latitude = float(sub[1])\n\t\t\tself.longitude = float(sub[2])\n\t\telse:\n\t\t\tself.latitude = None\n\t\t\tself.longitude = None\n\t\t\traise Exception(\"Invalid message\")", "def test2_incoming_dnstap(self):\n cmd = [\"python3\", \"-c\", \n \"from dnstap_receiver.receiver import start_receiver; start_receiver()\"]\n\n with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as proc:\n print(\"run dns resolution to generate dnstap message\")\n for i in range(10):\n print(\"make dns resolution %s\" % i)\n r = my_resolver.resolve('www.github.com', 'a')\n time.sleep(1)\n\n proc.kill()\n \n o = proc.stdout.read()\n print(o)\n self.assertRegex(o, b\"_RESPONSE\")", "def parse_answers(dns_resp: str, session_cache):\n\n ID = 
dns_resp[:4]\n other_flags = dns_resp[4:8]\n questions_count = dns_resp[8:12]\n answers_count = dns_resp[12:16]\n auth_serv_info = dns_resp[16:20]\n additional_info = dns_resp[20:24]\n offset = 0\n ip = \"0.0.0.0\"\n\n # может придти несколько ответов, из каждого вычленим нужные записи\n for i in range(int(answers_count, 16)):\n try:\n ip, offset = DNSHandler.parse_answer(dns_resp, session_cache, offset=offset * i)\n except ValueError:\n print(\"url does not exist\")\n sys.exit(0)\n return ip", "def parse_header(dns_header):\n\n\tglobal HEADERS\n\tglobal RECURSION_DESIRED\n\n\tHEADERS = struct.unpack('!6H', dns_header)\n\tRECURSION_DESIRED = get_bit(HEADERS[FLAGS], 8)", "def parse(msg):\n msg = msg.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\b\", \"\")\n pseudo = user_account = ip = msg_type = content = target = \"\"\n msg_parsed = message_regex.search(msg)\n if msg_parsed:\n data = msg_parsed.groups()\n if len(data) >= 6:\n pseudo = data[0]\n user_account = data[1]\n ip = data[2]\n msg_type = data[3]\n target = data[4]\n content = data[5]\n if target.startswith(\"#\") and msg_type == \"PRIVMSG\":\n msg_type = \"PUBMSG\"\n return Message(pseudo, user_account, ip, msg_type, content, target)", "def decodeMes(message):\n m = message[0]\n header = m[:12]\n hid, flags, QDCount, ANCount, NSCount, ARCount = struct.unpack('!HHHHHH', header)\n\n if ((flags % 16) == 1):\n print(\"Corrupt Message\", file=open('iter.txt', 'a+'))\n print('Corrupt Message', file=open('tmp.txt', 'a+'))\n exit(1)\n\n index = 12\n\n questionsList = []\n # Get the query\n for i in range(QDCount):\n name, index = decodeName(m, index)\n questionsList.append(name)\n\n # Get the Type and Class of the Question section\n Qtype, Qclass = struct.unpack('!HH', m[index:index+4])\n index +=4\n\n # Possible type values\n A = 1\n CNAME = 5\n SOA = 6\n MX = 15\n\n # Read in the Answer section\n answer_addressList = []\n for i in range(ANCount):\n name, index = decodeName(m, index)\n nameServType, nameServClass, TTL, dataLen = struct.unpack('!HHIH', m[index:index+10])\n index+=10\n\n # If the answer received is a CNAME, exit\n if nameServType == CNAME:\n print(\"CNAME found\", file=open('iter.txt', 'a+'))\n print(\"CNAME found\", file=open('tmp.txt', 'a+'))\n exit(1)\n\n # If the answer is a Mail Exchange Answer\n if nameServType == MX:\n preference, = struct.unpack('!H', m[index:index+2])\n index+=2\n mail_exchange, index = decodeName(m, index)\n print(mail_exchange, file=open('iter.txt', 'a+'))\n print(mail_exchange, file=open('tmp.txt', 'a+'))\n answer_addressList.append(mail_exchange)\n return (answer_addressList, 4)\n\n # If the answer is a IP Address\n if nameServType == A:\n if dataLen == 4:\n ip_address1, ip_address2, ip_address3, ip_address4 = struct.unpack('!BBBB', m[index:index+4])\n ip_address = str(ip_address1)+\".\"+str(ip_address2)+\".\"+str(ip_address3)+\".\"+str(ip_address4)\n answer_addressList.append(ip_address)\n index+=4\n\n # Read in the Authoritative name servers\n nameServerList = []\n for i in range(NSCount):\n name, index = decodeName(m, index)\n nameServType, nameServClass, TTL, dataLen = struct.unpack('!HHIH', m[index:index+10])\n index += 10\n\n if nameServType == SOA:\n return (nameServerList, 3)\n\n name, index = decodeName(m, index)\n\n nameServerList.append(name)\n\n # Read in the Additional Records\n ip_addressList = []\n for i in range(ARCount):\n name, index = decodeName(m, index)\n nameServType, nameServClass, TTL, dataLen = struct.unpack('!HHIH', m[index:index+10])\n index+=10\n\n # Get 
the IP Addresses of the Authoritative name servers\n if dataLen == 4:\n ip_address1, ip_address2, ip_address3, ip_address4 = struct.unpack('!BBBB', m[index:index+4])\n ip_address = str(ip_address1)+\".\"+str(ip_address2)+\".\"+str(ip_address3)+\".\"+str(ip_address4)\n ip_addressList.append(ip_address)\n index+=4\n\n if (ANCount > 0):\n return (answer_addressList, 2)\n\n elif (ARCount > 0):\n return (ip_addressList, 1)\n\n elif (NSCount > 0):\n return (nameServerList, 0)", "def parse_line(self, line):\n if self.signal_eof:\n return \"\"\n\n match = re.search(\"^([\\w\\s]+from) ([^:]+):(\\d+)(:|,)$\", line)\n if match:\n return self.parse_line_from(match)\n\n match = re.search(\"^([^:]+):(?:((?:\\d+:)?\\d+):)?(?:(error|warning|note):)?(.+)$\", line)\n if match:\n return self.parse_line_err(match)\n\n return line", "def parse_messages(self, orig):\n data=orig[1:len(orig)-1]\n output=[]\n for i in range(0, len(data), 3):\n message_data=data[i].split(',')\n message_text=data[i+1]\n output.append({'status':message_data[1], 'number':message_data[2],'date':message_data[4],'time':message_data[5],'text':message_text})\n return output", "def parse_recvd_data(data):\n parts = data.split(b'\\0')\n msgs = parts[:-1]\n rest = parts[-1]\n return (msgs, rest)", "def datagram_received(self, data, addr):\n self.decode_msg(data, self.state)", "def read_udp_message(socket):\n data, address = socket.recvfrom(4096)\n data = data.decode('utf-8')\n return json.loads(data), address", "def parse_mochad_line(self, line):\n # bail out unless it's an incoming RFSEC message\n if line[15:23] == 'Rx RFSEC':\n\n # decode receive RFSEC message. format is either:\n # 09/22 15:39:07 Rx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Rx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n line_list = line.split(' ')\n addr = line_list[5]\n func = line_list[7]\n\n func_dict = self.decode_func(func)\n\n return addr, {'func': func_dict}, 'security'\n\n# elif line[15:23] == 'Tx RFSEC':\n\n # decode send RFSEC message. format is either:\n # 09/22 15:39:07 Tx RFSEC Addr: 21:26:80 Func: Contact_alert_min_DS10A\n # ~ or ~\n # 09/22 15:39:07 Tx RFSEC Addr: 0x80 Func: Motion_alert_SP554A\n# line_list = line.split(' ')\n# addr = line_list[5]\n# func = line_list[7]\n#\n# func_dict = self.decode_func(func)\n#\n# return addr, {'func': func_dict}, 'trigger'\n\n elif line[15:20] == 'Rx RF':\n\n # decode receive RF message. format is:\n # 02/13 23:54:28 Rx RF HouseUnit: B1 Func: On\n line_list = line.split(' ')\n house_code = line_list[5];\n house_func = line_list[7]\n\n return house_code, {'func': house_func}, 'radio'\n\n elif line[15:20] == 'Rx PL':\n \n # decode receive PL message. format is:\n # 02/13 23:54:28 Rx PL HouseUnit: A1\n # 02/13 23:54:28 Rx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'powerline'\n \n elif line[15:20] == 'Tx PL':\n \n # decode send RF/PL message. 
format is:\n # 02/13 23:54:28 Tx PL HouseUnit: A1\n # 02/13 23:54:28 Tx PL House: A Func: On\n line_list = line.split(' ')\n if line[21:27] == 'HouseU':\n house_code = line_list[5]\n with open ('/root/.house_code', 'wb') as f:\n pickle.dump(house_code, f)\n else:\n house_func = line_list[7]\n with open ('/root/.house_code', 'rb') as f:\n house_code = pickle.load(f)\n return house_code, {'func': house_func}, 'button'\n \n return '', ''", "def parse_msg(msg, stationary_R=False):\n x_hat, y_hat, zone_num, zone_letter = utm.from_latlon(msg['lat'], msg['lon'])\n\n heading = msg['heading']\n\n # convert from degrees from true north to\n # degrees from x axis (UTM easting)\n heading = (-heading + 90.) % 360.\n\n measurement = np.array([x_hat, y_hat, msg['speed'], heading])\n\n x_rms = msg['rms_lat']\n y_rms = msg['rms_lon']\n\n if not stationary_R:\n return measurement, x_rms, y_rms\n else:\n return measurement", "def collect_results(name: str) -> dict:\n full_response = {}\n\n target_name = dns.name.from_text(name)\n\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n if response is not None:\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n\n if response is not None:\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n\n if response is not None:\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n if response is not None:\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response", "def _parse_reply(self, msg_list): #{\n logger = self.logger\n\n if len(msg_list) < 4 or msg_list[0] != b'|':\n logger.error('bad reply: %r' % msg_list)\n return None\n\n msg_type = msg_list[2]\n data = msg_list[3:]\n result = None\n srv_id = None\n\n if msg_type == b'ACK':\n srv_id = data[0]\n elif msg_type in (b'OK', b'YIELD'):\n try:\n result = self._serializer.deserialize_result(data)\n except Exception, e:\n msg_type = b'FAIL'\n result = e\n elif msg_type == b'FAIL':\n try:\n error = jsonapi.loads(msg_list[3])\n if error['ename'] == 'StopIteration':\n result = StopIteration()\n elif error['ename'] == 'GeneratorExit':\n result = GeneratorExit()\n else:\n result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])\n except Exception, e:\n logger.error('unexpected error while decoding FAIL', exc_info=True)\n result = RPCError('unexpected error while decoding FAIL: %s' % e)\n else:\n result = RPCError('bad message type: %r' % msg_type)\n\n return dict(\n type = msg_type,\n req_id = msg_list[1],\n srv_id = srv_id,\n result = result,\n )", "def handle_request(self):\n try:\n data = self.sock.recv(1024)\n except socket.error as e: # 
...,e:\n if e == 10040:\n print('Message too long, ignoring.')\n return\n raise\n self.append_to_seq(parse_packet(data))", "def parse(lines): \n replied = len(lines)\n avg_delay, lost = 0, 0\n qos = 1.0\n \n if replied != 0:\n for line in lines:\n line.strip() #remove leading and trailing spaces\n \"\"\"\n Each line has the following fields:\n [status code] [reply time (seconds since epoch)] [source IP] [source url] [source query] [serving delay]\n \n e.g.:\n 200 1296756182 192.168.10.2 /home.php ?N=192 11045\n 200 1296756183 192.168.10.2 /home.php ?N=192 230036\n 200 1296756183 192.168.10.2 /home.php ?N=192 230684\n \"\"\"\n status, time, sourceIP, url, query, delay = line.split()\n \n time = int(time)\n delay = int(delay)\n \n if delay > DEADLINE:\n lost += 1\n avg_delay += delay\n avg_delay /= replied\n qos = (replied - lost) / replied\n\n return {'replied': replied, 'delay' : avg_delay, 'qos' : qos, 'lost': lost}", "def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)", "def handleQuery(self, message, protocol, address):\n if protocol.transport.socket.type == socket.SOCK_STREAM:\n self.peer_address = protocol.transport.getPeer()\n elif protocol.transport.socket.type == socket.SOCK_DGRAM:\n self.peer_address = IPv4Address('UDP', *address)\n else:\n self.logger.warn(\"Unexpected socket type %r\", protocol.transport.socket.type)\n\n # Make peer_address available to resolvers that support that attribute\n for resolver in self.resolver.resolvers:\n if hasattr(resolver, 'peer_address'):\n resolver.peer_address = self.peer_address\n\n return server.DNSServerFactory.handleQuery(self, message, protocol, address)", "def decode(self,buf):\n eth = dpkt.ethernet.Ethernet(buf)\n pkt_len = len(buf)\n if(eth.type== dpkt.ethernet.ETH_TYPE_IP):\n ip = eth.data\n dst_ip = socket.inet_ntoa(ip.dst)\n src_ip = socket.inet_ntoa(ip.src)\n octet_list = string.split(dst_ip,'.')\n broadcast = False\n for o in octet_list:\n if (o == \"255\"):\n broadcast = True\n break\n if((octet_list[0] == \"224\") or (octet_list[0] == \"239\")):\n broadcast = True #Its multicast actually.\n if not broadcast:\n if(ip.p == dpkt.ip.IP_PROTO_TCP):\n pass\n elif(ip.p == dpkt.ip.IP_PROTO_UDP):\n udp =ip.data\n if((udp.dport == 53) or (udp.sport == 53)): # A request. \n if(udp.dport == 53): # A request. 
\n return self.dns_handler.handle_dns_request(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n if(udp.sport == 53): # A DNS response\n self.dns_handler.handle_dns_response(ip.src,ip.dst,ip.p,udp.sport,udp.dport,udp.data)\n else:\n pass", "def collect_results(name: str) -> dict:\n full_response = {}\n target_name = dns.name.from_text(name)\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response", "def parse(string):\n if string.strip() == Parser.OK_MSG or string.startswith(Parser.NOT_OK_MSG):\n return Parser._handle_ok_ack(string)\n results = Parser._handle_dict(string)\n results.extend(Parser._handle_else(string))\n return results", "def dns_lookup(self, hostname, aux):\n\n resolver = Resolver()\n\n # If the host doesn't have the A record (IPv4),\n # trying to find its AAAA record (IPv6).\n try:\n addr = resolver.query(hostname, \"A\")[0] # <---+\n ver = 4 # |\n except Exception as e: # From the dnspython lib. 
--------+\n try: # |\n addr = resolver.query(hostname, \"AAAA\")[0] # <---+\n ver = 6\n except Exception as e:\n addr = ver = aux._ERR_PREFIX\n\n return (addr, ver)", "def parsemsg(self,s):\n\t\tprefix = ''\n\t\ttrailing = []\n\t\tif not s:\n\t\t\traise IRCBadMessage(\"Empty line.\")\n\t\tif s[0] == ':':\n\t\t\tprefix, s = s[1:].split(' ', 1)\n\n\t\tif s.find(' :') != -1:\n\t\t\ts, trailing = s.split(' :', 1)\n\t\t\targs = s.split()\n\t\t\targs.append(trailing)\n\t\telse:\n\t\t\targs = s.split()\n\t\tcommand = args.pop(0)\n\t\t\t\t\n\t\treturn {'channel':args[0],'handle':prefix.split('@')[0],'text':args[1]}", "async def parse(self, raw: str) -> dict:", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def _parse_line(self, line):\n msg_info = {'raw_message': line}\n line_split = line.split(None, 2)\n try:\n msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format)\n msg_info['message'] = line_split[2]\n except (ValueError, IndexError):\n pass\n return msg_info", "def _read_message(self):\n if self.__eof:\n return None\n result = {}\n line = sys.stdin.readline()\n while line == '\\n':\n line = 
sys.stdin.readline()\n if not line:\n self.__eof = True\n return None\n s = line.split(\" \", 1)\n result['_number'] = int(s[0])\n result['_text'] = s[1].strip()\n\n while not self.__eof:\n line = sys.stdin.readline()\n if not line:\n self.__eof = True\n return result\n if line == '\\n':\n return result\n s = line.split(\":\", 1)\n result[s[0]] = s[1].strip()", "def _parse(self):\n \n global _tcp_buf\n _tcp_buf = {}\n number = 1\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n self.pcap_packets.append(pcap_packet)\n pcap_packet.pcap_num = number\n number += 1\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n \n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n \n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip.packet[pcap_packet.ip.header_len: ])\n \n #skip the packets that is not http packet\n if (pcap_packet.tcp.src_port != 80 and pcap_packet.tcp.dst_port != 80):\n continue\n \n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, pcap_packet.pcap_num)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def _linux_parse(line, s):\n output_line = {}\n\n if line.startswith('PING '):\n s.ipv4 = 'bytes of data' in line\n\n if s.ipv4 and line[5] not in string.digits:\n s.hostname = True\n # fixup for missing hostname\n line = line[:5] + 'nohost' + line[5:]\n elif s.ipv4 and line[5] in string.digits:\n s.hostname = False\n elif not s.ipv4 and ' (' in line:\n s.hostname = True\n else:\n s.hostname = False\n\n if s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif s.ipv4 and s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif not s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n else:\n dst_ip, dta_byts = (3, 4)\n\n line = line.replace('(', ' ').replace(')', ' ')\n s.destination_ip = line.split()[dst_ip].lstrip('(').rstrip(')')\n s.sent_bytes = line.split()[dta_byts]\n\n return None\n\n if line.startswith('---'):\n s.footer = True\n return None\n\n if s.footer:\n if 'packets transmitted' in line:\n if ' duplicates,' in line:\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[7].rstrip('%')\n s.duplicates = line.split()[5].lstrip('+')\n s.time_ms = line.split()[11].replace('ms', '')\n return None\n\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[5].rstrip('%')\n s.duplicates = '0'\n s.time_ms = line.split()[9].replace('ms', '')\n return None\n\n split_line = line.split(' = ')[1]\n split_line = split_line.split('/')\n output_line = {\n 'type': 'summary',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'packets_transmitted': s.packets_transmitted or None,\n 'packets_received': s.packets_received or None,\n 'packet_loss_percent': s.packet_loss_percent or None,\n 'duplicates': s.duplicates or None,\n 'time_ms': s.time_ms or None,\n 'round_trip_ms_min': split_line[0],\n 'round_trip_ms_avg': split_line[1],\n 'round_trip_ms_max': 
split_line[2],\n 'round_trip_ms_stddev': split_line[3].split()[0]\n }\n\n return output_line\n\n # ping response lines\n\n # request timeout\n if 'no answer yet for icmp_seq=' in line:\n timestamp = False\n isequence = 5\n\n # if timestamp option is specified, then shift icmp sequence field right by one\n if line[0] == '[':\n timestamp = True\n isequence = 6\n\n output_line = {\n 'type': 'timeout',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'icmp_seq': line.replace('=', ' ').split()[isequence]\n }\n\n return output_line\n\n # normal responses\n if ' bytes from ' in line:\n\n line = line.replace('(', ' ').replace(')', ' ').replace('=', ' ')\n\n # positions of items depend on whether ipv4/ipv6 and/or ip/hostname is used\n if s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n elif not s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif not s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n\n # if timestamp option is specified, then shift everything right by one\n timestamp = False\n if line[0] == '[':\n timestamp = True\n bts, rip, iseq, t2l, tms = (bts + 1, rip + 1, iseq + 1, t2l + 1, tms + 1)\n\n output_line = {\n 'type': 'reply',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'response_bytes': line.split()[bts],\n 'response_ip': line.split()[rip].rstrip(':'),\n 'icmp_seq': line.split()[iseq],\n 'ttl': line.split()[t2l],\n 'time_ms': line.split()[tms],\n 'duplicate': 'DUP!' 
in line\n }\n\n return output_line", "def parse(self):\n try:\n if self.bitstream:\n # Parse message header\n self.bitstream.bytepos = 0\n\n if self.bitstream.endswith(\"\\n\"):\n pass\n\n else:\n raise PacketIncomplete(\"Packet does not end with carriage return\")\n\n if self.bitstream.find('0x 50 52 56 41 54',bytealigned=True): # If 'PRVAT' text in bitstream\n self.dataformat = 'NMEA'\n else:\n self.dataformat = 'TRITECH'\n\n if self.dataformat=='NMEA' and self.id != Message.CONFIGURATION_PARAM:\n # go to first comma\n self.bitstream.bytepos = self.bitstream.find('0x2C', bytealigned = True)[0]/8 + 1\n self.payload = self.bitstream.read('bytes:6')\n #skip comma\n self.bitstream.read('bytes:1')\n self.dataunits = self.bitstream.read('bytes:1')\n\n\n elif self.dataformat=='TRITECH' and self.id != Message.CONFIGURATION_PARAM:\n self.bitstream.bytepos = 0\n self.payload = self.bitstream.read('bytes:6')\n self.dataunits = self.bitstream.read('bytes:1')\n else:\n self.bitstream.bytepos = 0\n length_string = 'bytes:'+ str(len(self.bitstream)/8)\n self.payload = self.bitstream.read(length_string)\n\n else:\n pass\n\n except ValueError as e:\n raise PacketCorrupted(\"Unexpected error\", e)", "def _lv_pydns_lookup(name):\n if not DNS.defaults[\"server\"]:\n DNS.DiscoverNameServers()\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"udp\")\n for retries_left in [3, 2, 1, 0]:\n try:\n response = req.req()\n if response and response.header[\"tc\"]:\n # truncated, rerun with tcp\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"tcp\")\n continue\n break\n except DNS.Base.DNSError:\n if not retries_left:\n raise\n time.sleep(1) # retry after sleeping a second\n if not response or not response.answers:\n return []\n result = []\n for a in response.answers:\n if a[\"typename\"].lower() != \"srv\":\n continue\n if isinstance(a[\"data\"], list):\n result.extend(a[\"data\"])\n else:\n result.append(a[\"data\"])\n return result", "def parse_mess_ASCII(self, mess):\n # Decode message - possibly not necessary if we are doing this in the receiving stage\n mess_str = mess.decode(self.encoding)\n\n # Extract individual data-points into list\n mess_list = mess_str.split(' ')\n\n mess_type = mess_list[0]\n\n # Maybe do more with this function, or perhaps just do these basics, and let further processing be done once the response type is known\n return mess_type, mess_list", "def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)", "def datagramReceived(self, datagram_, address):\n #if DEBUG: print \"Datagram received from \"+ repr(address) \n datagram = simplejson.loads(datagram_)\n if not hasattr(datagram,'keys'):\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n pdb.set_trace()\n return\n if 'loop_started' in datagram.keys():\n return\n if 'shotnumber_started' in datagram.keys():\n #dc.get('_exp_sync').shotnumber = datagram['shotnumber_started']\n #return\n self.server.pxi_time = float(datagram['time'])\n self.server.pxi_time_server_time = float(datagram['time']) - float(time.time())#Make this so that it synchronizes the clocks CP\n\n msg = {\"data_context\": 'PXI',\n \"shotnumber\":datagram['shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n if DEBUG: print datagram\n \n self.server.active_parser_ip = datagram['server_ip_in_charge']#Make this so that it synchronizes the clocks CP\n self.server.active_parser_port = datagram['server_port_in_charge']#Make this so that it synchronizes the clocks CP\n 
dc = self.server.command_library.__determineContext__({'data_context':'PXI'}) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['shotnumber_started'])\n print \"Shot started:\", datagram['shotnumber_started'], \"pxi_time:\", self.server.pxi_time, \"time.time():\", float(time.time())\n return\n \n \n if 'fake_shotnumber_started' in datagram.keys():\n if self.server.ip == '10.1.1.124':\n return\n print datagram\n msg = {\"data_context\": datagram['data_context'],\n \"shotnumber\":datagram['fake_shotnumber_started']}\n msg = simplejson.dumps(msg, ensure_ascii = False).encode('utf8')\n self.server.broadcast(msg) \n dc = self.server.command_library.__determineContext__(datagram) \n if not dc.dict.has_key('_exp_sync'):\n exp_sync = sync.Experiment_Sync_Group(self.server, dc.name)\n dc.update({'_exp_sync':exp_sync})\n dc.get('_exp_sync').shotnumber = int(datagram['fake_shotnumber_started'])\n if DEBUG: print \"Fake Shot started:\", datagram['fake_shotnumber_started'], \"pxi_time:\", datagram['time'], \"time.time():\", float(time.time())\n dc.update({'Test_instrument':glab_instrument.Glab_Instrument(params={'server':self.server,'create_example_pollcallback':True})})\n return\n \n try:\n datagram[\"server_ping\"] \n except KeyError:\n if DEBUG: print \"unknown UDP message:\\n\", datagram\n return\n ping_command = commands.ServerCommand(self.server, self.server.catch_ping, datagram)\n self.server.command_queue.add(ping_command)", "def processpacket(p):\n\n\tglobal SynSentToTCPService\n\tglobal SynAckSentToTCPClient\n\tglobal LiveTCPService\n\tglobal LiveTCPClient\n\tglobal LiveUDPService\n\tglobal LiveUDPClient\n\tglobal NmapServerDescription\n\tglobal ManualServerDescription\n\tglobal ClientDescription\n\tglobal MacAddr\n\tglobal OSDescription\n\tglobal ServiceFPs\n\tglobal SipPhoneMatch\n\tglobal Devel\n\tglobal IsRouter\n\tglobal DNSRecord\n\tglobal HostIPs\n\n\tif (type(p) == Dot3) and (type(p['LLC']) == LLC):\n\t\tUnhandledPacket(p)\n\t\t#Spanning Tree Protocol\n\t\t#Debug(\"802.3\")\n\t\t#p.show()\n\t\t#print type(p['LLC'])\n\telif (p['Ethernet'] == None):\n\t\tDebug(\"non-ethernet packet\")\t\t#Need more details on how to handle.\n\t\tUnhandledPacket(p)\n\t\t#p.show()\n\t\t#print type(p)\n\t\t#quit()\n\telif p['Ethernet'].type == 0x0806:\t\t#ARP\n\t\t#pull arp data from here instead of tcp/udp packets, as these are all local\n\t\tif (p['ARP'].op == 1):\t\t\t#1 is request (\"who-has\")\n\t\t\tpass\n\t\tif (p['ARP'].op == 2):\t\t\t#2 is reply (\"is-at\")\n\t\t\tif (p['ARP.psrc'] != None) and (p['ARP.hwsrc'] != None):\n\t\t\t\tIPAddr=p['ARP.psrc']\n\t\t\t\tMyMac=p['ARP.hwsrc'].upper()\n\t\t\t\tif (not MacAddr.has_key(IPAddr)) or (MacAddr[IPAddr] != MyMac):\n\t\t\t\t\tReportId(\"MA\", IPAddr, 'Ethernet', MyMac, '')\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x0800:\t\t#IP\n\t\tsIP=str(p['IP'].src)\n\t\tdIP=str(p['IP'].dst)\n\t\t#Best to get these from arps instead; if we get them from here, we get router macs for foreign addresses.\n\t\t#if not MacAddr.has_key(sIP):\n\t\t#\tReportId(\"MA\", sIP, \"Ethernet\", p['Ethernet'].src, '')\n\t\t#if not MacAddr.has_key(dIP):\n\t\t#\tReportId(\"MA\", dIP, \"Ethernet\", p['Ethernet'].dst, '')\n\n\t\tif p['IP'].proto == 1:\t\t\t#ICMP\n\t\t\tType = p['ICMP'].type\n\t\t\tCode = p['ICMP'].code\n\n\t\t\tif (Type == 0):\t\t\t\t\t\t#Echo reply\n\t\t\t\tif 
(not(OSDescription.has_key(sIP))):\n\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", 'icmp echo reply')\n\t\t\telif (Type == 3) and (type(p[IPerror]) == IPerror):\t#Unreachable, check that we have an actual embedded packet\n\t\t\t\t#if (type(p[IPerror]) != IPerror):\n\t\t\t\t#\tp.show()\n\t\t\t\t#\tprint type(p[IPerror])\n\t\t\t\t#\tquit()\n\t\t\t\tOrigdIP = p[IPerror].dst\n\t\t\t\tif (Code == 0):\t\t\t\t\t#Net unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"NetUn\", \"router\", \"\")\n\t\t\t\telif (Code == 1):\t\t\t\t#Host unreachable\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unreachable')\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"HostUn\", \"router\", \"\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 17):\t#Port unreachable and embedded protocol = 17, UDP, as it should be\n\t\t\t\t\tDNSServerLoc = p[IPerror].src + \",UDP_53\"\n\t\t\t\t\tif (p[UDPerror].sport == 53) and (ManualServerDescription.has_key(DNSServerLoc)) and (ManualServerDescription[DNSServerLoc] == \"dns/server\"):\n\t\t\t\t\t\t#If orig packet coming from 53 and coming from a dns server, don't do anything (closed port on client is a common effect)\n\t\t\t\t\t\t#Don't waste time on port unreachables going back to a dns server; too common, and ephemeral anyways.\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t#If orig packet coming from something other than 53, or coming from 53 and NOT coming from a dns server, log as closed\n\t\t\t\t\t\tOrigDPort = str(p[UDPerror].dport)\n\t\t\t\t\t\tOrigDstService = OrigdIP + \",UDP_\" + OrigDPort\n\t\t\t\t\t\tif ((not LiveUDPService.has_key(OrigDstService)) or (LiveUDPService[OrigDstService] == True)):\n\t\t\t\t\t\t\tLiveUDPService[OrigDstService] = False\n\t\t\t\t\t\t\tReportId(\"US\", OrigdIP, \"UDP_\" + OrigDPort, \"closed\", \"port unreachable\")\n\t\t\t\telif (Code == 3) and (p[IPerror].proto == 6) and (p[TCPerror].dport == 113):\t#Port unreachable and embedded protocol = 6, TCP, which it shouldn't. May be the same firewall providing the TCP FR's\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 6):\t\t\t\t#Net unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'net unknown')\n\t\t\t\telif (Code == 7):\t\t\t\t#Host unknown\n\t\t\t\t\tif (not(OSDescription.has_key(OrigdIP))):\n\t\t\t\t\t\tReportId(\"IP\", OrigdIP, \"IP\", \"dead\", 'host unknown')\n\t\t\t\telif (Code == 9):\t\t\t\t#Network Administratively Prohibited\n\t\t\t\t\tpass\t\t\t\t\t#Can't tell much from this type of traffic. 
Possibly list as firewall?\n\t\t\t\telif (Code == 10):\t\t\t\t#Host Administratively Prohibited\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 11):\t\t\t\t#Network unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 12):\t\t\t\t#Host unreachable for TOS\n\t\t\t\t\tpass\n\t\t\t\telif (Code == 13):\t\t\t\t#Communication Administratively prohibited\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (Type == 8):\t\t\t\t\t#ping\n\t\t\t\t#FIXME - check payload for ping sender type, perhaps\n\t\t\t\tpass\n\t\t\telif (Type == 11):\t\t\t\t\t#Time exceeded\n\t\t\t\tif (Code == 0):\t\t\t\t\t#TTL exceeded\n\t\t\t\t\tif (not(IsRouter.has_key(sIP))):\n\t\t\t\t\t\t#FIXME - put original target IP as column 5?\n\t\t\t\t\t\tReportId(\"RO\", sIP, \"TTLEx\", \"router\", \"\")\n\t\t\t\telse:\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 2:\t\t#IGMP\n\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 6:\t\t#TCP\n\t\t\tsport=str(p['TCP'].sport)\n\t\t\tdport=str(p['TCP'].dport)\n\t\t\t#print p['IP'].src + \":\" + sport + \" -> \", p['IP'].dst + \":\" + dport,\n\t\t\tif (p['TCP'].flags & 0x17) == 0x12:\t#SYN/ACK (RST and FIN off)\n\t\t\t\tCliService = dIP + \",TCP_\" + sport\n\t\t\t\tif not SynAckSentToTCPClient.has_key(CliService):\n\t\t\t\t\tSynAckSentToTCPClient[CliService] = True\n\n\t\t\t\t#If we've seen a syn sent to this port and have either not seen any SA/R, or we've seen a R in the past:\n\t\t\t\t#The last test is for a service that was previously closed and is now open; report each transition once.\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == False)) ):\n\t\t\t\t\tLiveTCPService[Service] = True\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", '')\n\t\t\telif (p['TCP'].flags & 0x17) == 0x02:\t#SYN (ACK, RST, and FIN off)\n\t\t\t\tService = dIP + \",TCP_\" + dport\n\t\t\t\tif not SynSentToTCPService.has_key(Service):\n\t\t\t\t\tSynSentToTCPService[Service] = True\n\t\t\t\t#Debug(\"trying to fingerprint \" + sIP)\n\t\t\t\ttry:\n\t\t\t\t\tp0fdata = p0f(p)\n\t\t\t\t\t#FIXME - reasonably common occurence, don't whine, just fix it.\n\t\t\t\t\t#if (len(p0fdata) >1):\n\t\t\t\t\t#\tDebug(\"More than one OS fingerprint for \" + sIP + \", using the first.\")\n\t\t\t\t\tif (len(p0fdata) >=1):\n\t\t\t\t\t\tPDescription = p0fdata[0][0] + \" \" + p0fdata[0][1] + \" (\" + str(int(p0fdata[0][2]) + 1)\t#FIXME - Grabbing just the first candidate, may need to compare correlation values; provided?\n\t\t\t\t\t\tif (p0fdata[0][2] == 0):\n\t\t\t\t\t\t\tPDescription = PDescription + \" hop away)\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPDescription = PDescription + \" hops away)\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t#[N][2] param appears to be distance away in hops (but add 1 to this to get real hop count?)\n\t\t\t\t\t\tPDescription = PDescription.replace(',', ';')\t\t#Commas are delimiters in output\n\t\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", \"live\", PDescription)\n\t\t\t\texcept:\n\t\t\t\t\tPDescription = 'p0f failure'\n\t\t\t\t\tif (not(OSDescription.has_key(sIP))) or (OSDescription[sIP] != PDescription):\n\t\t\t\t\t\tDebug(\"P0f failure in \" + sIP + \":\" + sport + \" -> \" + dIP + \":\" + dport)\n\t\t\t\t\t\tOSDescription[sIP] = PDescription\n\t\t\t\t\t\tReportId(\"IP\", sIP, \"IP\", 
\"live\", PDescription)\n\t\t\telif (p['TCP'].flags & 0x07) == 0x01:\t#FIN (SYN/RST off)\n\t\t\t\tCliService = sIP + \",TCP_\" + dport\n\t\t\t\tif ( (SynAckSentToTCPClient.has_key(CliService)) and ((not LiveTCPClient.has_key(CliService)) or (LiveTCPClient[CliService] == False)) ):\n\t\t\t\t\tLiveTCPClient[CliService] = True\n\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", '')\n\t\t\telif (p['TCP'].flags & 0x07) == 0x04:\t#RST (SYN and FIN off)\n\t\t\t\t#FIXME - handle rst going in the other direction?\n\t\t\t\tService = sIP + \",TCP_\" + sport\n\t\t\t\tif ( (SynSentToTCPService.has_key(Service)) and ((not LiveTCPService.has_key(Service)) or (LiveTCPService[Service] == True)) ):\n\t\t\t\t\tLiveTCPService[Service] = False\n\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"closed\", '')\n\t\t\telif ((p['TCP'].flags & 0x3F) == 0x15) and (sport == \"113\"):\t#FIN, RST, ACK (SYN, PSH, URG off)\n\t\t\t\t#This may be a firewall or some other device stepping in for 113 with a FIN/RST.\n\t\t\t\tpass\n\t\t\telif (p['TCP'].flags & 0x17) == 0x10:\t#ACK (RST, SYN, and FIN off)\n\t\t\t\t#FIXME - check for UnhandledPacket placement in ACK\n\t\t\t\tFromPort = sIP + \",TCP_\" + sport\n\t\t\t\tToPort = dIP + \",TCP_\" + dport\n\t\t\t\tPayload = str(p['Raw.load'])\t\t\t#For some reason this doesn't handle p['Raw'].load\n\t\t\t\tif ( (LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True) and (LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\n\t\t\t\t\tprint \"Logic failure: both \" + FromPort + \" and \" + ToPort + \" are listed as live services.\"\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\telif ((LiveTCPService.has_key(FromPort)) and (LiveTCPService[FromPort] == True)):\t#If the \"From\" side is a known TCP server:\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort) ):\t\t#Check nmap fingerprint strings for this server port\n\t\t\t\t\t\tif (ServiceFPs.has_key(int(sport))):\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs[int(sport)]:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\t#Debugging:\n\t\t\t\t\t\t\t\t\t#FIXME - removeme once understood:\n\t\t\t\t\t\t\t\t\t#File \"/home/wstearns/med/programming/python/passer/passer.py\", line 504, in processpacket\n\t\t\t\t\t\t\t\t\t#OutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\t#TypeError: expected a character buffer object\n\t\t\t\t\t\t\t\t\tif (OneTuple[1] == None):\n\t\t\t\t\t\t\t\t\t\tDebug(\"Null description for \" + OneTuple[0])\n\t\t\t\t\t\t\t\t\t\t#quit()\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\t#Example: Replace \"$1\" with MatchObj.group(1)\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), str(MatchObj.group(Index)))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\t\t\t\t\t#Exit for loop, no need to check any more fingerprints now that we've found a match\n\n\t\t\t\t\tif (not NmapServerDescription.has_key(FromPort)):\t\t#If the above loop didn't find a server description\n\t\t\t\t\t\tif (ServiceFPs.has_key('all')):\t\t\t\t#Now recheck 
against regexes not associated with a specific port (port 'all').\n\t\t\t\t\t\t\tfor OneTuple in ServiceFPs['all']:\n\t\t\t\t\t\t\t\tMatchObj = OneTuple[0].search(Payload)\n\t\t\t\t\t\t\t\tif (MatchObj != None):\n\t\t\t\t\t\t\t\t\tOutputDescription = OneTuple[1]\n\t\t\t\t\t\t\t\t\tif len(MatchObj.groups()) >= 1:\n\t\t\t\t\t\t\t\t\t\t#We have subexpressions matched, these need to be inserted into the description string\n\t\t\t\t\t\t\t\t\t\tfor Index in range(1,len(MatchObj.groups())+1):\n\t\t\t\t\t\t\t\t\t\t\tOutputDescription = OutputDescription.replace('$' + str(Index), MatchObj.group(Index))\n\t\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", OutputDescription)\n\t\t\t\t\t\t\t\t\tNmapServerDescription[sIP + \",TCP_\" + sport] = OutputDescription\n\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tif (not ManualServerDescription.has_key(FromPort) ):\n\t\t\t\t\t\tif (sport == \"22\") and (Payload != None) and (Payload.find('SSH-') > -1):\n\t\t\t\t\t\t\tif ( (Payload.find('SSH-1.99-OpenSSH_') > -1) or (Payload.find('SSH-2.0-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/openssh\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/openssh\"\n\t\t\t\t\t\t\telif (Payload.find('SSH-1.5-') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"ssh/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"ssh/generic\"\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' ESMTP Sendmail ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/sendmail\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/sendmail\"\n\t\t\t\t\t\telif (sport == \"25\") and (Payload != None) and (Payload.find(' - Welcome to our SMTP server ESMTP') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"smtp/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"smtp/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t#Check for port 80 and search for \"Server: \" once\n\t\t\t\t\t\telif (sport == \"80\") and (Payload != None) and (Payload.find('Server: ') > -1):\n\t\t\t\t\t\t\tif (Payload.find('Server: Apache') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/apache\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/apache\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Embedded HTTP Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/embedded\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/embedded\"\n\t\t\t\t\t\t\telif (Payload.find('Server: gws') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/gws\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/gws\"\n\t\t\t\t\t\t\telif (Payload.find('Server: KFWebServer') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/kfwebserver\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/kfwebserver\"\n\t\t\t\t\t\t\telif (Payload.find('Server: micro_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + 
sport, \"listening\", \"http/micro-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/micro-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Microsoft-IIS') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/iis\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/iis\"\n\t\t\t\t\t\t\telif (Payload.find('Server: lighttpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/lighttpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/lighttpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: MIIxpc') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mirrorimage\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mirrorimage\"\n\t\t\t\t\t\t\telif (Payload.find('Server: mini_httpd') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/mini-httpd\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/mini-httpd\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nc -l -p 80') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nc\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nc\"\n\t\t\t\t\t\t\telif (Payload.find('Server: nginx/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nginx\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nginx\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Nucleus') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/nucleus\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/nucleus\"\n\t\t\t\t\t\t\telif (Payload.find('Server: RomPager') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/rompager\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/rompager\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Server') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/server\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/server\"\n\t\t\t\t\t\t\telif (Payload.find('Server: Sun-ONE-Web-Server/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/sun-one\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/sun-one\"\n\t\t\t\t\t\t\telif (Payload.find('Server: TrustRank Frontend') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/trustrank\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/trustrank\"\n\t\t\t\t\t\t\telif (Payload.find('Server: YTS/') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/yahoo\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/yahoo\"\n\t\t\t\t\t\t\telif (Payload.find('HTTP/1.0 404 Not Found') > -1) or (Payload.find('HTTP/1.1 200 OK') > -1):\n\t\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"http/generic\")\n\t\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"http/generic\"\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"110\") and (Payload != None) 
and (Payload.find('POP3 Server Ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"pop3/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"pop3/generic\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find('* OK dovecot ready') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/dovecot\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/dovecot\"\n\t\t\t\t\t\telif (sport == \"143\") and (Payload != None) and (Payload.find(' IMAP4rev1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"imap/generic\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"imap/generic\"\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\t\t\telif (sport == \"783\") and (Payload != None) and (Payload.find('SPAMD/1.1 ') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"spamd/spamd\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"spamd/spamd\"\n\t\t\t\t\t\telif ( (sport == \"3128\") or (sport == \"80\") ) and (Payload != None) and (Payload.find('Via: ') > -1) and (Payload.find(' (squid/') > -1):\n\t\t\t\t\t\t\tReportId(\"TS\", sIP, \"TCP_\" + sport, \"listening\", \"proxy/squid\")\n\t\t\t\t\t\t\tManualServerDescription[sIP + \",TCP_\" + sport] = \"proxy/squid\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ServerPayloadDir, FromPort, Payload)\n\t\t\t\telif ((LiveTCPService.has_key(ToPort)) and (LiveTCPService[ToPort] == True)):\t\t#If the \"To\" side is a known TCP server:\n\t\t\t\t\tClientKey = sIP + \",TCP_\" + dport\t#Note: CLIENT ip and SERVER port\n\t\t\t\t\tif (not ClientDescription.has_key(ClientKey)):\n\t\t\t\t\t\tif (dport == \"22\") and (Payload != None) and ( (Payload.find('SSH-2.0-OpenSSH_') > -1) or (Payload.find('SSH-1.5-OpenSSH_') > -1) ):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"ssh/openssh\")\n\t\t\t\t\t\t#As cute as it is to catch this, it miscatches any relay that's carrying a pine-generated mail.\n\t\t\t\t\t\t#elif (dport == \"25\") and (Payload != None) and (Payload.find('Message-ID: <Pine.') > -1):\n\t\t\t\t\t\t#\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"smtp/pine\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: libwww-perl/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/libwww-perl\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Lynx') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/lynx\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Mozilla') > -1) and (Payload.find(' Firefox/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/firefox\")\n\t\t\t\t\t\telif ( (dport == \"80\") or (dport == \"3128\") ) and (Payload != None) and (Payload.find('User-Agent: Wget/') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"http/wget\")\n\t\t\t\t\t\telif (dport == \"143\") and (Payload != None) and (Payload.find('A0001 CAPABILITY') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"imap/generic\")\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\t\t\telif (dport 
== \"783\") and (Payload != None) and (Payload.find('PROCESS SPAMC') > -1):\n\t\t\t\t\t\t\tReportId(\"TC\", sIP, \"TCP_\" + dport, \"open\", \"spamd/spamc\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t\t\t#LogNewPayload(ClientPayloadDir, ClientKey, Payload)\n\t\t\t\telse:\t#Neither port pair is known as a server\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\t\t\t#Following is debugging at best; it should only show up early on as the sniffer listens to conversations for which it didn't hear the SYN/ACK\n\t\t\t\t\t#print \"note: neither \" + FromPort + \" nor \" + ToPort + \" is listed as a live service.\"\n\t\t\telse:\t#Other TCP flag combinations here\n\t\t\t\tUnhandledPacket(p)\n\t\telif p['IP'].proto == 17 and (type(p['UDP']) == UDP):\t\t#UDP. We have to check the object type as well as we do get (corrupted? truncated?) packets with type 17 that aren't udp: AttributeError: 'NoneType' object has no attribute 'sport'\n\t\t\t#FIXME - possibly run udp packets through ServiceFPs as well?\n\t\t\tsport=str(p['UDP'].sport)\n\t\t\tdport=str(p['UDP'].dport)\n\t\t\tSrcService = sIP + \",UDP_\" + sport\n\t\t\tDstService = dIP + \",UDP_\" + dport\n\t\t\tSrcClient = sIP + \",UDP_\" + dport\n\t\t\tPayload = p['Raw.load']\n\n\t\t\t#Multicast DNS: http://files.multicastdns.org/draft-cheshire-dnsext-multicastdns.txt\n\t\t\t#- usually sent to 224.0.0.251 (or FF02::FB) (link-local multicast).\n\t\t\t#\t- if \".local.\" in query, these MUST be the target IPs\n\t\t\t#\t- non-local queries may be sent to these or normal dns servers\n\t\t\t#\t- rdns queries for \"254.169.in-addr.arpa.\" MUST be sent to 224.0.0.251\n\t\t\t#\t- rdns queries for \"8.e.f.ip6.arpa.\", \"9.e.f.ip6.arpa.\",\"a.e.f.ip6.arpa.\", and \"b.e.f.ip6.arpa.\" MUST be sent to the IPv6 mDNS link-local multicast address FF02::FB.\n\t\t\t#- sent to udp port 5353\n\t\t\t#- generic clients may use \"single-dns-object.local.\", such as \"sparrow.local.\"\n\t\t\t#- responses have IP TTL = 255 to check that packet originated on-lan\n\n\t\t\t#Multicast DNS, placed next to normal dns, out of numerical order\n\t\t\tif (dport == \"5353\") and ( (p['IP'].ttl == 1) or (p['IP'].ttl == 255) ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcClient)) or (LiveUDPService[SrcClient] == False)):\n\t\t\t\t\tLiveUDPService[SrcClient] = True\n\t\t\t\t\tif (dIP == \"224.0.0.251\"):\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/broadcastclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mdns/client\")\n\n\t\t\t\t\t#Extract dns answers like with 53; change elif to if and add 5353 to ports on next if?\n\t\t\t\t\t#At the moment, no; scapy does not appear to parse 5353 as dns.\n\t\t\t\t\t#else:\n\t\t\t\t\t#\tUnhandledPacket(p)\n\t\t\t#FIXME - add check for \"if isinstance(p['DNS'], whatevertype):\there and at all p[] accesses.\n\t\t\telif (sport == \"53\") and (isinstance(p['DNS'], DNS)) and (p['DNS'].qr == 1):\t\t#qr == 1 is a response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t#FIXME - Also report the TLD from one of the query answers to show what it's willing to answer for?\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"dns/server\")\n\t\t\t\t#Now we extract dns answers. 
First, check that there's no dns error:\n\t\t\t\tif (p['DNS'].rcode == 0):\t\t\t#No error\n\t\t\t\t\tDNSBlocks = [ ]\n\t\t\t\t\tCNAMERecs = [ ]\t\t\t\t#We hold onto all cnames until we've processed all PTR's and A's here\n\t\t\t\t\tif (p['DNS'].ancount > 0):\t\t#If we have at least one answer from the answer block, process it\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].an)\n\t\t\t\t\tif (p['DNS'].arcount > 0):\t\t#Likewise for the \"additional\" block\n\t\t\t\t\t\tDNSBlocks.append(p[DNS].ar)\n\t\t\t\t\tfor OneAn in DNSBlocks:\n\t\t\t\t\t\t#Thanks to Philippe Biondi for showing me how to extract additional records.\n\t\t\t\t\t\t#Debug(\"Start dns extract\" + str(p['DNS'].ancount))\n\t\t\t\t\t\t#OneAn = p[DNS].an\n\t\t\t\t\t\t#while OneAn is not NoPayload:\t\t#This doesn't seem to stop at the end of the list; incorrect syntax.\n\t\t\t\t\t\twhile isinstance(OneAn,DNSRR):\t\t#Somewhat equivalent:\twhile not isinstance(an, NoPayload):\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t#print \"Type: \" + str(type(OneAn))\t\t#All of type scapy.DNSRR\n\t\t\t\t\t\t\tif (OneAn.rclass == 1) and (OneAn.type == 1):\t\t#\"IN\" class and \"A\" type answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\t#Check new hostname to see if it's in the list.\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",A\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",A\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"A\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 2):\t\t\t#\"IN\" class and \"NS\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Perhaps later\n\t\t\t\t\t\t\t\t#Like cnames, this is object -> nameserver hostname, so these would need to be queued like cnames until we're done with A's and PTR's.\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 5):\t\t\t#\"IN\" class and \"CNAME\" answer\n\t\t\t\t\t\t\t\tCNAMERecs.append(OneAn)\t\t\t\t\t#Remember the record; we'll process these after the PTR's and A's\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 6):\t\t\t#\"IN\" class and \"SOA\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Not immediately useful, perhaps later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 12):\t\t#\"IN\" class and \"PTR\" type answer\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#For input of '182.111.59.66.in-addr.arpa.' 
:\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rrname.replace(\".in-addr.arpa.\", \"\")\t\t# '182.111.59.66'\n\t\t\t\t\t\t\t\tDNSIPAddr = DNSIPAddr.split('.')\t\t\t\t# ['182', '111', '59', '66']\n\t\t\t\t\t\t\t\tDNSIPAddr.reverse()\t\t\t\t\t\t# ['66', '59', '111', '182']\n\t\t\t\t\t\t\t\tDNSIPAddr = string.join(DNSIPAddr, '.')\t\t\t\t# '66.59.111.182'\n\t\t\t\t\t\t\t\t#Check that we end up with a legal IPv4 address before continuing; we're getting garbage.\n\t\t\t\t\t\t\t\tif (re.search('^[1-9][0-9\\.]*[0-9]$', DNSIPAddr) == None):\n\t\t\t\t\t\t\t\t\tDebug(\"Odd PTR rrname: \" + OneAn.rrname)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tDNSHostname = OneAn.rdata.lower()\n\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",PTR\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",PTR\"])):\n\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"PTR\", DNSHostname, \"\")\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 15):\t\t#\"IN\" class and \"MX\" answer\n\t\t\t\t\t\t\t\tpass\t\t\t\t\t\t\t#Possibly later\n\t\t\t\t\t\t\telif (OneAn.rclass == 1) and (OneAn.type == 28):\t\t#\"IN\" class and \"AAAA\" answer\n\t\t\t\t\t\t\t\tDNSIPAddr = OneAn.rdata.upper()\n\t\t\t\t\t\t\t\tDNSHostname = OneAn.rrname.lower()\n\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(DNSIPAddr + \",AAAA\")) or (not(DNSHostname in DNSRecord[DNSIPAddr + \",AAAA\"])):\n\t\t\t\t\t\t\t\t\tReportId(\"DN\", DNSIPAddr, \"AAAA\", DNSHostname, \"\")\n\n\t\t\t\t\t\t\t#Move to the next DNS object in the \"an\" block\n\t\t\t\t\t\t\tOneAn = OneAn.payload\n\t\t\t\t\tfor OneCNAME in CNAMERecs:\t\t#Now that we have all A/PTR's, go back and turn cname records into pseudo-A's\n\t\t\t\t\t\tif isinstance(OneCNAME,DNSRR):\n\t\t\t\t\t\t\tAlias = OneCNAME.rrname.lower()\n\t\t\t\t\t\t\tExisting = OneCNAME.rdata.lower()\n\t\t\t\t\t\t\tif isFQDN(Alias) and isFQDN(Existing):\n\t\t\t\t\t\t\t\tif HostIPs.has_key(Existing):\n\t\t\t\t\t\t\t\t\tfor OneIP in HostIPs[Existing]:\t\t\t\t#Loop through each of the IPs for the canonical name, and\n\t\t\t\t\t\t\t\t\t\tif (not DNSRecord.has_key(OneIP + \",CNAME\")) or (not(Alias in DNSRecord[OneIP + \",CNAME\"])):\n\t\t\t\t\t\t\t\t\t\t\tReportId(\"DN\", OneIP, \"CNAME\", Alias, \"\")\t#report them as kind-of A records for the Alias.\n\t\t\t\t\t\t\t\t#If we don't have a A/PTR record for \"Existing\", just ignore it. 
Hopefully we'll get the Existing A/PTR in the next few answers, and will re-ask for the CNAME later, at which point we'll get a full cname record.\n\t\t\t\t\t\t\t\t#else:\n\t\t\t\t\t\t\t\t#\tDebug(\"CNAME \" + Alias + \" -> \" + Existing + \" requested, but no IP's for the latter, skipping.\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tDebug(\"One of \" + Alias + \" and \" + Existing + \" isn't an FQDN, skipping cname processing.\")\n\t\t\t\telif (p['DNS'].rcode == 1):\t\t\t#FormErr: server responding to an improperly formatted request\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 2):\t\t\t#ServFail: domain exists, root nameservers list authoritative name servers, but authNS's won't answer queries\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 3):\t\t\t#NXDOMAIN: root nameservers don't have any listing (domain doesn't exist or is on hold)\n\t\t\t\t\tpass\n\t\t\t\telif (p['DNS'].rcode == 5):\t\t\t#Query refused\n\t\t\t\t\tpass\n\t\t\t\telse:\t#rcode indicates an error\n\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"53\") and (type(p['DNS']) == DNS) and (p['DNS'].qr == 0):\t#dns query\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"dns/client\")\n\t\t\telif (sport == \"67\") and (dport == \"68\"):\t\t#Bootp/dhcp server talking to client\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"bootpordhcp/server\")\n\t\t\telif (sport == \"68\") and (dport == \"67\"):\t\t#Bootp/dhcp client talking to server\n\t\t\t\tif (sIP != \"0.0.0.0\"):\t\t\t\t#If the client is simply renewing an IP, remember it.\n\t\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"bootpordhcp/client\")\n\t\t\t\t#else:\t\t\t\t\t\t#If you want to record which macs are asking for addresses, do it here.\n\t\t\t\t#\tpass\n\t\t\telif (sport == \"123\") and (dport == \"123\") and (p['NTP'].stratum != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/generic\")\n\t\t\telif (dport == \"123\") and ( (dIP == \"216.115.23.75\") or (dIP == \"216.115.23.76\") or (dIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ntp/vonageclient\")\n\t\t\telif (sport == \"123\") and ( (sIP == \"216.115.23.75\") or (sIP == \"216.115.23.76\") or (sIP == \"69.59.240.75\") ):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"ntp/vonageserver\")\n\t\t\telif (dport == \"137\"):\t\t\t#netbios-ns\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (p['Ethernet'].dst.upper() == \"FF:FF:FF:FF:FF:FF\"):\t\t\t#broadcast\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/broadcastclient\")\n\t\t\t\t\telif 
(Payload != None) and (Payload.find('CKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA') > -1):\t#wildcard\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/wildcardclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"netbios-ns/unicastclient\")\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"500\") and (dport == \"500\") and (p['ISAKMP'].init_cookie != ''):\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"isakmp/generic\")\n\t\t\telif (dport == \"512\"):\t\t\t#BIFF\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('@') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"biff/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (dport == \"1026\") or (dport == \"1027\") or (dport == \"1028\") ):\t#winpopup spam client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and ( (Payload.find('Download Registry Update from:') > -1) or (Payload.find('CRITICAL ERROR MESSAGE! - REGISTRY DAMAGED AND CORRUPTED.') > -1) or (Payload.find('Your system registry is corrupted and needs to be cleaned immediately.') > -1) or (Payload.find('CRITICAL SYSTEM ERRORS') > -1) ):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"winpopup/spamclient\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"1434\"):\t\t#Probable mssql attack\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Qh.dll') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"mssql/clientattack\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"1900\") and (dport == \"1900\") and (dIP == \"239.255.255.250\"):\t\t#SSDP\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('NOTIFY') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"ssdp/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"3865\") and (dIP == \"255.255.255.255\"):\t\t#XPL, http://wiki.xplproject.org.uk/index.php/Main_Page\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"xpl/client\")\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (dIP == \"216.115.30.28\") or (dIP == \"69.59.227.77\") or (dIP == \"69.59.232.33\") or (dIP == \"69.59.240.84\") ):\t\t#Vonage SIP client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061 SIP/2.0') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tSipMatch = SipPhoneMatch.search(Payload)\n\t\t\t\t\t\tif (SipMatch != None) and (len(SipMatch.groups()) >= 
1):\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client, phone number: \" + SipMatch.group(1))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"sip/vonage_client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"5061\") and (dport == \"5061\") and ( (sIP == \"216.115.30.28\") or (sIP == \"69.59.227.77\") or (sIP == \"69.59.232.33\") or (sIP == \"69.59.240.84\") ):\t#Vonage SIP server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('.vonage.net:5061>') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"sip/vonage_server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"6515\") and (dport == \"6514\") and (dIP == \"255.255.255.255\"):\t\t#mcafee ASaP broadcast, looking for a proxy out. http://www.myasap.de/intl/EN/content/virusscan_asap/faq_new.asp\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('<rumor version=') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"asap/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"9052\") or (sport == \"9053\") or (sport == \"9054\") ) and ( (sIP == \"205.188.146.72\") or (sIP == \"205.188.157.241\") or (sIP == \"205.188.157.242\") or (sIP == \"205.188.157.243\") or (sIP == \"205.188.157.244\") or (sIP == \"64.12.51.145\") or (sIP == \"64.12.51.148\") or (sIP == \"149.174.54.131\") ):\t#Possibly AOL dns response\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('dns-01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"aoldns/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27005\") and ( (dport == \"27016\") or (dport == \"27017\") ):\t\t\t\t#Halflife client live game\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27013\") and (dIP == \"207.173.177.12\"):\t\t\t\t#variable payload, so can't (Payload != None) and (Payload.find('Steam.exe') > -1)\t\t\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (sport == \"27013\") and (sIP == \"207.173.177.12\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (sport == \"27016\") or (sport == \"27017\") ) and (dport == \"27005\"):\t\t\t\t#halflife server live game\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"27015\") or (dport == 
\"27016\") or (dport == \"27025\") or (dport == \"27026\") ):\t\t#Variable payload, so can't: (Payload != None) and (Payload.find('basic') > -1)\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\telif (dport == \"27017\") and ( (dIP == \"69.28.148.250\") or (dIP == \"69.28.156.250\") or (dIP == \"72.165.61.161\") or (dIP == \"72.165.61.185\") or (dIP == \"72.165.61.186\") or (dIP == \"72.165.61.188\") or (dIP == \"68.142.64.164\") or (dIP == \"68.142.64.165\") or (dIP == \"68.142.64.166\") ):\t#Steamfriends client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"steamfriends/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27017\") and ( (sIP == \"69.28.148.250\") or (sIP == \"69.28.156.250\") or (sIP == \"72.165.61.161\") or (sIP == \"72.165.61.185\") or (sIP == \"72.165.61.186\") or (sIP == \"72.165.61.188\") or (sIP == \"68.142.64.164\") or (sIP == \"68.142.64.165\") or (sIP == \"68.142.64.166\") ):\t#Steamfriends server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('VS01') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"steamfriends/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif ( (sport == \"21020\") or (sport == \"21250\") or (sport == \"27016\") or (sport == \"27017\") or (sport == \"27018\") or (sport == \"27030\") or (sport == \"27035\") or (sport == \"27040\") or (sport == \"28015\") ):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Team Fortress') > -1):\n\t\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sport == \"27019\"):\t\t\t\t\t\t\t#halflife server\n\t\t\t\tif ((not LiveUDPService.has_key(SrcService)) or (LiveUDPService[SrcService] == False)):\n\t\t\t\t\tLiveUDPService[SrcService] = True\n\t\t\t\t\tReportId(\"US\", sIP, \"UDP_\" + sport, \"open\", \"halflife/server\")\n\t\t\telif ( (dport == \"1265\") or (dport == \"20100\") or (dport == \"21550\") or (dport == \"27000\") or (dport == \"27017\") or (dport == \"27018\") or (dport == \"27019\") or (dport == \"27022\") or (dport == \"27030\") or (dport == \"27035\") or (dport == \"27050\") or (dport == \"27078\") or (dport == \"27080\") or (dport == \"28015\") or (dport == \"28100\") or (dport == \"45081\") ):\t\t#Halflife client\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('Source Engine Query') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (dport == \"24441\"):\t\t\t#Pyzor\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif 
(Payload != None) and (Payload.find('User:') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"pyzor/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\t#FIXME - interesting issue; the ttl<5 test will catch traceroutes coming into us, but not ones we're creating to go out. Hmmm.\n\t\t\telif ( (dport >= \"33434\") and (dport <= \"33524\") ) and (p['IP'].ttl <= 5):\t#udptraceroute client\n\t\t\t\tif ((not LiveUDPClient.has_key(sIP + \"UDP_33434\")) or (LiveUDPClient[sIP + \"UDP_33434\"] == False)):\n\t\t\t\t\tLiveUDPClient[sIP + \"UDP_33434\"] = True\n\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_33434\", \"open\", \"udptraceroute/client\")\n\t\t\telif (dport == \"40348\"):\n\t\t\t\tif ((not LiveUDPClient.has_key(SrcClient)) or (LiveUDPClient[SrcClient] == False)):\n\t\t\t\t\tif (Payload != None) and (Payload.find('HLS') > -1):\n\t\t\t\t\t\tLiveUDPClient[SrcClient] = True\n\t\t\t\t\t\tReportId(\"UC\", sIP, \"UDP_\" + dport, \"open\", \"halflife/client\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tUnhandledPacket(p)\n\t\t\telif (p['IP'].frag > 0):\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"207.46.51.74\") or (sIP == \"65.55.251.10\"):\t\t\t\t#Bigfish.com - dns?\n\t\t\t\tUnhandledPacket(p)\n\t\t\telif (sIP == \"61.215.106.146\"):\t\t\t\t#junk\n\t\t\t\tUnhandledPacket(p)\n\t\t\telse:\n\t\t\t\tUnhandledPacket(p)\n\t\telse:\n\t\t\tDebug(\"Other IP protocol (\" + str(p['IP'].src) + \"->\" + str(p['IP'].dst) + \"): \" + str(p['IP'].proto))\n\t\t\tUnhandledPacket(p)\n\telif p['Ethernet'].type == 0x86DD:\t\t#IPv6\n\t\tUnhandledPacket(p)\n\telse:\n\t\tprint \"Unregistered ethernet type:\", p['Ethernet'].type\n\t\tUnhandledPacket(p)", "def parse_input(self, input_string):\n ip_addresses = {}\n\n tree = ET.parse(StringIO(input_string))\n root = tree.getroot()\n\n for e1 in root:\n if e1.tag == \"host\":\n host = e1\n\n ports = None\n address = None\n\n for e2 in host:\n if e2.tag == \"ports\":\n ports = e2\n if e2.tag == \"address\" and e2.attrib['addrtype'] != \"mac\":\n address = e2\n\n if ports is None:\n continue\n\n ip_address = address.attrib['addr']\n if ip_address not in ip_addresses:\n ip_addresses[ip_address] = []\n\n for port in ports:\n\n if port.tag != \"port\":\n continue\n\n port_protocol = port.attrib[\"protocol\"]\n port_number = str(port.attrib['portid'])\n port_state = port.find(\"state\").attrib['state']\n\n # lets only pay attention to open ports\n if port_state in [\"open\"]:\n s = dumps([port_protocol, port_number, port_state])\n\n ip_addresses[ip_address].append(s)\n\n service = port.find(\"service\")\n\n if \"name\" in service.attrib:\n s = dumps([port_protocol, port_number, port_state, service.attrib[\"name\"]])\n ip_addresses[ip_address].append(s)\n\n #s = dumps([port_protocol, port_state, service.attrib[\"name\"]])\n #ip_addresses[ip_address].append(s)\n\n s_list = [port_protocol, port_number, port_state, service.attrib[\"name\"]]\n #s_no_port_list = [port_protocol, port_state, service.attrib[\"name\"]]\n\n for sid in [\"product\", \"version\", \"extrainfo\", \"servicefp\"]:\n if sid in service.attrib:\n s_list.append(service.attrib[sid])\n s = dumps(s_list)\n ip_addresses[ip_address].append(s)\n\n #s_no_port_list.append(service.attrib[sid])\n #s = dumps(s_no_port_list)\n #ip_addresses[ip_address].append(s)\n\n for script_element in port:\n if script_element.tag != \"script\":\n continue\n # todo parse script tag from xml\n script_id = script_element.attrib[\"id\"]\n\n for table in script_element:\n if 
table.tag == \"table\":\n for elem in table:\n key = \"\"\n if \"key\" in elem.attrib:\n key = elem.attrib[\"key\"]\n\n if elem.text is not None:\n s = dumps([port_protocol, port_number, port_state, service.attrib[\"name\"],\n script_id, key, elem.text])\n else:\n s = dumps(\n [port_protocol, port_number, port_state, service.attrib[\"name\"],\n script_id, key])\n ip_addresses[ip_address].append(s)\n\n if table.tag == \"elem\":\n elem = table\n key = \"\"\n if \"key\" in elem.attrib:\n key = elem.attrib[\"key\"]\n\n if elem.text is not None:\n s = dumps([port_protocol, port_number, port_state, service.attrib[\"name\"],\n script_id, key, elem.text])\n else:\n s = dumps(\n [port_protocol, port_number, port_state, service.attrib[\"name\"],\n script_id, key])\n ip_addresses[ip_address].append(s)\n\n print \"no of IP's taken from NMAP: \" + str(len(ip_addresses.viewkeys()))\n return ip_addresses", "def _parse_message(self, data):\n try:\n _, values = data.split(':')\n self.serial_number, self.value = values.split(',')\n self.value = int(self.value, 16)\n\n is_bit_set = lambda b: self.value & (1 << (b - 1)) > 0\n\n # Bit 1 = unknown\n self.battery = is_bit_set(2)\n self.supervision = is_bit_set(3)\n # Bit 4 = unknown\n self.loop[2] = is_bit_set(5)\n self.loop[1] = is_bit_set(6)\n self.loop[3] = is_bit_set(7)\n self.loop[0] = is_bit_set(8)\n\n except ValueError:\n raise InvalidMessageError('Received invalid message: {0}'.format(data))", "def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", 
resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)", "def process_net_message(message, address):\n if message[0] == '<' and message[-1] == '>':\n message = message[1:-1]\n if \":\" in message:\n command, data = message.split(\":\")\n else:\n command = message\n data = None\n\n if command == \"JOIN\":\n print(\"added player to player list:\", data, address)\n ip_address, port = address\n active_player_dict[str(address)] = Player(ip_address, port, data, random.randint(0, 639),\n random.randint(0, 479))\n elif command == \"QUIT\":\n print(\"player removed from player list:\", address)\n del active_player_dict[str(address)]\n elif command == \"KD\":\n data = chr(int(data))\n if data not in active_player_dict[str(address)].keys_down:\n active_player_dict[str(address)].keys_down.append(data)\n elif command == \"KU\":\n data = chr(int(data))\n if data in active_player_dict[str(address)].keys_down:\n active_player_dict[str(address)].keys_down.remove(data)\n elif command == \"keepAlive\":\n data = int(data)\n if active_player_dict[str(address)].alive > 0: #time for player to be alive is not zero\n active_player_dict[str(address)].alive = data\n currentTime = time.time()\n else:\n print(\"invalid message.\")", "async def reverse_lookup(resolver, ip):\n result = (ip, \"\")\n allowed_chars = \"abcdefghijklmnopqrstuvwxyz0123456789-.\"\n log.info(\"Requesting PTR record for %s.\", ip)\n try:\n resp = await resolver.gethostbyaddr(ip)\n # Make sure records comply to NetBox and DNS expected format\n if all([bool(c.lower() in allowed_chars) for c in resp.name]):\n result = (ip, resp.name.lower())\n log.debug(\"PTR record for %s is '%s'.\", ip, result[1])\n else:\n log.debug(\n \"Invalid characters detected in PTR record '%s'. 
Nulling.\",\n resp.name\n )\n except aiodns.error.DNSError as err:\n log.info(\"Unable to find record for %s: %s\", ip, err.args[1])\n return result", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def _parse_udp_packet(self, packet_bytes):\n opcode = packet_bytes[:2]\n if opcode == 5:\n reply = self.error_messages[int.from_bytes(packet_bytes[2:4], 'big')]\n print(reply)\n elif opcode == 4:\n reply = \"ACK\"\n else:\n reply = \"UNK\"\n return reply", "def parse_message(data):\r\n\tlist_data = data.split(\"|\")\r\n\tcmd = list_data[0]\r\n\tif len(list_data) != 3 or len(cmd) != CMD_FIELD_LENGTH:\r\n\t\treturn None, None\r\n\tdata_len = list_data[1].replace(\" \", \"\")\r\n\tif len(data_len) != LENGTH_FIELD_LENGTH or not data_len.isdigit():\r\n\t\treturn None, None\r\n\tmsg = list_data[2]\r\n\tdata_len = int(data_len)\r\n\tif len(msg) != data_len \\\r\n\t\tor not 0 <= data_len <= 9999:\r\n\t\treturn None, None\r\n\tcmd = cmd.replace(\" \", \"\") # remove spaces\r\n\t# The function should return 2 values\r\n\treturn cmd, msg", "def parse_dhm_request(msg: str) -> int:\n return int(msg.split(':')[1])", "def _parse_data(self, queue_msg):\r\n try:\r\n result = json.loads(queue_msg)\r\n except (TypeError, ValueError):\r\n log.error(\"External message should be a JSON serialized dict.\"\r\n \" Received queue_msg = %s\", queue_msg)\r\n raise\r\n msg = result['msg']\r\n return msg", "def fromData(self, data):\n\n self.reset()\n request = \"\"\n version = None\n args = {}\n\n # Parse raw data to construct message (strip empty lines)\n lines = [line.strip() for line in data.splitlines() if line.strip() != \"\"]\n # If message is empty, return false\n if not lines:\n return False\n # Parse request line\n requestLinePattern = re.compile(r'^\\s*(\\w+)\\s+SOLIPSIS/(\\d+\\.\\d+)\\s*$')\n requestLineMatch = requestLinePattern.match(lines[0])\n if requestLineMatch is None:\n raise EventParsingError(\"Invalid request syntax: \" + lines[0])\n\n # Request is first word of the first line (e.g. NEAREST, or BEST ...)\n request = requestLineMatch.group(1).upper()\n # Extract protocol version\n version = float(requestLineMatch.group(2))\n\n # Basic sanity check\n if version > VERSION:\n raise EventParsingError(\"Unexpected protocol version: %s\" % str(version))\n elif version < VERSION:\n self.logger.info(\"Received message from older protocol version: %s\" % str(version))\n if not REQUESTS.has_key(request):\n raise EventParsingError(\"Unknown request: \" + request)\n\n # Get args for this request\n argList = REQUESTS[request]\n\n # Now let's parse each parameter line in turn\n argPattern = re.compile(r'^\\s*([-\\w]+)\\s*:\\s*(.*?)\\s*$')\n for line in lines[1:]:\n argMatch = argPattern.match(line)\n if argMatch is None:\n raise EventParsingError(\"Invalid message syntax:\\r\\n\" + data)\n\n # Get arg name and arg value\n argName = argMatch.group(1)\n argVal = argMatch.group(2)\n\n # Log optional\n if argName not in argList:\n self.logger.debug(\"Optional argument '%s' in message '%s'\" % (argName, request))\n\n # Each arg has its own syntax-checking regex\n # (e.g. 
for a calibre we expect a 3-digit number)\n if ARGS_SYNTAX.has_key(argName):\n argSyntax = re.compile('^' + ARGS_SYNTAX[argName] + '$')\n else:\n raise EventParsingError(\"Unknown arg '%s'\" % (argName))\n if not argSyntax.match(argVal):\n raise EventParsingError(\"Invalid arg syntax for '%s': '%s'\" % (argName, argVal))\n\n # The syntax is correct => add this arg to the arg list\n if args.has_key(argName):\n raise EventParsingError(\"Duplicate value for arg '%s'\" % argName)\n args[argName] = ARGS_CONSTRUCTOR[argName](argVal)\n\n # Check that all required fields have been encountered\n for argName in argList:\n if not args.has_key(argName):\n raise EventParsingError(\"Missing argument '%s' in message '%s'\" % (argName, request))\n\n # Everything's ok\n self.request = request\n self.args = args\n self.data = data\n return True", "def _parse_message(msg):\n lines, body = _split_lines(msg)\n # The first line is the start line.\n start_line = lines[0]\n # Remaining lines are the header.\n header = _parse_header(lines[1 :])\n return start_line, header, body", "def parse_message(msg):\n idx = 8\n tag, nickLen = struct.unpack(\"<LL\", msg[:idx])\n if VERSION != (tag>>16):\n raise Exception(\"Wrong version\")\n sender_nickname = msg[idx:idx+nickLen]\n idx += nickLen\n \n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n sender_pubkey = msg[idx:idx+length]\n idx += length\n\n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n dest_pubkey = msg[idx:idx+length]\n idx += length\n\n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n nonce = msg[idx:idx+length]\n idx += length\n\n length = struct.unpack(\"<L\", msg[idx:idx+4])[0]\n idx += 4\n cipher = msg[idx:idx+length]\n idx += length\n \n return sender_nickname, sender_pubkey, dest_pubkey, nonce, cipher", "def _parse(self) -> bool:\n\n # First, check if this packet has a '!' at an offset position\n # This is allowed as per APRS 1.01 C5 P18\n if hasattr(self, '_offset'):\n # Packets with the '!' 
offset do not have a timestamp or messaging capabilities\n # Chop everything off the info field before the '!'\n self._info = self._info[self._offset:]\n\n elif self.data_type_id == '!':\n # Packet has no timestamp, station has no messaging capability\n self.timestamp = None\n self.messaging = False\n\n elif self.data_type_id == '/':\n # Packet has timestamp, station has no messaging capability\n self.messaging = False\n\n # Parse timestamp\n (self.timestamp, self.timestamp_type) = APRSUtils.decode_timestamp(self._info[0:8])\n\n elif self.data_type_id == '=':\n # Packet has no timestamp, station has messaging capability\n self.timestamp = None\n self.messaging = True\n\n elif self.data_type_id == '@':\n # Packet has timestamp, station has messaging capability\n self.messaging = True\n\n # Parse timestamp\n (self.timestamp, self.timestamp_type) = APRSUtils.decode_timestamp(self._info[0:8])\n\n else:\n # This isn't a position packet\n raise ParseError(\"Unknown position data type: {}\".format(self.data_type_id))\n\n if self.timestamp is None:\n data = self._info\n else:\n data = self._info[7:]\n\n # Check to see if the position data is compressed or uncompressed\n if re.match(r'[0-9\\s]{4}\\.[0-9\\s]{2}[NS].[0-9\\s]{5}\\.[0-9\\s]{2}[EW]', data):\n # Parse the uncompressed position values from the information field\n (self.latitude, self.longitude, self.ambiguity, self.symbol_table, self.symbol_id\n ) = self._parse_uncompressed_position(data)\n\n # Ensure compressed is set to False\n self.compressed = False\n\n if len(data) > 19:\n # This packet has additional data in the information field, so attempt to parse it\n (phg, radio_range, dfs, self.course, self.speed, self.altitude,\n comment) = self._parse_data(data[19:])\n\n if self.symbol_table == \"/\" and self.symbol_id == \"\\\\\":\n # If the symbol table is /, and the symbol ID is \\, it implies a DF report\n # 26th and 30th characters should be /\n logger.debug(\"Symbol table and symbol indicates a DF report\")\n\n if len(comment) < 8:\n # Packets with DF information must be at least 8 characters long\n raise ParseError(\"Missing DF values\")\n\n if comment[0] != \"/\" or comment[4] != \"/\":\n # Packets with DF information must also include the bearing and NRQ values\n # See APRS 1.01 C7 P30\n raise ParseError(\n \"Invalid DF values (character in position 0 and 4 should be '/'\"\n )\n\n # Extract the bearing\n self.bearing = int(comment[1:4])\n logger.debug(f\"DF bearing is {self.bearing} degrees\")\n\n # Decode the NRQ value\n (self.number, self.df_range, self.quality) = APRSUtils.decode_nrq(comment[5:8])\n\n # Strip the bearing/NRQ value from the comment\n self.comment = comment[8:]\n\n elif self.symbol_table in [\"/\", \"\\\\\"] and self.symbol_id == \"_\":\n # / or \\, and _ for the symbol table and symbol implies a weather report\n # TODO - Implementation\n logger.debug(\"Symbol table and symbol indicates a weather report\")\n\n elif phg:\n # Decode the power, height, gain and directivity values\n (self.power, self.height, self.gain, self.directivity) = \\\n APRSUtils.decode_phg(phg)\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n elif radio_range:\n # The radio range is specified as 4 digits, which denote the range in miles\n self.radio_range = int(radio_range)\n logger.debug(f\"Radio range is {radio_range} miles\")\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n elif dfs:\n # Decode the signal strength, height, gain and directivity values\n 
(self.strength, self.height, self.gain, self.directivity) = \\\n APRSUtils.decode_dfs(dfs)\n\n # The PHG value has already been stripped from the comment\n self.comment = comment\n\n else:\n # No additional data found\n self.comment = comment\n\n else:\n # Parse the compressed position values from the information field\n\n # Get the compressed position\n compressed_position = data[0:13]\n\n try:\n (self.latitude, self.longitude, self.altitude, self.course, self.speed,\n self.radio_range, self.compression_fix, self.compression_source,\n self.compression_origin) = self._parse_compressed_position(compressed_position)\n\n except Exception as e:\n # TODO Catch specific errors (for example, OverflowError)\n raise ParseError(\"Couldn't parse compressed position: {}\".format(e))\n\n # Ensure compressed is set to True\n self.compressed = True\n\n # Parse the symbol table and symbol ID\n self.symbol_table = data[0]\n self.symbol_id = data[9]\n\n # TODO - parse altitude information\n\n self.comment = data[13:]\n logger.debug(\"Comment is {}\".format(self.comment))\n\n # If we get this far, then we've parsed the packet\n return True", "def _parse_message(self, string, protocol):\n #print(\"Parsing message: %s\" % string)\n msg = parse_message_string(string)\n result = MessageResult(original_message=msg)\n\n if isinstance(msg, MethodCallMessage):\n # Handle method call\n res = self._method_call(msg)\n response_msg = ResponseMessage(result_code=0, result=res, response_to=msg.id)\n result.response = create_message_string(response_msg)\n elif isinstance(msg, SubscribeMessage):\n # Handle subscription to event\n response_msg = ResponseMessage(result_code=0, result=None, response_to=msg.id)\n result.response = create_message_string(response_msg)\n else:\n raise MessageHandleError(MessageHandleError.RESULT_UNEXPECTED_MESSAGE, msg)\n\n return result", "def _parse_msg(self, b):\n msg = None\n r = self.matcher.match(b)\n if r:\n address = int(r.group(1), 16)\n function = int(r.group(2), 16)\n # Convert data into bytes\n data = []\n for i in range(0, len(r.group(3)), 2):\n datum = int(r.group(3)[i:i+2], 16)\n data.append(datum)\n # Construct message\n msg = ModbusMessage(address, function, data, int(time.time() * 1000))\n # Verify LRC\n msg_lrc = int(r.group(5), 16)\n if msg_lrc != msg.compute_lrc():\n self.logger.warning('LRC mismatch, frame dropped.')\n msg = None\n return msg", "def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]", "def parse(self) -> None:\n self._parse_zone_files()\n self._process_rules()\n self._process_zones()\n self._process_links()", "def __parse(self):\n lines = self.data.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n if line[0] == '#':\n continue\n tokens = line.split(\"\\t\")\n time_str = tokens[self.timecol]\n if time_str.find('start:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n elif time_str.find('end:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n break\n else:\n duration = float(tokens[6])\n fms = int(tokens[2])\n hfms = int(tokens[3])\n svs = int(tokens[4])\n self.calls.append((fms, hfms, svs))\n self.durations.append(duration)\n time = 
datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.length = (self.times[len(self.times) - 1] -\\\n self.times[0]).seconds", "def testJunkAfterAll(self):\n rec = \"v=spf1 ip4:213.5.39.110 -all MS=83859DAEBD1978F9A7A67D3\"\n domain = \"avd.dk\"\n\n parsed_record = checkdmarc.parse_spf_record(rec, domain)\n self.assertEqual(len(parsed_record[\"warnings\"]), 1)", "def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number\r\n fp.close()\r\n dns_data = np.array(data) #transfer list to array\r\n self.dns_z = dns_data[:, 0] / 1000 #z-plus -> z/h\r\n self.dns_u = dns_data[:, 1] # u-plus\r\n self.dns_uw = dns_data[:, 2]\r\n self.dns_uu = dns_data[:, 3]\r\n self.dns_ww = dns_data[:, 4]\r\n self.dns_vv = dns_data[:, 5]\r\n self.dns_tau = dns_data[:, 7]\r\n self.dns_tot = dns_data[:, 8]", "def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)", "def extended_parse(self):\n\t\t## Do the initial parsing\n\t\tself.parse()\n\n\t\t## First, cycle through the hosts, and append hostgroup information\n\t\tindex = 0\n\t\tfor host in self.data['all_host']:\n\t\t\tif host.has_key('register') and host['register'] == '0': continue\n\t\t\tif not host.has_key('host_name'): continue\n\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t## Append any hostgroups that are directly listed in the host definition\n\t\t\tif host.has_key('hostgroups'):\n\t\t\t\tfor hostgroup_name in self._get_list(host, 'hostgroups'):\n\t\t\t\t\tif not 
self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\t\t\t\t\tif hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)\n\n\t\t\t## Append any services which reference this host\n\t\t\tservice_list = []\n\t\t\tfor service in self.data['all_service']:\n\t\t\t\tif service.has_key('register') and service['register'] == '0': continue\n\t\t\t\tif not service.has_key('service_description'): continue\n\t\t\t\tif host['host_name'] in self._get_active_hosts(service):\n\t\t\t\t\tservice_list.append(service['service_description'])\n\t\t\tself.data['all_host'][index]['meta']['service_list'] = service_list\n\t\t\t\t\t\n\n\t\t\t## Increment count\n\t\t\tindex += 1\n\n\t\t## Loop through all hostgroups, appending them to their respective hosts\n\t\tfor hostgroup in self.data['all_hostgroup']:\n\n\t\t\tfor member in self._get_list(hostgroup,'members'):\n\t\t\t\tindex = 0\n\t\t\t\tfor host in self.data['all_host']:\n\t\t\t\t\tif not host.has_key('host_name'): continue\n\n\t\t\t\t\t## Skip members that do not match\n\t\t\t\t\tif host['host_name'] == member:\n\n\t\t\t\t\t\t## Create the meta var if it doesn' exist\n\t\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t\t\t\tif hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])\n\n\t\t\t\t\t## Increment count\n\t\t\t\t\tindex += 1\n\n\t\t## Expand service membership\n\t\tindex = 0\n\t\tfor service in self.data['all_service']:\n\t\t\tservice_members = []\n\n\t\t\t## Find a list of hosts to negate from the final list\n\t\t\tself.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)\n\n\t\t\t## Increment count\n\t\t\tindex += 1", "def parse(data: bytes, port: int, origin: helpers.ConnectionType):\n # Ignore packets from master server... game server is more interesting\n if port == helpers.MASTER_PORT:\n return\n # Iteratively parse packet data until nothing is left to parse\n reads = 0\n while len(data) >= 2:\n reads += 1\n pid = data[:2]\n handler = PACKET_HANDLERS.get(pid, None)\n if handler:\n # Parse data without packet id prepended\n # Returned data will be parsed next iteration\n data = handler(data[2:], origin=origin)\n else:\n # This packet doesn't have a handler\n # Print it once for inspection\n if reads <= 1:\n print(f'[{pid}] - {data}\\n')\n # Remove the first byte and try parsing again later\n data = data[1:]", "def handleQuery(self, message, protocol, address):\n\n # Add transport to each query\n for query in message.queries:\n query.device_addr = self._get_addr(protocol, address)\n \n server.DNSServerFactory.handleQuery(self, message, protocol, address)", "def query_dns_server(packet):\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n except socket.error:\n print \"[Error]: Faild to create socket. 
Exiting...\"\n exit(1)\n\n # get DNS server IPs from dns_servers.conf file\n dns_servers = serverconf.read_file()\n # default port for DNS\n server_port = 53\n\n for server_ip in dns_servers:\n got_response = False\n\n # send message to server\n sock.sendto(packet, (server_ip, server_port))\n # receive answer\n recv = sock.recvfrom(1024)\n\n # if no answer is received, try another server\n if recv:\n got_response = True\n break\n\n # output error message if no server could respond\n if not got_response:\n print \"[Error]: No response received from server. Exiting...\"\n exit(0)\n\n return recv[0]", "def dns(self, **kwargs):\n self.logger.debug(f\"Get RealTime DNS data\")\n url_path = 'dns'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def reverse_dns_sna(ipaddress):\n\n r = requests.get(\"http://api.statdns.com/x/%s\" % ipaddress)\n\n if r.status_code == 200:\n names = []\n\n for item in r.json()['answer']:\n name = str(item['rdata']).strip(\".\")\n names.append(name)\n\n return names\n elif r.json()['code'] == 503:\n # NXDOMAIN - no PTR record\n return None", "def parse_ping(stdout):\n parsed_lines = []\n for line in stdout:\n # 64 bytes from 100.0.0.1: icmp_seq=1 ttl=63 time=1.32 ms\n parsed = {}\n match = re.search(r\"icmp_seq=(\\d+)\", line)\n if match:\n parsed['icmp_seq'] = match.group(1)\n else:\n continue\n\n match = re.search(r\"(\\d+) bytes\", line)\n if match:\n parsed['bytes'] = match.group(1)\n\n match = re.search(r\"ttl=(\\d+)\", line)\n if match:\n parsed['ttl'] = match.group(1)\n\n match = re.search(r\"time=([\\.\\d]+)\", line)\n if match:\n parsed['time'] = match.group(1)\n\n match = re.search(r\"Time[\\w\\s]+exceeded\", line)\n if match:\n parsed['ttl_exceed'] = True\n else:\n parsed['ttl_exceed'] = False\n\n if parsed != {}:\n parsed_lines.append(parsed)\n\n return parsed_lines", "def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next pass\n self.remainder = parse_bytes\n return msgs", "def parse_message(message):\n request_iter = request_regex.finditer(message.body())\n requests = []\n for build_request in request_iter:\n requests.append(determine_request(build_request))\n if requests:\n results = serverset.build_request(requests)\n message.reply(build_reply(results))", "def 
_parse(self):\n pass", "def consume(self, frame):\n # NOTE: This function is called in coroutine context, but is not the coroutine itself.\n # Enable PRINT_COROUTINE_ENTRY_EXIT in shodohflo.fstrm if needed.\n if DNS_STATS:\n timer = self.consume_stats.start_timer()\n\n message = dnstap.Dnstap(frame).field('message')[1]\n self.process_message(message)\n\n if DNS_STATS:\n timer.stop()\n return True", "def handleIncoming(self):\r\n\t\trawQueue = list()\r\n\r\n\t\twhile True:\r\n\t\t\tif not self.activeConnection:\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\trawQueue.append(self.serialPort.read(1).decode('ascii'))\r\n\t\t\texcept serial.serialutil.SerialException as e:\r\n\t\t\t\tcontinue\r\n\t\t\t# print(rawQueue[-1], int.from_bytes(rawQueue[-1], byteorder='big'))\r\n\t\t\t# if len(rawQueue) >= 1000:\r\n\t\t\t# \trawQueue.pop(0)\r\n\t\t\t# print(rawQueue)\r\n\t\t\tif rawQueue[0] != '$': # we pop items until the first one is a $ sign\r\n\t\t\t\t# print('popping the first character')\r\n\t\t\t\trawQueue.pop(0)\r\n\t\t\tif '\\n' in rawQueue: # we assume with the \\n we have a valid message\r\n\t\t\t\t# print('valid message')\r\n\t\t\t\trawQueue.pop(0) # remove the $\r\n\t\t\t\trawPayload = rawQueue[0:rawQueue.index(\"*\")]\r\n\t\t\t\tstringPayload = \"\".join(rawPayload)\r\n\t\t\t\tvalueList = stringPayload.split(\",\")\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\tfor i in range(1, len(valueList)):\r\n\t\t\t\t\tvalueList[i] = int(valueList[i])\r\n\t\t\t\tvalueList[0] = messageTypes[valueList[0]]\r\n\r\n\t\t\t\tself.eventQueue.put(valueList)\r\n\t\t\t\trawQueue.clear()\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\t# we are going to ignore checksums for now\r", "def parse(self, data, s):\n fn = s.fileno()\n\n perf = re.compile(\"<xml><perf>(\\d+)</perf></xml>\")\n numbers = re.compile(\"<xml>(?:<number>(\\d+)</number>)+</xml>\")\n\n if not self.accepting:\n self.mq[fn].put_nowait(\"die\\n\")\n self.inputs.remove(s)\n else:\n self.log(\"Recieved %s from %s\", repr(data),\n self.conns[s].addr)\n\n has_perf = perf.match(data)\n has_nums = numbers.match(data)\n\n if data == \"quit\":\n # Goto self.signal\n kill(getpid(), SIGINT)\n elif data == \"current\":\n worker = self.curWorker\n if not worker:\n worker = \"None\"\n self.mq[fn].put(str(worker) + '\\n')\n elif has_perf:\n self.mq[fn].put(\"<xml><range>\"+str(self.seqNum)+\"</range></xml>\")\n self.seqNum += int(has_perf.group(1))\n self.curWorker = str(self.conns[s])\n self.log(\"Sequence now: %s\", self.seqNum)\n elif has_nums:\n self.perfect_numbers.append(list(has_nums.groups()))\n self.log(\"Found perfect numbers: %s\",\n str(list(has_nums.groups())))\n\n if s not in self.outputs:\n self.outputs.append(s)", "def process_dns_response(self, dns_response: str) -> str:\n if self.expected_address and self.expected_address not in dns_response:\n return f\"CRITICAL: Unexpected address in the DNS response. Expected: {self.expected_address}, \" \\\n f\"Response: {dns_response}\"\n\n return f\"OK: DNS response received. 
Response: {dns_response}\"", "def _decode_message(self, label: str, buf, typedef=None, pos=0, end=None, group=False):\n print(str(pos) + \" decode_message \" + label)\n if end is None:\n end = len(buf)\n\n if typedef is None:\n typedef = {}\n else:\n # Don't want to accidentally modify the original\n typedef = copy.deepcopy(typedef)\n output = {}\n\n while pos < end:\n oldpos = pos\n tag, pos = decoder._DecodeVarint(buf, pos)\n try:\n field_number, wire_type = wire_format.UnpackTag(tag)\n except Exception as exc:\n raise (ValueError,\n 'Could not read valid tag at pos %d. Ensure it is a valid protobuf message: %s'\n % (pos-len(tag), exc), sys.exc_info()[2])\n # Convert to str\n field_number = str(field_number)\n orig_field_number = field_number\n \n field_typedef = None\n if field_number in typedef:\n field_typedef = typedef[field_number]\n else:\n field_typedef = {}\n field_typedef['type'] = self.wire_type_defaults[wire_type]\n field_type = field_typedef['type']\n if self.debug:\n ft = field_type\n if ft == None:\n ft = \"None\"\n print(\"@\" + str(oldpos) + \"-\" + str(pos-1) + \":\" + label + \" field_number \" +\n str(field_number) +\n \" wire_type \" + str(wire_type) +\n \" field_type \" + str(ft))\n # If field_type is None, its either an unsupported wire type, length delim or group\n # length delim we have to try and decode first\n field_out = None\n if field_type == 'LD':\n field_out, pos = self.decode_message_LD(label, buf, pos, field_typedef)\n elif field_type == 'endGroup':\n # TODO Should probably match the field_number to START_GROUP\n if not group:\n raise ValueError(\"Found END_GROUP before START_GROUP\")\n # exit out\n return output, typedef, pos\n elif field_type == 'message':\n field_out, pos = self.decode_message_message(\n label, buf, pos, field_typedef, field_number)\n elif field_type == 'group':\n group_typedef = None\n # Check for a anonymous type\n if 'group_typedef' in field_typedef:\n group_typedef = field_typedef['group_typedef']\n field_out, group_typedef, pos = self.decode_group(\n label, buf, group_typedef, pos)\n # Save type definition\n field_typedef['group_typedef'] = group_typedef\n else:\n # Verify wiretype matches\n if self.wiretypes[field_type] != wire_type:\n raise ValueError(\"Invalid wiretype for field number %s. 
%s is not wiretype %s\"\n % (field_number, field_type, wire_type))\n # Simple type, just look up the decoder\n field_out, pos = self.decoders[field_type](buf, pos)\n field_typedef['type'] = field_type\n if 'name' not in field_typedef:\n field_typedef['name'] = ''\n field_key = field_number\n if '-' not in field_number and 'name' in field_typedef and field_typedef['name'] != '':\n field_key = field_typedef['name']\n # Deal with repeats\n if field_key in output:\n if isinstance(field_out, list):\n if isinstance(output[field_number], list):\n output[field_key] += field_out\n else:\n output[field_key] = field_out.append(output[field_key])\n else:\n if isinstance(output[field_number], list):\n output[field_key].append(field_out)\n else:\n output[field_key] = [output[field_key], field_out]\n else:\n output[field_key] = field_out\n typedef[orig_field_number] = field_typedef\n if self.debug:\n print(str(field_key) + \" field_out:\" + str(field_out))\n if pos > end:\n raise decoder._DecodeError(\"Invalid Message Length, pos=\" +\n str(pos) + \" end=\" + str(end))\n # Should never hit here as a group\n if group:\n raise ValueError(\"Got START_GROUP with no END_GROUP.\")\n print(\"decode_message finish \" + str(pos))\n return output, typedef, pos", "def parse_domain(self, domainfile):\n\n with open(domainfile) as dfile:\n dfile_array = self._get_file_as_array(dfile)\n #Deal with front/end define, problem, :domain\n if dfile_array[0:4] != ['(', 'define', '(', 'domain']:\n print('PARSING ERROR: Expected (define (domain ... at start of domain file')\n sys.exit()\n self.domain = dfile_array[4]\n\n dfile_array = dfile_array[6:-1]\n opencounter = 0\n keyword = ''\n obj_list = []\n is_obj_list = True\n for word in dfile_array:\n if word == '(':\n opencounter += 1\n elif word == ')':\n opencounter -= 1\n elif word.startswith(':'):\n if word[1:] not in DFILE_KEYWORDS:\n pass\n elif keyword != 'requirements':\n keyword = word[1:]\n if opencounter == 0:\n if keyword == 'action':\n self.actions.append(obj_list)\n obj_list = []\n if keyword == 'types':\n for element in obj_list:\n self.types.setdefault('object', []).append(element)\n self.type_list.add('object')\n self.type_list.add(element)\n obj_list = []\n keyword = ''\n\n if keyword == 'requirements': #Requirements list\n if word != ':requirements':\n if not word.startswith(':'):\n print('PARSING ERROR: Expected requirement to start with :')\n sys.exit()\n elif word[1:] not in DFILE_REQ_KEYWORDS:\n print('WARNING: Unknown Rquierement ' + word[1:])\n #print 'Requirements must only be: ' + str(DFILE_REQ_KEYWORDS)\n #sys.exit()\n else:\n self.requirements.add(word[1:])\n elif keyword == 'action':\n obj_list.append(word)\n elif not word.startswith(':'):\n if keyword == 'types': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if not word in self.type_list:\n self.types.setdefault('object', []).append(word)\n self.type_list.add(word)\n self.types.setdefault(word, []).append(element)\n self.type_list.add(element)\n self.type_list.add(word)\n is_obj_list = True\n obj_list = []\n elif keyword == 'constants': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if word in self.type_list:\n self.constants.setdefault(word, []).append(element)\n #self.object_list.add(element)\n else:\n print(self.type_list)\n print(\"ERROR unknown type \" + 
word)\n sys.exit()\n is_obj_list = True\n obj_list = []\n elif keyword == 'predicates' or keyword == 'private': #Internally typed predicates\n if word == ')':\n if keyword == 'private':\n #print \"...skip agent: \" + str(obj_list[:3])\n obj_list = obj_list[3:]\n keyword = 'predicates'\n if len(obj_list) == 0:\n #print \"...skip )\"\n continue\n p_name = obj_list[0]\n #print \"parse predicate: \" + p_name + \" \" + str(obj_list)\n pred_list = self._parse_name_type_pairs(obj_list[1:],self.type_list)\n self.predicates.append(Predicate(p_name, pred_list, True, False))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n elif keyword == 'functions': #functions\n if word == ')':\n p_name = obj_list[0]\n if obj_list[0] == '-':\n obj_list = obj_list[2:]\n #print \"function: \" + word + \" - \" + str(obj_list)\n self.functions.append(Function(obj_list))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n\n #Work on the actions\n new_actions = []\n for action in self.actions:\n if action[0] == '-':\n action = action[2:]\n act_name = action[1]\n act = {}\n action = action[2:]\n keyword = ''\n for word in action:\n if word.startswith(':'):\n keyword = word[1:]\n else:\n act.setdefault(keyword, []).append(word)\n self.agent_types.add(act.get('agent')[2])\n agent = self._parse_name_type_pairs(act.get('agent'),self.type_list)\n param_list = agent + self._parse_name_type_pairs(act.get('parameters')[1:-1],self.type_list)\n up_params = Predicate('', param_list, True, False)\n pre_list = self._parse_unground_propositions(act.get('precondition'))\n eff_list = self._parse_unground_propositions(act.get('effect'))\n new_act = Action(act_name, up_params, pre_list, eff_list)\n\n new_actions.append(new_act)\n self.actions = new_actions", "def testSplitSPFRecord(self):\n\n rec = '\"v=spf1 ip4:147.75.8.208 \" \"include:_spf.salesforce.com -all\"'\n\n parsed_record = checkdmarc.parse_spf_record(rec, \"example.com\")\n\n self.assertEqual(parsed_record[\"parsed\"][\"all\"], \"fail\")", "def _parse_contact_information(self):\n left_column = self.content.find(\"div\", class_=\"linkeSpalte40\")\n graubox = left_column.find(\n lambda tag: tag.name == \"div\" and tag[\"class\"] == [\"grauBox\"]\n )\n\n emails_raw = graubox.find_all(\"a\", class_=\"mail\")\n websites_raw = graubox.find_all(\"a\", class_=\"noDecoration\")\n telephone_raw = graubox.find_all(\"span\", class_=\"telefonnummer\")\n address_raw = [\n e.nextSibling for e in graubox.find_all(\"em\") if e.text == \"Anschrift:\"\n ]\n\n address = address_raw[0].li.get_text(\"\\n\") if address_raw else None\n emails = [re.sub(r\"^mailto:\", \"\", e.attrs[\"href\"]) for e in emails_raw]\n phone_numbers = [t.text for t in telephone_raw]\n websites = [w.attrs[\"href\"] for w in websites_raw]\n\n return {\n \"address\": address,\n \"emails\": emails,\n \"phone_numbers\": phone_numbers,\n \"websites\": websites,\n }", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def _r_on_incoming_message(self, 
string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(self._parse_message, string, protocol)\n d.addCallback(self._r_process_message, protocol)\n d.addCallbacks(callback=self._r_send_result, errback=self._r_send_error, callbackArgs=(protocol,), errbackArgs=(protocol,))", "def parse_pasv_resp(self, msg_rec):\n num_ip_bytes = 4\n index_of_port_1 = 4\n index_of_port_2 = 5\n try:\n print_debug(msg_rec)\n # Parse out IP & Port from the parenthesis within the PASV resp.\n host_info = msg_rec[msg_rec.index(\"(\") + 1:msg_rec.rindex(\")\")]\n # Break up IP & Port based on comma separated delimiter.\n host_info_split = host_info.split(',')\n # Put octets together, delimited by periods.\n host_ip_list = [host_info_split[i] for i in range(num_ip_bytes)]\n host_ip = '.'.join(host_ip_list)\n # Get Port as a valid port number.\n host_port = int(host_info_split[index_of_port_1]) * 256 + \\\n int(host_info_split[index_of_port_2])\n except Exception as e:\n print_debug(\"Error: \" + str(e))\n return \"\", \"\"\n return host_ip, host_port", "def _parse_message(self, data):\r\n if TwitchChatStream._check_has_ping(data):\r\n self._maybe_print('got ping')\r\n self._send_pong()\r\n\r\n channel_name_or_false = TwitchChatStream._check_has_channel(data)\r\n if channel_name_or_false:\r\n current_channel = channel_name_or_false[0]\r\n print('Connected to channel: ' + current_channel)\r\n\r\n if TwitchChatStream._check_has_message(data):\r\n msg = {\r\n 'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'\r\n r'@[a-zA-Z0-9_]+'\r\n r'.+ '\r\n r'PRIVMSG (.*?) :',\r\n data)[0],\r\n 'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],\r\n 'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',\r\n data)[0]\r\n }\r\n if msg['channel'].startswith('#'):\r\n msg['channel'] = msg['channel'][1:]\r\n self._maybe_print(\r\n 'got msg: #{} @{} -- {}'.format(msg['channel'], msg['username'], msg['message']))\r\n return msg\r\n elif len(data):\r\n self._maybe_print('other data: {}'.format(data))\r\n else:\r\n return None", "def parse(self, input):\n pass", "def parse_raw_entry(raw_entry):\n entry_start = raw_entry[0]\n\n # get the timestamp\n ts_len = 23\n ts = entry_start[:ts_len]\n # get the IP, if there is one\n idx = entry_start.find(' ', ts_len+1)\n ip = entry_start[ts_len+1:idx]\n # get the database, if there is one\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n db = entry_start[consumed+1:idx]\n # get the log type\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n type = entry_start[consumed+1:idx]\n # finally, combined the message\n consumed = idx\n remaining = entry_start[consumed+1:]\n foo = [remaining]\n foo.extend(raw_entry[1:])\n msg = ''.join(foo).strip()\n\n return Entry(ts, ip, db, type, msg)" ]
[ "0.6813776", "0.6387554", "0.6330695", "0.62516606", "0.6083002", "0.59635645", "0.5952545", "0.5765341", "0.5750907", "0.57202256", "0.56227237", "0.5602068", "0.55890054", "0.55267555", "0.5508064", "0.55026555", "0.54993564", "0.5461191", "0.5436913", "0.54354006", "0.5335148", "0.53330714", "0.52950346", "0.52910084", "0.5282006", "0.5243971", "0.5243751", "0.5228624", "0.51833904", "0.5176363", "0.5174349", "0.5162382", "0.5148217", "0.5141654", "0.51214457", "0.5115762", "0.51062506", "0.5106188", "0.51032144", "0.50955373", "0.5079168", "0.5078109", "0.5070053", "0.5066554", "0.50606084", "0.5057413", "0.5051718", "0.50415355", "0.5021875", "0.50155663", "0.5004751", "0.5002528", "0.50012565", "0.50000405", "0.49980813", "0.49941877", "0.49916527", "0.4983528", "0.49807376", "0.49758798", "0.4927874", "0.4921715", "0.4920237", "0.4917246", "0.49169973", "0.49133134", "0.4907169", "0.4906084", "0.4905399", "0.48992172", "0.48897457", "0.48850647", "0.48765403", "0.48728764", "0.48671284", "0.4866971", "0.48586833", "0.48570997", "0.48519924", "0.48477775", "0.484069", "0.48316118", "0.48308876", "0.48240587", "0.4820717", "0.48207113", "0.4817933", "0.48138165", "0.4808404", "0.48055142", "0.47991416", "0.4798953", "0.4795868", "0.4795368", "0.47948426", "0.47877514", "0.47862786", "0.47828847", "0.47825196", "0.47794682", "0.47751415" ]
0.0
-1
Create or replace Secret and SecretACL
def create_or_replace_from_model(cls, secret, secret_acl, user_ids=None, session=None): secret.updated_at = timeutils.utcnow() secret_acl.updated_at = timeutils.utcnow() secret.save(session=session) if secret_acl.id: secret_acl.save(session=session) else: secret_acl.create(session=session) cls._create_or_replace_acl_users(secret_acl=secret_acl, user_ids=user_ids, session=session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_secret(secret_name, secret_value, environment):\n environment.add_cleanup(\n environment.cfy.secrets.delete,\n kwargs={\n 'secret_name': secret_name,\n },\n )\n environment.cfy.secrets.create(\n secret_name=secret_name,\n secret_value=secret_value,\n )", "def create_secret(logger,namespace,body,v1=None):\n if v1 is None:\n v1 = client.CoreV1Api()\n logger.debug('new client - fn create secret')\n try:\n name = body['metadata']['name']\n except KeyError:\n logger.debug(\"No name in body ?\")\n raise kopf.TemporaryError(\"can not get the name.\")\n try:\n data = body.get('data')\n except KeyError:\n data = ''\n logger.error(\"Empty secret?? could not get the data.\")\n \n secret_type = 'Opaque'\n if 'type' in body:\n secret_type = body['type']\n\n metadata = {'name': name, 'namespace': namespace}\n api_version = 'v1'\n kind = 'Secret'\n body = client.V1Secret(api_version, data , kind, metadata, type = secret_type)\n # kopf.adopt(body)\n logger.info(f\"cloning secret in namespace {namespace}\")\n try:\n api_response = v1.create_namespaced_secret(namespace, body)\n except client.rest.ApiException as e:\n if e.reason == 'Conflict':\n logger.warning(f\"secret `{name}` already exist in namesace '{namespace}'\")\n return 0\n logger.error(f'Can not create a secret, it is base64 encoded? data: {data}')\n logger.error(f'Kube exception {e}')\n return 1\n return 0", "def resetSecret(self):\n self.secret = str(uuid())\n self.put()", "def set_secret(self, name: str, plaintext: str) -> \"Secret\":\n _args = [\n Arg(\"name\", name),\n Arg(\"plaintext\", plaintext),\n ]\n _ctx = self._select(\"setSecret\", _args)\n return Secret(_ctx)", "async def write_secret(self, name: str, value: str, content_type: str, tags: dict):\n pass", "def add_ipsec_secrets(self, **kwargs):\r\n\r\n if 'auth_type' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\"Mandatory Argument 'auth_type' is missing\")\r\n raise Exception(\"Mandatory Argument 'auth_type' is missing\")\r\n auth_type = kwargs.get('auth_type')\r\n ipsec_secret_file = self.conf_dir + '/ipsec.secrets'\r\n result = self.linux_handle.shell(command='ls ' + ipsec_secret_file).response()\r\n if not re.search(r'No such file or directory', result):\r\n self.linux_handle.log(\"Moving existing %s to %s.orig\" % (ipsec_secret_file, ipsec_secret_file))\r\n cmd = \"mv -f %s %s.orig\" % (ipsec_secret_file, ipsec_secret_file)\r\n self.linux_handle.shell(command=cmd)\r\n line = ''\r\n if auth_type.lower() == 'PSK'.lower():\r\n if 'preshared_key' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\"For auth_type=psk, argument 'preshared_key' is mandatory\")\r\n raise Exception(\"Missing argument: For auth_type=psk, argument 'preshared_key' is mandatory\")\r\n if 'host_id' in kwargs:\r\n line = kwargs.get('host_id') + ' '\r\n if 'peer_id' in kwargs:\r\n line = line + ' ' + kwargs.get('peer_id') + ' '\r\n line = line + ' : PSK \"' + kwargs.get('preshared_key') + '\"'\r\n else:\r\n if 'local_cert' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\" 'local_cert' is mandatory argument\")\r\n raise Exception(\"'local_cert' is mandatory argument\")\r\n line = ' : ' + auth_type.upper() + ' ' + kwargs.get('local_cert')\r\n if 'passphrase' in kwargs:\r\n line = line + ' ' + kwargs.get('passphrase')\r\n self.linux_handle.log('Adding %s into secrets file' %line)\r\n\r\n xauth = None\r\n if 'xauth_user' in kwargs and 'xauth_pwd' in kwargs:\r\n xauth = kwargs.get('xauth_user') + ' : XAUTH ' + kwargs.get('xauth_pwd')\r\n\r\n with 
open('ipsec.secrets', 'w') as out:\r\n out.write(line + \"\\n\")\r\n if xauth is not None:\r\n out.write(xauth + \"\\n\")\r\n out.close()\r\n\r\n if not self.linux_handle.upload(local_file='ipsec.secrets', remote_file=ipsec_secret_file,\r\n protocol='scp'):\r\n self.linux_handle.log(\"Uploading ipsec.secrets file failed\")\r\n raise Exception(\"Uploading ipsec.secrets file failed\")\r\n\r\n self.linux_handle.log(\"Updating ipsec.secrets file successfull\")\r\n return True", "def UpdateSecretKey():\n _LOG.info('Updating webapp2_secret_key.')\n webapp2_secret_key = Webapp2SecretKey(id='current_secret_key')\n webapp2_secret_key.secret_key = os.urandom(16).encode('hex')\n webapp2_secret_key.put()\n return True", "def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def secret(self) -> \"Secret\":\n warnings.warn(\n (\n 'Method \"secret\" is deprecated: insecure, leaves secret in cache.'\n ' Superseded by \"set_secret\"'\n ),\n DeprecationWarning,\n stacklevel=4,\n )\n _args: list[Arg] = []\n _ctx = self._select(\"secret\", _args)\n return Secret(_ctx)", "def _wrap_secret(self, val):\n return {\"SecretString\": val}", "async def add_secret(app: Sanic, secret: str, passphrase: str, ttl: Optional[int]) -> str:\n\n key = get_fernet_key(app, passphrase)\n\n sign = hmac.digest(key=key, msg=passphrase.encode(), digest='sha512').hex()\n secret_key = secrets.token_hex(16)\n\n cipher = fernet.Fernet(key)\n encrypted = cipher.encrypt(secret.encode()).decode()\n\n expires = None\n if ttl:\n expires = datetime.utcnow() + timedelta(seconds=ttl)\n\n await app.db.secrets.insert_one({\n 'secret': encrypted,\n 'secret_key': secret_key,\n 'signature': sign,\n 'expires': expires, # for mongo index\n 'ttl': ttl, # for fernet check\n })\n\n return secret_key", "def secretstore():\n pass", "def manage_createNewSecret(self, REQUEST):\n manager = getUtility(IKeyManager)\n manager.rotate()\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'New+secret+created.')\n )", "def create(self):\n id_access_secretkey = uuid.uuid4()\n id_webuser = Base.logged_id_webuser or None\n keys = Token().generate_secretkey(config.PACKAGE_NAME)\n\n with Database() as db:\n db.insert(Table(id_access_secretkey, id_webuser, config.PACKAGE_NAME,\n keys['randomkey'], keys['secretkey']))\n db.commit()\n\n return {\n 'secretkey': keys['secretkey'],\n 'message': 'access secretkey successfully created'\n }", "def secret(self) -> \"Secret\":\n warnings.warn(\n 'Method \"secret\" is deprecated: been superseded by \"set_secret\"',\n DeprecationWarning,\n stacklevel=4,\n )\n _args: list[Arg] = []\n _ctx = self._select(\"secret\", _args)\n return Secret(_ctx)", "def put_slice_secret( observer_pkey_pem, slice_name, slice_secret, slice_fk=None, opencloud_slice=None ):\n \n ss = None \n \n if opencloud_slice is None:\n # look up the slice \n try:\n if slice_fk is None:\n opencloud_slice = models.Slice.objects.get( name=slice_name )\n else:\n opencloud_slice = models.Slice.objects.get( id=slice_fk.id )\n except Exception, e:\n logger.exception(e)\n logger.error(\"Failed to load slice (%s, %s)\" % (slice_fk, slice_name) )\n return False \n 
\n ss = models.SliceSecret( slice_id=opencloud_slice, secret=slice_secret )\n \n ss.save()\n \n return True", "def update(secret: str, value: str, env: Optional[str], config: str) -> None:\n layer = Layer.load_from_yaml(config, env)\n gen_all(layer)\n _raise_if_no_k8s_cluster_exists(layer)\n\n configure_kubectl(layer)\n amplitude_client.send_event(amplitude_client.UPDATE_SECRET_EVENT)\n secret_value = base64.b64encode(value.encode(\"utf-8\")).decode(\"utf-8\")\n patch = [{\"op\": \"replace\", \"path\": f\"/data/{secret}\", \"value\": secret_value}]\n load_kube_config()\n v1 = CoreV1Api()\n v1.patch_namespaced_secret(\"secret\", layer.name, patch)\n\n print(\"Success\")", "def secret_key(self, val):\n self.__secret_key = val", "def secret():\n pass", "def _fresh_secret(self, request: SecretRequest) -> Secret:\n if type(request) is AWSSecretRequest:\n secret = self.vault.aws(request.role, request.mount_point)\n elif type(request) is DatabaseSecretRequest:\n if request.engine.split('+', 1)[0] == MYSQL:\n secret = self.vault.mysql(request.role, request.mount_point)\n else:\n raise NotImplementedError('No other database engine available')\n elif type(request) is GenericSecretRequest:\n secret = self.vault.generic(request.path, request.key,\n request.mount_point)\n return secret", "def add(ctx, secret, name, issuer, period, oath_type, digits, touch, algorithm,\n counter, force):\n\n digits = int(digits)\n\n if not secret:\n while True:\n secret = click.prompt('Enter a secret key (base32)', err=True)\n try:\n secret = parse_b32_key(secret)\n break\n except Exception as e:\n click.echo(e)\n\n ensure_validated(ctx)\n\n _add_cred(ctx, CredentialData(secret, issuer, name, oath_type, algorithm,\n digits, period, counter, touch), force)", "def ReplaceSecret(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceSecret')\n return self._RunMethod(\n config, request, global_params=global_params)", "def ReplaceSecret(self, request, global_params=None):\n config = self.GetMethodConfig('ReplaceSecret')\n return self._RunMethod(\n config, request, global_params=global_params)", "def add(key, value, **kwargs):\n cluster_call(\"secret_add\", key=key, value=value, **kwargs, prefix=f\"Adding secret {key}...\", postfix=\"added.\")", "def secret() -> None:\n pass", "def with_secrets(self, kind, source):\n\n if kind == \"vault\" and isinstance(source, list):\n source = {\"project\": self.metadata.project, \"secrets\": source}\n\n self.spec.secret_sources.append({\"kind\": kind, \"source\": source})\n return self", "def create(self, validated_data):\n resource = Resource.objects.create(**validated_data.get(\"resource\"))\n return Secret.objects.create(resource=resource)", "def _instantiateSecrets(cmd, secrets, hide):\n if secrets:\n for (i, secret) in enumerate(secrets):\n if hide:\n secret = '<hidden>'\n cmd = cmd.replace(f':{i}:', secret)\n return cmd", "def prepare_secrets(c, rebuild_venv=False, no_secret_cache=False):\n cli_tasks.prepare_secrets.run(c, rebuild_venv, no_secret_cache)", "def create_temporary_secret():\n return uuid.uuid4().hex", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def create_secrets(file):\n with open(file, 'w') as secfile:\n secfile.write((\n '# 
_credentials: Maintain your credentials below. Do not remove unused fields.\\n'\n 'USER = \\'\\'\\nPASSWORD = \\'\\'\\n# _courses: Define which courses should be crawled\\nCOURSES = []\\n\\n'\n '# local: Required if you want to download files and store them in a local folder'\n ' (for example in the Dropbox client folder)\\n'\n 'PATH = \\'\\' # Path to the destination folder\\n\\n'\n '# dropbox (-d): Required if you want to download files and upload them to Dropbox\\n'\n 'DROPBOX_TOKEN = \\'\\' # Personal Dropbox API token\\n'\n 'PATH_IN_DB = \\'\\' # Destination path of downloaded files within Dropbox\\n'))\n print('File app_secrets.py was created. Please maintain your credentials.')\n sys.exit(1)", "def store_barbican_secret_for_coriolis(\n barbican, secret_info, name='Coriolis Secret'):\n payload = json.dumps(secret_info)\n\n secret = barbican.secrets.create(\n name=name, payload=payload,\n payload_content_type='application/json')\n secret_ref = secret.store()\n\n return secret_ref", "def _generateSecretKey():\n return f\"secret.{str(datetime.now())}\"", "def add(\n ctx,\n secret,\n name,\n issuer,\n period,\n oath_type,\n digits,\n touch,\n algorithm,\n counter,\n force,\n password,\n remember,\n):\n\n digits = int(digits)\n\n if not secret:\n while True:\n secret = click_prompt(\"Enter a secret key (base32)\")\n try:\n secret = parse_b32_key(secret)\n break\n except Exception as e:\n click.echo(e)\n\n _init_session(ctx, password, remember)\n\n _add_cred(\n ctx,\n CredentialData(\n name, oath_type, algorithm, secret, digits, period, counter, issuer\n ),\n touch,\n force,\n )", "def create_secret_link(link_id, secret_id, parent_share_id, parent_datastore_id):\n\n try:\n Secret_Link.objects.create(\n link_id = link_id,\n secret_id = secret_id,\n parent_datastore_id = parent_datastore_id,\n parent_share_id = parent_share_id\n )\n except:\n return False\n\n return True", "async def admin_secret(self, ctx: commands.Context, *token: str):\n the_token = await self.config.secret()\n token = ' '.join(token)\n if not token:\n await ctx.author.send(f'Team management secret: {the_token}')\n else:\n await self.config.secret.set(token)\n message = [display(ctx.author),\n f'set the team management secret to {token}.']\n if the_token:\n message.append(f'(was `{the_token}`)')\n await self.admin_msg(' '.join(message))", "def test_create_container_w_duplicate_secret_refs(self):\n\n secret_resp = self.secret_behaviors.create_secret_from_config()\n secret_refs = [SecretRef(name='1', ref=secret_resp.ref),\n SecretRef(name='2', ref=secret_resp.ref)]\n\n container_resp = self.behaviors.create_container(\n 'name', 'generic', secret_refs)\n\n self.assertEqual(container_resp.status_code, 400)", "def _secret(value):\n\n match = SECRET_ARN_RE.match(value)\n if match:\n named_groups = match.groupdict()\n return AwsSecret(arn=value,region=named_groups[\"Region\"])\n\n raise argparse.ArgumentTypeError('Given argument \"%s\" is not a valid secret' % value)", "def regenerate_project_secret_key(cursor, project):\n project_id = extract_obj_id(project)\n secret_key = uuid.uuid4().hex\n haystack = (secret_key, project_id)\n\n query = \"UPDATE projects SET secret_key=? 
WHERE _id=?\"\n\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((secret_key, None))", "def test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def add_to_ipsec_secrets(strongswan_obj, **kwargs):\r\n\r\n return strongswan_obj.add_ipsec_secrets(**kwargs)", "def make_secure_val(val):\n return \"%s|%s\" % (val, hmac.new(secret, val).hexdigest())", "def _configure_ipsec_secrets(self, ipsec_confs):\n secrets_tpl = '../config/tpl/ipsec/ipsec.secrets'\n secret_confs = []\n\n for name, conf in ipsec_confs.items():\n secret_conf = {\n 'right_public_ip': conf['right_public_ip'],\n 'psk': env.get('ipsec_psk_%s' % name),\n }\n secret_confs.append(secret_conf)\n\n # Configure the /etc/ipsec.d/<name>.conf file with passwords\n with hide(*fab_output_hides):\n return upload_template_changed(\n secrets_tpl,\n '/etc/ipsec.secrets',\n context={'confs': secret_confs},\n use_sudo=True,\n mode=0600,\n use_jinja=True\n )", "def _create_shared_secret():\n\n randint = random.SystemRandom().randint\n bits = load_config(\"instavpn.json\")[\"shared_secret_bits\"]\n return urlsafe_b64encode(\"\".join(chr(randint(0, 255)) for _ in xrange(bits/8)))", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def make_secure_val(val):\n return '%s|%s' % (val, hmac.new(secret, val).hexdigest())", "def create_secret(self, name, namespace):\n secret_manifest = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": name,\n \"annotations\": {\n \"kubernetes.io/service-account.name\": name\n }\n }\n }\n self.v1_client.create_namespaced_secret(namespace=namespace,\n body=secret_manifest)", "def secret(self, id: SecretID) -> \"Secret\":\n _args = [\n Arg(\"id\", id),\n ]\n _ctx = self._select(\"secret\", _args)\n return Secret(_ctx)", "def set_lair_secret(self, _id: str, secret_key: str, secret_val: str):\n payload = {\"key\": secret_key, \"value\": secret_val}\n url = self._get_url(subpath=\"files\", route=\"set_secret\", template_args={\"id\": _id})\n response = self.session.post(url, json=payload)\n return response", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token 
=\\n\"\n )\n assert c.defaults_toml() == default_toml", "def deploy_secret(self, deploy_secret):\n self._deploy_secret = deploy_secret", "def secret(self, value):\n if value > 0:\n self._secret = value\n else:\n print(\"Please insert positive value.\")", "def get_aurora_secret():\n secret_name = \"grow-data-key\"\n region_name = \"eu-west-1\"\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name=region_name\n )\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n raise e\n else:\n if 'SecretString' in get_secret_value_response:\n secret = get_secret_value_response['SecretString']\n secret = ast.literal_eval(secret)\n return secret\n else:\n decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])\n return decoded_binary_secret", "def _update_input_config(input_config,secret_key):\n\n for key in input_config.keys():\n if input_config[key].get('arguments') is None:\n input_config[key]['arguments'] = {'secret':secret_key}\n elif input_config[key]['arguments'].get('secret') is None:\n input_config[key]['arguments']['secret'] = secret_key", "def put(self, credential):\n pass", "def populate_secrets_pre(vault_secret_keys, core_auth_cookies, extra_fns):\n\n for path in vault_secret_keys:\n vault.ensure_secret_key(path)\n\n for fn in extra_fns:\n if fn:\n fn(vault, config, random_secret)\n\n for name in core_auth_cookies:\n vault.ensure_secret(f'liquid/{name}/cookie', lambda: {\n 'cookie': random_secret(64),\n })", "def createSaltKey(operation,newPassword,newPasswordTag):\n \n newPasswordEncrypted=encrypt(GlobalSaltKeyValue,newPassword)\n \n if os.path.isfile(GlobalKeyVaultFile):\n if checkTag(GlobalKeyVaultFileSection,newPasswordTag):\n if operation == 'update':\n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password updated\"\n else:\n print \"Error:0001-Section and password tag already exists.\"\n sys.exit(2)\n\n else:\n if operation == 'add': \n addUpdateTag(newPasswordTag, newPasswordEncrypted)\n print \"Success-Password added\"\n else:\n print \"Error:0002-No matching tag found.\"\n sys.exit(2)\n else:\n print \"Error:0003-Missing file \", GlobalKeyVaultFile\n sys.exit(2)", "def new_super_secret(self):\n super_secret_obj = gen_new_super_secret()\n\n temp_cursor = user_db.cursor()\n\n temp_cursor.execute(\n \"\"\"\n UPDATE users\n SET super_secret=?,\n secret_salt=?\n WHERE user_id=?\n \"\"\",\n (\n super_secret_obj[\"super_secret\"],\n super_secret_obj[\"salt\"],\n self.user_id,\n ),\n )\n user_db.commit()\n\n return {\n \"key\": super_secret_obj[\"key\"],\n \"salt\": super_secret_obj[\"salt\"]\n }", "def test_move_secret_to_different_folder_with_member_permission_edit_on_source_folder_for_user_a(\n core_session,\n users_and_roles,\n create_secret_inside_folder,\n pas_general_secrets,\n cleanup_secrets_and_folders):\n folder_id_list, folder_name, secret_id_list = create_secret_inside_folder\n folders_list = cleanup_secrets_and_folders[1]\n params = pas_general_secrets\n prefix = guid()\n pas_power_user = 
users_and_roles.get_user('Privileged Access Service Power User')\n user_name = pas_power_user.get_login_name()\n user_id = pas_power_user.get_id()\n\n # API to get new session for User A\n pas_power_user_session = users_and_roles.get_session_for_user('Privileged Access Service Power User')\n assert pas_power_user_session.auth_details is not None, 'Failed to Login with PAS Power User'\n logger.info(f'User with PAS Power User Rights login successfully :user_Name: {user_name}'\n f' & Password: {pas_power_user.get_password()} ')\n\n # Api to give user permissions to folder\n user_permissions_result = give_user_permissions_to_folder(core_session, user_name, user_id, folder_id_list[0],\n 'View,Grant')\n assert user_permissions_result, f'Not Able to set user permissions to folder{user_permissions_result}'\n logger.info(f'User Permissions to folder: {user_permissions_result}')\n\n # Api to give member permissions to folder\n member_perm_result, member_perm_success = set_member_permissions_to_folder(core_session,\n user_name,\n 'View,Grant,Edit',\n user_id,\n folder_id_list[0])\n assert member_perm_success, f'Not Able to set member permissions to Folder{member_perm_result[\"Result\"]}'\n logger.info(f'Member permissions to folder:{member_perm_result}')\n\n # Api to create secret folder for User A\n secret_folder_success, secret_folder_parameters, secret_folder_id = create_folder(pas_power_user_session,\n prefix + params['name'],\n params['description'])\n\n logger.info(f' Folder created successfully: {secret_folder_success} & details are {secret_folder_parameters}')\n assert secret_folder_success is True, f'Failed to create a folder {secret_folder_id}'\n folders_list.append(secret_folder_id)\n\n # Api to move secret into another Folder\n result_move = move_secret(pas_power_user_session, secret_id_list[0], secret_folder_id)\n assert result_move['success'], f'Not Able to move the secret into Folder: {result_move[\"Result\"]}'\n logger.info(f'Moving secret with edit permissions to another folder:{result_move}')", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def install_secret_key(app, filename='secret_key'):\n filename = os.path.join(app.instance_path, filename)\n\n try:\n app.config['SECRET_KEY'] = open(filename, 'rb').read()\n except IOError:\n print('Error: No secret key. 
Create it with:')\n full_path = os.path.dirname(filename)\n if not os.path.isdir(full_path):\n print('mkdir -p {filename}'.format(filename=full_path))\n print('head -c 24 /dev/urandom > {filename}'.format(filename=filename))\n sys.exit(1)", "def testUpdateAccessDenied(self):\n self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_403()", "def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)", "def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:\n ...", "def unconfigure_enable_password(device,secret=True,privilege=None):\n cmd=\"no enable\"\n if secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege :\n cmd+=f\" level {privilege}\"\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure enable password or secret:\\n{e}'\n )", "def __init__(__self__, *,\n db_name: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n roles: pulumi.Input[Sequence[pulumi.Input[str]]],\n server: pulumi.Input[str],\n admin_secret: Optional[pulumi.Input[str]] = None,\n admin_secret_key_vault: Optional[pulumi.Input[str]] = None,\n key_vault_secret_formats: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n key_vault_secret_prefix: Optional[pulumi.Input[str]] = None,\n key_vault_to_store_secrets: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_name\", db_name)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"roles\", roles)\n pulumi.set(__self__, \"server\", server)\n if admin_secret is not None:\n pulumi.set(__self__, \"admin_secret\", admin_secret)\n if admin_secret_key_vault is not None:\n pulumi.set(__self__, \"admin_secret_key_vault\", admin_secret_key_vault)\n if key_vault_secret_formats is not None:\n pulumi.set(__self__, \"key_vault_secret_formats\", key_vault_secret_formats)\n if key_vault_secret_prefix is not None:\n pulumi.set(__self__, \"key_vault_secret_prefix\", key_vault_secret_prefix)\n if key_vault_to_store_secrets is not None:\n pulumi.set(__self__, \"key_vault_to_store_secrets\", key_vault_to_store_secrets)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def secrets(self, secrets):\n\n self._secrets = secrets", "def generate_secret(self,\n passphrase: str, otpstring: str, key: bytes,\n **kwargs\n ):\n assert self._state is not None, 'Unseal the vault first'\n otp = YubikeyOTP.parse(otpstring, key)\n\n kdf_config = self._vault_kdf.settings.copy()\n kdf_config.update(**kwargs)\n\n assert otp.public_uid not in self._state, \\\n 'This YubiKey is already in use'\n self._state[otp.public_uid] = YKContext.init(\n key=key, passphrase=passphrase, otp=otp,\n **kdf_config\n )", "def make_secret(length=SecretLength.GOOGLE_AUTH):\n if hasattr(length, \"value\"):\n length = length.value\n\n return token_bytes(length)", "def save(self, *args, **kwargs):\n if not self.id:\n self.api_key = self.__generate_key(self.__api_key_length)\n self.api_secret = self.__generate_key(self.__api_secret_length)\n super(Token, self).save(*args, **kwargs)", "def reset_secret(self, save=False):\n client = cas.get_client()\n client.revoke_application_tokens(self.client_id, self.client_secret)\n self.client_secret = 
generate_client_secret()\n\n if save:\n self.save()\n return True", "def test_mask_secret_nosecrets():\n assert utils.mask_secrets(\"ls -lh /tmp\", None) == \"ls -lh /tmp\"", "def configure_masked_unmasked_enable_secret_password (device,\n password,\n privilege=None,\n ccp_name=None,\n algorithm_type=None,\n masked=True,\n secret=True,):\n cmd=\"enable \"\n if ccp_name :\n cmd+=f\" common-criteria-policy {ccp_name}\"\n if algorithm_type :\n cmd+=f\" algorithm-type {algorithm_type}\"\n if masked :\n cmd+=\" masked-secret\"\n elif secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege:\n cmd+=f\" level {privilege}\"\n if not(masked) :\n cmd+=f\" {password}\"\n\n masked_secret_dialog = Dialog(\n [\n Statement(\n pattern=r\".*Enter secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm secret:.*\",\n action=f\"sendline({password})\",\n loop_continue=True,\n continue_timer=False,\n ),\n ]\n )\n\n try:\n out=device.configure(cmd,reply=masked_secret_dialog)\n if re.search(r'[p|P]assword',out) and not(re.search(r'migrate',out)):\n raise SubCommandFailure(out)\n\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not configure enable password\"\n \"Error: {error}\".format(error=e)\n )", "def change_secret_word(word_list_path):\n FIELD_NAME = \"secret word index\"\n secret_word_index = change_field(FIELD_NAME, is_castable_to_int, input_mod_func=int)\n\n return choose_word(word_list_path, secret_word_index)", "def with_secret(self, secret):\n if not isinstance(secret, str):\n raise TypeError('Secret must be a string')\n\n self.secret = secret\n\n return self", "def set_SecretKey(self, value):\n super(RetrieveUserDashboardInputSet, self)._set_input('SecretKey', value)", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def _init_secret_token_map(model_context):\n method_name = '_init_secret_token_map'\n global _secret_token_map\n\n log_method = _logger.warning\n if model_context.get_validate_configuration().allow_unresolved_secret_tokens():\n log_method = _logger.info\n\n _secret_token_map = dict()\n\n # add name/key pairs for files in sub-directories of directories in WDT_MODEL_SECRETS_DIRS.\n\n locations = env_helper.getenv(str_helper.to_string(_secret_dirs_variable))\n if locations is not None:\n for secret_dir in locations.split(\",\"):\n if not os.path.isdir(secret_dir):\n # log at WARN or INFO, but no exception is thrown\n log_method('WLSDPLY-01738', _secret_dirs_variable, secret_dir, class_name=_class_name,\n method_name=method_name)\n continue\n\n for subdir_name in os.listdir(secret_dir):\n subdir_path = os.path.join(secret_dir, subdir_name)\n if os.path.isdir(subdir_path):\n _add_file_secrets_to_map(subdir_path, subdir_name, model_context)\n\n # add name/key pairs for files in directories assigned in WDT_MODEL_SECRETS_NAME_DIR_PAIRS.\n # these pairs will override if they were previously added as sub-directory pairs.\n\n dir_pairs_text = env_helper.getenv(str_helper.to_string(_secret_dir_pairs_variable))\n if dir_pairs_text is not None:\n dir_pairs = dir_pairs_text.split(',')\n for dir_pair in dir_pairs:\n result = dir_pair.split('=')\n if len(result) != 2:\n log_method('WLSDPLY-01735', _secret_dir_pairs_variable, dir_pair, class_name=_class_name,\n method_name=method_name)\n continue\n\n secret_dir = result[1]\n if not os.path.isdir(secret_dir):\n log_method('WLSDPLY-01738', _secret_dir_pairs_variable, secret_dir, class_name=_class_name,\n 
method_name=method_name)\n continue\n\n name = result[0]\n _add_file_secrets_to_map(secret_dir, name, model_context)", "def __init__(__self__,\n resource_name: str,\n args: SecretBackendRoleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _write(self):\n raw_data = {'file_version': 1}\n raw_creds = []\n raw_data['data'] = raw_creds\n for (cred_key, cred) in self._data.items():\n raw_key = dict(cred_key)\n raw_cred = json.loads(cred.to_json())\n raw_creds.append({'key': raw_key, 'credential': raw_cred})\n self._locked_json_write(raw_data)", "def test_generate_secret_file(self, mock_context, mock_create_aws, mock_file_open, mock_json, mock_dump):\n context = ef_password.EFPWContext()\n context.env, context.service = self.env, self.service\n context.secret_file = self.secret_file\n context.match = 'password'\n mock_context.return_value = context\n mock_create_aws.return_value = {\"kms\": self.mock_kms}\n mock_json.return_value = {\"params\": {\"test\": {\"password\": \"mock_secret1\"}}}\n ef_password.main()\n self.mock_kms.decrypt.assert_not_called()\n self.mock_kms.encrypt.assert_called_once_with(\n KeyId='alias/{}-{}'.format(self.env, self.service),\n Plaintext=\"mock_secret1\".encode()\n )\n mock_file_open.assert_called_with(self.secret_file, 'w')\n handle = mock_file_open()\n mock_dump.assert_called_once_with({'params': {'test': {'password': '{{aws:kms:decrypt,Y2lwaGVyX2Jsb2I=}}'}}},\n handle, indent=2, separators=(',', ': '))\n handle.write.assert_called_with('\\n')", "def grant_write(self, identity: aws_cdk.aws_iam.IGrantable, objects_key_pattern: typing.Any=None) -> aws_cdk.aws_iam.Grant:\n ...", "async def build_secret_index(self):\n pass", "def test_update_container_privilege(self):\n pass", "def configure(self, account_id):\n print(\"Configuring Vault\")\n client = self.connect(VAULT_TOKEN)\n\n # Audit Backend\n if 'syslog/' not in client.sys.list_enabled_audit_devices():\n audit_options = {\n 'log_raw': 'True',\n }\n client.sys.enable_audit_device('syslog', options=audit_options)\n else:\n print(\"audit_backend already created.\")\n\n # Policies\n policies = []\n path = os.path.join(POLICY_DIR, \"*.hcl\")\n for policy in glob.glob(path):\n name = os.path.basename(policy).split('.')[0]\n policies.append(name)\n with open(policy, 'r') as fh:\n client.sys.create_or_update_policy(name, fh.read())\n\n # AWS Authentication Backend\n # Enable AWS auth in Vault\n if 'aws/' not in client.sys.list_auth_methods():\n try:\n client.sys.enable_auth_method('aws')\n except Exception as e:\n raise VaultError(\"Error while enabling auth back end. {}\".format(e))\n else:\n print(\"aws auth backend already created.\")\n\n #Define policies and arn \n arn = 'arn:aws:iam::{}:instance-profile/'.format(account_id)\n\n #For each policy configure the policies on a role of the same name\n for policy in policies:\n client.create_ec2_role(policy,\n bound_iam_instance_profile_arn = arn + policy,\n policies = policy,\n mount_point = 'aws')\n print('Successful write to aws/role/' + policy)\n \n # AWS Secret Backend\n if 'aws/' not in client.sys.list_mounted_secrets_engines():\n try:\n client.sys.enable_secrets_engine('aws')\n except Exception as e:\n raise VaultError('Error while enabling secret back end. 
{}'.format(e))\n else:\n print(\"aws secret backend already created.\")\n\n path = os.path.join(POLICY_DIR, \"*.iam\")\n for iam in glob.glob(path):\n name = os.path.basename(iam).split('.')[0]\n with open(iam, 'r') as fh:\n # if we json parse the file first we can use the duplicate key trick for comments\n client.secrets.aws.create_or_update_role(name, 'iam_user', policy_document = fh.read())", "def generate_secret_code():\n length = game_config['secret_rules']['length']\n secret_choices = game_config['secret_rules']['choices']\n secret = []\n\n for i in range(length):\n secret.append(secret_choices[random.randint(0, length - 1)])\n\n return secret", "def update(cls, name, value):\n\n db = get_db_handle()\n secret = cls.get_instance(name)\n secret_data = secret.data[0] # using backref\n\n pass_phrase = secret_data.pass_phrase\n\n LOG.debug(\"Encrypting new data\")\n encrypted_msg = Crypto.encrypt_AES_GCM(value, pass_phrase)\n\n (kdf_salt, ciphertext, iv, auth_tag) = encrypted_msg\n\n query = db.data_table.update(\n kdf_salt=kdf_salt,\n ciphertext=ciphertext,\n iv=iv,\n auth_tag=auth_tag,\n pass_phrase=pass_phrase,\n ).where(db.data_table.secret_ref == secret)\n\n query.execute()\n\n query = db.secret_table.update(last_update_time=datetime.datetime.now()).where(\n db.secret_table.name == name\n )\n\n query.execute()", "def store_credentials(pwd:str, user: str, client_id: str, secret: str):\n\n pwd, user, client_id, secret = [s.encode() for s in (pwd, user, client_id, secret)]\n\n # 128 only sets the MSb (bit 7) and 255 simply flips the bits\n # MSb is always set, to ensure uniqueness of the delimeter used in the file.\n x = randint(129, 254)\n y = randint(129, 254)\n\n # The checksum is useless to a cracker without the original of\n # either the password or client secret\n checksum = xor_crypt(secret[7::-1]+secret[-1:-9:-1], pwd)\n pwd = xor_crypt(pwd, x)\n secret = xor_crypt(secret, y)\n user = xor_crypt(user, y)\n client_id = xor_crypt(client_id, y)\n secret = xor_crypt(secret, pwd)\n\n with open(\"credentials.bin\", 'wb') as creds:\n # Getting original y requires encrypted password and original x\n # Getting encrpyted password requires original x\n # Getting original x requires the password\n creds.write(b'\\0'.join((\n user,\n client_id,\n secret,\n bytes([reduce(xor, xor_crypt(pwd, x), x),\n reduce(xor, pwd, x^y)\n ]),\n # XOR-ed by 'y' to ensure every byte of the checksum has it's MSb set\n xor_crypt(checksum, y)\n )))", "def create_dev():\n bucket_name = \"issue-label-bot-dev_secrets\"\n blob_name = \"kf-label-bot-dev.2019-12-30.private-key.pem\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n contents = blob.download_as_string().decode()\n\n subprocess.check_call([\"kubectl\", \"-n\", \"label-bot-dev\", \"create\",\n \"secret\", \"generic\",\n \"github-app\",\n f\"--from-literal=kf-label-bot-dev.private-key.pem=\"\n f\"{contents}\"])", "def do_server(wrapping_key_public):\n secret = os.urandom(32)\n logging.info(f'secret: {hexlify(secret)}')\n\n ref_path = 'server-secret-for-reference.bin'\n logging.debug(f'creating {ref_path}')\n with open(ref_path, 'wb') as f:\n f.write(secret)\n\n # generate IV\n iv = os.urandom(12)\n logging.debug(f'iv: {hexlify(iv)}')\n\n # generate 256-bit AES encryption key\n ephemeral_key = os.urandom(32)\n logging.debug(f'ephemeral_key: {hexlify(ephemeral_key)}')\n\n # xor_mask = os.urandom(32)\n xor_mask = b'\\x00' * 32\n logging.debug(f'xor_mask: {hexlify(xor_mask)}')\n\n # xor with 
mask to get transportKey\n transport_key = bytes([ephemeral_key[i] ^ xor_mask[i] for i in range(32)])\n logging.debug(f'transport_key: {hexlify(transport_key)}')\n\n logging.debug(f'wrapping the transport key with the public RSA wrapping key')\n encrypted_transport_key = wrap(wrapping_key_public, transport_key)\n\n logging.debug(f'encrypting the secure secret with the AES ephermeral key')\n encrypted_secret, tag = encrypt(ephemeral_key, iv, secret)\n\n logging.debug(f'encrypted_secret: {hexlify(encrypted_secret)}')\n logging.debug(f'tag: {hexlify(tag)}')\n\n authorizationList = AuthorizationList()\n\n key_description = KeyDescription()\n key_description['keyFormat'] = KM_KEY_FORMAT_RAW\n key_description['keyParams'] = authorizationList\n\n secure_key_wrapper = SecureKeyWrapper()\n secure_key_wrapper['version'] = 0\n secure_key_wrapper['encryptedTransportKey'] = encrypted_transport_key\n secure_key_wrapper['initializationVector'] = iv\n secure_key_wrapper['keyDescription'] = key_description\n secure_key_wrapper['encryptedKey'] = encrypted_secret\n secure_key_wrapper['tag'] = tag\n\n encoded_secure_key_wrapper = encode_secure_key_wrapper(secure_key_wrapper)\n\n return encoded_secure_key_wrapper, xor_mask", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def update_azure_keyvault_secrets_provider_addon_profile(\n self,\n azure_keyvault_secrets_provider_addon_profile: ManagedClusterAddonProfile,\n ) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_SECRET_ROTATION_ENABLED = addon_consts.get(\n \"CONST_SECRET_ROTATION_ENABLED\"\n )\n CONST_ROTATION_POLL_INTERVAL = addon_consts.get(\n \"CONST_ROTATION_POLL_INTERVAL\"\n )\n\n if self.context.get_enable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"true\"\n\n if self.context.get_disable_secret_rotation():\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_SECRET_ROTATION_ENABLED\n ] = \"false\"\n\n if self.context.get_rotation_poll_interval() is not None:\n azure_keyvault_secrets_provider_addon_profile = (\n self.ensure_azure_keyvault_secrets_provider_addon_profile(\n azure_keyvault_secrets_provider_addon_profile\n )\n )\n azure_keyvault_secrets_provider_addon_profile.config[\n CONST_ROTATION_POLL_INTERVAL\n ] = self.context.get_rotation_poll_interval()\n return azure_keyvault_secrets_provider_addon_profile", "def add_secrets(self, id: str, body: dict[str, Any]) -> dict[str, Any]:\n return self.client.post(self._url(\"%s/secrets\" % id), data=body)", "def _update_context(self, context_key: bytes, otp: YubikeyOTP, salt: bytes):\n context = YKSecretContext(\n session_ctr=otp.token.session_ctr,\n usage_ctr=otp.token.usage_ctr,\n salt=salt\n )\n self._context = context.dump(context_key)", "def glance_rename_and_set_private(glance, image, new_name, new_description):\n try:\n 
glance.images.update(image_id=image.id,\n visibility='private',\n name=new_name,\n description=new_description)\n except Exception:\n logger.exception(\"Renaming (-> private) Glance image '%s' [%s] -> '%s' failed\",\n image.name, image.id, new_name)\n return 1\n\n return 0", "def test_secrets_add_already_exist():\n status_code = 409\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n message = \"One of the secrets already exists. No secrets were added.\"\n mock_http_response = Mock(\n status_code=status_code,\n reason=\"Conflict\",\n json=Mock(return_value={\"message\": \"Conflict\"}),\n )\n rs_api_client_mock = Mock()\n rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response))\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\"reana_client.api.client.current_rs_api_client\", rs_api_client_mock):\n result = runner.invoke(\n cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", \"USER=reanauser\"]\n )\n assert message in result.output\n assert result.exit_code == 1", "def _create_or_replace_acl_users(cls, secret_acl, user_ids, session=None):\n\n if user_ids is None:\n return\n\n user_ids = set(user_ids)\n\n now = timeutils.utcnow()\n secret_acl.updated_at = now\n\n for acl_user in secret_acl.acl_users:\n if acl_user.user_id in user_ids: # input user_id already exists\n acl_user.updated_at = now\n acl_user.save(session=session)\n user_ids.remove(acl_user.user_id)\n else:\n acl_user.delete(session=session)\n\n for user_id in user_ids:\n acl_user = secret_acl_user.SecretACLUser(acl_id=secret_acl.id,\n user_id=user_id)\n acl_user.create(session=session)\n\n if secret_acl.id:\n secret_acl.save(session=session)\n else:\n secret_acl.create(session=session)" ]
[ "0.64323485", "0.62295574", "0.6069437", "0.596446", "0.5957099", "0.59304917", "0.5887804", "0.58768106", "0.5779729", "0.577644", "0.57579327", "0.57563096", "0.570285", "0.5653871", "0.56200135", "0.56157786", "0.56119484", "0.55998737", "0.5584007", "0.5539168", "0.553808", "0.5516214", "0.546472", "0.546472", "0.5453201", "0.5451072", "0.5424507", "0.5393309", "0.53867453", "0.5385552", "0.5362907", "0.536234", "0.53500885", "0.5347979", "0.5330109", "0.5327145", "0.5320838", "0.5302482", "0.53014815", "0.529598", "0.5265474", "0.523799", "0.52371514", "0.5236054", "0.5235724", "0.52342427", "0.5232857", "0.5232857", "0.5232857", "0.52310294", "0.5224327", "0.5214742", "0.5214328", "0.51279753", "0.51195794", "0.51176643", "0.5086804", "0.50799054", "0.50769085", "0.50760454", "0.5067019", "0.50648797", "0.5061259", "0.5041381", "0.5013794", "0.5010967", "0.495973", "0.49556747", "0.49504825", "0.49424556", "0.49390918", "0.4934044", "0.4929852", "0.49186686", "0.49080828", "0.4905751", "0.49021533", "0.48877966", "0.48748052", "0.48727193", "0.48638964", "0.48575008", "0.48529977", "0.4850004", "0.48367354", "0.48317066", "0.4829799", "0.48274305", "0.48268315", "0.48222804", "0.48217553", "0.48156467", "0.48125705", "0.48113748", "0.48092028", "0.48074967", "0.4807494", "0.48036838", "0.48027557", "0.48016146" ]
0.5757704
11
Create or replace acl_user
def _create_or_replace_acl_users(cls, secret_acl, user_ids, session=None):

    if user_ids is None:
        return

    user_ids = set(user_ids)

    now = timeutils.utcnow()
    secret_acl.updated_at = now

    for acl_user in secret_acl.acl_users:
        if acl_user.user_id in user_ids:  # input user_id already exists
            acl_user.updated_at = now
            acl_user.save(session=session)
            user_ids.remove(acl_user.user_id)
        else:
            acl_user.delete(session=session)

    for user_id in user_ids:
        acl_user = secret_acl_user.SecretACLUser(acl_id=secret_acl.id,
                                                 user_id=user_id)
        acl_user.create(session=session)

    if secret_acl.id:
        secret_acl.save(session=session)
    else:
        secret_acl.create(session=session)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def new_user(cls, user):\r\n pass", "def set(isamAppliance, name, user_name, type='embedded_ldap', check_mode=False, force=False):\n new_user = True\n ret_obj = ibmsecurity.isam.base.management_authorization.role.get(isamAppliance, name)\n\n if (ret_obj['data']['users'] == None):\n ret_obj['data']['users'] = []\n else:\n for usr in ret_obj['data']['users']:\n if usr['name'] == user_name:\n if usr['type'] == type:\n if force is False:\n return isamAppliance.create_return_object()\n new_user = False\n else: # Replace user with new type\n ret_obj['data']['users'].remove(usr)\n break\n\n if new_user is True:\n ret_obj['data']['users'].append({'name': user_name, 'type': type})\n\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put(\n \"Add user to management authorization role\",\n \"/authorization/roles/{0}/v1\".format(name), ret_obj['data'])", "def new_user(cls, user):\n pass", "def new_user():\n pass", "def create_user(change):\n return change()", "def update_user():", "def insertNewUser(self,user, access_token):\n newUser = UserToken(username=user, user_key = access_token.key, user_secret = access_token.secret)\n newUser.put()", "def update_course_creator_group(caller, user, add):\r\n if add:\r\n auth.add_users(caller, CourseCreatorRole(), user)\r\n else:\r\n auth.remove_users(caller, CourseCreatorRole(), user)", "def addOnUserCreate(call, args=(), kwargs={}, nodeClass='*'):\n pass", "def _create_user(self, new_user):\n new_user = User(user_name=new_user['user_name'], pin=new_user['pin'], user_type='customer')\n self.session.output(new_user.get_user_info(), '\\n[ New user created ]')", "def add_user(self, attrs):\n pass", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def create_or_modify_account(module, idrac, slot_uri, slot_id, empty_slot_id, empty_slot_uri, user_attr):\n generation, firmware_version = idrac.get_server_generation\n msg, response = \"Unable to retrieve the user details.\", {}\n if (slot_id and slot_uri) is None and (empty_slot_id and empty_slot_uri) is not None:\n msg = \"Successfully created user account.\"\n payload = get_payload(module, empty_slot_id, action=\"create\")\n if module.check_mode:\n module.exit_json(msg=\"Changes found to commit!\", changed=True)\n if generation >= 14:\n response = idrac.invoke_request(ATTRIBUTE_URI, \"PATCH\", data={\"Attributes\": payload})\n elif generation < 14:\n xml_payload, json_payload = convert_payload_xml(payload)\n time.sleep(10)\n response = idrac.import_scp(import_buffer=xml_payload, target=\"ALL\", job_wait=True)\n elif (slot_id and slot_uri) is not None:\n msg = \"Successfully updated user account.\"\n payload = get_payload(module, slot_id, action=\"update\")\n xml_payload, json_payload = convert_payload_xml(payload)\n value = compare_payload(json_payload, user_attr)\n if module.check_mode:\n if value:\n module.exit_json(msg=\"Changes found to commit!\", changed=True)\n module.exit_json(msg=\"No changes found to commit!\")\n if not value:\n module.exit_json(msg=\"Requested changes are already present in the user slot.\")\n if generation >= 14:\n response = idrac.invoke_request(ATTRIBUTE_URI, \"PATCH\", data={\"Attributes\": payload})\n elif generation < 14:\n time.sleep(10)\n response = idrac.import_scp(import_buffer=xml_payload, target=\"ALL\", job_wait=True)\n elif (slot_id and slot_uri and empty_slot_id and empty_slot_uri) is None:\n module.fail_json(msg=\"Maximum number of users reached. 
Delete a user account and retry the operation.\")\n return response, msg", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def make_user_admin(connection,user):\r\n with connection:\r\n connection.execute(MAKE_USER_ADMIN,(user,))", "def customise_auth_user_resource(r, tablename):\n\n auth = current.auth\n\n def approve_user(r, **args):\n\n from gluon import redirect\n\n db = current.db\n user = db(db.auth_user.id == r.id).select(limitby = (0, 1)\n ).first()\n org_group_id = user.org_group_id\n if org_group_id:\n # Check if this is a COVID-19 Test Station\n ogtable = current.s3db.org_group\n org_group = db(ogtable.id == org_group_id).select(ogtable.name,\n limitby = (0, 1)\n ).first()\n if org_group and org_group.name == TESTSTATIONS:\n # Custom Approval process\n redirect(URL(c= \"default\", f=\"index\", args=[\"approve\", r.id]))\n\n # Default Approval\n auth.s3_approve_user(user)\n current.session.confirmation = T(\"User Account has been Approved\")\n redirect(URL(args=[r.id, \"roles\"]))\n\n current.s3db.configure(\"auth_user\",\n approve_user = approve_user,\n )", "def customise_auth_user_resource(r, tablename):\n\n auth = current.auth\n\n def approve_user(r, **args):\n\n from gluon import redirect\n\n db = current.db\n user = db(db.auth_user.id == r.id).select(limitby = (0, 1)\n ).first()\n org_group_id = user.org_group_id\n if org_group_id:\n # Check if this is a COVID-19 Test Station\n ogtable = current.s3db.org_group\n org_group = db(ogtable.id == org_group_id).select(ogtable.name,\n limitby = (0, 1)\n ).first()\n if org_group and org_group.name == TESTSTATIONS:\n # Custom Approval process\n redirect(URL(c= \"default\", f=\"index\", args=[\"approve\", r.id]))\n\n # Default Approval\n auth.s3_approve_user(user)\n current.session.confirmation = T(\"User Account has been Approved\")\n redirect(URL(args=[r.id, \"roles\"]))\n\n current.s3db.configure(\"auth_user\",\n approve_user = approve_user,\n )", "def save_user_ref(sender, created, instance, **_):\n if created:\n UserExtend.objects.create(user=instance)\n UserSettings.objects.create(user=instance)", "def update_user_forward(apps, schema_editor):\n group = Group.objects.update_or_create(\n id=1,\n name=\"Administrator\"\n )\n Group.objects.update_or_create(\n id=2,\n name=\"Manager\"\n )\n Group.objects.update_or_create(\n id=3,\n name=\"Leader\"\n )\n Group.objects.update_or_create(\n id=4,\n name=\"Sale\"\n )", "def change_ownership(obj, userid):\n assert isinstance(userid, string_types)\n old_owner = obj.creators[0]\n if userid == old_owner:\n return\n #Remove Owner group from old owner\n obj.local_roles.remove(old_owner, ROLE_OWNER)\n #Add new owner\n obj.local_roles.add(userid, ROLE_OWNER)\n #Set new owner in creators attr - this will also trigger reindex catalog event so keep it last!\n obj.set_field_appstruct({'creators': (userid,)})\n return userid", "def update_user(cursor, username, attr, value):\n if attr not in ['username', 'password', 'email', 'groups']:\n raise ValueError(f\"{attr!r} is not a valid user attribute\")\n if attr == 'password':\n value = argon2.hash(value)\n elif attr == 'groups':\n current = get_usergroups(cursor, username)\n for group in current.difference(value):\n remove_usergroup(cursor, username, group)\n for group in value.difference(current):\n create_usergroup(cursor, username, group)\n return \n\n cursor.execute(f\"\"\"\n UPDATE users\n SET\n {attr} = 
?\n WHERE\n username = ?\n \"\"\", (value, username))", "def create_or_replace_from_model(cls, secret, secret_acl,\n user_ids=None, session=None):\n secret.updated_at = timeutils.utcnow()\n secret_acl.updated_at = timeutils.utcnow()\n secret.save(session=session)\n if secret_acl.id:\n secret_acl.save(session=session)\n else:\n secret_acl.create(session=session)\n\n cls._create_or_replace_acl_users(secret_acl=secret_acl,\n user_ids=user_ids,\n session=session)", "def addUser(self, accountId, username, accesstype, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method addUser\")", "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def createDeveloper(self):\n self.createUser()\n self.user.is_developer = True\n self.user.put()", "def create_admin():\n from pyceo import prompt\n from .manage import create_user\n\n u = User.by_login(u'admin')\n if not u:\n print 'Creating the `admin` user…'\n email = prompt('>>> `admin` email?\\n')\n create_user(u'admin', 'admin', fullname=u'Admin', email=email)\n u = User.by_login(u'admin')\n\n u.add_role(u'admin')\n db.commit()\n return u", "def users_create():", "def create_user(self):\n unique_id = str(uuid.uuid4())\n new_user_properties = {\n \"name\": self.name,\n \"mission_statement\": self.mission_statement,\n \"unique_id\": unique_id,\n \"email\": self.email.lower(),\n \"is_mentor\": True,\n \"is_tutor\": True,\n \"is_visible\": True,\n \"is_available_for_in_person\": True,\n \"is_admin\": True}\n new_user_node = Node.cast(AgoraLabel.USER, new_user_properties)\n try:\n self.graph_db.create(new_user_node)\n except:\n pass\n return new_user_node", "def add_admin_user(firstname, lastname, email, password, pseudo):\n\n async def create_user(firstname: str, lastname: str, email: str, password: str, pseudo: str) -> None:\n await Tortoise.init(config=TORTOISE_ORM)\n user = User(firstname=firstname, lastname=lastname, email=email, pseudo=pseudo, is_admin=True)\n user.set_password(password)\n try:\n await user.save()\n except IntegrityError as e:\n click.secho(f'Unable to create user, reason: {e}', fg='red')\n raise click.Abort()\n await Tortoise.close_connections()\n\n anyio.run(create_user, firstname, lastname, email, password, pseudo)\n click.secho(f'admin user {pseudo} created!', fg='green')", "def test_replace_user(self):\n pass", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def assign_default_role(course_id, user):\r\n role, __ = Role.objects.get_or_create(course_id=course_id, name=\"Student\")\r\n user.roles.add(role)", "def _add_user(user, state):\r\n if not user.is_staff and CourseCreator.objects.filter(user=user).count() == 0:\r\n entry = CourseCreator(user=user, state=state)\r\n entry.save()\r\n return True\r\n\r\n return False", "def existing_user_id(new_user_id, mapp):\n mapp.create_user(\n user=new_user_id, password=1234,\n email=new_user_id + \"@example.com\")\n return 
new_user_id", "def update_tag_user_acl(session, tag_id=None, user_id=None,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False, allow_read=False,\n date_modified=datetime.now(),\n username='system_user'\n ):\n session = validate_session(session)\n user = None\n\n if user_id and tag_id:\n user = session.query(TagUserAccess).\\\n filter(TagUserAccess.user_id == user_id).\\\n filter(TagUserAccess.tag_id == tag_id).first()\n if user:\n try:\n user.allow_install = allow_install\n user.allow_uninstall = allow_uninstall\n user.allow_reboot = allow_reboot\n user.allow_schedule = allow_schedule\n user.allow_wol = allow_wol\n user.allow_snapshot_creation = allow_snapshot_creation\n user.allow_snapshot_removal = allow_snapshot_removal\n user.allow_snapshot_revert = allow_snapshot_revert\n user.allow_tag_creation = allow_tag_creation\n user.allow_tag_removal = allow_tag_removal\n user.allow_read = allow_read\n user.date_modified = date_modified\n session.commit()\n return({\n 'pass': True,\n 'message': 'ACL for User %s was modified for Tag %s' % \\\n (user_id, tag_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to modify ACL for User %s on Tag %s' % \\\n (user_id, tag_id)\n })\n else:\n return({\n 'pass': False,\n 'message': 'Invalid user_id %s and or tag_id' % \\\n (user_id, tag_id)\n })", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n return user", "def create_user(email, password, f_name, l_name):\n pass", "def create_custom_user(sender, instance, signal, created, **kwargs):\n from gpsfun.main.User.models import GPSFunUser\n if created:\n GPSFunUser.objects.create(user=instance)\n instance.gpsfunuser.save()", "def set_role(userid, role, group, request=None):", "def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()", "def post_save_user(instance, raw, created, **kwargs):\n from cms.utils.permissions import get_current_user\n # read current user from thread locals\n creator = get_current_user()\n if not creator or not created or not hasattr(creator, 'pk'):\n return\n \n from cms.models import PageUser\n from django.db import connection\n \n # i'm not sure if there is a workaround for this, somebody any ideas? What\n # we are doing here is creating PageUser on Top of existing user, i'll do it \n # through plain SQL, its not nice, but...\n \n # TODO: find a better way than an raw sql !!\n \n cursor = connection.cursor()\n query = \"INSERT INTO %s (user_ptr_id, created_by_id) VALUES (%d, %d)\" % (\n PageUser._meta.db_table,\n instance.pk, \n creator.pk\n )\n cursor.execute(query) \n cursor.close()", "def add_user(db, user_data):\n username, password, email, position, phone = user_data[:5]\n\n # Set the new user id\n #users = db['user'].find()\n #next_id = max(u['_id'] for u in users) + 1\n\n # Set Access Level. 
1 will be for a user that has some content to view.\n # Default level is 0\n access_level_map = {'D': 3, 'S': 2}\n access_level = access_level_map.get(position, 0)\n\n security_questions = []\n security_answers = []\n\n security_answers_hash = [generate_password_hash(ans)\n for ans in security_answers]\n\n password_hash = generate_password_hash(password)\n\n\n # Create the data JSON\n new_user = db['user'].insert_one({\n 'username': username,\n 'access_level': access_level,\n 'email': email,\n 'position': position,\n 'phone': phone,\n 'security_questions': security_questions,\n 'login_timestamp':str(datetime.datetime.utcnow()),\n 'deleted': False\n })\n\n db['security'].insert_one({\n 'user_id': str(new_user.inserted_id),\n 'password': password_hash,\n 'security_answers': security_answers_hash\n })\n\n # Insert user into DB\n return True", "def new_user(testapp):\n SessionFactory = testapp.app.registry[\"dbsession_factory\"]\n with transaction.manager:\n dbsession = get_tm_session(SessionFactory, transaction.manager)\n new_user = User(username=\"test\", password=pwd_context.hash(\"test\"))\n dbsession.add(new_user)", "def create_user(absolute_uid):\n\n try:\n user = User(absolute_uid=absolute_uid)\n with current_app.session_scope() as session:\n session.add(user)\n session.commit()\n\n except IntegrityError as error:\n current_app.logger.error('IntegrityError. User: {0:d} was not'\n 'added. Full traceback: {1}'\n .format(absolute_uid, error))\n raise", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = 'grant+creator+access@edx.org'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). 
I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()", "def _create_user(userid, **kw):\n\n new_user = User(userid, **kw)\n USERS[new_user.token] = new_user\n return USERS[new_user.token]", "def make_new_user():\n\n new_user = User(\n first_name=request.form['first_name'],\n last_name=request.form['last_name'],\n image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def create_new_user(self):\n username = 'pseudo'\n email = 'carole@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def _create_nsem_user():\n users = User.objects.filter(username=settings.CWWED_NSEM_USER)\n if users.exists():\n user = users[0]\n else:\n user = User.objects.create_user(settings.CWWED_NSEM_USER, password=settings.CWWED_NSEM_PASSWORD)\n group, _ = Group.objects.get_or_create(name=settings.CWWED_NSEM_GROUP)\n perm_names = [\n 'add_{}'.format(NsemPsa._meta.model_name),\n 'add_{}'.format(NamedStormCoveredDataSnapshot._meta.model_name),\n ]\n perms = Permission.objects.filter(codename__in=perm_names)\n # set permission\n user.user_permissions.set(list(perms))\n group.permissions.set(list(perms))\n # add user to group\n group.user_set.add(user)", "def createHost(self):\n self.createUser()\n self.user.host_for = [self.program.scope.key()]\n self.user.put()", "def CreateUser(self, row):\n if 'quota_limit' in row.keys() and row['quota_limit']:\n quota = row['quota_limit']\n else:\n quota = 25000\n if 'pw_hash_function' in row.keys() and row['pw_hash_function']:\n pw_hash_function = row['pw_hash_function']\n else:\n pw_hash_function = None\n if 'suspended' in row.keys() and row['suspended']:\n suspended_flag = row['suspended']\n else:\n suspended_flag = 'FALSE'\n try:\n self.gd_client.CreateUser(\n row['user_name'], row['family_name'], row['given_name'],\n row['password'], suspended=suspended_flag,\n password_hash_function=pw_hash_function, quota_limit=quota)\n row['status'] = 'success'\n except gdata.apps.service.AppsForYourDomainException, e:\n row['status'] = 'fail gdata error code:%s %s'% (\n e.error_code, ERROR_DICT[str(e.error_code)])\n except KeyError:\n print ('user_name, given_name, family_name, password are required\\n'\n 'headers when action is create')\n sys.exit()\n # if user is admin, IP_whistelisted, or change password required, \n # we need to do the following \n if ('admin' not in row.keys() and 'change_pw' not in row.keys()\n and 'ip_whitelisted' not in row.keys()):\n return\n try:\n user_feed = self.gd_client.RetrieveUser(row['user_name'])\n if 'admin' in row.keys() and row['admin']:\n user_feed.login.admin = row['admin']\n else:\n user_feed.login.admin = 'FALSE'\n if 'change_pw' in row.keys() and row['change_pw']:\n user_feed.login.change_password = row['change_pw']\n else:\n user_feed.login.change_password = 'FALSE'\n if 'ip_whitelisted' in row.keys() and row['ip_whitelisted']:\n user_feed.login.ip_whitelisted = row['ip_whitelisted']\n else:\n user_feed.login.ip_whitelisted = 'FALSE'\n self.gd_client.UpdateUser(row['user_name'], user_feed)\n except gdata.apps.service.AppsForYourDomainException, e:\n row['status'] = (\n 'fail: gdata error code:%s %s'%\n (e.error_code, ERROR_DICT[str(e.error_code)]))", "def add_new_user(self, user):\n # print(\"Saving new user\")\n 
self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def create_user_as_admin(self, *args, **kwargs):\n profile = self.create_user(*args, **kwargs)\n profile.make_administrator()\n return profile", "def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='test@test.com')[0]\n user.set_password('testabc123')\n user.save()\n\n return user", "def on_user_create(self, user):", "def test_make_user_acl(self, make_acl_mock):\n zk = zksasl.SASLZkClient()\n zk.make_user_acl('foo', 'rw')\n\n make_acl_mock.assert_called_once_with(\n scheme='sasl', credential='foo', read=True,\n write=True, create=False, delete=False, admin=False\n )", "def update_user_profile(sender, instance, created, **kwargs):\n if created:\n GameplanUser.objects.create(user=instance)\n instance.gameplanuser.save()", "def addAdmin(username, sshId, user, identity):\n if identity:\n env.key_filename = identity\n if user:\n env.user = user\n sudo('adduser --disabled-password --gecos \",,,\" %s' % username)\n sudo('usermod -p \"\" %s' % username)\n sudo('chage -d 0 %s' % username)\n sudo('gpasswd --add %s admin' % username)\n authorizeSshKey(username, sshId)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def add_user_to_group(user, group):\n Command.run(['usermod', '-a', '-G', user, group])", "def create_admin():\n admin = models.User(username= 'gallery_admin', email='galleryblockchain@gmail.com', address='#0000' , password =bcrypt.generate_password_hash('toledano',\n current_app.config.get('BCRYPT_LOG_ROUNDS')).decode('utf-8'), admin=True)\n admin.save()", "def create_admin():\n db.session.add(User(email='ad@min.com', password='admin', admin=True))\n db.session.commit()", "def create_admin():\n db.session.add(User(email='ad@min.com', password='admin', admin=True))\n db.session.commit()", "def create_acl(self, context, sg):\n self.security_group_driver.create_acl(context, sg)", "def fusion_api_add_user(self, body, api=None, headers=None):\n return self.user.create(body, api, headers)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)", "def create_user(self, conn, name, password, group):\n user = conn.user.allocate(name, password, \"\", [group])\n return user", "def do_createuser(self, *args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the 
user\r\n # manually. This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def promote_user(username):\n user = User.get_user_by_username(username)\n user.is_admin = True\n user.save()", "def new_user(global_config, timestamped_email, id_api):\n yield id_api.create_user_if_not_exists(timestamped_email, global_config.users.default.password)", "def newuser(self, username, password,\n force_password_change_at_next_login_req=False,\n useusernameascn=False, userou=None, surname=None, givenname=None,\n initials=None, profilepath=None, scriptpath=None, homedrive=None,\n homedirectory=None, jobtitle=None, department=None, company=None,\n description=None, mailaddress=None, internetaddress=None,\n telephonenumber=None, physicaldeliveryoffice=None, sd=None,\n setpassword=True, uidnumber=None, gidnumber=None, gecos=None,\n loginshell=None, uid=None, nisdomain=None, unixhome=None,\n smartcard_required=False):\n\n displayname = \"\"\n if givenname is not None:\n displayname += givenname\n\n if initials is not None:\n displayname += ' %s.' % initials\n\n if surname is not None:\n displayname += ' %s' % surname\n\n cn = username\n if useusernameascn is None and displayname != \"\":\n cn = displayname\n\n user_dn = \"CN=%s,%s,%s\" % (cn, (userou or \"CN=Users\"), self.domain_dn())\n\n dnsdomain = ldb.Dn(self, self.domain_dn()).canonical_str().replace(\"/\", \"\")\n user_principal_name = \"%s@%s\" % (username, dnsdomain)\n # The new user record. Note the reliance on the SAMLDB module which\n # fills in the default informations\n ldbmessage = {\"dn\": user_dn,\n \"sAMAccountName\": username,\n \"userPrincipalName\": user_principal_name,\n \"objectClass\": \"user\"}\n\n if smartcard_required:\n ldbmessage[\"userAccountControl\"] = str(dsdb.UF_NORMAL_ACCOUNT |\n dsdb.UF_SMARTCARD_REQUIRED)\n setpassword = False\n\n if surname is not None:\n ldbmessage[\"sn\"] = surname\n\n if givenname is not None:\n ldbmessage[\"givenName\"] = givenname\n\n if displayname != \"\":\n ldbmessage[\"displayName\"] = displayname\n ldbmessage[\"name\"] = displayname\n\n if initials is not None:\n ldbmessage[\"initials\"] = '%s.' 
% initials\n\n if profilepath is not None:\n ldbmessage[\"profilePath\"] = profilepath\n\n if scriptpath is not None:\n ldbmessage[\"scriptPath\"] = scriptpath\n\n if homedrive is not None:\n ldbmessage[\"homeDrive\"] = homedrive\n\n if homedirectory is not None:\n ldbmessage[\"homeDirectory\"] = homedirectory\n\n if jobtitle is not None:\n ldbmessage[\"title\"] = jobtitle\n\n if department is not None:\n ldbmessage[\"department\"] = department\n\n if company is not None:\n ldbmessage[\"company\"] = company\n\n if description is not None:\n ldbmessage[\"description\"] = description\n\n if mailaddress is not None:\n ldbmessage[\"mail\"] = mailaddress\n\n if internetaddress is not None:\n ldbmessage[\"wWWHomePage\"] = internetaddress\n\n if telephonenumber is not None:\n ldbmessage[\"telephoneNumber\"] = telephonenumber\n\n if physicaldeliveryoffice is not None:\n ldbmessage[\"physicalDeliveryOfficeName\"] = physicaldeliveryoffice\n\n if sd is not None:\n ldbmessage[\"nTSecurityDescriptor\"] = ndr_pack(sd)\n\n ldbmessage2 = None\n if any(map(lambda b: b is not None, (uid, uidnumber, gidnumber, gecos,\n loginshell, nisdomain, unixhome))):\n ldbmessage2 = ldb.Message()\n ldbmessage2.dn = ldb.Dn(self, user_dn)\n if uid is not None:\n ldbmessage2[\"uid\"] = ldb.MessageElement(str(uid), ldb.FLAG_MOD_REPLACE, 'uid')\n if uidnumber is not None:\n ldbmessage2[\"uidNumber\"] = ldb.MessageElement(str(uidnumber), ldb.FLAG_MOD_REPLACE, 'uidNumber')\n if gidnumber is not None:\n ldbmessage2[\"gidNumber\"] = ldb.MessageElement(str(gidnumber), ldb.FLAG_MOD_REPLACE, 'gidNumber')\n if gecos is not None:\n ldbmessage2[\"gecos\"] = ldb.MessageElement(str(gecos), ldb.FLAG_MOD_REPLACE, 'gecos')\n if loginshell is not None:\n ldbmessage2[\"loginShell\"] = ldb.MessageElement(str(loginshell), ldb.FLAG_MOD_REPLACE, 'loginShell')\n if unixhome is not None:\n ldbmessage2[\"unixHomeDirectory\"] = ldb.MessageElement(\n str(unixhome), ldb.FLAG_MOD_REPLACE, 'unixHomeDirectory')\n if nisdomain is not None:\n ldbmessage2[\"msSFU30NisDomain\"] = ldb.MessageElement(\n str(nisdomain), ldb.FLAG_MOD_REPLACE, 'msSFU30NisDomain')\n ldbmessage2[\"msSFU30Name\"] = ldb.MessageElement(\n str(username), ldb.FLAG_MOD_REPLACE, 'msSFU30Name')\n ldbmessage2[\"unixUserPassword\"] = ldb.MessageElement(\n 'ABCD!efgh12345$67890', ldb.FLAG_MOD_REPLACE,\n 'unixUserPassword')\n\n self.transaction_start()\n try:\n self.add(ldbmessage)\n if ldbmessage2:\n self.modify(ldbmessage2)\n\n # Sets the password for it\n if setpassword:\n self.setpassword((\"(distinguishedName=%s)\" %\n ldb.binary_encode(user_dn)),\n password,\n force_password_change_at_next_login_req)\n except:\n self.transaction_cancel()\n raise\n else:\n self.transaction_commit()", "def update_user(id):\n pass", "def add_tag_user_acl(session, tag_id=None, user_id=None, allow_read=False,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False,\n date_created=datetime.now(), date_modified=datetime.now(),\n username='system_user'\n ):\n session = validate_session(session)\n date_created=datetime.now()\n user_for_tag_exists = session.query(TagUserAccess).\\\n filter_by(user_id=user_id).\\\n filter_by(tag_id=tag_id).first()\n if user_id and tag_id and not user_for_tag_exists:\n try:\n add_acl = TagUserAccess(tag_id, user_id=user_id,\n allow_read=allow_read, allow_install=allow_install,\n 
allow_uninstall=allow_uninstall, allow_reboot=allow_reboot,\n allow_schedule=allow_schedule, allow_wol=allow_wol,\n allow_snapshot_creation=allow_snapshot_creation,\n allow_snapshot_removal=allow_snapshot_removal,\n allow_snapshot_revert=allow_snapshot_revert,\n allow_tag_creation=allow_tag_creation,\n allow_tag_removal=allow_tag_removal,\n date_created=date_created, date_modified=date_modified\n )\n session.add(add_acl)\n session.commit()\n return({\n 'pass': True,\n 'message': 'User ACL %s added for Tag %s' % \\\n (user_id, tag_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to add ACL for User %s on Tag %s:%s' % \\\n (user_id, tag_id, e)\n })", "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def create(cls, sender, instance, created, **kdws):\n if created:\n username = helpers.make_username(instance.first_name, instance.last_name, instance.email)\n user = User(username=username)\n user.save()\n user = User.objects.get(username=username)\n instance.user = user\n instance.save()", "async def r_create_user(*_, role: str = \"USER\") -> User:\n # create api object\n password = token_hex(10)\n # save to db\n user = await User.create(\n key=token_bytes(32),\n username=token_hex(10),\n password_hash=ARGON.hash(password),\n role=role,\n created=time(),\n )\n # set password field on user to pass them their password 1 time\n user.password = password\n # return api object to resolver\n return user", "def cli_create(dbfile, username, email, password, group):\n with atomic(dbfile) as cursor:\n create_user(cursor, username=username, password=password, \n email=email, groups=group)\n click.echo(f\"Created user {username!r} with password {password!r}\")", "def define_user(self, username, password, role=\"Operator\"):\n\n self._db_manager.create_user(username, password, role)", "def register_user():\n pass", "def create_user_vault(sender, instance, created, **kwargs):\n if created:\n Nonce.objects.create(user=instance)", "def createOrgAdmin(self, org):\n self.createProfile()\n self.profile.org_admin_for = [org.key()]\n self.profile.put()", "def make_user(self, name, snowflake):\n to_exec = \"INSERT INTO users (snowflake_pk, username, balance) VALUES(%s, %s, %s)\"\n self.__cursor.execute(to_exec, (str(snowflake), name, '0'))\n self.__connection.commit()", "def add_node_user_acl(session, node_id=None, user_id=None, allow_read=False,\n allow_install=False, allow_uninstall=False, allow_reboot=False,\n allow_schedule=False, allow_wol=False, allow_snapshot_creation=False,\n allow_snapshot_removal=False, allow_snapshot_revert=False,\n allow_tag_creation=False, allow_tag_removal=False,\n date_created=datetime.now(), date_modified=datetime.now(),\n username='system_user'\n ):\n session = validate_session(session)\n date_created=datetime.now()\n user_for_node_exists = session.query(NodeUserAccess).\\\n filter(NodeUserAccess.user_id == user_id).\\\n filter(NodeUserAccess.node_id == node_id).first()\n if user_id and node_id and not user_for_node_exists:\n try:\n add_acl = NodeUserAccess(node_id, user_id=user_id,\n allow_read=allow_read, allow_install=allow_install,\n allow_uninstall=allow_uninstall, allow_reboot=allow_reboot,\n allow_schedule=allow_schedule, allow_wol=allow_wol,\n allow_snapshot_creation=allow_snapshot_creation,\n allow_snapshot_removal=allow_snapshot_removal,\n 
allow_snapshot_revert=allow_snapshot_revert,\n allow_tag_creation=allow_tag_creation,\n allow_tag_removal=allow_tag_removal,\n date_created=date_created, date_modified=date_modified\n )\n session.add(add_acl)\n session.commit()\n return({\n 'pass': True,\n 'message': 'User ACL %s added for Node %s' % \\\n (user_id, node_id)\n })\n except Exception as e:\n session.rollback()\n return({\n 'pass': False,\n 'message': 'Failed to add ACL for User %s on Node %s:%s' % \\\n (user_id, node_id, e)\n })", "def test_user_id_role_put(self):\n pass", "def grant_access(acl, list_to_edit):\n if request.POST[list_to_edit]:\n datastore_object = None\n if request.POST[list_to_edit].startswith('user'):\n datastore_object = models.UserProfile.load(request.POST[list_to_edit])\n else:\n datastore_object = models.UserGroup.get_by_id(\n int(request.POST[list_to_edit]))\n if datastore_object.key() not in acl.__getattribute__(list_to_edit):\n acl.__getattribute__(list_to_edit).append(datastore_object.key())", "def create_admin():\n db.session.add(User(\n email=\"ad@min.com\",\n password=\"admin\",\n admin=True,\n confirmed=True)\n )\n db.session.commit()", "def add_admin(user):\n _add_owner(\n _lookup_user(user).biv_id,\n _add_model(pam.Admin())\n )", "def add_user_with_status_granted(caller, user):\r\n if _add_user(user, CourseCreator.GRANTED):\r\n update_course_creator_group(caller, user, True)", "def do_user_create(cs, args):\n cs.users.create(args.username, args.password, args.email, args.realname,\n args.comment)\n print(\"Create user '%s' successfully.\" % args.username)", "def create_new_user():\n return get_user_model().objects.create_user(\n email='test@gmail.com',\n password='test@londodnjisdjfois',\n username='tempusername'\n )", "def create_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass", "def test_put_change_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(new_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def create(self, validated_data):\n user = super(UserSerializer, self).create(validated_data)\n for i in user.groups.all():\n if i.accountants.exists():\n company = get_object_or_404(\n models.Company, pk=i.accountants.all().first().id)\n assign_perm(\"change_user\", company.admins, user)\n assign_perm(\"view_user\", company.admins, user)\n assign_perm(\"delete_user\", company.admins, user)\n assign_perm(\"change_user\", user, user)\n assign_perm(\"view_user\", user, user)\n assign_perm(\"delete_user\", user, user)\n\n user.user_permissions.add(\n Permission.objects.get(name='Can add sale'))\n user.user_permissions.add(\n Permission.objects.get(name='Can delete sale'))\n user.user_permissions.add(\n Permission.objects.get(name='Can add purchase'))\n user.user_permissions.add(\n Permission.objects.get(name='Can change sale'))\n user.user_permissions.add(\n Permission.objects.get(name='Can change purchase'))\n user.user_permissions.add(\n Permission.objects.get(name='Can delete purchase'))\n user.user_permissions.add(\n Permission.objects.get(name='Can add media'))\n user.user_permissions.add(\n Permission.objects.get(name='Can delete media'))\n user.set_password(validated_data['password'])\n user.save()\n return user", "def add_user(user: dict):\n new_user = [user]\n 
insert_into_table('users', new_user)" ]
[ "0.6455713", "0.6377649", "0.63585895", "0.6333561", "0.62336445", "0.61381245", "0.6114977", "0.6101846", "0.60849553", "0.60306436", "0.6008215", "0.6007202", "0.5928761", "0.59142447", "0.5895722", "0.5890398", "0.5890398", "0.5881839", "0.5862956", "0.5856963", "0.58470154", "0.5834595", "0.58268", "0.58264667", "0.5815935", "0.5812793", "0.57759434", "0.57691693", "0.5749321", "0.57302743", "0.5725254", "0.5721432", "0.57161856", "0.57122606", "0.57121915", "0.5709659", "0.57037073", "0.5700631", "0.56998664", "0.56952006", "0.56919146", "0.567961", "0.56722784", "0.5669758", "0.5661074", "0.5653399", "0.5653283", "0.5649894", "0.5648657", "0.56467205", "0.5642661", "0.5642455", "0.5636509", "0.56357825", "0.5633014", "0.5628212", "0.5626979", "0.56233346", "0.5622992", "0.5621321", "0.5612916", "0.56129116", "0.56129116", "0.56129116", "0.56100315", "0.56093204", "0.56088024", "0.56088024", "0.5604971", "0.56034565", "0.5597174", "0.55968726", "0.5593929", "0.55903625", "0.55877775", "0.5586972", "0.5585432", "0.55846554", "0.558054", "0.5571293", "0.5569816", "0.55697423", "0.5568093", "0.5562243", "0.55588585", "0.55576634", "0.55563915", "0.55551565", "0.5550164", "0.5548762", "0.5542005", "0.5539399", "0.55357087", "0.55353683", "0.5530083", "0.5522663", "0.5521947", "0.55150026", "0.5513895", "0.5511119" ]
0.6061465
9
Delete acl in Secret
def delete_acls_for_secret_model(cls, secret, session=None):
    cls.db_repo.delete_acls_for_secret(secret, session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def delete_acl(self, sg):\n self.security_group_driver.delete_acl(sg)", "def delete(key, **kwargs):\n cluster_call(\n \"secret_delete\",\n key=key,\n **kwargs,\n confirm=f\"Delete secret {key}\",\n prefix=f\"Deleting secret {key}...\",\n postfix=\"deleted.\",\n )", "def delete_acls_for_secret(cls, secret, session=None):\n session = cls.get_session(session=session)\n\n for entity in secret.secret_acls:\n entity.delete(session=session)", "def DeleteAclSample():\n client = CreateClient()\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc)\n acl_entry = gdata.docs.data.AclEntry(\n scope=gdata.acl.data.AclScope(value='user@example.com', type='user'),\n role=gdata.acl.data.AclRole(value='reader'),\n )\n acl_entry = client.AddAclEntry(doc, acl_entry)\n client.DeleteAclEntry(acl_entry)", "def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)", "def test_delete_acl(self, env):\n # Create ACL Expressions\n self.suite_logger.debug(\"Create ACL Expressions\")\n expressions = [(1, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01'),\n (2, 'SrcMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:02:02:02')]\n env.switch[1].ui.create_acl(expressions=expressions)\n # Verify ACL Expression\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n # Verify first expression has been added\n expr_1 = {\"expressionId\": expressions[0][0],\n \"field\": expressions[0][1],\n \"mask\": expressions[0][2],\n \"data\": expressions[0][3]\n }\n assert expr_1 in expressions_table, \\\n \"Expression {0} was not added\".format(expressions[0])\n # Verify second expression has been added\n expr_2 = {\"expressionId\": expressions[1][0],\n \"field\": expressions[1][1],\n \"mask\": expressions[1][2],\n \"data\": expressions[1][3]\n }\n assert expr_2 in expressions_table,\\\n \"Expression {0} was not added\".format(expressions[1])\n # Delete Expression\n self.suite_logger.debug(\"Delete ACL Expression\")\n env.switch[1].ui.delete_acl(expression_ids=[(2, 'SrcMac'), ])\n # Verify Expression has been deleted\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n assert expr_2 not in expressions_table, \\\n \"Expression {0} was not deleted\".format(expressions[1])\n\n # Create ACL Actions\n self.suite_logger.debug(\"Create ACL Actions\")\n actions = [(1, 'Drop', ''),\n (2, 'Count', '')]\n env.switch[1].ui.create_acl(actions=actions)\n # Verify ACL Action\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n # Verify first action has been added\n act_1 = {\"actionId\": actions[0][0],\n \"action\": actions[0][1],\n \"param\": actions[0][2]\n }\n assert act_1 in actions_table, \"Action {0} was not added\".format(actions[0])\n # Verify second action has been added\n act_2 = {\"actionId\": actions[1][0],\n \"action\": actions[1][1],\n \"param\": actions[1][2]\n }\n assert act_2 in actions_table, \"Action {0} was not added\".format(actions[1])\n # Delete Action\n self.suite_logger.debug(\"Delete ACL Action\")\n env.switch[1].ui.delete_acl(action_ids=[(2, 'Count'), ])\n # Verify Action has been deleted\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n assert act_2 not in actions_table, \"Action {0} was not deleted\".format(actions[1])\n\n # Create ACL Rule\n self.suite_logger.debug(\"Create ACL Rule\")\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), ]\n env.switch[1].ui.create_acl(ports=[1, ], rules=rules)\n # Verify ACL Rule has 
been added\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n rule = {\"ruleId\": rules[0][0],\n \"expressionId\": rules[0][1],\n \"actionId\": rules[0][2],\n \"stage\": rules[0][3],\n \"enabled\": rules[0][4],\n \"priority\": rules[0][5]\n }\n assert rule in rules_table, \"Rule {0} was not added\".format(rules[0])\n # Delete Rule\n self.suite_logger.debug(\"Delete ACL Rule\")\n env.switch[1].ui.delete_acl(ports=[1, ], rule_ids=[1, ])\n # Verify Rule has been deleted\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n assert rule not in rules_table, \"Rule {0} was not deleted\".format(rules[0])", "def post_access_control_list_delete(self, resource_id, resource_dict):\n pass", "def pre_access_control_list_delete(self, resource_id):\n pass", "def test_remove_authz_wrong(self):\n self.test_add_authz()\n self.app.delete(\"/config/authorize?operation=config\", status=400)\n self.app.delete(\"/config/authorize?dn=/DN=a.test.user\", status=204)", "def delete_bucket_acl(self, bucket, user):\n msg = \"delete_bucket_acl not implemented\"\n raise NotImplementedError(msg)", "def delete_secret_link(link_id):\n\n Secret_Link.objects.filter(link_id=link_id).delete()", "def delete_remote_access_session(arn=None):\n pass", "def deleteSecret(self, clientIP, not_before):\n\n return self._secret_table.delete_item(ip_address=clientIP,not_before=not_before)", "def delete_secret_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n secret_name = args['secret_name']\n\n response = client.delete_secret_request(vault_name, secret_name)\n\n outputs = copy.deepcopy(response)\n outputs['deletedDate'] = convert_timestamp_to_readable_date(\n outputs['deletedDate'])\n outputs['scheduledPurgeDate'] = convert_timestamp_to_readable_date(\n outputs['scheduledPurgeDate'])\n\n readable_response = copy.deepcopy(outputs)\n outputs['attributes'] = convert_time_attributes_to_iso(outputs['attributes'])\n outputs[VAULT_NAME_CONTEXT_FIELD] = vault_name\n\n readable_response['secretId'] = readable_response.pop('id')\n readable_output = tableToMarkdown(f'Delete {secret_name}',\n readable_response,\n ['secretId', 'recoveryId', 'deletedDate',\n 'scheduledPurgeDate'], removeNull=True,\n headerTransform=pascalToSpace)\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='recoveryId',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def delete_bucketlist():\n pass", "def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)", "def remove_access(acl, list_to_edit):\n post_key = '%s_remove_' % list_to_edit\n removal_keys = [k for k in request.POST.keys() if k.startswith(post_key)]\n for key in removal_keys:\n model_type = models.UserGroup\n if list_to_edit.startswith('user'):\n model_type = models.UserProfile\n key_id = int(key.replace(post_key, ''))\n datastore_object = model_type.get_by_id(key_id)\n acl.__getattribute__(list_to_edit).remove(datastore_object.key())", "def test_delete_namespaced_role(self):\n pass", "def deleteSecret(self, clientIP, not_before):\n\n return self._secretdb.execute('delete from %s where ip_address=:ip_address and not_before=:not_before' % self._table_name,\n {'ip_address': ip_address,\n 'not_before': not_before})", "def delete_bucket_encryption(Bucket=None):\n pass", "def delete(self, policy_name):\n path = 
self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def test_delete_hyperflex_local_credential_policy(self):\n pass", "def test_delete_hyperflex_cluster_storage_policy(self):\n pass", "def test_delete_cluster_policy(self):\n pass", "def delete_access_list(self, loadbalancer):\n return loadbalancer.delete_access_list()", "def delete(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.delete(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )", "def delete_acl_rule(self, sgr):\n self.security_group_driver.delete_acl_rule(sgr)", "def delete():", "def main_role_delete(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n id_ = opts[\"id\"]\n client.delete_role(opts[\"formation\"], id_)\n logger.info(f\"Deleted role with id=\\\"{id_}\\\"\")", "def test_remove_authz(self):\n self.test_add_authz()\n self.app.delete(\"/config/authorize?dn=/DN=a.test.user&operation=config\",\n status = 204\n )\n\n audits = Session.query(ConfigAudit).all()\n self.assertEqual(2, len(audits))\n\n authz = Session.query(AuthorizationByDn).get(('/DN=a.test.user', 'config'))\n self.assertEqual(None, authz)", "def revoke_secret_prefix(self, prefix):\n client = self.connect(VAULT_TOKEN)\n client.sys.revoke_secret_prefix(prefix)", "def _delete_rights(self):\n for right in self.rights:\n right.delete()", "def test_delete_cluster_role(self):\n pass", "def resetSecret(self):\n self.secret = str(uuid())\n self.put()", "def delete_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover", "def test_delete_cluster_policy_binding(self):\n pass", "def delete_vlan_acl(self, vlan, acl):\n raise NotImplementedError # pragma: no cover", "def s3_delete_data(self):\n\n self.k.delete()", "def test_delete_hyperflex_vcenter_config_policy(self):\n pass", "def test_vault_delete_vault_section(self):\n pass", "def delete_bucket_policy(Bucket=None):\n pass", "def manage_removeSharedSecret(self, REQUEST):\n self._shared_secret = None\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'Shared+secret+removed.')\n )", "def test_delete_hyperflex_sys_config_policy(self):\n pass", "def test_delete_hyperflex_ucsm_config_policy(self):\n pass", "def _delete_all_acls(self):\n for acl_ref in self.created_entities['acl']:\n entity_ref = acl_ref.replace(\"/acl\", \"\")\n blank_acl_entity = self.barbicanclient.acls.create(\n entity_ref=entity_ref)\n blank_acl_entity.remove()", "def delete_access_list(self):\n return self.manager.delete_access_list(self)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def test_delete_o_auth_access_token(self):\n pass", "def delete_permissions(queue_url, label):\n client = boto3.client('sqs')\n try:\n client.remove_permission(QueueUrl=queue_url, Label=label)\n except ClientError as e:\n if e.response['Error']['Code'] != 'InvalidParameterValue':\n raise\n # We are failing silently since the label, which we 
would like to delete, does not exist", "def delete_account_key(configuration):\n os.remove(configuration.cm_key)", "def delete(cls, name):\n\n secret = cls.get_instance(name)\n secret.delete_instance(recursive=True)", "def test_delete_role(self):\n pass", "def test_dashboards_v2_delete_share(self):\n pass", "def terraform_destroy():\n return subprocess.call([\n \"terraform\",\n \"destroy\",\n \"-var-file=terraform/aws/security.tfvars\",\n \"terraform/aws\"\n ])", "def manage_clearSecrets(self, REQUEST):\n manager = getUtility(IKeyManager)\n manager.clear()\n manager.rotate()\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'All+secrets+cleared.')\n )", "def rbac_policy_delete(request, policy_id):\n neutronclient(request).delete_rbac_policy(policy_id)", "def delPermission(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deletePermission(request[\"admin_username\"],request[\"perm_name\"])", "def __del__(self):\n self.token_revoke()", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def access_info_delete(context, storage_id):\n _access_info_get_query(context). \\\n filter_by(storage_id=storage_id).delete()", "def remove_permission_from_blob(bucket_name, blob_name, role_type, member_type):\n\n # initialize client, get bucket, & get blob\n _, _, blob = create_client(bucket_name, blob_name)\n \n # get member type\n member = get_member_blob_level(member_type, blob)\n \n # revoke role from member\n revoke_role_blob_level(role_type, member)\n\n blob.acl.save()\n\n print(\n \"removed permission for {} to {} from blob {} in bucket {}\".format(\n member_type, role_type, blob_name, bucket_name\n )\n )", "def bdev_crypto_delete(client, name):\n params = {'name': name}\n return client.call('bdev_crypto_delete', params)", "def revoke_api_access(application):\n try:\n file = open(PATH + '/../DB/access.json', 'r')\n accessData = json.load(file)\n if (application in accessData):\n accessData.pop(application, None)\n\n with open(PATH + '/../DB/access.json', 'w') as f:\n f.write(json.dumps(accessData, indent=4, sort_keys=True)) \n except:\n raise", "def test_delete_o_auth_client_authorization(self):\n pass", "def delete(self, key):", "def test_delete_o_auth_authorize_token(self):\n pass", "def test_delete_namespaced_policy(self):\n pass", "def vault_delete(self, vault_delete):\n self._vault_delete = vault_delete", "def test_delete_hyperflex_cluster_network_policy(self):\n pass", "def test_delete_cluster_role_binding(self):\n pass", "def clear(cls):\n\n db = get_db_handle()\n for secret in db.secret_table.select():\n secret.delete_instance(recursive=True)", "def forget_secret(self, public_uid: bytes):\n assert self._state is not None, 'Unseal the vault first'\n self._state.pop(public_uid)", "def test_delete_hyperflex_node_config_policy(self):\n pass", "def delete_infrastructure(aws_key, aws_secret):\n # Create boto3 clients for AWS resources.\n ec2_client, _, iam_client, redshift_client = create_clients(\n aws_key, aws_secret\n )\n # Get the clusters properties.\n cluster_properties = get_cluster_properties(redshift_client)\n # Clean up resources.\n redshift_client.delete_cluster(\n ClusterIdentifier=IDENTIFIER, SkipFinalClusterSnapshot=True\n )\n iam_client.detach_role_policy(\n RoleName=IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadonlyAccess\"\n 
)\n iam_client.delete_role(RoleName=IAM_ROLE_NAME)", "def delete_conf(src_ip):\n return delete_route(src_ip)", "def test_delete(self):\n self.basic_login()\n cassette_name = self.cassette_name(\"delete\")\n with self.recorder.use_cassette(cassette_name):\n auth = self.gh.authorize(\n username=self.user,\n password=self.password,\n scopes=[\"gist\"],\n note=\"testing github3.py\",\n )\n assert isinstance(auth, github3.auths.Authorization)\n assert auth.delete() is True", "def test_vault_delete_vault_item(self):\n pass", "def remove_permission_from_bucket(bucket_name, role_type, member_type):\n\n # initialize client & get bucket\n _, bucket, _ = create_client(bucket_name)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n \n # get member type\n member_value = get_member_bucket_level(member_type)\n\n # get role type\n role_value = get_role_bucket_level(role_type)\n\n for binding in policy.bindings:\n # print(binding)\n if binding[\"role\"] == role_value and binding.get(\"condition\") is None:\n # revoke role from member\n binding[\"members\"].discard(member_value)\n\n bucket.set_iam_policy(policy)\n\n print(\"removed {} with role {} from {}\".format(member_value, role_value, bucket_name))", "def delete(id):\n db = core.connect()\n permIds = [perm[\"_id\"] for perm in permission.permissionsForStream(id)]\n [permission.delete(permId) for permId in permIds]\n del db[id]", "async def erase(self, guild: discord.Guild):\n role = await self.get_role(guild=guild)\n if role:\n await role.delete()", "def test_delete_bucket(self):\n pass", "def revoke_access_token(cls, jti: str) -> None:\n redis = cls._conn_redis(cls)\n expired_time = int(timedelta(minutes=cls._ACCESS_TOKEN_EXPIRES).total_seconds())\n redis.setex(jti,expired_time,'true')", "def test_delete_namespaced_role_binding(self):\n pass", "def delete(self):\n\n nodeip = request.form.get(\"ip\")\n nodeflag = request.form.get(\"flag\")\n force = True if request.form.get(\"force\") in (\"true\", \"True\", True) else False\n if g.auth:\n return g.swarm_node.rm(nodeip, nodeflag, force)\n else:\n res = {\"msg\": \"Authentication failed, permission denied.\", \"code\": 403}\n logger.warn(res)\n return res, 403", "def del_ro(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)", "async def delete(self, key: str):", "def delete_volumeaccessright_record( vac ):\n \n principal_id = vac.owner_id.email \n volume_name = vac.volume.name \n \n try:\n observer_core.ensure_volume_access_right_absent( principal_id, volume_name )\n except Exception, e:\n traceback.print_exc()\n logger.error(\"Failed to revoke access from %s to %s\" % (principal_id, volume_name))\n raise e\n \n return True", "def test_create_describe_delete_acls(kafka_admin_client):\n\n # Check that we don't have any ACLs in the cluster\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0\n\n # Try to add an ACL\n acl = ACL(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n result = kafka_admin_client.create_acls([acl])\n\n assert len(result[\"failed\"]) == 0\n assert len(result[\"succeeded\"]) == 1\n\n # Check that we can list the ACL we created\n acl_filter = ACLFilter(\n principal=None,\n 
host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n acls, error = kafka_admin_client.describe_acls(acl_filter)\n\n assert error is NoError\n assert len(acls) == 1\n\n # Remove the ACL\n delete_results = kafka_admin_client.delete_acls(\n [\n ACLFilter(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n ]\n )\n\n assert len(delete_results) == 1\n assert len(delete_results[0][1]) == 1 # Check number of affected ACLs\n\n # Make sure the ACL does not exist in the cluster anymore\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=\"*\",\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0", "def test_create_describe_delete_acls(kafka_admin_client):\n\n # Check that we don't have any ACLs in the cluster\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0\n\n # Try to add an ACL\n acl = ACL(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n result = kafka_admin_client.create_acls([acl])\n\n assert len(result[\"failed\"]) == 0\n assert len(result[\"succeeded\"]) == 1\n\n # Check that we can list the ACL we created\n acl_filter = ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n acls, error = kafka_admin_client.describe_acls(acl_filter)\n\n assert error is NoError\n assert len(acls) == 1\n\n # Remove the ACL\n delete_results = kafka_admin_client.delete_acls(\n [\n ACLFilter(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n ]\n )\n\n assert len(delete_results) == 1\n assert len(delete_results[0][1]) == 1 # Check number of affected ACLs\n\n # Make sure the ACL does not exist in the cluster anymore\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=\"*\",\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0", "def revoke_secret(self, lease_id):\n client = self.connect(VAULT_TOKEN)\n client.sys.revoke_secret(lease_id)", "def delete(self, cache_key):\r\n pass", "def test_delete_hyperflex_proxy_setting_policy(self):\n pass", "def locked_delete(self):\n self._multistore._delete_credential(self._key)", "def delete(cls, aws_cloud_account_id: str):\n\t\tpass", "def _delete_credential(self, key):\n try:\n del self._data[key]\n except KeyError:\n pass\n self._write()", "def test_aws_service_api_keypair_delete(self):\n pass", "def test_ipam_roles_delete(self):\n pass", "def delete_image_permissions(Name=None, SharedAccountId=None):\n pass", "def dangerously_delete(self, bento_name, 
bento_version):" ]
[ "0.66316605", "0.65926045", "0.64347994", "0.6348736", "0.6199564", "0.61836725", "0.61462086", "0.61307585", "0.6130368", "0.60911834", "0.6086925", "0.6073761", "0.6018572", "0.6013481", "0.60084015", "0.59860677", "0.593387", "0.58928955", "0.58561933", "0.5819227", "0.5788971", "0.57880604", "0.5766235", "0.5764708", "0.5759262", "0.5758634", "0.5745716", "0.57389873", "0.5733413", "0.57003844", "0.5697893", "0.56976515", "0.5696765", "0.5688598", "0.56879985", "0.5677999", "0.5674889", "0.5673151", "0.5671267", "0.5657892", "0.56567127", "0.5656255", "0.56508124", "0.5645098", "0.5642339", "0.5634177", "0.56337047", "0.5622002", "0.5601244", "0.5591925", "0.5591134", "0.5589628", "0.55713135", "0.5570524", "0.5565076", "0.55547047", "0.55480975", "0.55390346", "0.55376244", "0.5537065", "0.553379", "0.55323094", "0.5522764", "0.5521956", "0.551823", "0.5514856", "0.5514524", "0.55089873", "0.5507841", "0.55057144", "0.5503605", "0.54916096", "0.5471161", "0.54606295", "0.54564774", "0.5453723", "0.5452275", "0.54398894", "0.5428202", "0.54266703", "0.54258657", "0.5414106", "0.54097193", "0.5400419", "0.53969324", "0.53953135", "0.5388417", "0.53834987", "0.53824997", "0.53824997", "0.53808576", "0.5380301", "0.5378414", "0.5375781", "0.5374592", "0.5370533", "0.5366639", "0.53638965", "0.53623194", "0.53604597" ]
0.6557337
2
Delete acl in a secret.
def delete_acls_for_secret(cls, secret, session=None):
    session = cls.get_session(session=session)

    for entity in secret.secret_acls:
        entity.delete(session=session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_acls_for_secret_model(cls, secret, session=None):\n cls.db_repo.delete_acls_for_secret(secret, session)", "def delete_acl(self, sg):\n self.security_group_driver.delete_acl(sg)", "def delete_secret_link(link_id):\n\n Secret_Link.objects.filter(link_id=link_id).delete()", "def delete_bucket_acl(self, bucket, user):\n msg = \"delete_bucket_acl not implemented\"\n raise NotImplementedError(msg)", "def DeleteAclSample():\n client = CreateClient()\n doc = gdata.docs.data.Resource(type='document', title='My Sample Doc')\n doc = client.CreateResource(doc)\n acl_entry = gdata.docs.data.AclEntry(\n scope=gdata.acl.data.AclScope(value='user@example.com', type='user'),\n role=gdata.acl.data.AclRole(value='reader'),\n )\n acl_entry = client.AddAclEntry(doc, acl_entry)\n client.DeleteAclEntry(acl_entry)", "def _DeleteAclRule(self, entry):\n\n self.cal_client.Delete(entry.GetEditLink().href)", "def delete(key, **kwargs):\n cluster_call(\n \"secret_delete\",\n key=key,\n **kwargs,\n confirm=f\"Delete secret {key}\",\n prefix=f\"Deleting secret {key}...\",\n postfix=\"deleted.\",\n )", "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def test_delete_acl(self, env):\n # Create ACL Expressions\n self.suite_logger.debug(\"Create ACL Expressions\")\n expressions = [(1, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01'),\n (2, 'SrcMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:02:02:02')]\n env.switch[1].ui.create_acl(expressions=expressions)\n # Verify ACL Expression\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n # Verify first expression has been added\n expr_1 = {\"expressionId\": expressions[0][0],\n \"field\": expressions[0][1],\n \"mask\": expressions[0][2],\n \"data\": expressions[0][3]\n }\n assert expr_1 in expressions_table, \\\n \"Expression {0} was not added\".format(expressions[0])\n # Verify second expression has been added\n expr_2 = {\"expressionId\": expressions[1][0],\n \"field\": expressions[1][1],\n \"mask\": expressions[1][2],\n \"data\": expressions[1][3]\n }\n assert expr_2 in expressions_table,\\\n \"Expression {0} was not added\".format(expressions[1])\n # Delete Expression\n self.suite_logger.debug(\"Delete ACL Expression\")\n env.switch[1].ui.delete_acl(expression_ids=[(2, 'SrcMac'), ])\n # Verify Expression has been deleted\n expressions_table = env.switch[1].ui.get_table_acl(\"ACLExpressions\")\n assert expr_2 not in expressions_table, \\\n \"Expression {0} was not deleted\".format(expressions[1])\n\n # Create ACL Actions\n self.suite_logger.debug(\"Create ACL Actions\")\n actions = [(1, 'Drop', ''),\n (2, 'Count', '')]\n env.switch[1].ui.create_acl(actions=actions)\n # Verify ACL Action\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n # Verify first action has been added\n act_1 = {\"actionId\": actions[0][0],\n \"action\": actions[0][1],\n \"param\": actions[0][2]\n }\n assert act_1 in actions_table, \"Action {0} was not added\".format(actions[0])\n # Verify second action has been added\n act_2 = {\"actionId\": actions[1][0],\n \"action\": actions[1][1],\n \"param\": actions[1][2]\n }\n assert act_2 in actions_table, \"Action {0} was not added\".format(actions[1])\n # Delete Action\n self.suite_logger.debug(\"Delete ACL Action\")\n env.switch[1].ui.delete_acl(action_ids=[(2, 'Count'), ])\n # Verify Action has been deleted\n actions_table = env.switch[1].ui.get_table_acl(\"ACLActions\")\n assert act_2 not in actions_table, \"Action {0} was not deleted\".format(actions[1])\n\n # Create ACL Rule\n 
self.suite_logger.debug(\"Create ACL Rule\")\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), ]\n env.switch[1].ui.create_acl(ports=[1, ], rules=rules)\n # Verify ACL Rule has been added\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n rule = {\"ruleId\": rules[0][0],\n \"expressionId\": rules[0][1],\n \"actionId\": rules[0][2],\n \"stage\": rules[0][3],\n \"enabled\": rules[0][4],\n \"priority\": rules[0][5]\n }\n assert rule in rules_table, \"Rule {0} was not added\".format(rules[0])\n # Delete Rule\n self.suite_logger.debug(\"Delete ACL Rule\")\n env.switch[1].ui.delete_acl(ports=[1, ], rule_ids=[1, ])\n # Verify Rule has been deleted\n rules_table = env.switch[1].ui.get_table_acl(\"ACLRules\")\n assert rule not in rules_table, \"Rule {0} was not deleted\".format(rules[0])", "def delete_acl_rule(self, sgr):\n self.security_group_driver.delete_acl_rule(sgr)", "def delete_access_list(self, loadbalancer):\n return loadbalancer.delete_access_list()", "def delete_remote_access_session(arn=None):\n pass", "def delete_vlan_acl(self, vlan, acl):\n raise NotImplementedError # pragma: no cover", "def delete(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n # Actually run vault\n logging.info(\"Deleting the policy: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def delete_port_acl(self, port, acl):\n raise NotImplementedError # pragma: no cover", "def rbac_policy_delete(request, policy_id):\n neutronclient(request).delete_rbac_policy(policy_id)", "def delete_secret_request(self, vault_name: str, secret_name: str) -> dict[str, Any]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/secrets/{secret_name}'\n response = self.http_request(\n 'DELETE', full_url=url, resource=self.get_vault_resource())\n return response", "def delete_bucket_policy(Bucket=None):\n pass", "def delete_bucketlist():\n pass", "def delete_secret_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n secret_name = args['secret_name']\n\n response = client.delete_secret_request(vault_name, secret_name)\n\n outputs = copy.deepcopy(response)\n outputs['deletedDate'] = convert_timestamp_to_readable_date(\n outputs['deletedDate'])\n outputs['scheduledPurgeDate'] = convert_timestamp_to_readable_date(\n outputs['scheduledPurgeDate'])\n\n readable_response = copy.deepcopy(outputs)\n outputs['attributes'] = convert_time_attributes_to_iso(outputs['attributes'])\n outputs[VAULT_NAME_CONTEXT_FIELD] = vault_name\n\n readable_response['secretId'] = readable_response.pop('id')\n readable_output = tableToMarkdown(f'Delete {secret_name}',\n readable_response,\n ['secretId', 'recoveryId', 'deletedDate',\n 'scheduledPurgeDate'], removeNull=True,\n headerTransform=pascalToSpace)\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='recoveryId',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def delete_access_list(self):\n return self.manager.delete_access_list(self)", "def post_access_control_list_delete(self, resource_id, resource_dict):\n pass", "def deleteSecret(self, clientIP, not_before):\n\n return self._secret_table.delete_item(ip_address=clientIP,not_before=not_before)", "def pre_access_control_list_delete(self, resource_id):\n pass", "def delete_permissions(queue_url, 
label):\n client = boto3.client('sqs')\n try:\n client.remove_permission(QueueUrl=queue_url, Label=label)\n except ClientError as e:\n if e.response['Error']['Code'] != 'InvalidParameterValue':\n raise\n # We are failing silently since the label, which we would like to delete, does not exist", "def delete_access_list(self, loadbalancer):\n uri = \"/loadbalancers/%s/accesslist\" % utils.get_id(loadbalancer)\n resp, body = self.api.method_delete(uri)\n return body", "def delete(ctx: CLIContext, access_key):\n with Session() as session:\n try:\n data = session.KeyPair.delete(access_key)\n except Exception as e:\n ctx.output.print_mutation_error(\n e,\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n if not data['ok']:\n ctx.output.print_mutation_error(\n msg=data['msg'],\n item_name='keypair',\n action_name='deletion',\n )\n sys.exit(1)\n ctx.output.print_mutation_result(\n data,\n extra_info={\n 'access_key': access_key,\n },\n )", "def test_remove_authz_wrong(self):\n self.test_add_authz()\n self.app.delete(\"/config/authorize?operation=config\", status=400)\n self.app.delete(\"/config/authorize?dn=/DN=a.test.user\", status=204)", "def main_role_delete(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n id_ = opts[\"id\"]\n client.delete_role(opts[\"formation\"], id_)\n logger.info(f\"Deleted role with id=\\\"{id_}\\\"\")", "def remove_access(acl, list_to_edit):\n post_key = '%s_remove_' % list_to_edit\n removal_keys = [k for k in request.POST.keys() if k.startswith(post_key)]\n for key in removal_keys:\n model_type = models.UserGroup\n if list_to_edit.startswith('user'):\n model_type = models.UserProfile\n key_id = int(key.replace(post_key, ''))\n datastore_object = model_type.get_by_id(key_id)\n acl.__getattribute__(list_to_edit).remove(datastore_object.key())", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete_bucket(Bucket=None):\n pass", "def test_delete_namespaced_role(self):\n pass", "def deleteRoleAccess(self, role, read, write, catalog='*', repository='*'):\n self._client.deleteRoleAccess(role, read, write, catalog, repository)", "def access_info_delete(context, storage_id):\n _access_info_get_query(context). 
\\\n filter_by(storage_id=storage_id).delete()", "def vault_delete(self, vault_delete):\n self._vault_delete = vault_delete", "def delete_bucket_encryption(Bucket=None):\n pass", "def delete(self, sg_id):\n self.client.delete_security_group(sg_id)", "def test_delete_cluster_policy_binding(self):\n pass", "def delete_infrastructure(aws_key, aws_secret):\n # Create boto3 clients for AWS resources.\n ec2_client, _, iam_client, redshift_client = create_clients(\n aws_key, aws_secret\n )\n # Get the clusters properties.\n cluster_properties = get_cluster_properties(redshift_client)\n # Clean up resources.\n redshift_client.delete_cluster(\n ClusterIdentifier=IDENTIFIER, SkipFinalClusterSnapshot=True\n )\n iam_client.detach_role_policy(\n RoleName=IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadonlyAccess\"\n )\n iam_client.delete_role(RoleName=IAM_ROLE_NAME)", "def delete(self, layer='', name='', uid='', params={}):\n return self.__common_client._post_with_layer('delete-access-section', layer, name, uid, params)", "def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))", "def delete(self, layer='', name='', uid='', params={}):\n return self.__common_client._post_with_layer('delete-access-rule', layer, name, uid, params)", "def deleteSecret(self, clientIP, not_before):\n\n return self._secretdb.execute('delete from %s where ip_address=:ip_address and not_before=:not_before' % self._table_name,\n {'ip_address': ip_address,\n 'not_before': not_before})", "def delete(cls, name):\n\n secret = cls.get_instance(name)\n secret.delete_instance(recursive=True)", "def delete_access(request):\n # Get the submitted request parameters.\n\n params = urllib.parse.parse_qs(request.META['QUERY_STRING'])\n\n if \"global_id\" not in params or len(params['global_id']) != 1:\n return HttpResponseBadRequest()\n else:\n global_id = params['global_id'][0]\n\n # Get the GlobalID object for the supplied global ID, creating one if\n # necessary.\n\n global_id_rec,created = GlobalID.objects.get_or_create(global_id=global_id)\n\n # Delete the existing access credentials for this global ID, if it exists.\n\n AccessID.objects.filter(global_id=global_id_rec).delete()\n\n # Tell the caller that we succeeded.\n\n return HttpResponse(status=200)", "def delete(self, app, role, privilege):\n \n # check user's privileges\n h.checkAccess('delete')\n\n model = RolesModel()\n model.deletePrivilege( app, role, privilege )\n\n # returns empty reply", "def test_dashboards_v2_delete_share(self):\n pass", "def delete(cls, aws_cloud_account_id: str):\n\t\tpass", "def delete():", "def _delete_all_acls(self):\n for acl_ref in self.created_entities['acl']:\n entity_ref = acl_ref.replace(\"/acl\", \"\")\n blank_acl_entity = self.barbicanclient.acls.create(\n entity_ref=entity_ref)\n blank_acl_entity.remove()", "def delete_access_key(self, username, accesskeyid):\n try:\n self.iam_client.delete_access_key(\n UserName=username,\n AccessKeyId=accesskeyid\n )\n except ClientError as error:\n if error.response['Error']['Code'] == 'NoSuchEntityException':\n pass", "def delete(id):\n db = core.connect()\n permIds = [perm[\"_id\"] for perm in permission.permissionsForStream(id)]\n [permission.delete(permId) for permId in permIds]\n del db[id]", "def test_remove_authz(self):\n self.test_add_authz()\n self.app.delete(\"/config/authorize?dn=/DN=a.test.user&operation=config\",\n status = 204\n )\n\n audits = Session.query(ConfigAudit).all()\n 
self.assertEqual(2, len(audits))\n\n authz = Session.query(AuthorizationByDn).get(('/DN=a.test.user', 'config'))\n self.assertEqual(None, authz)", "def revoke_secret_prefix(self, prefix):\n client = self.connect(VAULT_TOKEN)\n client.sys.revoke_secret_prefix(prefix)", "def revoke_secret(self, lease_id):\n client = self.connect(VAULT_TOKEN)\n client.sys.revoke_secret(lease_id)", "def delete(ctx):\n click.echo('deleting')\n ctx.delete()\n click.echo('done')", "def manage_removeSharedSecret(self, REQUEST):\n self._shared_secret = None\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'Shared+secret+removed.')\n )", "def delete(self, url):\n self._verify_keystore()\n auths = self._read_all()\n try:\n del auths[url]\n except KeyError:\n return 1\n self._shred()\n return self._write_all(auths)", "def delete_conf(src_ip):\n return delete_route(src_ip)", "def test_delete_cluster_policy(self):\n pass", "def remove_permission_from_bucket(bucket_name, role_type, member_type):\n\n # initialize client & get bucket\n _, bucket, _ = create_client(bucket_name)\n\n policy = bucket.get_iam_policy(requested_policy_version=3)\n \n # get member type\n member_value = get_member_bucket_level(member_type)\n\n # get role type\n role_value = get_role_bucket_level(role_type)\n\n for binding in policy.bindings:\n # print(binding)\n if binding[\"role\"] == role_value and binding.get(\"condition\") is None:\n # revoke role from member\n binding[\"members\"].discard(member_value)\n\n bucket.set_iam_policy(policy)\n\n print(\"removed {} with role {} from {}\".format(member_value, role_value, bucket_name))", "def test_delete_o_auth_access_token(self):\n pass", "def delete(self, oid):\n path = '%s/security-groups/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack security group: %s' % truncate(res))\n return res[0]", "def delete_run(arn=None):\n pass", "def test_delete(self):\n self.basic_login()\n cassette_name = self.cassette_name(\"delete\")\n with self.recorder.use_cassette(cassette_name):\n auth = self.gh.authorize(\n username=self.user,\n password=self.password,\n scopes=[\"gist\"],\n note=\"testing github3.py\",\n )\n assert isinstance(auth, github3.auths.Authorization)\n assert auth.delete() is True", "def test_delete_cluster_role_binding(self):\n pass", "def test_vault_delete_vault_section(self):\n pass", "def delPermission(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deletePermission(request[\"admin_username\"],request[\"perm_name\"])", "def test_delete_namespaced_role_binding(self):\n pass", "def _delete_rights(self):\n for right in self.rights:\n right.delete()", "def test_delete_cluster_role(self):\n pass", "def test_create_describe_delete_acls(kafka_admin_client):\n\n # Check that we don't have any ACLs in the cluster\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0\n\n # Try to add an ACL\n acl = ACL(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n 
resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n result = kafka_admin_client.create_acls([acl])\n\n assert len(result[\"failed\"]) == 0\n assert len(result[\"succeeded\"]) == 1\n\n # Check that we can list the ACL we created\n acl_filter = ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n acls, error = kafka_admin_client.describe_acls(acl_filter)\n\n assert error is NoError\n assert len(acls) == 1\n\n # Remove the ACL\n delete_results = kafka_admin_client.delete_acls(\n [\n ACLFilter(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n ]\n )\n\n assert len(delete_results) == 1\n assert len(delete_results[0][1]) == 1 # Check number of affected ACLs\n\n # Make sure the ACL does not exist in the cluster anymore\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=\"*\",\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0", "def test_create_describe_delete_acls(kafka_admin_client):\n\n # Check that we don't have any ACLs in the cluster\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0\n\n # Try to add an ACL\n acl = ACL(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n result = kafka_admin_client.create_acls([acl])\n\n assert len(result[\"failed\"]) == 0\n assert len(result[\"succeeded\"]) == 1\n\n # Check that we can list the ACL we created\n acl_filter = ACLFilter(\n principal=None,\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n acls, error = kafka_admin_client.describe_acls(acl_filter)\n\n assert error is NoError\n assert len(acls) == 1\n\n # Remove the ACL\n delete_results = kafka_admin_client.delete_acls(\n [\n ACLFilter(\n principal=\"User:test\",\n host=\"*\",\n operation=ACLOperation.READ,\n permission_type=ACLPermissionType.ALLOW,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n ]\n )\n\n assert len(delete_results) == 1\n assert len(delete_results[0][1]) == 1 # Check number of affected ACLs\n\n # Make sure the ACL does not exist in the cluster anymore\n acls, error = kafka_admin_client.describe_acls(\n ACLFilter(\n principal=\"*\",\n host=\"*\",\n operation=ACLOperation.ANY,\n permission_type=ACLPermissionType.ANY,\n resource_pattern=ResourcePattern(ResourceType.TOPIC, \"topic\")\n )\n )\n\n assert error is NoError\n assert len(acls) == 0", "def delete_upload(arn=None):\n pass", "def test_delete_hyperflex_cluster_storage_policy(self):\n pass", "def delete(ctx, query, force):\n\n ensure_validated(ctx)\n controller = ctx.obj['controller']\n creds = controller.list()\n hits = _search(creds, query)\n if len(hits) == 0:\n click.echo('No matches, nothing to be done.')\n elif len(hits) == 1:\n cred = hits[0]\n if force or 
(click.confirm(\n u'Delete credential: {} ?'.format(cred.printable_key),\n default=False, err=True\n )):\n controller.delete(cred)\n click.echo(u'Deleted {}.'.format(cred.printable_key))\n else:\n click.echo('Deletion aborted by user.')\n\n else:\n _error_multiple_hits(ctx, hits)", "def delete_rule(self, index):\n del self.rules[index]", "def test_delete_namespaced_policy(self):\n pass", "def catalog_alias_delete(self, args):\n try:\n alias = self.server.connect_ermrest_alias(args.id)\n alias.delete_ermrest_alias(really=True)\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n else:\n raise e", "def delete(c, bucket=None):\n if bucket == None:\n bucket = \"dvc-\" + project_dir_name().replace(\"_\",\"-\")\n bucket_resource = boto3.resource('s3').Bucket(bucket)\n bucket_resource.objects.all().delete()\n s3 = boto3.client(\"s3\")\n s3.delete_bucket(Bucket=bucket)", "def delete_bucket(self, name):\n return", "def clear(cls):\n\n db = get_db_handle()\n for secret in db.secret_table.select():\n secret.delete_instance(recursive=True)", "def s3_delete_data(self):\n\n self.k.delete()", "def test_delete_role(self):\n pass", "def test_delete_hyperflex_cluster_network_policy(self):\n pass", "def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)", "def delete_network_profile(arn=None):\n pass", "def remove_permission_from_blob(bucket_name, blob_name, role_type, member_type):\n\n # initialize client, get bucket, & get blob\n _, _, blob = create_client(bucket_name, blob_name)\n \n # get member type\n member = get_member_blob_level(member_type, blob)\n \n # revoke role from member\n revoke_role_blob_level(role_type, member)\n\n blob.acl.save()\n\n print(\n \"removed permission for {} to {} from blob {} in bucket {}\".format(\n member_type, role_type, blob_name, bucket_name\n )\n )", "async def erase(self, guild: discord.Guild):\n role = await self.get_role(guild=guild)\n if role:\n await role.delete()", "def delete(self, user, id):\n # Search for bucketlist\n print (id)\n bucketlist = Bucketlist.query.filter_by(\n id=id, created_by=user.email).first()\n\n # return 400 if bucketlist non exixtant or not belongs to this user\n if bucketlist is None:\n return 'Bucketlist not found', 202\n\n bucketlist.delete()\n\n return \"Successfully deleted bucketlist\", 200", "def delete(self, consumer_key, rid):\n policy = Policy.query.filter(\n Policy.consumer_key == consumer_key,\n Policy.rid == rid\n ).first_or_404()\n\n policy.remove()\n return '', 204", "def delete_secrets(self, id: str, body: list[str]) -> Any:\n return self.client.delete(self._url(\"%s/secrets\" % id), data=body)", "def remove_acl(self, **kwargs):\n # Validate required and accepted parameters\n params_validator.validate_params_slx_ver17s_apply_acl(**kwargs)\n\n # Parse params\n acl_name = self.ip.parse_acl_name(**kwargs)\n callback = kwargs.pop('callback', self._callback)\n acl = self._get_acl_info(acl_name, get_seqs=False)\n address_type = acl['protocol']\n\n kwargs['address_type'] = address_type\n # Parse params\n user_data = self._parse_params_for_apply_or_remove_acl(**kwargs)\n\n self.validate_interfaces(callback, user_data)\n\n result = {}\n for intf in user_data['interface_list']:\n user_data['intf'] = intf\n t = jinja2.Template(acl_template.acl_remove)\n config = t.render(**user_data)\n config = ' '.join(config.split())\n try:\n callback(config)\n result[intf] = True\n 
except Exception as e:\n if '<bad-element>access-group</bad-element>' in str(e):\n result[intf] = None\n else:\n raise\n return result", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "def test_delete_hyperflex_sys_config_policy(self):\n pass", "def delete(self):\n\n context = t_context.extract_context_from_environ()\n if not context.is_admin:\n # TODO(joahuang): changed to policy control later\n # to support reseller admin mode\n return Response(_('Admin role required to delete quota'), 409)\n\n kw = {}\n return self._quota_action('delete', **kw)", "def locked_delete(self):\n self._multistore._delete_credential(self._key)", "async def delete(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"aliases deleted\")\n except:\n await ctx.send(\"Alias unsuccessfully deleted\")\n elif not tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"Tag and all aliases deleted\")\n except:\n await ctx.send(\"Tag unsuccessfully deleted\")\n else:\n await ctx.send(\"No Tag with that name found\")", "def delete(config: Config, ami: str) -> None:\n\n ec2_client = boto3.client(\"ec2\", region_name=config.get(\"region\", None))\n\n response = describe(config, ami, show_snapshot_id=True)\n\n ec2_client.deregister_image(ImageId=ami)\n\n ec2_client.delete_snapshot(SnapshotId=response[0][\"SnapshotId\"])" ]
[ "0.7304661", "0.7133021", "0.6473207", "0.6455759", "0.6440277", "0.6435884", "0.64185977", "0.6405398", "0.63174623", "0.6266833", "0.6206975", "0.6191693", "0.61805665", "0.6168148", "0.6163188", "0.60189515", "0.59845203", "0.5972625", "0.59346336", "0.5915101", "0.59063804", "0.5896104", "0.5870325", "0.5860186", "0.5825764", "0.58130634", "0.5795946", "0.57953167", "0.5767018", "0.5766786", "0.56748164", "0.56667155", "0.5656408", "0.5650128", "0.56418407", "0.5638475", "0.56378734", "0.56072044", "0.5603556", "0.55965257", "0.55866545", "0.5570379", "0.55663633", "0.5563183", "0.55411744", "0.5539928", "0.5530198", "0.5510027", "0.5506072", "0.5503493", "0.5499002", "0.549378", "0.54913795", "0.5490433", "0.54803383", "0.5480277", "0.5457826", "0.5450185", "0.5442016", "0.54347044", "0.5432904", "0.54139113", "0.5407523", "0.5392425", "0.5390045", "0.5382133", "0.53733", "0.5369187", "0.53681535", "0.5364391", "0.5351333", "0.5351248", "0.53374916", "0.53374916", "0.53357977", "0.53350335", "0.5310443", "0.5306645", "0.5305633", "0.5302374", "0.529812", "0.5291857", "0.5280974", "0.52740014", "0.5273053", "0.52602804", "0.52558035", "0.5255375", "0.52530754", "0.5248531", "0.5248303", "0.52466434", "0.52457803", "0.5238707", "0.52375966", "0.52369004", "0.5229341", "0.5228125", "0.5222255", "0.5218487" ]
0.72501093
1
Obtain basic information about onefs
def show(self, username, txn_id):
    logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())
    resp = {'content' : {}, 'error': None, 'params': {}}
    logger.info('Task starting')
    try:
        info = vmware.show_onefs(username)
    except ValueError as doh:
        logger.error('Task failed: {}'.format(doh))
        resp['error'] = '{}'.format(doh)
    else:
        logger.info('Task complete')
        resp['content'] = info
    return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getInfo():", "def info() -> None:", "def info(self):\n return self.nfo", "def info(self):", "def info(self):", "def demo():\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n fcm = FMQLCacher(\"Caches\")\n fcm.setVista(\"CGVISTA\", \"http://vista.caregraf.org/fmqlEP\")\n \"\"\"\n for i, scheme in enumerate(fcm.describeSchemaTypes()):\n if \"count\" in scheme:\n print \"%d: %s (%s)\" % (i, scheme[\"number\"], scheme[\"count\"])\n else:\n print \"%d: %s\" % (i, scheme[\"number\"])\n \"\"\"\n for entry in fcm.describeFileEntries(\"9_6\", cstop=\"1000\"):\n print entry[\"uri\"][\"label\"]", "def get_info(self):\n pass", "def get_info(self):\n pass", "def info(self):\n print \"root path = {path}\".format(path=self.path)\n print \"target path = {target}\".format(target=self.target)\n print \"files = {dic}\".format(dic=self.files)", "def bioinfo():\n\n pass", "def getHostFsInfo(hostfs):\n pattern = re.compile('^([^\\.]+)\\.([^\\.]+)\\.([^\\.]+)-(([0-9]+\\.)+([0-9]+))\\.([^\\.]+)$')\n result = pattern.match(hostfs)\n if result is None:\n return None\n else:\n version = result.group(4)\n platform = result.group(1)\n cpu = result.group(2)\n endian = result.group(3)\n ext = result.group(7)\n return {\n 'name': hostfs,\n 'file': hostfs,\n 'filepath': hostfs,\n 'version': version,\n 'platform': platform,\n 'cpu': cpu,\n 'endian': endian,\n 'type': ext\n }", "def info(self):\r\n print(f\"filename: {self.filename}\")\r\n print(f\"comments: \\n{self.comment_1}{self.comment_2}\")\r\n print(f\"origin: {self.origin[0]}, {self.origin[1]}, {self.origin[2]}\")\r\n print(f\"atoms count: {self.n_atoms}\")\r\n print(f\"voxels count: {self.n_x}, {self.n_y}, {self.n_z}\")\r\n print(f\"voxel x-axis: {self.x[0]}, {self.x[1]}, {self.x[2]}\")\r\n print(f\"voxel y-axis: {self.y[0]}, {self.y[1]}, {self.y[2]}\")\r\n print(f\"voxel z-axis: {self.z[0]}, {self.z[1]}, {self.z[2]}\")", "def info() -> Dict[str, Any]:", "def getInfo(self): \n print(\"\\n---------------------------------\")\n print(\"Info found about the binary file submitted for upload:\")\n print(\"Name: %s\" % self.name) \n print(\"Path: %s\" % self.path)\n print(\"Complete path to file: %s\" % self.abspath())\n print(\"Arch: %s\" % self.arch) \n print(\"File type: %s\" % self.filetype)\n print(\"Board: %s\\n\" % self.board)\n print(\"---------------------------------\")\n return True", "def info(self) -> dict:", "def _get_information(self):\n pass", "def get_info(raw_filename, epochs_filename):\n trans, fiducials, info = get_head_correct_info(\n raw_filename, epochs_filename)\n return info", "def get_info(self):\n return None", "def lsinfo(name):", "def get_info(self):\n return \"TODO !\"", "def get_info(self):\n\t\tret = 'Flash info\\n'\n\t\tret += '\\tGPNVM bits: ' + str(self.read_gpnvm()) + '\\n'\n\t\tret += '\\tUnique identifier area: ' + self.read_unique_identifier_area().decode('ascii', 'replace') + '\\n'\n\t\tret += '\\tDescriptor: ' + str(self.read_descriptor()) + '\\n'\n\t\treturn ret", "def info(file, extended, vlrs, points):\n try:\n with pylas.open(openbin_file(file)) as fp:\n echo_header(fp.header, extended)\n\n if vlrs:\n click.echo(20 * \"-\")\n echo_vlrs(fp)\n\n if points:\n click.echo(20 * \"-\")\n echo_points(fp)\n except fs.errors.ResourceNotFound as e:\n click.echo(click.style(\"Error: {}\".format(e), fg=\"red\"))", "def test2_basic_info(self):\n\t\tprint \"\\nTEST 2: Extracting basic info from each ontology in %s folder.\\n=================\" % DATA_FOLDER\n\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not 
f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\n\t\t\t\t\n\t\t\t\t# divert output to a file temporarily \n\t\t\t\tsaveout = sys.stdout \n\t\t\t\tfsock = open('out.log', 'w') \n\t\t\t\tsys.stdout = fsock \n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\tprintBasicInfo(o)\t\t\t\t\n\t\t\t\t\n\t\t\t\tsys.stdout = saveout\n\t\t\t\tfsock.close()\n\t\t\t\tprint \"Success.\"", "def SOD(fp):\n info = {}\n\n return info", "def readMetaInfo(self):\n\t\tdata = self._fileSystem.readMetaInfo()\n\t\treturn data", "def get_info():\n\n global DISKINFO\n DISKINFO = {}\n\n #Run diskutil list to get disk names.\n runcmd = subprocess.Popen(\"diskutil list -plist\", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n\n #Get the output.\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n global PLIST\n\n PLIST = plistlib.loads(stdout)\n\n #Find the disks.\n for disk in PLIST[\"AllDisks\"]:\n #Run diskutil info to get disk info.\n runcmd = subprocess.Popen(\"diskutil info -plist \"+disk, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n stdout = runcmd.communicate()[0]\n\n #Parse the plist (Property List).\n PLIST = plistlib.loads(stdout)\n\n #Check if the disk is a partition.\n disk_is_partition = is_partition(disk)\n\n if not disk_is_partition:\n #These are devices.\n get_device_info(disk)\n\n else:\n #These are Partitions. Fix for disks w/ more than 9 partitions.\n host_disk = \"/dev/\"+disk.split(\"s\")[0]+\"s\"+disk.split(\"s\")[1]\n get_partition_info(disk, host_disk)\n\n #Check we found some disks.\n if not DISKINFO:\n raise RuntimeError(\"No Disks found!\")", "def printInfoDoc():\n global _modinfo\n print _modinfo\n help(\"ProcUtils\")", "def _fsal_hook(self, base, share, access):\n return {\"Hostname\": self.gluster_manager.host,\n \"Volume\": self.gluster_manager.volume,\n \"Volpath\": \"/\" + share['name']}", "def pynanoleaf_get_info(nanoleaf_light: Nanoleaf) -> dict:\n return nanoleaf_light.info", "def get_discovery_summary():\n pass", "def module_info():\n pass", "def file_info(self, f):\n ld8 = self.ld8_extract(f) # get luna_date\n sid = self.sesid(ld8) # make luna_visitnum\n age = self.age_lookup.get(sid)\n return (sid, age)", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def file_info(label, filename):\n filekit = current_app.filekits.get(label)\n if filekit is None: \n abort(404)\n try:\n fkit = filekit(filename)\n except FileNotFound:\n abort(404)\n return jsonify(fkit.to_dict())", "def info(): # noqa: E501\n return 'do some magic!'", "def get_info(self) -> Optional[Dict[str, Any]]:", "def get_waveform_info():\n dpo.write('acquire:stopafter sequence')\n dpo.write('acquire:state on')\n dpo.query('*OPC?')\n binaryFormat = dpo.query('wfmoutpre:bn_fmt?').rstrip()\n print('Binary format: ', binaryFormat)\n numBytes = dpo.query('wfmoutpre:byt_nr?').rstrip()\n print('Number of Bytes: ', numBytes)\n byteOrder = dpo.query('wfmoutpre:byt_or?').rstrip()\n print('Byte order: ', byteOrder)\n encoding = dpo.query('data:encdg?').rstrip()\n print('Encoding: ', encoding)\n if 'RIB' in encoding or 'FAS' in encoding:\n dType = 'b'\n bigEndian = True\n elif encoding.startswith('RPB'):\n dType = 'B'\n bigEndian = True\n elif encoding.startswith('SRI'):\n dType = 'b'\n bigEndian = False\n elif encoding.startswith('SRP'):\n dType = 'B'\n 
bigEndian = False\n elif encoding.startswith('FP'):\n dType = 'f'\n bigEndian = True\n elif encoding.startswith('SFP'):\n dType = 'f'\n bigEndian = False\n elif encoding.startswith('ASCI'):\n raise visa.InvalidBinaryFormat('ASCII Formatting.')\n else:\n raise visa.InvalidBinaryFormat\n return dType, bigEndian", "def manage_info():", "def load_info():\n\n infofile = os.path.join(ROOTDIR, 'weirdos.info')\n info = Table().read(infofile, format='ascii')\n\n return info", "def info(self):\r\n return self._get('info', {})", "def info(self):\n self._info()", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def rpc_info():", "def fileInfo(tif: TiffFile):\n print(tif.flags)\n print(tif.geotiff_metadata)\n for page in tif.pages:\n print(page.tags)\n print(page.geotiff_tags)\n print(page.shape)\n print(page.dtype)\n print(page.flags)", "def info(self):\n return self._info", "def get_info(self):\n pattern = \"{}-{}-{}\".format(*self.diagram).replace(\"/\", \"|\")\n info = \"\"\n info += \"name: triangle group {}\\n\".format(pattern)\n info += \"cox_mat: {}\\n\".format(self.cox_mat)\n info += \"vertices: {}\\n\".format(self.num_vertices)\n info += \"edges: {}\\n\".format(self.num_edges)\n info += \"faces: {}\\n\".format(self.num_faces)\n info += \"states in the automaton: {}\\n\".format(self.G.dfa.num_states)\n info += \"reflection table:\\n{}\\n\".format(self.G.reftable)\n info += \"the automaton is saved as {}_dfa.png\".format(pattern)\n self.G.dfa.draw(pattern + \"_dfa.png\")\n return info", "def info():\n return buildcat.info()", "def readFeatures(self):\n\t\treturn self._fileSystem.readFeatures()", "def full_info(self, object, name, value):\n return self.info()", "def get_info(self) -> str:\n info = ffi.new(\"char **\")\n ret = lib.Fapi_GetInfo(self._ctx, info)\n _chkrc(ret)\n return ffi.string(_get_dptr(info, lib.Fapi_Free)).decode(self.encoding)", "def get_info(self) -> str:\n raise NotImplementedError()", "def fopenhelp(self):", "def _get_spec_info(self):\n raise NotImplementedError()", "def getFeatureInfo(self,feature):\n geomRef = feature.GetGeometryRef()\n nameIndex = feature.GetFieldIndex(\"OBJNAM\")\n featureName = \"NO OBJNAM\"\n if(nameIndex != -1 and feature.GetFieldAsString(nameIndex) != \"\" ):\n featureName = feature.GetFieldAsString(nameIndex)\n featureInfo = (featureName, feature.GetFID(), geomRef.GetX(), geomRef.GetY())\n # rospy.loginfo(featureInfo)\n return featureInfo", "def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)", "def get_info(self):\n raw = self.datasets.current.raw\n fname = self.datasets.current.fname\n\n nchan = raw.info[\"nchan\"]\n chans = Counter([channel_type(raw.info, i) for i in range(nchan)])\n\n return {\"File name\": fname if fname else \"-\",\n \"Number of channels\": raw.info[\"nchan\"],\n \"Channels\": \", \".join(\n [\" \".join([str(v), k.upper()]) for k, v in chans.items()]),\n \"Samples\": raw.n_times,\n \"Sampling frequency\": str(raw.info[\"sfreq\"]) + \" Hz\",\n \"Length\": str(raw.n_times / raw.info[\"sfreq\"]) 
+ \" s\",\n \"Size in memory\": \"{:.2f} MB\".format(\n raw._data.nbytes / 1024 ** 2),\n \"Size on disk\": \"-\" if not fname else \"{:.2f} MB\".format(\n getsize(fname) / 1024 ** 2)}", "def fp_meta(self):\n for server in self.machines:\n s = self.machines[server]\n print \"%s: %s (%s)\" % (s.id, s.adminPass, s)", "def info(self, fp):\n keys = (\n (\"cas.meta.compression\", CAS._convert_meta),\n (\"cas.meta.lib\", CAS._convert_meta),\n (\"cas.meta.fp_algo\", CAS._convert_meta),\n (\"cas.meta.orig_size\", CAS._convert_meta),\n (\"cas.refcount\", CAS._convert_refcount),\n )\n\n return {key: conv(self.ioctx.get_xattr(fp, key))\n for key, conv in keys}", "def get_info(self, info):\r\n pass", "def get_gadget_info(gfname):\n partmass = pyg.readheader(gfname, 'massTable')[1]\n boxsize = pyg.readheader(gfname, 'boxsize')\n omegam = pyg.readheader(gfname, 'O0')\n omegal = pyg.readheader(gfname, 'Ol')\n h = pyg.readheader(gfname, 'h')\n npart = pyg.readheader(gfname, 'npartTotal')[1]\n return omegam, omegal, h, boxsize, partmass, npart", "def fs(self):\n return self._fs", "def info(self, paths):\n self.tracer.info(\"%s.info method called\" % self.__class__.__name__)\n\n mounts = []\n\n for path in paths:\n # determine real OS path without symlinks and retrieve the mounted devices\n path = os.path.realpath(path)\n\n # if path isn't mounted, skip this entry\n if not os.path.ismount(path):\n continue\n\n ## get fstype and device from /proc/mounts\n (code, output) = Helper._run2PipedOsCommand(\"cat /proc/mounts\", \"grep -w %s\" % path)\n if not code == 0:\n self.tracer.warning(\"error running cat /proc/mounts: code %s: %s\" % (code, output))\n dev = \"?\"\n fstype = \"?\"\n else:\n dev = output.split()[0]\n fstype = output.split()[2]\n\n # combine all extracted information\n mounts.append({\n \"path\" : path,\n \"OS Filesystem Type\" : fstype,\n \"OS Device\" : dev,\n })\n\n return mounts", "def feature_static_metadata(self):\n # Get binary size\n self.features[\"size\"] = \\\n self.report.get(\"target\", {}).get(\"file\", {}).get(\"size\")\n\n # Get binary timestamp in the UNIX timestamp format\n str_dt = self.report.get(\"static\", {}).get(\"pe_timestamp\")\n ts = None\n if str_dt is not None:\n dt = datetime.datetime.strptime(str_dt, \"%Y-%m-%d %H:%M:%S\")\n ts = int(time.mktime(dt.timetuple()))\n self.features[\"timestamp\"] = ts\n\n # ExifTool output\n et_tokens = [\"FileDescription\", \"OriginalFilename\"]\n for token in et_tokens:\n self.features[token] = None\n for attr in self.report.get(\"static\", {}).get(\"pe_versioninfo\", []):\n attr_name = attr.get(\"name\")\n if attr_name in et_tokens:\n self.features[attr_name] = attr.get(\"value\")\n\n # Magic byte\n self.features[\"magic_byte\"] = \\\n self.report.get(\"target\", {}).get(\"file\", {}).get(\"type\")", "def getInfo(self):\n return self.info", "def info(self, show_zeropoints=True):\n msg = \"\"\"Filter object information:\n name: {s.name:s}\n detector type: {s.dtype:s}\n wavelength units: {s.wavelength_unit}\n central wavelength: {s.cl:f}\n pivot wavelength: {s.lpivot:f}\n effective wavelength: {s.leff:f}\n photon wavelength: {s.lphot:f}\n minimum wavelength: {s.lmin:f}\n maximum wavelength: {s.lmax:f}\n norm: {s.norm:f}\n effective width: {s.width:f}\n fullwidth half-max: {s.fwhm:f}\n definition contains {s.transmit.size:d} points\"\"\"\n print(msg.format(s=self).replace('None', 'unknown'))\n\n # zero points only if units\n if (self.wavelength_unit is None) or (not show_zeropoints):\n return\n\n print(\"\"\"\n Zeropoints\n Vega: 
{s.Vega_zero_mag:f} mag,\n {s.Vega_zero_flux},\n {s.Vega_zero_Jy}\n {s.Vega_zero_photons}\n AB: {s.AB_zero_mag:f} mag,\n {s.AB_zero_flux},\n {s.AB_zero_Jy}\n ST: {s.ST_zero_mag:f} mag,\n {s.ST_zero_flux},\n {s.ST_zero_Jy}\n \"\"\".format(s=self))", "def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")", "def test3_advanced_info(self):\n\t\tprint \"\\nTEST 3: Extracting detailed entities info from each ontology in %s folder.\\n=================\" % DATA_FOLDER\n\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... >\", f\n\n\t\t\t\t# divert output to a file temporarily \n\t\t\t\tsaveout = sys.stdout \n\t\t\t\tfsock = open('out.log', 'w') \n\t\t\t\tsys.stdout = fsock \n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\tprintEntitiesInformation(o)\t\t\t\t\n\t\t\t\t\n\t\t\t\tsys.stdout = saveout\n\t\t\t\tfsock.close()\n\t\t\t\tprint \"Success.\"", "def process_info(process):\n\thelp(process)", "def _get_root_metadata(self):\n r = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'root']),\n params={'select': 'id,name,fileSystemInfo'})\n return r.json()", "def lsinfo(path):", "def get_info():\n\n #Determine if running on Linux or Mac.\n if platform.system() == 'Linux':\n linux = True\n\n elif platform.system() == \"Darwin\":\n linux = False\n\n if linux:\n from . import linux\n linux.get_info()\n diskinfo = linux.DISKINFO\n\n else:\n from . 
import macos\n macos.get_info()\n diskinfo = macos.DISKINFO\n\n return diskinfo", "def info_file(self):\n return self._info_file", "def detailedInfo(cls):\n return 'tbd'", "def detailedInfo(cls):\n return 'tbd'", "def getFileInfoFromXML(thisfile):\n\n pfn = thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\")\n lfn = thisfile.getElementsByTagName(\"lfn\")[0].getAttribute(\"name\")\n guid = thisfile.getAttribute(\"ID\")\n\n return lfn, pfn, guid", "def _SetFmapInfo(self):\n for epi in self.pfiles + self.epirt_paths:\n self.info[epi]['fmapname'] = None\n self.info[epi]['fmap_entry'] = None\n for entry in self.entry_map['fmap']:\n fmap_name = self.info[entry]['imgfile'] + self.info[entry]['suffix']\n if self.info[entry]['plane'] == self.info[epi]['plane']:\n# Use the fieldmap acquired at the same plane.\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n else:\n# for fmap in self.fmaps.keys():\n for entry in self.entry_map['fmap']:\n# No fmap at same orientation, look for fmaps in other planes.\n# There won't be more than one, so it isn't much of a choice.\n fmap_name = self.info[entry]['imgfile'] + \\\n self.info[entry]['suffix']\n if self.info[entry]['plane'] == 'sagittal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'axial':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'coronal':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n break\n elif self.info[entry]['plane'] == 'oblique':\n self.info[epi]['fmapname'] = fmap_name\n self.info[epi]['fmap_entry'] = entry\n self.info[epi]['plane'] = 'oblique'\n break", "def get_info(self) -> str:\n return self.info", "def debug_info_struct(fdt):\n # Traverse node tree in depth first\n depth = 0\n path = b''\n root = fdt.get_root_node()\n debug_node(fdt, root, depth, path)", "def show_info(self, handle=sys.stdout):\n pt = PrettyTable(['EntryInfo', 'Value'])\n pt.align = 'r'\n pt.align['EntryInfo'] = 'l'\n pt.align['Value'] = 'l'\n pt.float_format = '8.5'\n\n # Gather all device information, do not show private\n # information that begins with an underscore\n show_info = self.post()\n public_keys = sorted([key for key in show_info.keys()\n if not key.startswith('_')])\n for key in public_keys:\n pt.add_row([key, show_info[key]])\n\n print(pt, file=handle)", "def get_atom_info(self):\n return", "def metadata(self): # -> list[Unknown]:\n ...", "def metadata(self): # -> list[Unknown]:\n ...", "def get_info(self):\n raise NotImplementedError(\"Robot.get_info\")", "def describe():", "def get_info(self):\n return {}", "def describe_collect(self):\n dd = dict()\n dd.update(self.detector.hdf1.full_file_name.describe())\n return {self.stream_name: dd}", "def full_info(files: List[str], args, dir_: str ='.') -> List[str]:\n temp_info = []\n for item in files:\n f_info = {}\n f_st = os.stat(os.path.join(CURRENT_DIR, dir_, item))\n f_info['mpde'] = f'{stat.filemode(f_st.st_mode):10}'\n f_info['nlink'] = f'{f_st.st_nlink:>3}'\n f_info['uid'] = f'{f_st.st_uid:>3}'\n size = f_st.st_size\n if args.block_size:\n size = ceil(size / args.block_size)\n f_info['size'] = f'{size:>8}'\n date = dt.datetime.fromtimestamp(f_st.st_mtime)\n if (dt.datetime.now() - date).days / 30 > 6:\n date_format = '%b %d %Y'\n else:\n date_format = '%b %d %I:%M'\n f_info['time'] = f'{date.strftime(date_format)} '\n f_info['name'] = f'{item:<}'\n 
temp_info.append(\n ' '.join([f_info['mpde'], f_info['nlink'], f_info['uid'],\n f_info['size'], f_info['time'], f_info['name']])\n )\n temp_info.append('\\n')\n return temp_info", "def get_files_info(self, sid):\n try:\n return self.datas.get_file_info(sid)\n except Exception as ex:\n raise ex", "def get_info(curf, begin, read, param_of_unpack):\n curf.seek(begin)\n info = curf.read(read)\n info = struct.unpack(param_of_unpack, info)\n return str(info[0])", "def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response", "def info(self, *path):\n target = self.localpath(*path)\n return _open_file_info(target + '.info')", "def get_flt_info(files=[], columns=['FILE', 'FILTER', 'INSTRUME', 'DETECTOR', 'TARGNAME', 'DATE-OBS', 'TIME-OBS', 'EXPSTART', 'EXPTIME', 'PA_V3', 'RA_TARG', 'DEC_TARG', 'POSTARG1', 'POSTARG2']):\n import astropy.io.fits as pyfits\n from astropy.table import Table\n \n if not files:\n files=glob.glob('*flt.fits')\n \n N = len(files)\n \n data = []\n\n for i in range(N):\n line = [os.path.basename(files[i]).split('.gz')[0]]\n if files[i].endswith('.gz'):\n im = pyfits.open(files[i])\n h = im[0].header\n else:\n h = pyfits.Header().fromfile(files[i])\n \n filt = get_hst_filter(h)\n line.append(filt)\n has_columns = ['FILE', 'FILTER']\n for key in columns[2:]:\n if key in h:\n line.append(h[key])\n has_columns.append(key)\n else:\n continue\n \n data.append(line)\n \n tab = Table(rows=data, names=has_columns)\n return tab", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def describe(self, fieldids=None):\n if not fieldids:\n fieldids = self.fields.keys()\n fieldids.sort()\n \n fields = []\n for fieldid in fieldids:\n field = self.fields[fieldid]\n if field.fmql_type == FT_SUBFILE and field.subfileid == self.fileid:\n #raise FilemanError(\"Database Corrupt - subfile %s contains self\" % self.fileid)\n print \"Database Corrupt - subfile %s contains self\" % self.fileid\n continue\n\n fi = self.fields[fieldid].describe()\n fi['fieldhelp2'] = self.fieldhelp2(fieldid)\n fields.append(fi)\n\n return {'fields': fields, 'fileid': self.fileid, 'filename': self.filename,\n 'description': self.filedescription()}", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def test_fc(self):\n # These entries exist for both Nodal and VARIANT, but have different values\n # for the same model\n print(self.nhf.metadata.items())\n self.assertEqual(self.nhf.metadata[\"nMom\"], 35)\n self.assertEqual(self.nhf.metadata[\"nscoef\"], 3)\n\n # These entries are only for VARIANT\n self.assertEqual(self.nhf.metadata[\"npcbdy\"], 30)\n self.assertEqual(self.nhf.metadata[\"npcsym\"], 0)\n self.assertEqual(self.nhf.metadata[\"npcsec\"], 0)\n self.assertEqual(self.nhf.metadata[\"iwnhfl\"], 0)\n self.assertEqual(self.nhf.metadata[\"nMoms\"], 0)", "def info(self):\n return (self._title, self._version, self._descr)", "def _info(self) -> tfds.core.DatasetInfo:\n features = tfds.features.FeaturesDict({\n \"tokens\":\n tfds.features.Sequence(tfds.features.Text()),\n \"tags\":\n tfds.features.Sequence(\n 
tfds.features.ClassLabel(names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ])),\n \"langs\":\n tfds.features.Sequence(tfds.features.Text()),\n \"spans\":\n tfds.features.Sequence(tfds.features.Text()),\n })\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=features,\n supervised_keys=None,\n homepage=\"https://github.com/afshinrahimi/mmner\",\n citation=_CITATION,\n )", "def info_resource():\n fn = request.args.get(\"filename\")\n info = get_file_info(fn)\n if info:\n return jsonify(info)\n else:\n return Response(status=404)", "def show_footprint(self, fpname):\n logging.debug(\"show_footprint entered\")\n # container_name = \"%s-metadata\" % footprint_name\n # container = self.cf.get_container(container_name)\n # index = container.get_object(\"index.json\")\n # config = json.loads(index.fetch())\n # \n # \n # \n # logging.info(\"loaded footprint configuration\")\n # return config\n fp = self.get_footprint(fpname, start=False)\n pt = fp.status()\n print pt" ]
[ "0.6832259", "0.67963994", "0.66478056", "0.61971104", "0.61971104", "0.61895555", "0.6114911", "0.6114911", "0.6095414", "0.60491127", "0.5942189", "0.5878473", "0.58726853", "0.584561", "0.5845232", "0.58328277", "0.58308643", "0.58216065", "0.58098024", "0.5809369", "0.5808404", "0.5700839", "0.5679733", "0.56465554", "0.564043", "0.5585869", "0.5571613", "0.555905", "0.55556035", "0.55514425", "0.553725", "0.55369616", "0.5523301", "0.5518279", "0.55134916", "0.55087155", "0.5507574", "0.5505142", "0.5498808", "0.54986215", "0.54843175", "0.546671", "0.5465354", "0.5464934", "0.5455273", "0.5450243", "0.544956", "0.5444812", "0.5436229", "0.54332644", "0.5420041", "0.5414944", "0.54141563", "0.5399243", "0.539496", "0.53837866", "0.5376336", "0.53758496", "0.53752244", "0.5372484", "0.53679055", "0.5359003", "0.53582084", "0.5354922", "0.53420633", "0.53370184", "0.5329575", "0.5329475", "0.5324278", "0.5318372", "0.5316181", "0.53003025", "0.5299864", "0.5299864", "0.52997", "0.52961636", "0.5294598", "0.5294498", "0.528025", "0.5275508", "0.5271866", "0.5271866", "0.5270755", "0.5255864", "0.5254906", "0.52482796", "0.5245947", "0.5243816", "0.524196", "0.52409", "0.52345204", "0.52318895", "0.52317625", "0.52317625", "0.5231565", "0.5229532", "0.5218603", "0.5210127", "0.52100754", "0.52082735", "0.52045757" ]
0.0
-1
Deploy a new OneFS node
def create(self, username, machine_name, image, front_end, back_end, ram, cpu_count, txn_id): logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper()) resp = {'content' : {}, 'error': None, 'params': {}} logger.info('Task starting') try: resp['content'] = vmware.create_onefs(username, machine_name, image, front_end, back_end, ram, cpu_count, logger) except ValueError as doh: logger.error('Task failed: {}'.format(doh)) resp['error'] = '{}'.format(doh) logger.info('Task complete') return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deploy():", "def deploy_node(self, node_name, node, node_folder, client):\n if client is None:\n client = self.CLIENT\n\n # Create specific command for input and output parameters\n param_i = [\"--i={}\".format(p) for p in node[\"to_set\"]]\n param_o = [\"--o={}\".format(p) for p in node[\"to_get\"]]\n\n # Build the command with the wrapper, the host, the name of the files with the initial values and the parameters\n full_command = [\n os.path.basename(node[\"wrapper\"]),\n self.HOST,\n node_name,\n self.INIT_VALUES_FILE,\n *param_i,\n *param_o,\n ]\n\n # Add the \"--first\" option if the node is in the first group of the sequence\n if node[\"is_first\"]:\n full_command.append(\"--first\")\n\n # Add additional command\n if node[\"command\"]:\n full_command.append(\"--cmd={}\".format(node[\"command\"]))\n\n if node[\"is_local\"]:\n logging.info(\"The node {} needs to be deployed manually in the temporary node folder.\".format(node_name))\n return ' '.join([\"python\"] + full_command)\n\n else:\n # Run the container with the Docker API\n client.containers.run(\n image=node[\"image\"],\n name=node_name,\n volumes={\n os.path.abspath(node_folder): {\"bind\": \"/home/project\", \"mode\": \"rw\"}\n },\n command=full_command,\n detach=True,\n auto_remove=True,\n )\n\n logging.info(\"The node {} is deployed.\".format(node_name))\n\n return client.containers.get(node_name).logs(stream=True)", "def do_node_deploy(self, context, node_id):\n self.cast(context,\n self.make_msg('do_node_deploy',\n node_id=node_id))", "def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')", "def deploy(config, args):\n log = logging.getLogger('kraftwerk.deploy')\n \n # TODO better way to detect new, or maybe move to dedicated command\n stdout, stderr = args.node.ssh('stat /var/service/%s' % args.project.name, pipe=True)\n new = bool(stderr) or args.override\n \n # Sync codebase over with the web user\n destination = 'web@%s:/web/%s/' % (args.node.hostname, args.project.name)\n stdout, stderr = args.project.rsync(destination)\n if stderr:\n log.error(\"Sync error: %s\" % stderr)\n sys.exit(stderr)\n \n # Copy requirements\n args.project.copy(args.node, 'requirements.txt')\n \n # Put together the setup script\n cmd = config.template(\"scripts/project_setup.sh\", \n project=args.project, new=new, \n upgrade_packages=args.upgrade_packages)\n stdout, stderr = args.node.ssh(cmd, pipe=True)\n if stderr:\n print stderr\n \n # TODO detect new services\n if not args.no_service_setup and new:\n for service in args.project.services():\n args.node.ssh(service.setup_script)\n \n print u\"%s live at %r\" % (args.project.canonical_domain(), args.node.hostname)", "def deploy():\n update_treesheets()\n restart_treesheets()", "def setup_node(config, args):\n if args.templates:\n config['templates'].insert(0, args.templates)\n config.templates = config._templates()\n stdin, stderr = args.node.ssh(config.template(\"scripts/node_setup.sh\"))\n if stderr:\n print stderr\n else:\n print u\"Node ready at %s\" % (args.node.hostname)", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy(self):\n src = self.settings.get('src', None)\n \n if src is not None:\n typ, _, src = src.partition(\":\")\n self._deployment = Deployment.get_deployment(typ)(self, src)\n else:\n self._deployment = Deployment(self, None)\n \n self._thread = self.node.spawn_thread( self._deployment.start )\n eventlet.sleep(0)", "def deploy():\n 
build()\n copy()\n install()", "def task_deploy_nodes(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'deploy_nodes':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_deploy_nodes\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def deploy(self, topology):\n print \"ABC - Deployer.deploy()\"", "def deploy():\n upload_static()\n compile_code()\n upload_code()\n upload_supervisor()\n start_server()", "def deploy():\n stage(branch='live', role='live')", "def deploy(parameters):\n\n print(\"In deploy module\")", "def upload_config_for_node_and_env_in_transitional_state(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n bs_node = [\n node for node in self.env.d_env.get_nodes()\n if node.name == 'slave-05']\n self.env.bootstrap_nodes(bs_node)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute']})\n target_node = bs_node[0]\n target_node_id = self.fuel_web.get_nailgun_node_by_devops_node(\n target_node)['id']\n\n config = {'nova_config': {'foo': {'value': 'bar'}}}\n\n self.show_step(3)\n task = self.fuel_web.deploy_cluster(cluster_id)\n # wait for creation of child 'deployment' task\n self.fuel_web.wait_for_tasks_presence(self.fuel_web.client.get_tasks,\n name='deployment',\n parent_id=task.get('id'))\n\n self.show_step(4)\n self.show_step(5)\n expected_code = 403\n err_msg = 'A configuration was applied for env in deploying state'\n check_response_code(\n expected_code, err_msg,\n self.fuel_web.client.upload_configuration,\n config, cluster_id)\n\n self.show_step(6)\n self.wait_for_node_status(target_node, 'provisioning')\n\n self.show_step(7)\n self.show_step(8)\n err_msg = 'A configuration was applied for node in provisioning state'\n check_response_code(\n expected_code, err_msg,\n self.fuel_web.client.upload_configuration,\n config, cluster_id, node_id=target_node_id)\n\n self.show_step(9)\n self.wait_for_node_status(target_node, 'deploying')\n\n self.show_step(10)\n self.show_step(11)\n err_msg = 'A configuration was applied for node in deploying state'\n check_response_code(\n expected_code, err_msg,\n self.fuel_web.client.upload_configuration,\n config, cluster_id, node_id=target_node_id)\n\n self.show_step(12)\n self.fuel_web.assert_task_success(task, timeout=7800, interval=30)\n\n snapshot_name = \"upload_config_for_node_and_env_in_transitional_state\"\n self.env.make_snapshot(snapshot_name)", "def create_node(self, node_cfg):\n with self.__connect_node(node_cfg) as conn:\n self._provision_node(conn, node_cfg)\n self._bootup_node(conn)", "def provision_server(self, body):\n if not body:\n raise AssertionError(\"Payload cannot be empty\")\n\n self.nodes = len(body.get('nodes')) if body.get('os') else 1\n\n _cmd = 
'mktemp -d'\n workspace = self._remote_cmd(_cmd).get('output')\n xml = self._pre_tasks(body, workspace)\n log = workspace + '/' + 'rg_cpt_deploy.log'\n\n _bin = '/usr/bin/nohup /usr/bin/l2add'\n _cmd = '{} -f {} -c y -r > {} 2>&1 &'.format(_bin, xml, log)\n\n if self._remote_cmd(_cmd, block=False).get('output') is None:\n raise AssertionError(\"Error encountered during provisioning\")\n\n return log", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def create_node(config, args):\n log = logging.getLogger('kraftwerk.create-node')\n \n if 'pubkey' in config:\n pubkey_paths = [config[\"pubkey\"]]\n else:\n pubkey_paths = [os.path.join(os.environ['HOME'], '.ssh', f) for f \\\n in ['id_rsa.pub', 'id_dsa.pub']]\n\n for path in pubkey_paths:\n if os.path.exists(path):\n print 'SSH public key: %s' % path\n with open(path) as fp:\n pubkey = fp.read().strip()\n break\n else:\n pubkey = raw_input('Your SSH public key (for root login): ')\n \n if not re.search(r'^[a-z0-9.-]+$', args.hostname):\n raise CommandError(\n \"Invalid hostname (must contain only letters, numbers, ., and -): %r\"\n % args.hostname)\n \n # Query driver for size, image, and location\n print args\n print dir(args)\n image_id = args.image_id or config['image_id']\n for i in config.driver.list_images():\n if str(i.id) == image_id:\n image = i\n break\n else:\n sys.exit(\"Image %s not found for this provider. Aborting.\" % image_id)\n \n size_id = args.size_id or config['size_id']\n for s in config.driver.list_sizes():\n if str(s.id) == size_id:\n size = s\n break\n else:\n sys.exit(\"Size %s not found for this provider. Aborting.\" % size_id)\n \n location_id = str(getattr(args, 'location-id', config.get(\"location_id\", \"0\")))\n if location_id != 'None':\n for l in config.driver.list_locations():\n if str(l.id) == location_id:\n location = l\n break\n else:\n sys.exit(\"Location %s not found for this provider. 
Aborting.\" % location_id)\n else:\n location = None\n \n if isinstance(config.driver, ec2.EC2NodeDriver):\n extra = dict(ex_userdata=\"\"\"#!/bin/bash\necho '%s' > /root/.ssh/authorized_keys\"\"\" % pubkey)\n if not \"keyname\" in config:\n config[\"keyname\"] = raw_input(\"EC2 Key Pair [default=\\\"default\\\"]: \") or \"default\"\n extra.update(ex_keyname=config[\"keyname\"])\n if 'securitygroup' in config:\n extra.update(ex_securitygroup=config[\"securitygroup\"])\n elif isinstance(config.driver, rackspace.RackspaceNodeDriver):\n extra = dict(ex_files={'/root/.ssh/authorized_keys': pubkey})\n elif isinstance(config.driver, linode.LinodeNodeDriver):\n from libcloud.base import NodeAuthSSHKey\n extra = dict(auth=NodeAuthSSHKey(pubkey))\n \n create_info = dict(name=args.hostname, location=location,\n image=image, size=size, **extra)\n node = config.driver.create_node(**create_info)\n public_ip = node.public_ip\n \n # Poll the node until it has a public ip\n while not public_ip:\n time.sleep(3)\n for node_ in config.driver.list_nodes():\n if node.id == node_.id and node_.public_ip:\n public_ip = node_.public_ip[0]\n\n if type(public_ip) == list:\n public_ip = public_ip[0]\n\n # At least EC2 passes only back hostname\n if not IP_RE.match(public_ip):\n public_ip = gethostbyname(public_ip)\n\n if confirm(\"Create /etc/hosts entry?\"):\n etchosts.set_etchosts(args.hostname, public_ip)\n \n print u\"Node %s (%s)\" % (args.hostname, public_ip)\n print u\"Run 'kraftwerk setup-node %s'\" % args.hostname", "def _provision_node(self, conn, node_cfg):\n # NOTE, general prepare for a node\n conn.run(f\"mkdir -p {EXPORTER_HOME}\")\n # docker, docker-compose\n conn.run(\"docker --help\")\n conn.run(\"docker-compose --help\")\n\n hostaddr = node_cfg.get(\"hostaddr\")\n username = node_cfg.get(\"username\")\n password = node_cfg.get(\"password\")\n\n conn.run(f\"mkdir -p {EXPORTER_HOME}/docker\")\n fpath = f\"{self.src_base_dir}/docker/{COMPOSE_FNAME}\"\n conn.local((f\"sshpass -p \\\"{password}\\\" \"\n f\"scp {fpath} {username}@{hostaddr}:{EXPORTER_HOME}/docker\"))", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def deploy():\n build()\n collect()\n commit()\n push()", "def createNIMDeploy(self):\n\n if self.deploy == 'y':\n\n # find next IP on the range\n #\n new_ip = nim.NIMNewIP()\n new_ip = new_ip.getNewIP(self.nim_address, self.nim_ipstart,\n self.nim_ipend, self.nim_ipnet)\n self.new_ip = new_ip\n f_nim_reserved_ips = open('%s/poweradm/data/reserved_ips' %\n config.pahome, 'a')\n f_nim_reserved_ips.write('%s\\n' % (self.new_ip))\n f_nim_reserved_ips.close()\n\n f_nim_exe = open('%s/poweradm/changes/deploy_nim_%s-%s.nim' %\n (config.pahome, self.lparprefix, self.lparname),\n 'w')\n\n def f_nimexe_chksh():\n f_nim_exe.write(\"\\nif [ $? != 0 ];\"\n \"then\\n\"\n \"\\techo 'An error has occurred. Check the \"\n \"actions taken.'; \\n\"\n \"\\texit;\\n\"\n \"else\\n\"\n \"\\techo 'Command OK. 
Continuing';\\n\"\n \"fi\\n\")\n\n f_nim_exe.write('#!/bin/sh\\n')\n\n f_nim_exe.write('\\n\\necho \"Adding host %s-%s on NIM Server '\n '/etc/hosts\"\\n' % (self.lparprefix, self.lparname))\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo hostent -a %s -h %s' %\n (self.nim_address, self.new_ip, self.lparname))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Creating machine %s-%s on NIM Server\"\\n'\n % (self.lparprefix, self.lparname))\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo nim -o define -t '\n 'standalone -a platform=chrp -a netboot_kernel=mp '\n '-a if1=\\\\\"$(ssh -l poweradm %s sudo lsnim -t ent '\n '| awk \\'{ print $1 }\\' | head -1) %s 0\\\\\" -a '\n 'cable_type1=tp %s\\n' % (self.nim_address,\n self.nim_address,\n self.lparname,\n self.lparname))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Resource alocations and perform '\n 'operations to %s-%s on NIM Server\"\\n'\n % (self.lparprefix, self.lparname))\n\n if config.nim_deploy_mode.lower() == 'mksysb':\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo nim -o bos_inst'\n ' -a source=mksysb -a spot=%s -a mksysb=%s -a '\n 'no_client_boot=yes %s -a '\n 'accept_licenses=yes %s\\n'\n % (self.nim_address, self.nim_cfg_spot,\n self.nim_cfg_mksysbspot, self.bosinst_data,\n self.lparname))\n\n f_nimexe_chksh()\n\n elif nim_deploy_mode.lower() == 'lpp':\n\n f_nim_exe.write('\\n\\nssh -l poweradm %s sudo nim -o bos_inst '\n '-a source=spot -a spot=%s -a lpp_source=%s '\n '-a no_client_boot=yes %s -a '\n 'accept_licenses=yes %s\\n'\n % (self.nim_address, self.nim_cfg_spot,\n self.nim_cfg_mksysbspot, self.bosinst_data,\n self.lparname))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Getting the Mac Address from %s-%s\"\\n'\n % (self.lparprefix, self.lparname))\n f_nim_exe.write('echo \"This might take a few minutes...\"\\n')\n\n f_nim_exe.write('\\n\\nmac_address=$(ssh -l poweradm %s '\n 'lpar_netboot -M -A -n -T off -t '\n 'ent %s-%s %s %s | grep C10-T1 | '\n 'awk \\'{ print $3 }\\')\\n'\n % (config.hmcserver, self.lparprefix,\n self.lparname, self.lparname, self.lparframe))\n f_nimexe_chksh()\n\n f_nim_exe.write('\\n\\necho \"Booting LPAR %s-%s on NIM Server\"\\n'\n % (self.lparprefix, self.lparname))\n f_nim_exe.write('echo \"This might take a few minutes...\"\\n')\n f_nim_exe.write('\\n\\nssh -l poweradm %s lpar_netboot -m '\n '$mac_address -T off -t ent -s auto -d auto '\n '-S %s -C %s %s-%s %s %s\\n'\n % (config.hmcserver, self.nim_ipdeploy,\n self.new_ip, self.lparprefix, self.lparname,\n self.lparname, self.lparframe))\n f_nimexe_chksh()\n\n print ('\\n\\nChange VLAN on profile to final config')\n f_nim_exe.write('\\n\\nssh -l poweradm %s chsyscfg -r prof -m '\n '%s -i \\'lpar_name=%s-%s, name=%s, '\n '\\\\\\\"virtual_eth_adapters=%s\\\\\\\"\\''\n % (config.hmcserver, self.lparframe,\n self.lparprefix, self.lparname, self.lparname,\n self.lparvlans))\n\n f_nim_exe.close()\n\n print ('\\n\\nInitializing deploy OS...')\n\n f_nim_deploy = open(self.nim_file, 'a')\n f_nim_deploy.write('#IP %s\\n' % (self.new_ip))\n f_nim_deploy.write('#NIMSERVER %s\\n' % (self.nim_server))\n f_nim_deploy.write('#NIMADDRESS %s\\n' % (self.nim_address))\n f_nim_deploy.close()\n\n os.system('sh %s/poweradm/changes/deploy_nim_%s-%s.nim' %\n (config.pahome, self.lparprefix, self.lparname))\n\n os.system('mv %s/poweradm/nim/%s-%s.nim %s/poweradm/nim_executed/'\n % (config.pahome, self.lparprefix,\n self.lparname, config.pahome))\n os.system('mv %s/poweradm/changes/deploy_nim_%s-%s.'\n 'nim %s/poweradm/changes_executed/'\n % 
(config.pahome, self.lparprefix, self.lparname,\n config.pahome))\n\n print ('\\nPlease, access HMC %s and run command below to finish '\n 'OS install. '\n '\\n\\t\\'mkvterm -m %s -p %s-%s\\' ' %\n (config.hmcserver, self.lparframe, self.lparprefix,\n self.lparname))", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def deploy_node(app, deltas={}):\n\n virtualenv_path = join(ENV_ROOT, app)\n node_path = join(ENV_ROOT, app, \"node_modules\")\n node_modules_symlink = join(APP_ROOT, app, \"node_modules\")\n npm_prefix = abspath(join(node_path, \"..\"))\n env_file = join(APP_ROOT, app, 'ENV')\n deps = join(APP_ROOT, app, 'package.json')\n\n first_time = False\n if not exists(node_path):\n echo(\"-----> Creating node_modules for '{}'\".format(app), fg='green')\n makedirs(node_path)\n first_time = True\n\n env = {\n 'VIRTUAL_ENV': virtualenv_path,\n 'NODE_PATH': node_path,\n 'NPM_CONFIG_PREFIX': npm_prefix,\n \"PATH\": ':'.join([join(virtualenv_path, \"bin\"), join(node_path, \".bin\"), environ['PATH']])\n }\n if exists(env_file):\n env.update(parse_settings(env_file, env))\n\n # include node binaries on our path\n environ[\"PATH\"] = env[\"PATH\"]\n\n version = env.get(\"NODE_VERSION\")\n node_binary = join(virtualenv_path, \"bin\", \"node\")\n installed = check_output(\"{} -v\".format(node_binary), cwd=join(APP_ROOT, app), env=env, shell=True).decode(\"utf8\").rstrip(\n \"\\n\") if exists(node_binary) else \"\"\n\n if version and check_requirements(['nodeenv']):\n if not installed.endswith(version):\n started = glob(join(UWSGI_ENABLED, '{}*.ini'.format(app)))\n if installed and len(started):\n echo(\"Warning: Can't update node with app running. 
Stop the app & retry.\", fg='yellow')\n else:\n echo(\"-----> Installing node version '{NODE_VERSION:s}' using nodeenv\".format(**env), fg='green')\n call(\"nodeenv --prebuilt --node={NODE_VERSION:s} --clean-src --force {VIRTUAL_ENV:s}\".format(**env),\n cwd=virtualenv_path, env=env, shell=True)\n else:\n echo(\"-----> Node is installed at {}.\".format(version))\n\n if exists(deps) and check_requirements(['npm']):\n if first_time or getmtime(deps) > getmtime(node_path):\n copyfile(join(APP_ROOT, app, 'package.json'), join(ENV_ROOT, app, 'package.json'))\n if not exists(node_modules_symlink):\n symlink(node_path, node_modules_symlink)\n echo(\"-----> Running npm for '{}'\".format(app), fg='green')\n call('npm install --prefix {} --package-lock=false'.format(npm_prefix), cwd=join(APP_ROOT, app), env=env, shell=True)\n return spawn_app(app, deltas)", "def deploy(self, driver, location_id=config.DEFAULT_LOCATION_ID,\n size=config.DEFAULT_SIZE):\n\n logger.debug('deploying node %s using driver %s' % (self.name, driver))\n\n args = {'name': self.name}\n\n if hasattr(config, 'SSH_KEY_NAME'):\n args['ex_keyname'] = config.SSH_KEY_NAME\n\n if hasattr(config, 'EX_USERDATA'):\n args['ex_userdata'] = config.EX_USERDATA\n\n args['location'] = driver.list_locations()[location_id]\n logger.debug('location %s' % args['location'])\n\n args['size'] = size_from_name(size, driver.list_sizes())\n logger.debug('size %s' % args['size'])\n\n logger.debug('image name %s' % config.IMAGE_NAMES[self.image_name])\n args['image'] = image_from_name(\n config.IMAGE_NAMES[self.image_name], driver.list_images())\n logger.debug('image %s' % args['image'])\n\n logger.debug('creating node with args: %s' % args)\n node = driver.create_node(**args)\n logger.debug('node created')\n\n # password must be extracted before _wait_until_running(), where it goes away\n logger.debug('driver.features %s' % driver.features)\n password = node.extra.get('password') \\\n if 'generates_password' in driver.features['create_node'] else None\n\n logger.debug('waiting for node to obtain %s' % config.SSH_INTERFACE)\n node, ip_addresses = driver._wait_until_running(\n node, timeout=1200, ssh_interface=config.SSH_INTERFACE)\n\n ssh_args = {'hostname': ip_addresses[0], 'port': 22, 'timeout': 10}\n if password:\n ssh_args['password'] = password\n else:\n ssh_args['key'] = config.SSH_KEY_PATH if hasattr(config, 'SSH_KEY_PATH') else None\n\n logger.debug('initializing ssh client with %s' % ssh_args)\n ssh_client = libcloud.compute.ssh.SSHClient(**ssh_args)\n\n logger.debug('ssh client attempting to connect')\n ssh_client = driver._ssh_client_connect(ssh_client)\n logger.debug('ssh client connected')\n\n logger.debug('starting node deployment with %s steps' % len(self.deployment.steps))\n driver._run_deployment_script(self.deployment, node, ssh_client)\n\n node.script_deployments = self.script_deployments # retain exit_status, stdout, stderr\n\n logger.debug('node.extra[\"imageId\"] %s' % node.extra['imageId'])\n\n return NodeProxy(node, args['image'])", "def deploy(self):\n raise NotImplementedError('You must implement the deploy() method '\n 'yourself!')", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def _provision_node(\n self,\n name,\n hostname,\n ):\n docker_utils.install_docker(\n hostname=hostname,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(name),\n ssh_private_key_file=self.get_ssh_private_key_file(name),\n executor=name,\n 
logger=self._logger,\n )", "def full_deploy(api_version='HEAD', renderer_version='HEAD',\n markup_renderer_version=None):\n setup()\n\n api.full_deploy(api_version)\n renderer.full_deploy(renderer_version)\n markup_renderer.full_deploy(markup_renderer_version)\n\n upload_nginx_conf()\n upload_uwsgi_conf()\n install_systemd_services()", "def start_node(self, **kwargs):\n # project_name, node_name\n\n try:\n if kwargs['project_name'] in self.data:\n project_name = kwargs['project_name']\n project_id = self.data[project_name]['project_id']\n if kwargs['node_name'] in self.data[project_name]['nodes']:\n node_name = kwargs['node_name']\n node_id = self.data[project_name]['nodes'][node_name]['node_id']\n resp = self.post_to_server('projects/{}/nodes/{}/start'.format(project_id, node_id),{})\n print('Node \\'{}\\' started.'.format(node_name))\n self.data[project_name]['nodes'][node_name]['status'] = \"running\"\n except:\n traceback_print_exc()", "def deploy():\n test()\n if not env.is_staging:\n backup()\n prepare()\n restart_api()", "def create_node(self, **kwargs):\n size = kwargs['size'].ram\n params = {\n 'cmd' : 'dreamhost_ps-add_ps',\n 'movedata' : kwargs.get('movedata', 'no'),\n 'type' : kwargs['image'].name,\n 'size' : size\n }\n data = self.connection.request('/', params).object\n return Node(\n id = data['added_web'],\n name = data['added_web'],\n state = NodeState.PENDING,\n public_ip = [],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'type' : kwargs['image'].name\n }\n )", "def deploy(self):\n\n # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.\n if not self.vm_deploy:\n return\n\n self.connection = ssh.SSH.from_node(self.host_mgmt)\n self.dpdk_nic_bind = provision_tool(\n self.connection,\n os.path.join(get_nsb_option(\"bin_path\"), \"dpdk-devbind.py\"))\n\n # Check dpdk/ovs version, if not present install\n self.check_ovs_dpdk_env()\n # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.\n StandaloneContextHelper.install_req_libs(self.connection)\n self.networks = StandaloneContextHelper.get_nic_details(self.connection,\n self.networks,\n self.dpdk_nic_bind)\n\n self.setup_ovs()\n self.start_ovs_serverswitch()\n self.setup_ovs_bridge_add_flows()\n self.nodes = self.setup_ovs_dpdk_context()\n LOG.debug(\"Waiting for VM to come up...\")\n self.nodes = StandaloneContextHelper.wait_for_vnfs_to_start(self.connection,\n self.servers,\n self.nodes)", "def setup_node(\n *,\n # Change this to take host, user, and identity_file?\n # Add some kind of caching for SSH connections so that they\n # can be looked up by host and reused?\n ssh_client: paramiko.client.SSHClient,\n services: list,\n cluster: FlintrockCluster):\n host = ssh_client.get_transport().getpeername()[0]\n ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n\n echo {private_key} > \"$HOME/.ssh/id_rsa\"\n echo {public_key} >> \"$HOME/.ssh/authorized_keys\"\n\n chmod 400 \"$HOME/.ssh/id_rsa\"\n \"\"\".format(\n private_key=shlex.quote(cluster.ssh_key_pair.private),\n public_key=shlex.quote(cluster.ssh_key_pair.public)))\n\n with ssh_client.open_sftp() as sftp:\n sftp.put(\n localpath=os.path.join(SCRIPTS_DIR, 'setup-ephemeral-storage.py'),\n remotepath='/tmp/setup-ephemeral-storage.py')\n\n logger.info(\"[{h}] Configuring ephemeral storage...\".format(h=host))\n # TODO: Print some kind of warning if storage is large, since formatting\n # will take several minutes (~4 minutes for 2TB).\n storage_dirs_raw = ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n 
set -e\n python /tmp/setup-ephemeral-storage.py\n rm -f /tmp/setup-ephemeral-storage.py\n \"\"\")\n storage_dirs = json.loads(storage_dirs_raw)\n\n cluster.storage_dirs.root = storage_dirs['root']\n cluster.storage_dirs.ephemeral = storage_dirs['ephemeral']\n\n ensure_java8(ssh_client)\n\n for service in services:\n service.install(\n ssh_client=ssh_client,\n cluster=cluster)", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)", "def deploy(c, _hosts=\"\"):\n eve = DeployHost(\"eve.i\", user=\"root\")\n if _hosts != \"\":\n hosts = get_hosts(_hosts)\n else:\n hosts = [\n eve,\n DeployHost(\n \"localhost\",\n user=\"joerg\",\n meta=dict(\n extra_args=[\"--use-remote-sudo\"],\n flake_path=\"/home/joerg/.homesick/repos/dotfiles\",\n ),\n forward_agent=True,\n ),\n DeployHost(\n \"eve.i\",\n user=\"root\",\n forward_agent=True,\n command_prefix=\"eva.r\",\n meta=dict(target_host=\"eva.i\", flake_attr=\"eva\"),\n ),\n DeployHost(\n \"eve.i\",\n user=\"root\",\n forward_agent=True,\n command_prefix=\"blob64.r\",\n meta=dict(target_host=\"blob64.r\", flake_attr=\"blob64\"),\n ),\n ]\n deploy_nixos(hosts)\n eve.run(\"systemctl restart buildbot-master\")", "def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def create_node(\n self,\n name,\n size,\n image,\n ex_cloud_service_name,\n ex_storage_service_name=None,\n ex_new_deployment=False,\n ex_deployment_slot=\"Production\",\n ex_deployment_name=None,\n ex_admin_user_id=\"azureuser\",\n ex_custom_data=None,\n ex_virtual_network_name=None,\n ex_network_config=None,\n auth=None,\n **kwargs,\n ):\n # TODO: Refactor this method to make it more readable, split it into\n # multiple smaller methods\n auth = self._get_and_check_auth(auth)\n password = auth.password\n\n if not isinstance(size, NodeSize):\n raise ValueError(\"Size must be an instance of NodeSize\")\n\n if not isinstance(image, NodeImage):\n raise ValueError(\"Image must be an instance of NodeImage, \" \"produced by list_images()\")\n\n # Retrieve a list of currently available nodes for the provided cloud\n # service\n node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name)\n\n if ex_network_config is None:\n network_config = ConfigurationSet()\n else:\n network_config = ex_network_config\n network_config.configuration_set_type = \"NetworkConfiguration\"\n\n # Base64 encode custom data if provided\n if ex_custom_data:\n ex_custom_data = self._encode_base64(data=ex_custom_data)\n\n # We do this because we need to pass a Configuration to the\n # method. 
This will be either Linux or Windows.\n if WINDOWS_SERVER_REGEX.search(image.id, re.I):\n machine_config = WindowsConfigurationSet(\n computer_name=name,\n admin_password=password,\n admin_user_name=ex_admin_user_id,\n )\n\n machine_config.domain_join = None\n\n if not node_list or ex_new_deployment:\n port = \"3389\"\n else:\n port = random.randint(41952, 65535)\n endpoints = self._get_deployment(\n service_name=ex_cloud_service_name,\n deployment_slot=ex_deployment_slot,\n )\n\n for instances in endpoints.role_instance_list:\n ports = [ep.public_port for ep in instances.instance_endpoints]\n\n while port in ports:\n port = random.randint(41952, 65535)\n\n endpoint = ConfigurationSetInputEndpoint(\n name=\"Remote Desktop\",\n protocol=\"tcp\",\n port=port,\n local_port=\"3389\",\n load_balanced_endpoint_set_name=None,\n enable_direct_server_return=False,\n )\n else:\n if not node_list or ex_new_deployment:\n port = \"22\"\n else:\n port = random.randint(41952, 65535)\n endpoints = self._get_deployment(\n service_name=ex_cloud_service_name,\n deployment_slot=ex_deployment_slot,\n )\n\n for instances in endpoints.role_instance_list:\n ports = []\n if instances.instance_endpoints is not None:\n for ep in instances.instance_endpoints:\n ports += [ep.public_port]\n\n while port in ports:\n port = random.randint(41952, 65535)\n\n endpoint = ConfigurationSetInputEndpoint(\n name=\"SSH\",\n protocol=\"tcp\",\n port=port,\n local_port=\"22\",\n load_balanced_endpoint_set_name=None,\n enable_direct_server_return=False,\n )\n machine_config = LinuxConfigurationSet(\n name, ex_admin_user_id, password, False, ex_custom_data\n )\n\n network_config.input_endpoints.items.append(endpoint)\n\n _storage_location = self._get_cloud_service_location(service_name=ex_cloud_service_name)\n\n if ex_storage_service_name is None:\n ex_storage_service_name = ex_cloud_service_name\n ex_storage_service_name = re.sub(\n r\"[\\W_-]+\", \"\", ex_storage_service_name.lower(), flags=re.UNICODE\n )\n\n if self._is_storage_service_unique(service_name=ex_storage_service_name):\n self._create_storage_account(\n service_name=ex_storage_service_name,\n location=_storage_location.service_location,\n is_affinity_group=_storage_location.is_affinity_group,\n )\n\n # OK, bit annoying here. 
You must create a deployment before\n # you can create an instance; however, the deployment function\n # creates the first instance, but all subsequent instances\n # must be created using the add_role function.\n #\n # So, yeah, annoying.\n if not node_list or ex_new_deployment:\n # This is the first node in this cloud service.\n\n if not ex_deployment_name:\n ex_deployment_name = ex_cloud_service_name\n\n vm_image_id = None\n disk_config = None\n\n if image.extra.get(\"vm_image\", False):\n vm_image_id = image.id\n # network_config = None\n else:\n blob_url = \"http://%s.blob.core.windows.net\" % (ex_storage_service_name)\n\n # Azure's pattern in the UI.\n disk_name = \"{}-{}-{}.vhd\".format(\n ex_cloud_service_name,\n name,\n time.strftime(\"%Y-%m-%d\"),\n )\n\n media_link = \"{}/vhds/{}\".format(blob_url, disk_name)\n\n disk_config = OSVirtualHardDisk(image.id, media_link)\n\n response = self._perform_post(\n self._get_deployment_path_using_name(ex_cloud_service_name),\n AzureXmlSerializer.virtual_machine_deployment_to_xml(\n ex_deployment_name,\n ex_deployment_slot,\n name,\n name,\n machine_config,\n disk_config,\n \"PersistentVMRole\",\n network_config,\n None,\n None,\n size.id,\n ex_virtual_network_name,\n vm_image_id,\n ),\n )\n self.raise_for_response(response, 202)\n self._ex_complete_async_azure_operation(response)\n else:\n _deployment_name = self._get_deployment(\n service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot\n ).name\n\n vm_image_id = None\n disk_config = None\n\n if image.extra.get(\"vm_image\", False):\n vm_image_id = image.id\n # network_config = None\n else:\n blob_url = \"http://%s.blob.core.windows.net\" % (ex_storage_service_name)\n disk_name = \"{}-{}-{}.vhd\".format(\n ex_cloud_service_name,\n name,\n time.strftime(\"%Y-%m-%d\"),\n )\n media_link = \"{}/vhds/{}\".format(blob_url, disk_name)\n disk_config = OSVirtualHardDisk(image.id, media_link)\n\n path = self._get_role_path(ex_cloud_service_name, _deployment_name)\n body = AzureXmlSerializer.add_role_to_xml(\n name, # role_name\n machine_config, # system_config\n disk_config, # os_virtual_hard_disk\n \"PersistentVMRole\", # role_type\n network_config, # network_config\n None, # availability_set_name\n None, # data_virtual_hard_disks\n vm_image_id, # vm_image\n size.id, # role_size\n )\n\n response = self._perform_post(path, body)\n self.raise_for_response(response, 202)\n self._ex_complete_async_azure_operation(response)\n\n return Node(\n id=name,\n name=name,\n state=NodeState.PENDING,\n public_ips=[],\n private_ips=[],\n driver=self.connection.driver,\n extra={\"ex_cloud_service_name\": ex_cloud_service_name},\n )", "def deploy(n = 10):\n upload_current_release()\n install_requisites()\n create_redirects()\n make_symlinks()\n symlink_current_release()\n sudo('service nginx reload')\n gc_deploys(n)", "def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()", "def post(self):\n node_id = blockchain.register_node(request.host)\n\n return {\n 'message': 'New node have been added.',\n 'node_id': node_id,\n 'nodes': list(blockchain.nodes)\n }, 201", "def deploy(self):\n\n netlify_cli = getattr(settings, \"NETLIFY_PATH\", None)\n if not netlify_cli:\n raise CommandError(\"NETLIFY_PATH is not defined in settings\")\n\n deployment = Deployment()\n deployment.save()\n\n command = [netlify_cli, \"deploy\"]\n command.append(\"--dir={}\".format(settings.BUILD_DIR))\n 
command.append(\"--prod\")\n command.append('--message=\"Wagtail Deployment #{}\"'.format(deployment.pk))\n\n site_id = getattr(settings, \"NETLIFY_SITE_ID\", None)\n if site_id:\n command.append(\"--site={}\".format(site_id))\n\n auth_token = getattr(settings, \"NETLIFY_API_TOKEN\", None)\n if auth_token:\n command.append(\"--auth={}\".format(auth_token))\n\n subprocess.call(command)", "def register_with_existing_node():\n #print('********************')\n print(request.get_json())\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n peers.update(response.json()['peers'])\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n #print(response.content)\n #print(response.status_code)\n return response.content, response.status_code", "def deploy(fingerengine, fingerprint):\n\n global cookie \n\n cfm_path = abspath(fingerengine.options.deploy) \n cfm_file = parse_war_path(cfm_path, True)\n dip = fingerengine.options.ip\n\n # set our session cookie\n cookie = checkAuth(dip, fingerprint.port, title)\n if not cookie:\n utility.Msg(\"Could not get auth to %s:%s\" % (dip, fingerprint.port),\n LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}..\".format(cfm_file))\n utility.Msg(\"Fetching web root..\", LOG.DEBUG)\n\n # fetch web root; i.e. 
where we can read the shell\n root = fetch_webroot(dip, fingerprint)\n if not root:\n utility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n return\n\n # create the scheduled task \n utility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n utility.Msg(\"Creating scheduled task...\")\n\n if not create_task(dip, fingerprint, cfm_file, root):\n return\n\n # invoke the task\n utility.Msg(\"Task %s created, invoking...\" % cfm_file)\n run_task(dip, fingerprint, cfm_path)\n \n # remove the task\n utility.Msg(\"Cleaning up...\")\n delete_task(dip, fingerprint, cfm_file)", "def create(self, node_conf):\n if self.exists():\n msg = \"The node with name {0} is already defined.\"\n msg = msg.format(self.name)\n raise RuntimeError(msg)\n\n os.mkdir(self.node_dir, 0755)\n\n self.resources = node_conf['resources']\n self.services = node_conf['services']\n self.image = image.Image(self.parsed_args,\n node_conf['image'])\n self.uuid = uuid.uuid4().hex\n\n with open(self.conf_path, 'wb') as conf_file:\n conf_file.write(yaml.dump(self.get_info(),\n default_flow_style=False))", "def deploy_k3s(c):\n deploy_nixos(\n [\n DeployHost(\n \"node0.nixos-k3s.Serverless-tum.emulab.net\",\n user=\"root\",\n meta=dict(flake_attr=\"cloudlab-k3s-server\"),\n ),\n DeployHost(\n \"node1.nixos-k3s.Serverless-tum.emulab.net\",\n user=\"root\",\n meta=dict(flake_attr=\"cloudlab-k3s-agent\"),\n ),\n DeployHost(\n \"node2.nixos-k3s.Serverless-tum.emulab.net\",\n user=\"root\",\n meta=dict(flake_attr=\"cloudlab-k3s-agent\"),\n ),\n ]\n )", "def post(self, node):\n if self._from_chassis:\n raise exception.OperationNotPermitted\n\n try:\n new_node = pecan.request.dbapi.create_node(node.as_dict())\n except Exception as e:\n with excutils.save_and_reraise_exception():\n LOG.exception(e)\n return Node.convert_with_links(new_node)", "def _deploy_instance(self):\n if not os.path.exists(self.instance_path):\n pw = pwd.getpwnam(self.user)\n mode = (\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH)\n utils.mkdir(self.instance_path, mode, pw[2], pw[3])\n path = \"{}/src/automx_wsgi.py\".format(self.repo_dir)\n utils.exec_cmd(\"cp {} {}\".format(path, self.instance_path),\n sudo_user=self.user, cwd=self.home_dir)", "def deploy(upgrade=False):\n print(\"Deploying project on {} !\".format(env.stage))\n execute('system.setup')\n execute('git.checkout')\n execute('virtualenv.setup')\n execute('django.setup')\n execute('cron.setup')\n execute('uwsgi.setup')\n execute('supervisor.setup')\n execute('nginx.setup')", "def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def repository_create_hosted():\n pass", "def update():\n\n # update plone\n with cd(env.directory):\n sudo('git pull', user=env.deploy_user)\n\n with cd(env.directory):\n stop()\n sudo('git checkout {}'.format(env.branch), user=env.deploy_user)\n\n # bootstrap\n\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('rm -rf ./src-mrd', user=env.deploy_user)\n else:\n sudo('./bin/pip install --no-cache-dir -r requirements.txt', user=env.deploy_user) # noqa: E501\n\n sudo('rm -rf ./var/blobstorage ./var/filestorage .installed.cfg ', user=env.deploy_user) # noqa: E501\n\n # buildout\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start zope\n start()\n 
sudo(\"sleep 10\")\n\n # create plonesite with addons (uses different ports for py2 and py3)\n if env.latest:\n if env.python3:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py3.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone-latest-py2.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n else:\n with cd(env.directory):\n sudo(\"/usr/bin/wget -O- --user=admin --password=admin --post-data='site_id=Plone&form.submitted=True&title=Website&default_language=de&portal_timezone=Europe/Berlin&extension_ids=plonetheme.barceloneta:default&extension_ids=plone.app.contenttypes:plone-content&extension_ids=plonedemo.site:default' http://127.0.0.1:{zeoclient_port}/@@plone-addsite &> ./var/log/wget_demo-plone.log\".format(zeoclient_port=env.zeoclient_port), user=env.deploy_user) # noqa: E501\n\n # load page to warmup\n sudo('/usr/bin/wget -S -qO- {domain} > /dev/null'.format(domain=env.domain), user=env.deploy_user) # noqa: E501", "def deploy(site):\n\n # Stop the program if the folder isn't initialized yet.\n Vagrant.stop_if_not_init()\n\n # Stop the program if the site is NOT herokufied.\n Heroku.stop_if_not_herokufied(site)\n\n # Now, run the \"deployheroku\" script on the VM.\n # That will deploy the site for you.\n Vagrant.run_script_on_vm(\"deployheroku\", site)", "def upload(self, connection):\n if not self.already_deployed(connection):\n if self.config.project_type == \"java\":\n print(blue('Pushing jar to nexus server'))\n connection.local('mvn deploy')\n self._already_deployed = True\n else:\n raise Exception(f\"Unsupported project type: {self.config.project_type}\")", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def install():\n deploy()\n configure()", "def test_create_deployment_entire(self):\n pass", "async def deploy(self, vnf_index, charm, params, loop):\n\n if not self.n2vc:\n self.n2vc = get_n2vc(loop=loop)\n\n debug(\"Creating model for Network Service {}\".format(self.ns_name))\n await self.n2vc.CreateNetworkService(self.ns_name)\n\n application = self.n2vc.FormatApplicationName(\n self.ns_name,\n self.vnf_name,\n str(vnf_index),\n )\n\n # Initialize the state of the application\n self.state[application] = {\n 'status': None, # Juju status\n 'container': None, # lxd container, for proxy charms\n 'actions': {}, # Actions we've executed\n 'done': False, # Are we done testing this charm?\n 'phase': \"deploy\", # What phase is this application in?\n }\n\n debug(\"Deploying charm at {}\".format(self.artifacts[charm]))\n\n # If this is a native charm, we need to provision the underlying\n # machine ala an LXC container.\n machine_spec = {}\n\n if not 
self.isproxy(application):\n debug(\"Creating container for native charm\")\n # args = (\"default\", application, None, None)\n self.state[application]['container'] = create_lxd_container(\n name=os.path.basename(__file__)\n )\n\n hostname = self.get_container_ip(\n self.state[application]['container'],\n )\n\n machine_spec = {\n 'hostname': hostname,\n 'username': 'ubuntu',\n }\n\n await self.n2vc.DeployCharms(\n self.ns_name,\n application,\n self.vnfd,\n self.get_charm(charm),\n params,\n machine_spec,\n self.n2vc_callback,\n )", "def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))", "def create_qemu_node(self, name, image, images=[], properties={}, config={}, disk=None):\n # Create an ISO image containing the boot configuration and upload it\n # to the GNS3 project. We write the config to a temporary file,\n # convert it to ISO image, then post the ISO image to GNS3.\n\n assert image\n\n print(f\"Building ISO configuration for {name}...\")\n\n # Generate the ISO image that will be used as a virtual CD-ROM to pass all this initialization data to cloud-init.\n\n genisoimage_command = [\"genisoimage\", \"-input-charset\", \"utf-8\", \"-o\", \"-\", \"-l\",\n \"-relaxed-filenames\", \"-V\", \"cidata\", \"-graft-points\"]\n\n temporary_files = []\n\n for fn,data in images.items():\n\n data_file = tempfile.NamedTemporaryFile(delete = False)\n data_file.write(data)\n data_file.close()\n genisoimage_command.append(f\"{fn}={data_file.name}\")\n temporary_files.append(data_file)\n\n genisoimage_proc = subprocess.Popen(genisoimage_command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n isoimage = genisoimage_proc.stdout.read()\n\n debug_isoimage = False\n if debug_isoimage:\n with open('isoimage-debug.iso', 'wb') as f:\n f.write(isoimage)\n\n for tmpfile in temporary_files:\n os.remove(tmpfile.name)\n\n print(f\"Uploading ISO configuration for {name}...\")\n\n # files in the GNS3 directory take precedence over these project files,\n # so we need to make these file names unique\n cdrom_image = self.project_id + '_' + name + '.iso'\n file_url = \"{}/files/{}\".format(self.url, cdrom_image)\n result = requests.post(file_url, auth=self.auth, data=isoimage)\n result.raise_for_status()\n\n # Configure a QEMU cloud node\n\n print(f\"Configuring {name} node...\")\n\n url = \"{}/nodes\".format(self.url)\n\n # It's important to use the scsi disk interface, because the IDE interface in qemu\n # has some kind of bug, probably in its handling of DISCARD operations, that\n # causes a thin provisioned disk to balloon up with garbage.\n #\n # See https://unix.stackexchange.com/questions/700050\n # and https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1974100\n\n qemu_node = {\n \"compute_id\": \"local\",\n \"name\": name,\n \"node_type\": \"qemu\",\n \"properties\": {\n \"adapter_type\" : \"virtio-net-pci\",\n \"hda_disk_image\": image,\n \"hda_disk_interface\": \"scsi\",\n \"cdrom_image\" : cdrom_image,\n \"qemu_path\": \"/usr/bin/qemu-system-x86_64\",\n# \"process_priority\": \"very high\",\n },\n\n # ens4, ens5, ens6 seems to be the numbering scheme on Ubuntu 20,\n # but we can't replicate that with a Python format string\n \"port_name_format\": \"eth{}\",\n\n \"symbol\": \":/symbols/qemu_guest.svg\",\n }\n\n qemu_node['properties'].update(properties)\n qemu_node.update(config)\n\n result = requests.post(url, auth=self.auth, data=json.dumps(qemu_node))\n result.raise_for_status()\n qemu = 
result.json()\n\n if disk and disk > 2048:\n url = \"{}/compute/projects/{}/qemu/nodes/{}/resize_disk\".format(self.server.url, self.project_id, qemu['node_id'])\n resize_obj = {'drive_name' : 'hda', 'extend' : disk - 2048}\n result = requests.post(url, auth=self.auth, data=json.dumps(resize_obj))\n result.raise_for_status()\n\n self.nodes() # update self.cached_nodes\n return qemu", "def test_create_deployment(self):\n pass", "def register_with_existing_node():\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n # peers.update(response.json()['peers'])\n peers.add(node_address+'/') #Add other node address to peers\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n return response.content, response.status_code", "def deploy():\n # Check if secret is mounted\n sudo(\"mkdir -p /etc/confd/conf.d\")\n sudo(\"mkdir -p /etc/confd/templates\")\n sudo(\"mkdir -p /usr/local/bin\")\n\n link = \"https://github.com/kelseyhightower/confd/releases/download/v\" + CONFD_VERSION + \"/confd-\" \\\n + CONFD_VERSION + \"-linux-amd64\"\n\n sudo(\"wget -q -O /tmp/confd %s\" % link)\n check_sha256 = \"echo '\" + CONFD_SHA256 + \" /tmp/confd' | sha256sum --check -\"\n run(check_sha256)\n sudo(\"mv /tmp/confd /usr/bin/confd && chmod 755 /usr/bin/confd\")", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def deploy(\n context, instance, user=get_local_user(), initial=False, stack=None, branch=BRANCH,\n):\n remote = True\n\n if initial:\n clone(context, instance, user, branch)\n else:\n backup(context, user, remote, instance, stack)\n\n update(context, user, remote, instance, branch)\n up(context, user, remote, instance, stack)", "def deploy(fingerengine, fingerprint):\n\n\tcfm_path = abspath(fingerengine.options.deploy)\n\tcfm_file = parse_war_path(cfm_path, True)\n\tdip = fingerengine.options.ip\n\n\tcookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]\n\tif not cookie:\n\t\tutility.Msg(\"Could not get auth\", LOG.ERROR)\n\t\treturn\n\n\tutility.Msg(\"Preparing to deploy {0}...\".format(cfm_file))\n\tutility.Msg(\"Fetching web root...\", LOG.DEBUG)\n\n\troot = fetch_webroot(dip, fingerprint, cookie)\n\tif not root:\n\t\tutility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n\t\treturn\n\t\n\t# create the scheduled task\n\tutility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n\tutility.Msg(\"Creating scheduled task...\")\n\n\tif not create_task(dip, fingerprint, cfm_file, root, cookie):\n\t\treturn\n\n\t# invoke the task\n\tutility.Msg(\"Task %s created, invoking...\" % cfm_file)\n\trun_task(dip, fingerprint, cfm_path, cookie)\n\n\t# cleanup\n\tutility.Msg(\"Cleaning up...\")\n\tif not delete_task(dip, fingerprint, cfm_file, cookie):\n\t\tutility.Msg(\"Failed to remove task. 
May require manual removal.\", LOG.ERROR)", "def deploy_worker(dist_file):\n _set_credentials()\n provision()\n _deploy_python_package(dist_file)\n _reload_supervisor()", "def deploy():\n comp = do_pack()\n\n if (not comp):\n return False\n return do_deploy(comp)", "def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()", "def add_nodes(self, count=1):\n self.log.info('Adding %d nodes' % count)\n new_nodes = []\n Node.flavor = env_vars['client_flavor']\n for i in range(count):\n #check if cluster did not previously exist\n if i == 0 and len(self.all_nodes) == 0:\n # give a floating IPv4 to the first node only\n new_guy = Node(self.cluster_name, '', len(self.all_nodes)+1, create=True, IPv4=True)\n else:\n new_guy = Node(self.cluster_name, node_type=\"\", number=len(self.all_nodes)+1, create=True)\n self.all_nodes.append(new_guy)\n new_nodes.append(new_guy)\n self.save_cluster()\n for n in new_nodes:\n n.wait_ready()\n #inject host files to everybody\n n.inject_hostnames(self.get_hosts(private=True), delete=self.cluster_name)\n n.bootstrap()\n self.log.info(\"Node %s is live \" % new_guy.name)\n #inform all\n self.inject_hosts_files()", "def _add_node_to_etc_hosts(self):\n image = 'alpine:latest'\n command = 'echo \"{} {} # clusterdock\" >> /etc/hosts'.format(self.ip_address,\n self.fqdn)\n volumes = {'/etc/hosts': {'bind': '/etc/hosts', 'mode': 'rw'}}\n\n logger.debug('Adding %s to /etc/hosts ...', self.fqdn)\n client.containers.run(image=image,\n command=[self.execute_shell, '-c', command],\n volumes=volumes,\n remove=True)", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)", "def deploy():\n db.drop_all()\n create_DB()\n app.run()", "def cvmfsPublish(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"publish\", \"-f\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not publish CVMFS transaction\")", "def deploy():\n setup()\n builddir = get_build_dir()\n if sys.platform == 'win32':\n # Support cygwin rsync on windows:\n build_path = cygpath(slashed(builddir))\n else:\n build_path = slashed(builddir)\n rsync_project(env.admin_webroot, build_path, exclude=\".*\", delete=True)\n sudo(\"chmod -R 755 %(admin_webroot)s\" % env)", "def provision_node(\n *,\n services: list,\n user: str,\n host: str,\n identity_file: str,\n cluster: FlintrockCluster):\n client = get_ssh_client(\n user=user,\n host=host,\n identity_file=identity_file,\n wait=True)\n\n with client:\n setup_node(\n ssh_client=client,\n services=services,\n cluster=cluster)\n for service in services:\n service.configure(\n ssh_client=client,\n cluster=cluster)", "def deploy():\n packing = do_pack()\n if packing is False:\n return False\n\n return do_deploy(packing)", "def firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", 
mode=\"staged\",\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. 
Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def install_node_instance_subgraph(ctx,instance, graph, hist=None):\n subgraph = graph.subgraph('install_{0}'.format(instance.id))\n\n ct=None\n if hist:\n #get completed tasks for instance\n ct=_completed_tasks(ctx,hist,instance.id)\n\n sequence = subgraph.sequence()\n\n #CREATE\n run=True\n if(hist and 'create' in ct):\n run=False\n\n ctx.logger.info(\"run={} CREATE {}\".format(str(run),instance.id))\n if(run):\n ctx.logger.info(\" hist={} ct={}\".format(str(hist),str(ct)))\n\n if(run):\n sequence.add(\n instance.set_state('initializing'),\n forkjoin(instance.send_event('Creating node'),\n instance.set_state('creating')),\n _add_es_log(ctx,instance,'create',instance.execute_operation('cloudify.interfaces.lifecycle.create')),\n instance.set_state('created'),\n forkjoin(*_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.preconfigure'\n )))\n\n #CONFIGURE\n run=True\n if(hist and 'configure' in ct):\n run=False\n\n ctx.logger.info(\"run={} CONFIGURE {}\".format(str(run),instance.id))\n\n if(run):\n sequence.add(\n forkjoin(instance.set_state('configuring'),\n instance.send_event('Configuring node')),\n _add_es_log(ctx,instance,'configure',instance.execute_operation('cloudify.interfaces.lifecycle.configure')),\n instance.set_state('configured'),\n forkjoin(*_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.postconfigure'\n )))\n\n # STARTING\n run=True\n if(hist and 'start' in ct):\n run=False\n\n ctx.logger.info(\"run={} START {}\".format(str(run),instance.id))\n\n if(run):\n sequence.add(\n forkjoin(instance.set_state('starting'),\n instance.send_event('Starting node')),\n instance.execute_operation('cloudify.interfaces.lifecycle.start'))\n\n # If this is a host node, we need to add specific host start\n # tasks such as waiting for it to start and installing the agent\n # worker (if necessary)\n if run and is_host_node(instance):\n sequence.add(*_host_post_start(instance))\n\n sequence.add(\n forkjoin(\n _add_es_log(ctx,instance,'start',instance.execute_operation('cloudify.interfaces.monitoring.start')),\n *_relationships_operations(\n instance,\n 'cloudify.interfaces.relationship_lifecycle.establish'\n )),\n instance.set_state('started'))\n\n subgraph.on_failure = get_install_subgraph_on_failure_handler(ctx,instance)\n return subgraph", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def create(self,\n name=None,\n image=None,\n size=None,\n timeout=360,\n group=None,\n **kwargs):\n \"\"\"\n create one node\n \"\"\"\n raise NotImplementedError", "def deploy(env='development', update_settings='n', upgrade_apps='n'):\n update_site(env, update_settings, upgrade_apps)\n restart_site(env)", "def test_deploy_instance_with_new_network_and_metadata(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_metadata_\" + suffix\n instance_meta = {\"test_item\": \"test_value\"}\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 251\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n metadata=instance_meta)", "async def start_node(request: web.Request) -> 
web.Response:\n req_ctx = RequestContext.parse_obj(request)\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n try:\n\n await projects_api.start_project_node(\n request, req_ctx.user_id, path_params.project_id, path_params.node_id\n )\n\n raise web.HTTPNoContent(content_type=MIMETYPE_APPLICATION_JSON)\n except ProjectStartsTooManyDynamicNodes as exc:\n raise web.HTTPConflict(reason=f\"{exc}\") from exc\n except ProjectNotFoundError as exc:\n raise web.HTTPNotFound(\n reason=f\"Project {path_params.project_id} not found\"\n ) from exc\n except NodeNotFoundError as exc:\n raise web.HTTPNotFound(\n reason=f\"Node {path_params.node_id} not found in project\"\n ) from exc", "def deploy(self, image_name, ip, flavor='m1.small'):\n body_value = {\n \"port\": {\n \"admin_state_up\": True,\n \"name\": self.name + '_provision',\n \"network_id\": os_utils.get_network_id(self.nova_api, 'provision_bob'),\n 'fixed_ips': [{'ip_address': ip}]}}\n response = self.neutron.create_port(body=body_value)\n self._provision_port_id = response['port']['id']\n self.mac = response['port']['mac_address']\n\n image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)\n flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)\n # TODO(Gonéri): We don't need keypair for the BM nodes\n keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)\n # Ensure with get DHCP lease on the provision network first\n nics = [{'port-id': self._provision_port_id}]\n\n self._os_instance = os_provisioner.build_openstack_instance(\n self.nova_api,\n self.name,\n image_id_to_boot_from,\n flavor_id,\n keypair_id,\n nics)\n\n if not self._os_instance:\n LOG.error(\"deployment has failed\")\n raise Exception()\n\n os_provisioner.add_provision_security_group(self.nova_api)\n os_utils.add_security_groups(self._os_instance, ['provision'])\n os_utils.add_security_groups(self._os_instance, self._security_groups)\n LOG.info(\"add security groups '%s'\" % self._security_groups)\n LOG.info(\"instance '%s' ready to use\" % self.name)\n\n # the instance should be off for Ironic\n self._os_instance.stop()", "def registerExistingServer():\n cd('/')\n cd('/Servers/'+managedServername)\n registerServer(cmo)", "def init():\n pass\n # destination_dir = os.getcwd() + '/deploy'\n # try:\n # os.makedirs(destination_dir)\n # except OSError as e:\n # if e.errno == errno.EEXIST:\n # print('''AWS \"deploy\" directory already exists in this folder\n # \\n''', destination_dir)\n # copy_tree(deploy_path_join('../deploy'), destination_dir)", "def create_node(self, **kwargs):\n if not self.nodes:\n self.get_nodes()\n\n _node = Node(project_id=self.project_id, connector=self.connector, **kwargs)\n\n _node.create()\n self.nodes.append(_node)\n print(\n f\"Created: {_node.name} -- Type: {_node.node_type} -- \"\n f\"Console: {_node.console}\"\n )", "def test_node_builder(patch, os_info, dummy_server):\n NOVA.servers.find = mock.MagicMock(return_value=dummy_server)\n nb = NodeBuilder(CONFIG, os_info)\n nodes = nb.get_nodes()\n list(map(lambda x: setattr(x, \"exists\", False), nodes))\n assert isinstance(nodes[0], koris.cloud.openstack.Instance)\n assert nodes[0].name == 'node-1-test'\n\n certs = create_certs(CONFIG, ['node-1-test'], ['192.168.1.103'],\n write=False)\n\n lb_ip = '212.58.134.78'\n node_tasks = nb.create_initial_nodes(CloudConfig(), certs['ca'], lb_ip,\n \"6443\",\n \"123456.abcdefg12345678\",\n \"discovery_hash\",\n )\n\n coro_server_create = node_tasks[1]\n\n call_args = 
coro_server_create.get_stack()[0].f_locals\n # we go a long way to check that nb.creat_node_tasks\n # will create a future with the correct user data\n assert call_args['keypair'] == 'otiram'\n assert call_args['self'].name == 'node-1-test'\n assert isinstance(call_args['flavor'], Flavor)", "def install_version_on_node(self, nodes, version):\n install_params = dict()\n install_params['num_nodes'] = len(nodes)\n install_params['product'] = \"cb\"\n install_params['version'] = version\n install_params['vbuckets'] = [self.cluster.vbuckets]\n install_params['init_nodes'] = False\n install_params['debug_logs'] = False\n self.installer_job.parallel_install(nodes, install_params)", "def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )" ]
[ "0.69264865", "0.63039076", "0.62826276", "0.6249289", "0.6248976", "0.6238209", "0.6202333", "0.6194543", "0.6194543", "0.6194543", "0.61874604", "0.6165393", "0.6107505", "0.6099111", "0.60960966", "0.6072571", "0.6064653", "0.60304236", "0.60209894", "0.6007666", "0.5980989", "0.5974511", "0.59686667", "0.5967629", "0.5956092", "0.58835036", "0.5877741", "0.58537966", "0.58287585", "0.5821095", "0.5814758", "0.57820797", "0.5763116", "0.57444274", "0.57355475", "0.5711169", "0.57062656", "0.57041556", "0.56910354", "0.56849223", "0.56848335", "0.5680528", "0.5669533", "0.56590074", "0.5654909", "0.5643991", "0.5643644", "0.56345457", "0.56328344", "0.5626297", "0.5615597", "0.56136525", "0.56053716", "0.5581085", "0.5572628", "0.55440575", "0.55351996", "0.55277604", "0.5525743", "0.5524819", "0.5522135", "0.55125904", "0.5499721", "0.54972845", "0.5495987", "0.54915833", "0.54805964", "0.5478177", "0.5435924", "0.54178303", "0.5413774", "0.54113495", "0.5407997", "0.54079086", "0.54077107", "0.54060435", "0.5394554", "0.53927344", "0.53890663", "0.53829795", "0.53766865", "0.53745776", "0.5371801", "0.5369533", "0.53561413", "0.53545755", "0.53502494", "0.53339326", "0.5332747", "0.5329411", "0.5326281", "0.53052086", "0.5289254", "0.528558", "0.52810246", "0.5278975", "0.5269948", "0.5267265", "0.5266568", "0.5261943" ]
0.5484384
66
Destroy a OneFS node
def delete(self, username, machine_name, txn_id):
    logger = get_task_logger(txn_id=txn_id, task_id=self.request.id,
                             loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())
    resp = {'content' : {}, 'error': None, 'params': {}}
    logger.info('Task starting')
    try:
        vmware.delete_onefs(username, machine_name, logger)
    except ValueError as doh:
        logger.error('Task failed: {}'.format(doh))
        resp['error'] = '{}'.format(doh)
    else:
        logger.info('Task complete')
    return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy_node(self):\n driver = self.driver\n driver.ex_detach_floating_ip_from_node(self.node, self.floating_ip)\n driver.destroy_node(self.node)\n sleep(15)\n for volume in self.volumes:\n driver.destroy_volume(volume)", "def deletenode(self, node_p=None):\n node_p = self.getnodenamed(node_p) # Verify pointer.\n # (node_bn* node)\n cnetica.DeleteNode_bn.argtypes = [c_void_p]\n cnetica.DeleteNode_bn.restype = None\n cnetica.DeleteNode_bn(node_p)", "def destroy(self):\n\n node = self.node\n if not config.is_node_destroyable(node.name):\n logger.error('node %s has non-destroyable prefix' % node.name)\n return False\n logger.info('destroying node %s' % node)\n return node.destroy()", "def destroy_nodes(\n self,\n name,\n ):\n pass", "def delete_node(self, node_cfg):\n with self.__connect_node(node_cfg) as conn:\n self._shutdown_node(conn)\n self._unprovision_node(conn)", "def destroy(self):\n del self.nodes\n self.nodes = {}", "def delete_node(self, uri):\n if self.sm.already_exists('nodes', uri):\n self.sm.delete_node(uri)\n else:\n raise VOSpaceError(404, \"The specified node does not exist.\")", "def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)", "def destroy_node_and_cleanup(driver, node):\n volumes = driver.list_volumes(node=node)\n\n assert (\n INSTANCE_NAME_STRING in node.name\n ), \"Refusing to delete node without %s in the name\" % (INSTANCE_NAME_STRING)\n\n print(\"\")\n print(('Destroying node \"%s\"...' % (node.name)))\n\n try:\n node.destroy()\n except Exception as e:\n if \"does not exist\" in str(e):\n # Node already deleted, likely by another concurrent run. This error is not fatal so we\n # just ignore it.\n print(\n \"Failed to delete node, likely node was already deleted, ignoring error...\"\n )\n print(str(e))\n else:\n raise e\n\n assert len(volumes) <= 1\n print(\"Cleaning up any left-over EBS volumes for this node...\")\n\n # Give it some time for the volume to become detached from the node\n if volumes:\n time.sleep(10)\n\n for volume in volumes:\n # Additional safety checks\n if volume.extra.get(\"instance_id\", None) != node.id:\n continue\n\n if volume.size not in [8, 30]:\n # All the volumes we use are 8 GB EBS volumes\n # Special case is Windows 2019 with 30 GB volume\n continue\n\n destroy_volume_with_retry(driver=driver, volume=volume)", "def delete_node(self, node_tup):\n signature = hashlib.sha256((uname+node_sig).encode('utf-8')).hexdigest() #hash value\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"DELETE FROM nodes WHERE ip==(:ip) AND port==(:port)\", { \"ip\":node_tup[1], \"port\":node_tup[2]})\n app_process.commit()\n app_process.close()", "def destroy(self):\r\n self._namespace.unregisterNode(self)\r\n self._namespace = None\r\n\r\n super(Node, self).destroy()", "def destroyNode(self, remoteNode):\r\n for node in self._nodes:\r\n if node.destroyExternal(remoteNode):\r\n break", "def delNode(nodeName):\n\t\tslicer.util.getNode(nodeName)\n\t\tslicer.mrmlScene.RemoveNode(slicer.util.getNode(nodeName))\n\t\treturn", "def remove(self):\n self.node.destroy()", "def delete(self):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/nodes/{self.node_id}\"\n )\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.node_id = None\n self.name = None", "def delete(node):\n try:\n if os.path.isdir(node):\n shutil.rmtree(node)\n else:\n 
os.unlink(node)\n except OSError as error:\n if error.errno not in [errno.ENOENT, errno.EPERM, errno.EACCES]:\n raise error", "def delete_node(self, _id):\n return self.make_request(\"DELETE\", \"nodes/\"+_id, {})", "def destroyNodes(self):\r\n for nt in self.listNodes.keys(): \t# for all kind of nodes...\r\n for node in self.listNodes[nt]: \t# for all nodes of type <nt>\r\n if node.graphObject_: node.graphObject_.destroy()", "def del_node(node, delnum):\n pass", "def delete_node(self, node):\n return node.delete()", "def delete_node(self, node: 'GraphNode'):\n\n self.operator.delete_node(node)", "def delete_node(tx, node_value, node_type):\n cql = \"MATCH(n:\" + node_type + \"{name:$node_value}) DETACH DELETE(n);\"\n try:\n tx.run(cql, node_value=node_value)\n except Exception as e:\n print(str(e))", "def delete_node(self, node):\n return self.manager.delete_node(self, node)", "def removeNode(self, node):", "def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")", "def delete_cluster(self):", "def delete_node(uuid):\n with session_for_write() as session:\n # Delete attribute data\n session.execute(\n delete(model.Attribute).where(\n model.Attribute.node_uuid == uuid))\n # Delete introspection data\n session.execute(\n delete(model.Option).where(\n model.Option.uuid == uuid))\n session.execute(\n delete(model.IntrospectionData).where(\n model.IntrospectionData.uuid == uuid))\n # Delete the actual node\n session.execute(\n delete(model.Node).where(\n model.Node.uuid == uuid\n ).execution_options(synchronize_session=False)\n )", "def destroy_node(self, node):\n params = {'Action': 'TerminateInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_terminate_boolean(res)", "def destroyNode(self, remoteNode):\r\n # TODO: Workaround for now...\r\n try:\r\n iter(self._namespaces).next().destroyNode(remoteNode)\r\n except StopIteration:\r\n pass", "def removeNode(self, nTag):\r\n try:\r\n self._nodes.pop(nTag).destroy()\r\n except KeyError:\r\n raise InvalidRequest('Can not remove a non existent node '\r\n \"'{0}' from the container.\".format(nTag))", "def del_node (self, id):\n raise NotImplementedError", "def delete_node(self, name):\n\n name = self._validate_name(name)\n if name in self.nodes:\n del self.nodes[name]", "def del_node(self, n):\n if n in self.node_dict:\n del self.node_dict[n]\n for node in self.node_dict:\n try:\n self.del_edge(node, n)\n except:\n pass\n else:\n raise KeyError(\"Cannot remove node that does not exist.\")", "def test_delete_hyperflex_node_profile(self):\n pass", "def destroy():\n pass", "def destroy():\n pass", "def remove_node():\n\ttry:\n\t\tnetwork.remove_connection()\n\texcept ValueError as err:\n\t\tfeedback.config(text=err)", "def remove_node(self, node):\n self.nodes.remove(node)\n node.close()", "def destroy(self, context=None):\n self.dbapi.destroy_nodegroup(self.cluster_id, self.uuid)\n self.obj_reset_changes()", "def deleteNode(*args, **kwds):\n nodes = args\n if len(args) < 1:\n nodes = cmds.ls(sl=1)\n \n for node in nodes:\n node_lst = [node]\n if isinstance(node, (list, tuple)):\n node_lst = node\n\n for n in node_lst:\n if cmds.objExists(str(n)):\n cmds.delete(str(n), **kwds)\n else:\n cmds.warning(\"# Don’t exist - \" + node)", "def delete_instance(self, node_id: str, wait_for_operation: bool = True) -> dict:\n return", "def removeDevice(self, node, fullDeviceName):", "def del_node(self, n):\n try:\n del self.dict[n]\n # remove edges 
pointing to n\n for key, value in self.dict.iteritems():\n if n in value:\n del self.dict[key][n]\n except (ValueError, KeyError):\n raise AttributeError('No Such Node Exists')", "def delete(self, **kwargs):\n db.delete_node(self.handle_id, self.__class__.__name__)\n super(NodeHandle, self).delete()\n return True", "def delete_node(self,n):\n if self._node_to_edges is not None:\n if len(self._node_to_edges[n])>0:\n print( \"Node %d has edges: %s\"%(n,self._node_to_edges[n]) )\n raise GridException(\"Node still has edges referring to it\")\n del self._node_to_edges[n]\n if self._node_to_cells is not None:\n if len(self._node_to_cells[n])>0:\n raise GridException(\"Node still has cells referring to it\")\n del self._node_to_cells[n]\n if self._node_index is not None:\n self._node_index.delete(n, self.nodes['x'][n,self.xxyy] )\n\n self.push_op(self.undelete_node,n,self.nodes[n].copy())\n\n self.nodes['deleted'][n] = True\n \n # special case, used for undo, reverts to previous state\n # more completely.\n if len(self.nodes)==n+1:\n self.nodes=self.nodes[:-1]", "def clean_node(\n self,\n name,\n ):\n # Gets the node IP address.\n ip = self.get_node_ip(name)\n\n # Deletes the images.\n docker_utils.clean(\n hostname=ip,\n ssh_port=SSH_PORT,\n ssh_username=self.get_ssh_username(name),\n ssh_private_key_file=self.get_ssh_private_key_file(name),\n executor=name,\n logger=self._logger,\n )", "def del_node(self, node_id):\n assert(node_id is not None)\n LOG.info(\"Try to del node=%s\" % node_id)\n\n try:\n enet = EnhNetNode(node_id)\n self.info.nodeDel(enet.ident)\n LOG.debug(\"Successfully deleted node: %s\", str(enet))\n\n except TOPOLOGY.CannotFetchNode, exe:\n LOG.error(\"CannotFetchNode exception: %s\", str(exe))\n except TOPOLOGY.InternalProblems, exe:\n LOG.error(\"InternalProblems exception: %s\", str(exe))\n except TOPOLOGY.InvocationNotAllowed, exe:\n LOG.error(\"InvocationNotAllowed exception: %s\", str(exe))\n except Exception, exe:\n LOG.error(\"Generic exception: %s\", str(exe))", "def clear_node(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Clearing local node statistics\"\n return ret\n\n __salt__[\"trafficserver.clear_node\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Cleared local node statistics\"\n return ret", "def delete_node(self, n):\n\n if n not in self.node:\n raise PathGraphException(\"The node {} is not in the graph.\".format(n))\n\n self.delete_node_from_path(n)\n self.delete_path_containing_node(n)\n del self.node[n]", "def del_node (self, node):\n try:\n if isinstance(node, Node):\n node = node.id\n elif isinstance(node, Port):\n node = node.node.id\n self.network.remove_node(node)\n return True\n except NetworkXError:\n # There was no node in the graph\n return False", "def delete(self):\n\t\tself.canvas.delete('node_'+self.identifier)\n\t\tself.canvas.tag_unbind('node_'+self.identifier,\"<Any>\")", "def node_delete(self, nodeId):\n\n self._client.delete(\n \"{}/nodes/{}\".format(\n LKECluster.api_endpoint, parse.quote(str(nodeId))\n ),\n model=self,\n )", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", 
"def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def fusion_api_delete_ha_nodes(self, uri=None, api=None, headers=None):\n return self.ha_nodes.delete(uri, api, headers)", "def remove_node_from_onnx(cls, node: onnx.NodeProto,\n onnx_model: onnx.ModelProto):\n onnx_model.graph.node.remove(node)", "def node_file_remove(ctx, filename):\n try:\n ctx.obj['node'].remove_file(filename)\n except TimeoutError as e:\n logger.error('Error: %s' % e)\n exit(1)", "def delete_node(ugraph, node):\r\n neighbors = ugraph[node]\r\n ugraph.pop(node)\r\n for neighbor in neighbors:\r\n ugraph[neighbor].remove(node)", "def test_destroy_nas_share(self):\n pass", "def task_destroy_nodes(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'destroy_nodes':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_destroy_nodes\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)", "def delete_node(name: str, value: str) -> None:\n global _graph\n\n if _graph is None:\n print('\\ndelete_node(): Error: graph has not been initialized or opened.\\n\\n')\n return\n\n lname = str(name)\n lvalue = str(value)\n\n if lname == '' or lvalue == '' or lname == 'nan' or lvalue == 'nan':\n return\n\n node = read_node(name=lname, value=lvalue)\n if node is None:\n return\n\n _graph.delete(node)\n return", "def __del__(self):\n del self.board_\n del self.children_edges_\n self.board_ = None\n del self.parent_edge_\n # print(\"destruct node\")", "def removeOnDestroy(call, args=(), kwargs={}, nodeClass='*'):\n pass", "def removeNode(self, index):\n del self.nodes[index]", "def delete_vm(self, account, vm_id):\n node = Node()\n node.id = vm_id\n self.driver(account).destroy_node(node)", "def remove_node(self, node: str) -> None:\n self.graph.remove_node(node)", "def delete(self):\n self.parent.delete_node(self)", "def remove_resource(self, graph_db):\n with mutex:\n neo_resource.delete_node(graph_db, self.index)", "def freeNode(self):\n libxml2mod.xmlFreeNode(self._o)", "def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=\"Production\"):\n\n if not isinstance(node, Node):\n raise ValueError(\"A libcloud Node object is required.\")\n\n if ex_cloud_service_name is None and node.extra is not None:\n ex_cloud_service_name = node.extra.get(\"ex_cloud_service_name\")\n\n if not ex_cloud_service_name:\n raise ValueError(\"Unable to get ex_cloud_service_name from Node.\")\n\n _deployment = self._get_deployment(\n service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot\n )\n\n _deployment_name = _deployment.name\n\n _server_deployment_count = len(_deployment.role_instance_list)\n\n if _server_deployment_count > 1:\n path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id)\n else:\n path = self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name)\n\n path += \"?comp=media\"\n\n 
self._perform_delete(path)\n\n return True", "def delete(node):\n #first we find a good candidate for a key switch\n if node.right != None:\n candidate = node.right\n while candidate.left != None:\n candidate = candidate.left\n #if not that then the node doesnt have a right child, so just swap with left child\n else:\n candidate = node.left\n #either way, candidate is a left child\n node.key = candidate.key\n candidate.parent.left = None\n Node.recalculate_heights(candidate.parent)\n Node.rotatation_adjusting_heights(candidate.parent)\n del candidate", "def _del_node_one_child(self, parent, node):\n if parent:\n if parent._rkid == node:\n if node._rkid:\n parent._rkid = node._rkid\n node._rkid._parent = parent\n else:\n parent._rkid = node._lkid\n node._lkid._parent = parent\n elif node._rkid:\n parent._lkid = node._rkid\n node._rkid._parent = parent\n else:\n parent._lkid = node._lkid\n node._lkid._parent = parent\n else:\n if node._rkid:\n self._root = node._rkid\n node._rkid._parent = None\n else:\n self._root = node._lkid\n node._lkid._parent = None", "def destroy(self):", "def destroy(self):", "def destroy(self):", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def delete(self):\n\n nodeip = request.form.get(\"ip\")\n nodeflag = request.form.get(\"flag\")\n force = True if request.form.get(\"force\") in (\"true\", \"True\", True) else False\n if g.auth:\n return g.swarm_node.rm(nodeip, nodeflag, force)\n else:\n res = {\"msg\": \"Authentication failed, permission denied.\", \"code\": 403}\n logger.warn(res)\n return res, 403", "def _delete(self, current_node):\n pass", "def unlinkNode(self):\n libxml2mod.xmlUnlinkNode(self._o)", "def remove_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def test_destroy_nas_share_by_nas(self):\n pass", "def unspawn(self):\n global NodeTypeclass\n if not NodeTypeclass:\n from .room import XYZRoom as NodeTypeclass\n\n xyz = (self.X, self.Y, self.Z)\n\n try:\n nodeobj = NodeTypeclass.objects.get_xyz(xyz=xyz)\n except django_exceptions.ObjectDoesNotExist:\n # no object exists\n pass\n else:\n nodeobj.delete()", "def del_node_from_string(self, node):\n assert(node is not None)\n LOG.info(\"Try to del node=%s\" % node)\n\n try:\n net = NetNode(node)\n self.info.nodeDel(net.ident)\n LOG.debug(\"Successfully deleted node: %s\", str(net))\n\n except TOPOLOGY.CannotFetchNode, exe:\n LOG.error(\"CannotFetchNode exception: %s\", str(exe))\n except TOPOLOGY.InternalProblems, exe:\n LOG.error(\"InternalProblems exception: %s\", str(exe))\n except TOPOLOGY.InvocationNotAllowed, exe:\n LOG.error(\"InvocationNotAllowed exception: %s\", str(exe))\n except Exception, exe:\n LOG.error(\"Generic exception: %s\", str(exe))", "def destroy(self):\n\n pass", "def delete_node(self, loadbalancer, node):\n lb = node.parent\n if not lb:\n raise exc.UnattachedNode(\"No parent Load Balancer for this node \"\n \"could be determined.\")\n resp, body = self.api.method_delete(\"/loadbalancers/%s/nodes/%s\" %\n (lb.id, node.id))\n return resp, body", "def _del_node_no_children(self, parent, node):\n if parent:\n if parent._rkid == node:\n parent._rkid = None\n else:\n parent._lkid = None\n else:\n self._root = None", "def delete_one_child(self, node):\n if node.left != None:\n child = node.left\n else:\n child = node.right\n \n parent = node.parent\n if parent.left == node:\n parent.left = child\n else:\n parent.right = child\n child.parent = parent\n del self.nodes[node.key]\n\n 
self.update_path(parent)", "def clear_node():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"metric\", \"clear\")\n else:\n cmd = _traffic_line(\"-c\")\n\n return _subprocess(cmd)", "def delete_ll_node(node):\n node.val = node.next.val\n node.next = node.next.next", "def delete():", "def hfp_delete(handle, org_dn, name):\r\n\r\n dn = org_dn + \"/fw-host-pack-\" + name\r\n mo = handle.query_dn(dn)\r\n if mo is None:\r\n raise ValueError(\"HFP '%s' does not exist\" % dn)\r\n\r\n handle.remove_mo(mo)\r\n handle.commit()", "def delete(self):\n self.graph._del(handle=self.handle)", "def delete_node(self, u_node_id):\n node = self.node_memory[u_node_id]\n\n # Delete the formulas from the tree, but keep the formulas in node for restoration later\n copy = list(node.formulas)\n for f in node.formulas:\n self.delete_formula(f)\n node.formulas = copy\n\n # Remove node from parent_formula\n parent_formula = node.parent_formula\n parent_formula.node_children.remove(node)\n\n # Remove the node from parent\n node.parent.children.remove(node)\n\n # Remove the node from the Tree node list\n self.nodes.pop(node.node_id)\n self.readjust_node_id(node.node_id)", "def perspective_nodeDied(self, remoteNode):\r\n self._endpoint.destroyNode(remoteNode)", "def delete(self, node):\n # TODO: Catch errors if empty or node not in list\n self.length -= 1 # Update length\n # If head and tail, both get set to None\n if self.head is self.tail:\n self.head = None\n self.tail = None\n elif node is self.head: # If head, set current head to next\n self.head = self.head.next\n node.delete()\n elif node is self.tail: # If tail, set current tail to prev\n self.tail = self.tail.prev\n node.delete()\n else: # If regular node, just delete\n node.delete()" ]
[ "0.74795836", "0.7125707", "0.70684725", "0.6884586", "0.6880332", "0.674062", "0.66680616", "0.6622097", "0.66166276", "0.65986913", "0.6577545", "0.65129596", "0.65098155", "0.65094095", "0.6506481", "0.6490085", "0.64562887", "0.6453083", "0.64295024", "0.6426773", "0.6406237", "0.6381781", "0.63463455", "0.6340725", "0.6326563", "0.63084143", "0.63053954", "0.62973803", "0.62821096", "0.6267716", "0.6263554", "0.6261406", "0.6255522", "0.62531", "0.62240595", "0.62240595", "0.61985046", "0.6192857", "0.6149334", "0.6145376", "0.6137957", "0.61239463", "0.612004", "0.61027044", "0.61017334", "0.6070481", "0.60281646", "0.60027075", "0.5971987", "0.5967599", "0.5961157", "0.59581923", "0.59335524", "0.59335524", "0.59335524", "0.59335524", "0.59335524", "0.592023", "0.5903511", "0.5892694", "0.5884236", "0.58808994", "0.58793074", "0.586371", "0.5862611", "0.58475053", "0.5845919", "0.58445984", "0.5831274", "0.5819344", "0.58174425", "0.5816032", "0.58132285", "0.58114666", "0.5793791", "0.57870764", "0.57870764", "0.57870764", "0.5769134", "0.5769134", "0.5769134", "0.5769134", "0.5767949", "0.57678676", "0.57678586", "0.5759752", "0.57571924", "0.5755492", "0.57510006", "0.57494086", "0.5745198", "0.5734323", "0.5725857", "0.57225335", "0.5710506", "0.57048124", "0.5700223", "0.5668588", "0.5667155", "0.5662034", "0.56525403" ]
0.0
-1
Obtain the available OneFS images/versions that can be deployed
def image(self, txn_id):
    logger = get_task_logger(txn_id=txn_id, task_id=self.request.id,
                             loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())
    resp = {'content' : {}, 'error': None, 'params': {}}
    logger.info('Task starting')
    resp['content'] = {'image': vmware.list_images()}
    logger.info('Task complete')
    return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_images():\n return AVAILABLE_IMAGES", "def get_installed_images(self):\n raise NotImplementedError", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def get_images(stage=0):\n return get_files(stage)[0]", "def list_images():\n return json_response(list_manifests())", "def image_versions(self, image_name):\n # TODO: Expand to read all tags locally, not just a fixed list\n try:\n return {\"latest\": self.image_version(image_name, \"latest\")}\n except ImageNotFoundException:\n return {}", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"The avail_images function must be called with \"\n \"-f or --function, or with the --list-images option\"\n )\n\n ret = {}\n conn = get_conn()\n\n for item in conn.list_images()[\"items\"]:\n image = {\"id\": item[\"id\"]}\n image.update(item[\"properties\"])\n ret[image[\"name\"]] = image\n\n return ret", "def required_images(self):\n required = set()\n deployment_type = self.get_var(\"openshift_deployment_type\")\n host_groups = self.get_var(\"group_names\")\n # containerized etcd may not have openshift_image_tag, see bz 1466622\n image_tag = self.get_var(\"openshift_image_tag\", default=\"latest\")\n image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]\n\n # template for images that run on top of OpenShift\n image_url = \"{}/{}-{}:{}\".format(image_info[\"namespace\"], image_info[\"name\"], \"${component}\", \"${version}\")\n image_url = self.get_var(\"oreg_url\", default=\"\") or image_url\n if 'nodes' in host_groups:\n for suffix in NODE_IMAGE_SUFFIXES:\n required.add(image_url.replace(\"${component}\", suffix).replace(\"${version}\", image_tag))\n # The registry-console is for some reason not prefixed with ose- like the other components.\n # Nor is it versioned the same, so just look for latest.\n # Also a completely different name is used for Origin.\n required.add(image_info[\"registry_console_image\"])\n\n # images for containerized components\n if self.get_var(\"openshift\", \"common\", \"is_containerized\"):\n components = set()\n if 'nodes' in host_groups:\n components.update([\"node\", \"openvswitch\"])\n if 'masters' in host_groups: # name is \"origin\" or \"ose\"\n components.add(image_info[\"name\"])\n for component in components:\n required.add(\"{}/{}:{}\".format(image_info[\"namespace\"], component, image_tag))\n if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise\n required.add(\"registry.access.redhat.com/rhel7/etcd\") # and no image tag\n\n return required", "def avail_images(call=None):\n if call == \"action\":\n raise SaltCloudSystemExit(\n \"This function must be called with -f, --function argument.\"\n )\n return template_list()", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def get_images(name):\n url = \"/\".join([REGISTRY_BASE, name, \"/tags/list\"])\n response = req(url)\n image_list = []\n if response is not None:\n headers = {\"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\"}\n tags = response[\"tags\"]\n for tag in tags:\n url = \"/\".join([REGISTRY_BASE, name, 
\"/manifests\", tag])\n response = req(url, headers)\n if response is not None:\n image = {}\n image[\"size\"] = response[\"config\"][\"size\"]\n for i in response[\"layers\"]:\n image[\"size\"] += i[\"size\"]\n image[\"size\"] = round(float(image[\"size\"]) / 1024 / 1024, 2)\n image[\"id\"] = response[\"config\"][\"digest\"][7:19]\n image[\"tag\"] = tag\n image[\"cmd\"] = \"docker pull uk8s.com/\" + name + \":\" + tag\n image_list.append(image)\n return sorted(image_list, reverse=True)", "def pull_image(self):\n status = []\n for key, container in self.containers.items():\n result = container.pull()\n status.append(result)\n return status", "def list_images():\n image_map = build_image_map()\n click.echo('')\n click.echo('List of available images (Name - Description)')\n click.echo('')\n for name in image_map:\n click.echo('{} -> {}'.format(name, image_map[name]))", "def get_build_images(self) -> List[Image]:\n images = []\n image_names = []\n conflicting_names = []\n for registry_name, registry in self.registries.items():\n # if the registry is not marked as source, skip it\n if not registry.source:\n continue\n\n images += self.get_images_from_registry(registry)\n\n if conflicting_names:\n raise RuntimeError(\"Images found in multiple 'source' repositories: %s\", conflicting_names)\n\n return images", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def get_base_image(version_map, version):\n for entry in version_map:\n if entry.supports(version):\n return entry\n return None", "def local_images(self, images):\n registries = self.known_docker_registries()\n found_images = []\n for image in images:\n # docker could have the image name as-is or prefixed with any registry\n imglist = [image] + [reg + \"/\" + image for reg in registries]\n if self.is_image_local(imglist):\n found_images.append(image)\n return found_images", "def get_images():\n return _IMAGES", "def get_all_images_from_filesystem():\r\n\r\n logging.debug('get_all_images_from_filesystem()')\r\n\r\n dir_path = os.path.join(os.environ['TEMP'],'WarietyWallpaperImages')\r\n all_full_image_paths = []\r\n for my_file in os.listdir(dir_path):\r\n if os.path.isfile(os.path.join(dir_path, my_file)):\r\n all_full_image_paths.append(os.path.join(dir_path, my_file))\r\n return all_full_image_paths", "def available_images(self, images, registries, task_vars):\n return [\n image for image in images\n if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries)\n ]", "def get_artefactversions(self, artefact):\n\n if self.url == 'test':\n artefactversionlist = [artefact + '-1.0.0-80.x86_64.rpm', artefact + '-1.0.0-81.x86_64.rpm']\n else:\n if 'fk-' in artefact:\n tmp = artefact.split('fk-')\n leverable = tmp[1].split('_')[0]\n else:\n leverable = 'tools'\n\n artefactversionlist = []\n try:\n response = urlopen(\n 'http://' + self.url + '/nexus/service/local/lucene/search?repositoryId=rpm-dev&g=fk.rpm.'\n + leverable + '&a=' + artefact)\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting artefactversions!!!']\n\n metadata_root = elementTree.parse(response)\n for data in metadata_root.iter('artifact'):\n extension = 'x86_64.rpm'\n for ext in data.findall('.//extension'):\n if 'rpm' in ext.text:\n extension = ext.text\n artefactversionlist.append(artefact + '-' + '.' 
+ extension + '.rpm')\n # artefactversiondict[data.find('version').text] = extension\n\n return artefactversionlist", "def GetAllInstancesInfo(self, hvparams=None):\n data = []\n for file_name in os.listdir(self._ROOT_DIR):\n path = utils.PathJoin(self._ROOT_DIR, file_name)\n if self._IsDirLive(path):\n data.append((file_name, 0, 0, 0, 0, 0))\n return data", "def list_docker_images():\n raw_result = subprocess.getstatusoutput('docker images')\n return result_handler(raw_result)", "def list_images(self):\n raise NotImplementedError()", "def get_image(name):\r\n return nova.images.find(name=name)", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def get_image_list(self, account):\n images = self.driver(account).list_images()\n return [image.name for image in images]", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def check_manifests(self):\n # Fetch image repositories\n repos = self.fetch_repos()\n\n # Create an empty dataframe\n df = pd.DataFrame(columns=[\"image_name\", \"age_days\", \"size_gb\"])\n\n # Loop over the repositories\n logging.info(\"Checking repository manifests\")\n for repo in repos:\n # Get the manifest for the current repository\n logging.info(\"Pulling manifests for: %s\" % repo)\n show_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"show-manifests\",\n \"-n\",\n self.name,\n \"--repository\",\n repo,\n ]\n\n result = run_cmd(show_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n logging.info(\"Successfully pulled manifests\")\n outputs = (\n result[\"output\"]\n .replace(\"\\n\", \"\")\n .replace(\" \", \"\")[1:-1]\n .split(\"},\")\n )\n logging.info(\n \"Total number of manifests in %s: %d\" % (repo, len(outputs))\n )\n\n # Loop over the manifests for each repository\n for j, output in enumerate(outputs):\n if j < (len(outputs) - 1):\n output += \"}\"\n\n # Convert the manifest to a dict and extract timestamp\n manifest = json.loads(output)\n timestamp = pd.to_datetime(manifest[\"timestamp\"]).tz_localize(\n None\n )\n\n # Get time difference between now and the manifest timestamp\n diff = (pd.Timestamp.now() - timestamp).days\n logging.info(\n \"%s@%s is %d days old.\" % (repo, manifest[\"digest\"], diff)\n )\n\n # Check the size of each image\n image_size_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"show\",\n \"-n\",\n self.name,\n \"--image\",\n f\"{repo}@{manifest['digest']}\",\n \"--query\",\n \"imageSize\",\n \"-o\",\n \"tsv\",\n ]\n\n result = run_cmd(image_size_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n image_size = int(result[\"output\"]) * 1.0e-9\n\n # Append to dataframe\n df = df.append(\n {\n \"image_name\": f\"{repo}@{manifest['digest']}\",\n \"age_days\": diff,\n \"size_gb\": image_size,\n },\n ignore_index=True,\n )\n\n return df", "def ListInstances(self, hvparams=None):\n return [name for name in os.listdir(self._ROOT_DIR)\n if self._IsDirLive(utils.PathJoin(self._ROOT_DIR, name))]", "def default_image_list(self):\n for version in self.database.versions:\n release = self.database.latest_release(\n default.platform, version=version, architecture=default.architecture\n )\n\n if 
not release:\n continue\n\n image = self.database.get(\n version=version,\n platform=default.platform,\n release=release,\n architecture=default.architecture,\n )\n\n if not image:\n continue\n\n tags = [\n tag\n for tag in self.database.tags(image)\n if not any(\n (len(tag.version) == 4, tag.platform, tag.release, tag.architecture)\n )\n ]\n\n yield _format_image(image, tags)", "def get_image_bases(image_root: str) -> list:\n return list(sorted(os.listdir(image_root), key=lambda x: tuple(\n int(x.split('.')[0].split('-')[i]) for i in range(1, len(x.split('-'))))))", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def images(self, **kwargs):\n return self.get_list(self.cloudman.compute.images(),\n kind=\"image\")", "async def fetch_all_images(sess: Session = Depends(get_db)):\n image_list = utils_com.get_com_image_list(sess)\n return image_list", "def list_images():\n resource_route = \"/static/img/\"\n file_request_path = request.base_url[:request.base_url.rfind('/')] + resource_route\n path_to_current_file = os.path.dirname(os.path.abspath(__file__))\n images_path = os.path.join(path_to_current_file, 'static', 'img')\n directory_list = os.listdir(images_path)\n image_files = [f for f in directory_list if os.path.isfile(os.path.join(images_path, f))]\n image_files.sort()\n if '.gitignore' in image_files:\n image_files.remove('.gitignore')\n full_image_paths = [file_request_path + f for f in image_files]\n response_code = 200\n return make_response(jsonify({'files': full_image_paths}), response_code)", "def get_images(self):\n return self._get_brains(\"Image\")", "def _get_image(runtime):\n return \"{}:{}\".format(LambdaContainer._IMAGE_REPO_NAME, runtime)", "def get_files():\n\n img_dir = '../ADE20K_2016_07_26/full_data/images/validation/'\n sem_dir = '../ADE20K_2016_07_26/full_data/annotations/validation/'\n ins_dir = '../ADE20K_2016_07_26/full_data/annotations_instance/validation/'\n\n img_files = os.listdir(img_dir)\n sem_files = os.listdir(sem_dir)\n ins_files = os.listdir(ins_dir)\n \n img_files = [ os.path.join(img_dir,item) for item in img_files ]\n sem_files = [ os.path.join(sem_dir,item) for item in sem_files ]\n ins_files = [ os.path.join(ins_dir,item) for item in ins_files ]\n \n img_files.sort()\n sem_files.sort()\n ins_files.sort()\n \n return img_files, sem_files, ins_files", "def get_images_online(self, img_names):\n images = []\n errs = []\n for img_name in img_names:\n try:\n path = IMAGES_FOLDER + img_name\n # print(path)\n img = get_image(path, self.img_shape)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n images.append(x)\n except Exception as e:\n # print(path,str(e))\n errs.append(img_name)\n\n return np.vstack(images), errs", "def all(self):\r\n if self._versions is None or \\\r\n len(self._versions) == 0:\r\n url = \"%s/versions\" % self._url\r\n params = {'f':'json'}\r\n res = self._con.get(url, params)\r\n self._versions = []\r\n if 'versions' in res:\r\n for v in res['versions']:\r\n guid = v['versionGuid'][1:-1]\r\n vurl = \"%s/versions/%s\" % (self._url, guid)\r\n self._versions.append(Version(url=vurl,\r\n flc=self._flc,\r\n gis=self._gis))\r\n return self._versions\r\n return self._versions", "def get_images(self):\n \n return self.img_lst", "def 
images_list(self, kwargs=None):\n\n try:\n scode, images = Rest.get('Image')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n return\n\n n = 1\n e = {}\n for image in images:\n d = {}\n d['Ip'] = image['Ip']\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2) # Converting the size to GB\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))", "def show_flavors():\n return get_flavors()", "def getCurrentVersions(self):\r\n if path.exists('../versions.pckl'):\r\n f = open('../versions.pckl', 'rb')\r\n versions = pickle.load(f)\r\n f.close()\r\n else:\r\n versions = {\"subsystems\": {}, \"grafana\": {}}\r\n return versions", "def list_versions(self):\n if not USE_GCLOUD:\n return self.run_appcfg(['list_versions'])\n data = self.run_gcloud(['app', 'versions', 'list'])\n per_module = collections.defaultdict(list)\n for deployment in data:\n service = deployment['service'].encode('utf-8')\n version_id = deployment['id'].encode('utf-8')\n per_module[service].append(version_id)\n return dict(per_module)", "def get_flavors() -> dict:\n flavor_rq = request(\n method=\"GET\", url=app.config[\"FLAVORS_REF\"], headers=build_header(),\n )\n\n if not flavor_rq.ok:\n HTTPError(f\"Can not get flavor id for virtual machine: {flavor_rq.status_code}\")\n\n return flavor_rq.json()", "def cee(ctx, images):\n if images:\n info = consume_task(ctx.obj.vlab_api,\n endpoint='/api/2/inf/cee/image',\n base_endpoint=False,\n message='Collecting available versions of CEE for deployment',\n method='GET').json()['content']\n rows = []\n for img in info['image']:\n rows.append(Version(img, name='CEE'))\n output = get_formatted_table(sorted(rows))\n click.echo('\\n{}\\n'.format(output))\n else:\n info = consume_task(ctx.obj.vlab_api,\n endpoint='/api/2/inf/cee',\n message='Collecting information about your CEE instances',\n method='GET').json()\n output = vm_table_view(ctx.obj.vlab_api, info['content'])\n if not output:\n output = 'You do not own any CEE instances'\n click.echo(output)", "def list_installed(self) -> Generator[Path, None, None]:\n LOGGER.verbose(\"checking %s for Terraform versions...\", self.versions_dir)\n return self.versions_dir.rglob(\"*.*.*\")", "def _get_local_repodigest(self, context):\n images_list = context.docker_client.images.list(self._name())\n if len(images_list) == 0:\n return None\n sorted_by_created = sorted(images_list, key=lambda x: x.attrs.get('Created'), reverse=True)\n image = sorted_by_created[0]\n repo_digests = image.attrs.get('RepoDigests')\n if len(repo_digests) != 1:\n raise Exception(\"Ambiguous RepoDigests, do not know how to handle this: %s\" % str(repo_digests))\n repo_digest = repo_digests[0]\n return repo_digest.split(\"@\")[-1]", "def images(self):\n return self._primary_images + self._unique_images + self._floating_images", "def get_images_by_vulnerability(self, **kwargs):\n ...", "def getimgs():", "def get_installation_packages(self):\n self.logger.debug(\"get_installation_packages()\")\n parameter = {'onlyLatest':'False'}\n resp = self._im_session.get(\"{}/{}\".format(self._im_api_url, 'types/InstallationPackageWithLatest/instances'), params=parameter)\n #resp = 
self._im_session.get('https://192.168.100.52/types/InstallationPackageWithLatest/instances', params=parameter)\n jresp = json.loads(resp.text)\n #pprint(jresp.text)\n return jresp", "def do_command(self, args):\n imageops = dbops.Images()\n listing = imageops.list(args)\n ordering = ['image_name', 'image_format', 'vendor_name', 'os_type_name',\n 'is_64bit', 'is_bigmem', 'is_smp', 'is_enabled']\n do_list(listing, ordering)", "def listReferenceImages(self):\n productPath = self.productSearch.productClient.product_path(\n project=self.productSearch.projectId, location=self.productSearch.location, product=self.productId)\n\n images = self.productSearch.productClient.list_reference_images(parent=productPath)\n return [x.name for x in images]", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def get_image(self):\n logging.debug(\"%s get_image entered\" % str(self.machine_name))\n snapshots = cs.list_snapshots()\n # find the one for this server\n if self.cloudserver:\n server_id = self.cloudserver.id\n else:\n return self.image_id\n\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print \"XXX:\", img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n return img\n\n print \"Server %s has no snapshots\" % (server_id)\n return None", "def get_images_list(self):\n return self.image_filenames_list", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def all_images():\n\n total = set()\n jobs = [nomad.parse(get_job(job.template)) for job in config.enabled_jobs]\n for spec in jobs:\n for image in nomad.get_images(spec):\n if image is not None and image != 'None':\n total |= set([image])\n return total", "def get_used_versions(self, egg_directory):\n return [\n egg.split('-')[0]\n for egg in os.listdir(egg_directory)\n if egg.endswith('.egg')\n ]", "def imageItems(self, context):\n prefs = getPreferences()\n\n images = [('NONE', \"––– Select –––\", \"\")]\n if prefs.path_value:\n for img in environmentImages(prefs.path_value):\n images.append((img, img, \"\"))\n\n return images", "def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)", "def list(self):\n res = self.db.execute(select([model.imaging_servers.c.fqdn]))\n return self.column(res)", "def _get_pinned_docker_images() -> Mapping[str, Mapping[str, str]]:\n\n pinned_docker_images_file = resources_dir / \"pinned_docker_images.cfg\"\n all_pinned_docker_images = ConfigParser()\n all_pinned_docker_images.read(pinned_docker_images_file)\n return all_pinned_docker_images", "def list_image_impl(**kwargs: Any) -> None:\n try:\n config = configuration.create_transient_list_image_config(kwargs)\n except configuration.CLIArgumentError as e:\n print(e, file=sys.stderr)\n sys.exit(1)\n\n store = image.ImageStore(\n backend_dir=config.image_backend, frontend_dir=config.image_frontend\n )\n images = _find_requested_images(store, config)\n\n if len(images) == 0:\n print(\"No images match selection\", 
file=sys.stderr)\n sys.exit(1)\n\n frontend, backend = image.format_image_table(images)\n if len(frontend) > 0:\n print(\"Frontend Images:\")\n print(frontend)\n if len(backend) > 0:\n print(\"\\nBackend Images:\")\n print(backend)\n sys.exit(0)", "def getVersions(self):\n logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []", "def get_image(vm_):\n vm_image = config.get_cloud_config_value(\"image\", vm_, __opts__).encode(\n \"ascii\", \"salt-cloud-force-ascii\"\n )\n\n images = avail_images()\n for key in images:\n if vm_image and vm_image in (images[key][\"id\"], images[key][\"name\"]):\n return images[key]\n\n raise SaltCloudNotFound(\n \"The specified image, '{}', could not be found.\".format(vm_image)\n )", "def images(self):\n return self._data[\"images\"]", "def _get_images(image_path):\n logger.debug(\"Getting images: '%s'\", image_path)\n if not os.path.isdir(image_path):\n logger.debug(\"Folder does not exist\")\n return None\n files = [os.path.join(image_path, f)\n for f in os.listdir(image_path) if f.lower().endswith((\".png\", \".jpg\"))]\n logger.debug(\"Image files: %s\", files)\n return files", "def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id", "def list_all_images(s, c, p, project, name, ceph):\n\n def second_filter():\n if project is None:\n f1 = True\n else:\n f1 = image[2] == project\n\n if ceph is None:\n f2 = True\n else:\n f2 = image[3] == ceph\n\n if name is None:\n f3 = True\n else:\n f3 = image[1] == name\n\n f4 = project is None and ceph is None and name is None\n\n return (f1 and f2 and f3) or f4\n\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_all_images()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Project\", \"Ceph\", \"Public\",\n \"Snapshot\",\n \"Parent\"])\n images = ret[constants.RETURN_VALUE_KEY]\n for image in images:\n flag = False\n if s and image[5]:\n flag = second_filter()\n elif c and image[6] != '' and not image[5]:\n flag = second_filter()\n elif p and image[4]:\n flag = second_filter()\n elif not s and not c and not p:\n flag = second_filter()\n\n if flag:\n table.add_row(image)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'", "def get_instance_image_info(task):\n ctx = task.context\n node = task.node\n image_info = {}\n # NOTE(pas-ha) do not report image kernel and ramdisk for\n # local boot or whole disk images so that they are not cached\n if (node.driver_internal_info.get('is_whole_disk_image')\n or deploy_utils.get_boot_option(node) == 'local'):\n return image_info\n 
root_dir = get_http_boot_dir()\n i_info = node.instance_info\n labels = ('kernel', 'ramdisk')\n d_info = deploy_utils.get_image_instance_info(node)\n if not (i_info.get('kernel') and i_info.get('ramdisk')):\n glance_service = service.GlanceImageService(context=ctx)\n iproperties = glance_service.show(d_info['image_source'])['properties']\n for label in labels:\n i_info[label] = str(iproperties[label + '_id'])\n node.instance_info = i_info\n node.save()\n\n for label in labels:\n image_info[label] = (\n i_info[label],\n os.path.join(root_dir, node.uuid, label)\n )\n\n return image_info", "def get_uploaded_versions(self, modules=None):\n # Build a mapping: version -> list of modules that have it.\n versions = collections.defaultdict(list)\n for module, version_list in self.list_versions().iteritems():\n for version in version_list:\n versions[version].append(module)\n\n # Keep only versions that are deployed to all requested modules.\n modules = modules or self.modules\n actual_versions = [\n version for version, modules_with_it in versions.iteritems()\n if set(modules_with_it).issuperset(modules)\n ]\n\n # Sort by version number (best effort, nonconforming version names will\n # appear first in the list).\n def extract_version_num(version):\n parts = version.split('-', 1)\n try:\n parts[0] = int(parts[0])\n except ValueError:\n pass\n return tuple(parts)\n return sorted(actual_versions, key=extract_version_num)", "def get_files(stage):\n # get folder\n folder = stage_folder(stage)\n\n # Open all images\n data = []\n images = []\n global VALID_DATA, VALID_IMAGE\n for file in os.listdir(folder):\n name = file.lower()\n if name.startswith('.'):\n continue # skip hidden files\n # end if\n\n _, ext = path.splitext(name)\n if ext.lower() in VALID_IMAGE:\n images.append(name) # image file\n elif ext.lower() in VALID_DATA:\n data.append(name) # data file\n # end if\n # end for\n\n # for debugging\n if len(images):\n log(len(images), 'images', 'found on stage', stage)\n # end if\n if len(data):\n log(len(data), 'data', 'found on stage', stage)\n # end if\n\n return images, data", "def available_images_index(self):\n first = ct.c_long()\n last = ct.c_long()\n self.lib.GetNumberAvailableImages(ct.pointer(first), ct.pointer(last))\n\n return (first.value, last.value)", "def list_test_instances():\n run('ls -1 %s' % env.site_root)", "def index(self, req):\n context = req.environ['nova.context']\n filters = self._get_filters(req)\n images = self._image_service.index(context, filters=filters)\n images = common.limited(images, req)\n builder = self.get_builder(req).build\n return dict(images=[builder(image, detail=False) for image in images])", "def available_versions(self):\n return list(sorted(self.onxs))", "def image(images):\n return images[0]", "def images(self):\n return self._images", "def images(self):\n return self._images", "def get_image_ref() -> str:\n images_rq = request(\n method=\"GET\", url=app.config[\"IMAGE_REF\"], headers=build_header(),\n )\n if not images_rq.ok:\n HTTPError(f\"Can not get image id for virtual machine: {images_rq.status_code}\")\n\n [image] = images_rq.json()[\"images\"]\n return image[\"id\"]", "def list_images(ec2): # pragma: no coverage\n response = ec2.describe_images(Filters=[{'Name': 'is-public',\n 'Values': ['false']}])\n response.pop('ResponseMetadata')\n printy(\"{:12}\\t{:20}\\t\\tCreationDate:\".format(\"ImageId\", \"Name\"))\n\n for image in response['Images']:\n if len(image[\"Name\"]) > 20:\n image['Name'] = image['Name'][:20] + \"...\"\n 
print(\"{ImageId}\\t{Name:20}\\t\\t{CreationDate}\".format(**image))", "def local_images(self, images, task_vars):\n return [\n image for image in images\n if self.is_image_local(image, task_vars)\n ]", "def get(self, namespace, repository):\n repo_ref = registry_model.lookup_repository(namespace, repository)\n if repo_ref is None:\n raise NotFound()\n\n tags = registry_model.list_all_active_repository_tags(repo_ref)\n images_with_tags = defaultdict(list)\n for tag in tags:\n legacy_image_id = tag.manifest.legacy_image_root_id\n if legacy_image_id is not None:\n images_with_tags[legacy_image_id].append(tag)\n\n # NOTE: This is replicating our older response for this endpoint, but\n # returns empty for the metadata fields. This is to ensure back-compat\n # for callers still using the deprecated API, while not having to load\n # all the manifests from storage.\n return {\n \"images\": [\n {\n \"id\": image_id,\n \"created\": format_date(\n datetime.utcfromtimestamp((min([tag.lifetime_start_ts for tag in tags])))\n ),\n \"comment\": \"\",\n \"command\": \"\",\n \"size\": 0,\n \"uploading\": False,\n \"sort_index\": 0,\n \"tags\": [tag.name for tag in tags],\n \"ancestors\": \"\",\n }\n for image_id, tags in images_with_tags.items()\n ]\n }", "def images_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n images = self.client.images.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n continue\n\n for imagem in images:\n image = imagem.__dict__['attrs']\n image['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(image)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2)\n e[n] = d\n n = n + 1\n Rest.delete('Image', filter)\n Rest.post('Image', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))", "def versions(self) -> Dict[str, str]:\n self.__logger.debug('Eva.versions called')\n return self.__http_client.api_versions()", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "async def imageList(self, ctx: Context, imageType=\"icons\"):\n imageSingular = self.getSingularImageType(imageType)\n allImages = await self.config.guild(ctx.guild).get_attr(imageType)()\n if not allImages:\n await ctx.send(f\"There are no {imageType}, please add some first!\")\n return\n\n async with self.config.guild(ctx.guild).get_attr(f\"{imageType}Dates\")() as imageDates:\n imageDates = dict(sorted(imageDates.items()))\n msg = \"\"\n for changeDate, name in imageDates.items():\n # YYYY-MM-DD\n theDate = date.fromisoformat(f\"2020-{changeDate}\").strftime(\"%B %d\")\n msg += f\"{theDate}: {name}\\n\"\n notAssigned = set(allImages) - set(imageDates.values())\n if notAssigned:\n msg += f\"Unassigned: \"\n msg += \", \".join(notAssigned)\n pageList = []\n pages = list(pagify(msg, page_length=500))\n totalPages = len(pages)\n async for 
pageNumber, page in AsyncIter(pages).enumerate(start=1):\n embed = discord.Embed(\n title=f\"Server {imageSingular} changes for {ctx.guild.name}\", description=page\n )\n embed.set_footer(text=f\"Page {pageNumber}/{totalPages}\")\n pageList.append(embed)\n await menu(ctx, pageList, DEFAULT_CONTROLS)", "def get_image_by_version(self, region, version=None):\n pass", "def run(self):\n self._run()\n return {\"versions\": self.share_memory[\"latest_versions\"]}\\\n if len(self.share_memory[\"latest_versions\"]) > 0 else {}", "def images(self) -> Iterable[dto.Image]:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def _get_versioned_native_image(\n self,\n native_image: str,\n ) -> Union[str, None]:\n logger.info(\n f\"{self._pack_name} - Get Versioned Native Image - {native_image} - Started\"\n )\n\n native_image_config = (\n NativeImageConfig()\n ) # parsed docker_native_image_config.json file (a singleton obj)\n\n return native_image_config.get_native_image_reference(native_image)", "def get_versions(self):\n raise NotImplementedError", "def get_recent_images(num_images=30):\n folder = app.config['UPLOAD_FOLDER']\n\n init_image_info()\n\n # get list of last modified images - ignore .json file and files start with .\n files = ['/'.join((folder, file)) \\\n for file in os.listdir(folder) if ('json' not in file) \\\n and not (file.startswith('.')) ]\n\n # list of tuples (file_path, timestamp)\n last_modified_files = [(file, os.path.getmtime(file)) for file in files]\n print(last_modified_files)\n last_modified_files = sorted(last_modified_files,\n key=lambda t: t[1], reverse=True)\n num_stored_images = len(last_modified_files)\n\n # build a list of image information\n image_stats = []\n\n print(\"THE NUMBER OF STORED IMAGES IS: {}\".format(num_stored_images))\n\n if num_stored_images != 0:\n\n # read in image info\n with open(IMAGE_INFO_JSON, 'r') as f:\n info = json.load(f)\n\n for i, f in enumerate(last_modified_files):\n # set limit for rendering pictures\n if i > num_images: break\n\n path, filename = f[0], f[0].replace(folder, '').replace('/', '')\n cur_image_info = info.get(filename, {})\n\n print(\"CURRENT IMAGE INFO IS: {}\".format(cur_image_info))\n\n img = {\n 'path': path,\n 'labels': cur_image_info\n }\n print(\"CURRENT IMG LABEL DATA IS: {}\".format(img['labels']))\n image_stats.append(img)\n\n return image_stats, num_stored_images", "def get(isamAppliance, check_mode=False, force=False):\n return isamAppliance.invoke_get(\"Retrieving snapshots\", \"/snapshots\")", "def get_staged_images(self):\n with open(self.staging_path, \"r\") as f:\n return json.load(f)", "def latest_ready_image(self):\n return (\n self.algorithm_container_images.filter(ready=True)\n .order_by(\"-created\")\n .first()\n )" ]
[ "0.69780606", "0.6833153", "0.6587997", "0.6557733", "0.63813823", "0.63264614", "0.62088", "0.61655736", "0.5964975", "0.5890417", "0.5876325", "0.5871348", "0.5833859", "0.58310384", "0.5792491", "0.5789766", "0.5733587", "0.57318777", "0.57247746", "0.57242477", "0.5707273", "0.5669875", "0.5666854", "0.56588054", "0.56404924", "0.5640021", "0.563311", "0.55968994", "0.5567246", "0.55581725", "0.55491793", "0.5549047", "0.55285656", "0.55169743", "0.55147004", "0.55142444", "0.5503743", "0.5500284", "0.5499037", "0.5491617", "0.5479807", "0.54792064", "0.546918", "0.5441756", "0.54385567", "0.54361004", "0.5435027", "0.54298484", "0.5425541", "0.5419532", "0.5412315", "0.53975457", "0.53943515", "0.53875333", "0.5380056", "0.5375327", "0.5374483", "0.5372246", "0.53688073", "0.5363881", "0.53600794", "0.53562444", "0.53451747", "0.53437585", "0.5342656", "0.53424037", "0.5340699", "0.5339041", "0.5336303", "0.53263134", "0.5319255", "0.53177565", "0.5316519", "0.53150463", "0.5296011", "0.5295181", "0.52950084", "0.5287675", "0.52830505", "0.5275434", "0.52714723", "0.5269812", "0.5268347", "0.5268347", "0.52644444", "0.5255086", "0.52540654", "0.5251281", "0.52442735", "0.52429044", "0.5238138", "0.5231797", "0.52273613", "0.5225735", "0.5217076", "0.5216269", "0.52007693", "0.51946753", "0.5193404", "0.5192829", "0.5188668" ]
0.0
-1
Turn a blank OneFS node into a usable device
def config(self, cluster_name, name, username, version, int_netmask, int_ip_low, int_ip_high, ext_netmask, ext_ip_low, ext_ip_high, gateway, dns_servers, encoding, sc_zonename, smartconnect_ip, join_cluster, compliance, txn_id):
    logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())
    resp = {'content' : {}, 'error': None, 'params': {}}
    logger.info('Task starting')
    nodes = vmware.show_onefs(username)
    node = nodes.get(name, None)
    if not node:
        error = "No node named {} found".format(name)
        resp['error'] = error
        logger.error(error)
        return resp
    elif node['meta']['configured']:
        error = "Cannot configure a node that's already configured"
        resp['error'] = error
        logger.error(error)
    else:
        # Lets set it up!
        logger.info('Found node')
        console_url = node['console']
        if join_cluster:
            logger.info('Joining node to cluster {}'.format(cluster_name))
            setup_onefs.join_existing_cluster(console_url, cluster_name, compliance, logger)
        else:
            logger.info('Setting up new cluster named {}'.format(cluster_name))
            setup_onefs.configure_new_cluster(version=version,
                                              console_url=console_url,
                                              cluster_name=cluster_name,
                                              int_netmask=int_netmask,
                                              int_ip_low=int_ip_low,
                                              int_ip_high=int_ip_high,
                                              ext_netmask=ext_netmask,
                                              ext_ip_low=ext_ip_low,
                                              ext_ip_high=ext_ip_high,
                                              gateway=gateway,
                                              dns_servers=dns_servers,
                                              encoding=encoding,
                                              sc_zonename=sc_zonename,
                                              smartconnect_ip=smartconnect_ip,
                                              compliance=compliance,
                                              logger=logger)
        node['meta']['configured'] = True
        vmware.update_meta(username, name, node['meta'])
    logger.info('Task complete')
    return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addDevice(self, node, fullDeviceName, device):", "def _get_device(node):\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n\n # Save the device information\n node[\"devices\"] = {}\n node[\"devices\"][\"dpdk_devices\"] = vpp.get_dpdk_devices()\n node[\"devices\"][\"kernel_devices\"] = vpp.get_kernel_devices()\n node[\"devices\"][\"other_devices\"] = vpp.get_other_devices()\n node[\"devices\"][\"linkup_devices\"] = vpp.get_link_up_devices()", "def load_device():", "def removeDevice(self, node, fullDeviceName):", "def _start_oef_node(self, network_node):", "def create_onedrive_mounting_point():\n return None", "def fp_from_device(new_light_devices, new_medium_devices, new_heavy_devices):\n device = kg_to_tonnes((new_light_devices) * 75 + \\\n (new_medium_devices) * 200 + (new_heavy_devices) * 800)\n return device", "def to_device(model, device):\n p = next(model.parameters())\n if p.device == device:\n return\n model.to(device)", "def getNode(self, node, includeDevices=True, flatDeviceHierarchy=False):", "def to_device(device, x):\n if device is None:\n return x\n elif device < 0:\n return cuda.to_cpu(x)\n else:\n return cuda.to_gpu(x, device)", "def test_get_node_hardware_fast(self):\n pass", "def test_get_node_hardware(self):\n pass", "def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "def __init__(self, parent, address, name):\r\n # The id (node_def_id) is the address because each hub has a unique nodedef in the profile.\r\n self.did = address\r\n address = \"d\" + address\r\n self.id = address\r\n super(HarmonyDevice, self).__init__(parent.controller, parent.address, address, name)\r\n #self.name = name\r\n #self.address = address\r\n self.hub = parent\r\n # Only Hub devices are polled.\r\n self.do_poll = False", "def __init__(self, node: Node, device_info: DeviceInfo | None = None) -> None:\n super().__init__(node, device_info=device_info)\n self._uom = self._node.uom\n if isinstance(self._uom, list):\n self._uom = self._node.uom[0]\n self._hvac_action: str | None = None\n self._hvac_mode: str | None = None\n self._fan_mode: str | None = None\n self._temp_unit = None\n self._current_humidity = 0\n self._target_temp_low = 0\n self._target_temp_high = 0", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def __init__(self, g: 'graph.Graph', node_id: int, name: str, op_name: str,\n device: str = \"\"):\n Node.__init__(self, g, node_id=node_id, name=name,\n op_name=op_name, outputs=[], device=device)\n self._attributes = []\n self._inputs = []\n self._control_inputs = []", "def make_hoomd_device(args):\n if args.device == 'CPU':\n device = hoomd.device.CPU()\n elif args.device == 'GPU':\n device = hoomd.device.GPU()\n else:\n raise ValueError(f'Invalid device {args.device}.')\n\n if not args.verbose:\n device.notice_level = 0\n\n return device", "def get_device(arn=None):\n pass", "def setup_device(n_gpus: int) -> object:\n if n_gpus >= 1 and torch.cuda.is_available():\n LOG.info('\\n CUDA is available! using GPU...')\n return torch.device('cuda')\n else:\n LOG.info('\\n Using CPU...')\n return torch.device('cpu')", "def convert_full(node, **kwargs):\n # ToDo: Use Constant or ConstantOfShape, when Issue #15101 is resolved?\n name, input_nodes, attrs = get_inputs(node, kwargs)\n del input_nodes\n\n # Convert \"0\"s dimensions to \"1\"s. 
This is a workaround for the case, where\n # mxnet symbols can broadcast \"0\"s, while ONNX can only broadcast over \"1\"s\n shape = convert_string_to_list(attrs[\"shape\"])\n shape = tuple(dim if dim else 1 for dim in shape)\n\n value = {\n '_zeros': 0.0,\n '_ones': 1.0,\n '_full': eval(attrs.get('value', '0')),\n }[node['op']]\n dtype = attrs.get('dtype')\n data = np.full(shape, value, dtype)\n\n return create_helper_tensor_node(data, name, kwargs)", "def test_get_device_detects_none(hass, mock_openzwave):\n node = MockNode()\n value = MockValue(data=0, node=node)\n values = MockEntityValues(primary=value, node=node)\n\n device = cover.get_device(hass=hass, node=node, values=values, node_config={})\n assert device is None", "def connect_to_device(client: virl.ClientLibrary, node: virl_ty.Node) -> netmiko.BaseConnection:\n\tconn = netmiko.ConnectHandler(device_type='terminal_server',\n\t\thost=client.get_host(),\n\t\tusername=client.username,\n\t\tpassword=client.password,\n\t)\n\n\tconn.write_channel('\\r')\n\tconn.write_channel(f'open /{node.lab.id}/{node.id}/0\\r')\n\n\t#conn.write_channel('\\r\\n')\n\tsleep(0.5)\n\n\t#conn.write_channel('\\r\\n')\n\n\t# try to activate the device\n\tfor _ in range(3):\n\t\tconn.write_channel('\\r\\n')\n\t\tsleep(0.4)\n\n\tnode_def = node.node_definition\n\tdevice_type = None\n\tif node_def == 'iosv' or node_def == 'iosvl2':\n\t\tdevice_type = 'cisco_ios'\n\telif node_def == 'asav':\n\t\tdevice_type = 'cisco_asa'\n\telse:\n\t\tprint(f\"Unrecognized node_definition: {repr(node_def)}, defaulting to 'cisco_ios' netmiko device_type\", file=sys.stderr)\n\t\tdevice_type = 'cisco_ios'\n\t\n\t# tell netmiko what our actual device is\n\tnetmiko.redispatch(conn, device_type)\n\n\tconn.write_channel('\\r\\n\\r\\n')\n\t#conn.write_channel('\\r\\n')\n\tsleep(0.5)\n\tconn.write_channel('\\r\\n\\r\\n')\n\n\tconn.find_prompt()\n\n\tconn.disable_paging()\n\n\treturn conn", "def part_device(part_number):\n return \"/dev/mmcblk0p\" + part_number", "def test_create_device1(self):\n pass", "def zero_node():\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"metric\", \"clear\")\n else:\n cmd = _traffic_line(\"-z\")\n\n return _subprocess(cmd)", "def get_s1n_if_exist(self, node: Node) -> Node:\n if isinstance(node, Device):\n log.error(\"get_s1n_if_exist hits DEVICE type (unexpected)\")\n return node\n if isinstance(node, Socket1N):\n return node\n return self.get_s1n_if_exist(node.ds[0].ds)", "def setup_device_and_gradient(f: hessQuik.networks.NN, network_wrapper: str = 'hessQuik', device: str = 'cpu') \\\n -> torch.nn.Module:\n # map to device\n f = f.to(device)\n\n if network_wrapper == 'PytorchAD':\n f = net.NNPytorchAD(f)\n\n if network_wrapper == 'PytorchHessian':\n f = net.NNPytorchHessian(f)\n\n return f", "def get_leaf_from_s1n(self, node: Node, idx: int) -> Device:\n return self.get_downstream_device(node.ds[idx].ds)", "def device(self):\n return self._vars[0].device", "def generate_device_tree(self, outdir: str) -> None:\n\n state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1)\n root = FdtNode(\"/\")\n root.append(state.addrCellsProperty())\n root.append(state.sizeCellsProperty())\n root.appendCompatible([\"riscv-virtio\"])\n\n for mem_range in self.mem_ranges:\n node = FdtNode(f\"memory@{int(mem_range.start):x}\")\n node.append(FdtPropertyStrings(\"device_type\", [\"memory\"]))\n node.append(\n FdtPropertyWords(\n \"reg\",\n state.addrCells(mem_range.start)\n + state.sizeCells(mem_range.size()),\n )\n )\n root.append(node)\n\n # See 
Documentation/devicetree/bindings/riscv/cpus.txt for details.\n cpus_node = FdtNode(\"cpus\")\n cpus_state = FdtState(addr_cells=1, size_cells=0)\n cpus_node.append(cpus_state.addrCellsProperty())\n cpus_node.append(cpus_state.sizeCellsProperty())\n # Used by the CLINT driver to set the timer frequency. Value taken from\n # RISC-V kernel docs (Note: freedom-u540 is actually 1MHz)\n cpus_node.append(FdtPropertyWords(\"timebase-frequency\", [100000000]))\n\n for i, core in enumerate(self.get_processor().get_cores()):\n node = FdtNode(f\"cpu@{i}\")\n node.append(FdtPropertyStrings(\"device_type\", \"cpu\"))\n node.append(FdtPropertyWords(\"reg\", state.CPUAddrCells(i)))\n node.append(FdtPropertyStrings(\"mmu-type\", \"riscv,sv48\"))\n node.append(FdtPropertyStrings(\"status\", \"okay\"))\n node.append(FdtPropertyStrings(\"riscv,isa\", \"rv64imafdc\"))\n # TODO: Should probably get this from the core.\n freq = self.clk_domain.clock[0].frequency\n node.append(FdtPropertyWords(\"clock-frequency\", freq))\n node.appendCompatible([\"riscv\"])\n int_phandle = state.phandle(f\"cpu@{i}.int_state\")\n node.appendPhandle(f\"cpu@{i}\")\n\n int_node = FdtNode(\"interrupt-controller\")\n int_state = FdtState(interrupt_cells=1)\n int_phandle = int_state.phandle(f\"cpu@{i}.int_state\")\n int_node.append(int_state.interruptCellsProperty())\n int_node.append(FdtProperty(\"interrupt-controller\"))\n int_node.appendCompatible(\"riscv,cpu-intc\")\n int_node.append(FdtPropertyWords(\"phandle\", [int_phandle]))\n\n node.append(int_node)\n cpus_node.append(node)\n\n root.append(cpus_node)\n\n soc_node = FdtNode(\"soc\")\n soc_state = FdtState(addr_cells=2, size_cells=2)\n soc_node.append(soc_state.addrCellsProperty())\n soc_node.append(soc_state.sizeCellsProperty())\n soc_node.append(FdtProperty(\"ranges\"))\n soc_node.appendCompatible([\"simple-bus\"])\n\n # CLINT node\n clint = self.platform.clint\n clint_node = clint.generateBasicPioDeviceNode(\n soc_state, \"clint\", clint.pio_addr, clint.pio_size\n )\n int_extended = list()\n for i, core in enumerate(self.get_processor().get_cores()):\n phandle = soc_state.phandle(f\"cpu@{i}.int_state\")\n int_extended.append(phandle)\n int_extended.append(0x3)\n int_extended.append(phandle)\n int_extended.append(0x7)\n clint_node.append(\n FdtPropertyWords(\"interrupts-extended\", int_extended)\n )\n clint_node.appendCompatible([\"riscv,clint0\"])\n soc_node.append(clint_node)\n\n # PLIC node\n plic = self.platform.plic\n plic_node = plic.generateBasicPioDeviceNode(\n soc_state, \"plic\", plic.pio_addr, plic.pio_size\n )\n\n int_state = FdtState(addr_cells=0, interrupt_cells=1)\n plic_node.append(int_state.addrCellsProperty())\n plic_node.append(int_state.interruptCellsProperty())\n\n phandle = int_state.phandle(plic)\n plic_node.append(FdtPropertyWords(\"phandle\", [phandle]))\n plic_node.append(FdtPropertyWords(\"riscv,ndev\", [plic.n_src - 1]))\n\n int_extended = list()\n for i, core in enumerate(self.get_processor().get_cores()):\n phandle = state.phandle(f\"cpu@{i}.int_state\")\n int_extended.append(phandle)\n int_extended.append(0xB)\n int_extended.append(phandle)\n int_extended.append(0x9)\n\n plic_node.append(FdtPropertyWords(\"interrupts-extended\", int_extended))\n plic_node.append(FdtProperty(\"interrupt-controller\"))\n plic_node.appendCompatible([\"riscv,plic0\"])\n\n soc_node.append(plic_node)\n\n # PCI\n pci_state = FdtState(\n addr_cells=3, size_cells=2, cpu_cells=1, interrupt_cells=1\n )\n pci_node = FdtNode(\"pci\")\n\n if 
int(self.platform.pci_host.conf_device_bits) == 8:\n pci_node.appendCompatible(\"pci-host-cam-generic\")\n elif int(self.platform.pci_host.conf_device_bits) == 12:\n pci_node.appendCompatible(\"pci-host-ecam-generic\")\n else:\n m5.fatal(\"No compatibility string for the set conf_device_width\")\n\n pci_node.append(FdtPropertyStrings(\"device_type\", [\"pci\"]))\n\n # Cell sizes of child nodes/peripherals\n pci_node.append(pci_state.addrCellsProperty())\n pci_node.append(pci_state.sizeCellsProperty())\n pci_node.append(pci_state.interruptCellsProperty())\n # PCI address for CPU\n pci_node.append(\n FdtPropertyWords(\n \"reg\",\n soc_state.addrCells(self.platform.pci_host.conf_base)\n + soc_state.sizeCells(self.platform.pci_host.conf_size),\n )\n )\n\n # Ranges mapping\n # For now some of this is hard coded, because the PCI module does not\n # have a proper full understanding of the memory map, but adapting the\n # PCI module is beyond the scope of what I'm trying to do here.\n # Values are taken from the ARM VExpress_GEM5_V1 platform.\n ranges = []\n # Pio address range\n ranges += self.platform.pci_host.pciFdtAddr(space=1, addr=0)\n ranges += soc_state.addrCells(self.platform.pci_host.pci_pio_base)\n ranges += pci_state.sizeCells(0x10000) # Fixed size\n\n # AXI memory address range\n ranges += self.platform.pci_host.pciFdtAddr(space=2, addr=0)\n ranges += soc_state.addrCells(self.platform.pci_host.pci_mem_base)\n ranges += pci_state.sizeCells(0x40000000) # Fixed size\n pci_node.append(FdtPropertyWords(\"ranges\", ranges))\n\n # Interrupt mapping\n plic_handle = int_state.phandle(plic)\n int_base = self.platform.pci_host.int_base\n\n interrupts = []\n\n for i in range(int(self.platform.pci_host.int_count)):\n interrupts += self.platform.pci_host.pciFdtAddr(\n device=i, addr=0\n ) + [int(i) + 1, plic_handle, int(int_base) + i]\n\n pci_node.append(FdtPropertyWords(\"interrupt-map\", interrupts))\n\n int_count = int(self.platform.pci_host.int_count)\n if int_count & (int_count - 1):\n fatal(\"PCI interrupt count should be power of 2\")\n\n intmask = self.platform.pci_host.pciFdtAddr(\n device=int_count - 1, addr=0\n ) + [0x0]\n pci_node.append(FdtPropertyWords(\"interrupt-map-mask\", intmask))\n\n if self.platform.pci_host._dma_coherent:\n pci_node.append(FdtProperty(\"dma-coherent\"))\n\n soc_node.append(pci_node)\n\n # UART node\n uart = self.platform.uart\n uart_node = uart.generateBasicPioDeviceNode(\n soc_state, \"uart\", uart.pio_addr, uart.pio_size\n )\n uart_node.append(\n FdtPropertyWords(\"interrupts\", [self.platform.uart_int_id])\n )\n uart_node.append(FdtPropertyWords(\"clock-frequency\", [0x384000]))\n uart_node.append(\n FdtPropertyWords(\"interrupt-parent\", soc_state.phandle(plic))\n )\n uart_node.appendCompatible([\"ns8250\"])\n soc_node.append(uart_node)\n\n # VirtIO MMIO disk node\n disk = self.disk\n disk_node = disk.generateBasicPioDeviceNode(\n soc_state, \"virtio_mmio\", disk.pio_addr, disk.pio_size\n )\n disk_node.append(FdtPropertyWords(\"interrupts\", [disk.interrupt_id]))\n disk_node.append(\n FdtPropertyWords(\"interrupt-parent\", soc_state.phandle(plic))\n )\n disk_node.appendCompatible([\"virtio,mmio\"])\n soc_node.append(disk_node)\n\n # VirtIO MMIO rng node\n rng = self.rng\n rng_node = rng.generateBasicPioDeviceNode(\n soc_state, \"virtio_mmio\", rng.pio_addr, rng.pio_size\n )\n rng_node.append(FdtPropertyWords(\"interrupts\", [rng.interrupt_id]))\n rng_node.append(\n FdtPropertyWords(\"interrupt-parent\", soc_state.phandle(plic))\n )\n 
rng_node.appendCompatible([\"virtio,mmio\"])\n soc_node.append(rng_node)\n\n root.append(soc_node)\n\n fdt = Fdt()\n fdt.add_rootnode(root)\n fdt.writeDtsFile(os.path.join(outdir, \"device.dts\"))\n fdt.writeDtbFile(os.path.join(outdir, \"device.dtb\"))", "def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )", "def device_info(node):\n\n if \"cpu\" in node and \"total_mbufs\" in node[\"cpu\"]:\n total_mbufs = node[\"cpu\"][\"total_mbufs\"]\n if total_mbufs != 0:\n print(\"Total Number of Buffers: {}\".format(total_mbufs))\n\n vpp = VppPCIUtil(node)\n vpp.get_all_devices()\n linkup_devs = vpp.get_link_up_devices()\n if len(linkup_devs):\n print(\"\\nDevices with link up (can not be used with VPP):\")\n vpp.show_vpp_devices(linkup_devs, show_header=False)\n # for dev in linkup_devs:\n # print (\" \" + dev)\n kernel_devs = vpp.get_kernel_devices()\n if len(kernel_devs):\n print(\"\\nDevices bound to kernel drivers:\")\n vpp.show_vpp_devices(kernel_devs, show_header=False)\n else:\n print(\"\\nNo devices bound to kernel drivers\")\n\n dpdk_devs = vpp.get_dpdk_devices()\n if len(dpdk_devs):\n print(\"\\nDevices bound to DPDK drivers:\")\n vpp.show_vpp_devices(dpdk_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices bound to DPDK drivers\")\n\n other_devs = vpp.get_other_devices()\n if len(other_devs):\n print(\"\\nDevices not bound to Kernel or DPDK drivers:\")\n vpp.show_vpp_devices(other_devs, show_interfaces=True, show_header=False)\n else:\n print(\"\\nNo devices not bound to Kernel or DPDK drivers\")\n\n vpputl = VPPUtil()\n interfaces = vpputl.get_hardware(node)\n if interfaces == {}:\n return\n\n print(\"\\nDevices in use by VPP:\")\n\n if len(interfaces.items()) < 2:\n print(\"None\")\n return\n\n print(\n \"{:30} {:4} {:4} {:7} {:4} {:7}\".format(\n \"Name\", \"Numa\", \"RXQs\", \"RXDescs\", \"TXQs\", \"TXDescs\"\n )\n )\n for intf in sorted(interfaces.items()):\n name = intf[0]\n value = intf[1]\n if name == \"local0\":\n continue\n numa = rx_qs = rx_ds = tx_qs = tx_ds = \"\"\n if \"numa\" in value:\n numa = int(value[\"numa\"])\n if \"rx queues\" in value:\n rx_qs = int(value[\"rx queues\"])\n if \"rx descs\" in value:\n rx_ds = int(value[\"rx descs\"])\n if \"tx queues\" in value:\n tx_qs = int(value[\"tx queues\"])\n if \"tx descs\" in value:\n tx_ds = int(value[\"tx descs\"])\n\n print(\n \"{:30} {:>4} {:>4} {:>7} {:>4} {:>7}\".format(\n name, numa, rx_qs, rx_ds, tx_qs, tx_ds\n )\n )", "def device():\n return G.DEVICE", "def setup_device(self, conf: DictConfig) -> device:\n device = torch.device(conf.runner.device) if torch.cuda.is_available() else torch.device('cpu')\n\n return device", "def set_device(sys_device_id):\n device_id = -1\n cuda = (sys_device_id != -1)\n if cuda:\n # CUDA_VISIBLE_DEVICE is a list, and device_id is the index of its members.\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = str(sys_device_id)\n device_id = 0\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO", "def create_convert_node(node):\n\n try:\n file = read_knob_val(node, \"file\").getValue()\n first = int(read_knob_val(node, \"first\").getValue())\n last = int(read_knob_val(node, \"last\").getValue())\n first2 = int(read_knob_val(node, \"origfirst\").getValue())\n last2 
= int(read_knob_val(node, \"origlast\").getValue())\n format = read_knob_val(node, \"format\").value()\n except Exception, e:\n return None\n\n cv = c.ConvertNode(file, first, last, first2, last2, format)\n return cv", "def get_default_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return device", "def do_Device (self, line):", "def node():\n return uname().node", "def node():\n return uname().node", "def run_node(self, node, device='CPU'): # pylint: disable=unused-argument\n op_name = node.op_type\n attr = self._parse_attr(node.attribute)\n new_op, new_attr = _convert_operator(op_name, attr)\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # some workarounds for onnx problem\n new_attr = self._fix_bias(new_op, new_attr, len(sym_list))\n new_attr = self._fix_channels(new_op, new_attr, list(node.input))\n\n # calling again to get new symbols after some workarounds\n sym_list = [mx.sym.Variable(node_name) for node_name in node.input]\n\n # onnx slice works on multiple axes whereas mxnet's slice_axis is for single axis\n if op_name == 'Slice':\n op = self._fix_slice(sym_list, new_attr)\n elif op_name == 'Squeeze':\n op = self._fix_squeeze(sym_list, new_attr)\n else:\n op = new_op(*sym_list, **new_attr)\n\n node_output = self._fix_outputs(op_name, node.output)\n for k, i in zip(list(node_output), range(len(node_output))):\n self._nodes[k] = op[i]\n\n # now return the outputs\n return op", "def load_devices():", "def device():\n return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", "def device(request):\n d = request.param()\n\n # enable GPU error checking\n if isinstance(d, hoomd.device.GPU):\n d.gpu_error_checking = True\n\n return d", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def fission_nodes():\n def _nodes(num):\n assert num <= 253\n return [base.BaseNode(f\"192.168.4.{i}\") for i in range(1, num + 1)]\n yield _nodes\n base.reset()", "def fission_node():\n def _node(ip):\n return base.BaseNode(ip)\n yield _node\n base.reset()", "def get_null_dev(for_writing = True):\n if for_writing:\n method = 'w'\n else:\n method = 'r'\n try:\n if conf.is_windows:\n f = open('nul', method)\n else:\n f = open('/dev/null', method)\n return f\n except Exception as e:\n log.error(\"failed to open NULL device: {0}\", e)\n raise SysError(\"limiter system error\")", "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def create_empty_node():\n from linked_list import Node\n return Node()", "def __init__(self, node):\n super().__init__(node, USB_MOTION_ID)\n self.node_callbacks = (USB_AVAILABLE_ID, USB_MOTION_ID)", "def test_get_node_drive(self):\n pass", "def _dummy_node(self) -> CFNode:\n node = CFNode()\n self._graph.add_node(node)\n return node", "def get_default_device():\n return MXNET_DEFAULT_DEVICE", "def __init__(self):\n self._device_info = None", "def test_01_Device0(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_device = self.m_device_obj\n print(PrettyFormatAny.form(l_device, 'C2-01-A - Device'))\n self.assertEqual(l_device.Name, TESTING_LIGHT_NAME_0)\n self.assertEqual(l_device.Key, TESTING_LIGHT_KEY_0)\n self.assertEqual(l_device.Active, TESTING_LIGHT_ACTIVE_0)\n self.assertEqual(l_device.DeviceFamily, TESTING_DEVICE_FAMILY_INSTEON)\n self.assertEqual(str(l_device.DeviceType), 
TESTING_LIGHT_DEVICE_TYPE_0)\n self.assertEqual(str(l_device.DeviceSubType), TESTING_LIGHT_DEVICE_SUBTYPE_0)\n self.assertEqual(l_device.RoomName, TESTING_LIGHT_ROOM_NAME_0)", "def modify_devices(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n other_devices = devices[\"other_devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n if other_devices:\n self._modify_other_devices(\n node, other_devices, kernel_devices, dpdk_devices\n )\n\n # Get the devices again for this node\n self._get_device(node)\n devices = node[\"devices\"]\n kernel_devices = devices[\"kernel_devices\"]\n dpdk_devices = devices[\"dpdk_devices\"]\n\n klen = len(kernel_devices)\n if klen > 0:\n print(\"\\nThese devices are safe to be used with VPP.\\n\")\n VppPCIUtil.show_vpp_devices(kernel_devices)\n question = (\n \"\\nWould you like to use any of these \" \"device(s) for VPP [y/N]? \"\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd = {}\n for dit in kernel_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to use device {} \".format(dvid)\n question += \"for VPP [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppd[dvid] = device\n for dit in vppd.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n question = \"Would you like to bind the driver {} for {} [y/N]? \".format(\n driver, dvid\n )\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n logging.debug(\n \"Binding device {} to driver {}\".format(\n dvid, driver\n )\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\n \"Could not bind device {}\".format(dvid)\n )\n dpdk_devices[dvid] = device\n del kernel_devices[dvid]\n\n dlen = len(dpdk_devices)\n if dlen > 0:\n print(\"\\nThese device(s) are already using DPDK.\\n\")\n VppPCIUtil.show_vpp_devices(dpdk_devices, show_interfaces=False)\n question = \"\\nWould you like to remove any of \"\n question += \"these device(s) [y/N]? \"\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n question = \"Would you like to remove {} [y/N]? 
\".format(dvid)\n answer = self._ask_user_yn(question, \"n\")\n if answer == \"y\":\n vppdl[dvid] = device\n for dit in vppdl.items():\n dvid = dit[0]\n device = dit[1]\n if (\n \"unused\" in device\n and len(device[\"unused\"]) != 0\n and device[\"unused\"][0] != \"\"\n ):\n driver = device[\"unused\"][0]\n logging.debug(\n \"Binding device {} to driver {}\".format(dvid, driver)\n )\n ret = VppPCIUtil.bind_vpp_device(node, driver, dvid)\n if ret:\n logging.debug(\"Could not bind device {}\".format(dvid))\n else:\n kernel_devices[dvid] = device\n del dpdk_devices[dvid]\n\n interfaces = {}\n for dit in dpdk_devices.items():\n dvid = dit[0]\n device = dit[1]\n VppPCIUtil.vpp_create_interface(interfaces, dvid, device)\n node[\"interfaces\"] = interfaces\n\n self._update_auto_config()\n self.updateconfig()", "def create_knx_device(\n hass: HomeAssistant,\n platform: SupportedPlatforms,\n knx_module: XKNX,\n config: ConfigType,\n) -> XknxDevice:\n if platform is SupportedPlatforms.light:\n return _create_light(knx_module, config)\n\n if platform is SupportedPlatforms.cover:\n return _create_cover(knx_module, config)\n\n if platform is SupportedPlatforms.climate:\n return _create_climate(knx_module, config)\n\n if platform is SupportedPlatforms.switch:\n return _create_switch(knx_module, config)\n\n if platform is SupportedPlatforms.sensor:\n return _create_sensor(knx_module, config)\n\n if platform is SupportedPlatforms.notify:\n return _create_notify(knx_module, config)\n\n if platform is SupportedPlatforms.scene:\n return _create_scene(knx_module, config)\n\n if platform is SupportedPlatforms.binary_sensor:\n return _create_binary_sensor(hass, knx_module, config)\n\n if platform is SupportedPlatforms.weather:\n return _create_weather(knx_module, config)", "def test_device_property(coresys):\n device = Device(\n \"ttyACM0\",\n Path(\"/dev/ttyACM0\"),\n Path(\"/sys/bus/usb/001\"),\n \"tty\",\n None,\n [Path(\"/dev/serial/by-id/fixed-device\")],\n {\"MAJOR\": \"5\", \"MINOR\": \"10\"},\n [],\n )\n\n assert device.by_id == device.links[0]\n assert device.major == 5\n assert device.minor == 10", "def toggleCamGeoDisplay():\n\n sel = nuke.selectedNodes()\n\n # on a selection\n good = []\n goodCam = [\"Camera2\",\"Camera\", \"hubCamera\"]\n goodGeo = [\"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\", \"Axis\", \"Axis2\"]\n if (int(str(len(sel))))>0:\n nodes = nuke.selectedNodes()\n for node in nodes:\n if node.Class() in goodCam+goodGeo:\n if node['display'].value() == \"off\" :\n if node.Class() in goodCam:\n node['display'].setValue('wireframe')\n if node.Class() in goodGeo:\n node['display'].setValue('textured')\n #node['label'].setValue(\"\")\n node['note_font_color'].setValue(0)\n node['tile_color'].setValue(0)\n print node.name()+\" display on\"\n else:\n node['display'].setValue('off')\n #node['label'].setValue(\"DISPLAY OFF !!!\")\n node['note_font_color'].setValue(4120346367)\n node['tile_color'].setValue(573912575)\n print node.name()+\" display off\"\n\n # fill good[] if there is good nodes in the selection\n\n if node.Class() in goodCam:\n good.append(node.name())\n if node.Class() in goodGeo:\n good.append(node.name())\n if not good:\n nuke.message(\"there is no camera or readGeo in the selection\")\n\n # on all the readGeos and Cameras\n\n else:\n nodeL = []\n all = nuke.allNodes()\n for node in all:\n if node.Class() in goodCam+goodGeo:\n nodeL.append(node.name())\n for node in nodeL:\n if nuke.toNode(node)['display'].value() == \"off\":\n if 
nuke.toNode(node).Class() in goodCam:\n nuke.toNode(node)['display'].setValue('wireframe')\n if nuke.toNode(node).Class() in goodGeo:\n nuke.toNode(node)['display'].setValue('textured')\n nuke.toNode(node)['label'].setValue(\"\")\n nuke.toNode(node)['note_font_color'].setValue(0)\n nuke.toNode(node)['tile_color'].setValue(0)\n print nuke.toNode(node).name()+\" display on\"\n else:\n nuke.toNode(node)['display'].setValue('off')\n nuke.toNode(node)['label'].setValue(\"DISPLAY OFF !!!\")\n nuke.toNode(node)['note_font_color'].setValue(4120346367)\n nuke.toNode(node)['tile_color'].setValue(573912575)\n print nuke.toNode(node).name()+\" display off\"\n \n if not nodeL:\n nuke.message(\"there is no cameras or readGeos in this scene\")", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def set_device(in_arg): \n \n return torch.device(\"cuda\" if torch.cuda.is_available() and in_arg.gpu == 1 else \"cpu\")", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def to(self, device):\n self.device = device\n self.model.to(self.device)", "def __init__(self, node, mac, sensor_id):\n super().__init__(node, mac)\n self.sensor_id = sensor_id\n self.sensor_type = SENSORS[sensor_id]\n self.node_callbacks = (AVAILABLE_SENSOR_ID, sensor_id)", "def get_device(link):\n device = Device(\"\",0,0,0,0,0)\n device.link = link\n return device.identify()", "def _create_device(self):\n project_page = 'https://garage.maemo.org/projects/brisa'\n self.device = Device('urn:schemas-upnp-org:device:BinaryLight:1',\n self.server_name,\n manufacturer='Brisa Team. Embedded Laboratory '\\\n 'and INdT Brazil',\n manufacturer_url=project_page,\n model_name='Binary Light Device',\n model_description='A UPnP Binary Light Device',\n model_number='1.0',\n model_url=project_page)", "def get_device_info(ns, device, human_friendly):\n if device.NumberOfBlocks and device.BlockSize:\n size = size2str(device.NumberOfBlocks * device.BlockSize, human_friendly)\n else:\n size = 'N/A'\n\n fslabel = fs.get_device_format_label(ns, device)\n return (device.DeviceID,\n device.Name,\n device.ElementName,\n size,\n fslabel)", "def test_create_device_data(self):\n pass", "def dummy_head_node(mocker):\n mocker.patch(\n \"pcluster.config.cluster_config.HeadNodeNetworking.availability_zone\",\n new_callable=PropertyMock(return_value=\"us-east-1a\"),\n )\n head_node_networking = HeadNodeNetworking(\n subnet_id=\"dummy-subnet-1\", proxy=Proxy(http_proxy_address=\"http://10.0.0.164:3129\")\n )\n head_node_networking.additional_security_groups = [\"additional-dummy-sg-1\"]\n head_node_dcv = Dcv(enabled=True, port=1024)\n head_node_imds = Imds(secured=True)\n ssh = HeadNodeSsh(key_name=\"test\")\n\n custom_actions = CustomActions(\n on_node_start=[\n CustomAction(script=\"https://tests1\", args=[\"arg1\", \"arg2\"]),\n CustomAction(script=\"https://tests2\", args=[\"arg1\", \"arg2\"]),\n ],\n on_node_updated=CustomAction(script=\"https://testus\", args=[\"arg1\", \"arg2\"]),\n on_node_configured=None,\n )\n\n head_node = HeadNode(\n instance_type=\"fake\",\n networking=head_node_networking,\n ssh=ssh,\n dcv=head_node_dcv,\n imds=head_node_imds,\n custom_actions=custom_actions,\n )\n\n return head_node", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda:0')\n else:\n return torch.device('cpu')", "def test_02_Device1(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_2\n self.m_api = 
FamUtil._get_family_device_api(self.m_pyhouse_obj, self.m_device_obj)\n print(PrettyFormatAny.form(self.m_device_obj, 'C2-02-A - Device'))\n self.assertEqual(self.m_device_obj.Name, TESTING_LIGHT_NAME_1)\n self.assertEqual(self.m_device_obj.Key, TESTING_LIGHT_KEY_1)\n self.assertEqual(self.m_device_obj.Active, TESTING_LIGHT_ACTIVE_1)\n self.assertEqual(self.m_device_obj.DeviceFamily, TESTING_DEVICE_FAMILY_UPB)\n self.assertEqual(str(self.m_device_obj.DeviceType), TESTING_LIGHT_DEVICE_TYPE_0)\n self.assertEqual(str(self.m_device_obj.DeviceSubType), TESTING_LIGHT_DEVICE_SUBTYPE_0)\n self.assertEqual(self.m_device_obj.RoomName, TESTING_LIGHT_ROOM_NAME_0)", "def guess_vserver_device():\n\n s = commands.getoutput('/bin/mount | /bin/grep tagxid | /usr/bin/head -n 1')\n device = s.split()[0]\n\n return device", "def test_get_node_sled(self):\n pass", "def fdToNode( cls, fd ):\n node = Node.outToNode.get( fd )\n return node or Node.inToNode.get( fd )", "def spawn_node(\n islandNamespace=None,\n namespace=None,\n island_id=None,\n robot_id=None,\n model_location=None,\n camera_location=None,\n robot_name=None,\n camera_en=False\n):\n\n # print(model_location)\n\n arg_model = \"-param robot_description -urdf -model {}_{}_{} -x {} -y {} -z {}\".format(\n robot_name,\n island_id,\n robot_id,\n model_location[0],\n model_location[1],\n model_location[2]\n )\n # print(arg_model + \": ARG MODEL\")\n\n arg_camera = \"\"\n if camera_en:\n arg_camera = \"-urdf -param camera -model camera_{}_{} -x {} -y {} -z {} \".format(\n island_id,\n robot_id,\n (camera_location[0]),\n (camera_location[1] - 1),\n (camera_location[2])\n )\n # print(arg_camera + \": ARG CAMERA\")\n\n arg_gazebo = \" -gazebo_namespace {}\".format(islandNamespace) + \"gzserver\"\n # print(arg_gazebo)\n\n # gazebo_env_args = \"$GAZEBO_MASTER_IP:1134{}\".format(4+island_id)\n\n node_model_spawn = roslaunch.core.Node(\n package=\"gazebo_ros\",\n node_type=\"spawn_model\",\n name=\"spawn_urdf\",\n namespace=namespace,\n output=\"screen\",\n args=(arg_model + arg_gazebo)\n )\n\n node_camera_spawn = roslaunch.core.Node(\n package=\"gazebo_ros\",\n node_type=\"spawn_model\",\n name=\"spawn_camera\",\n namespace=namespace,\n output=\"screen\",\n args=(arg_camera + arg_gazebo)\n )\n\n nodes = [node_model_spawn]\n if camera_en:\n nodes.extend(node_camera_spawn)\n return nodes", "def to_device(m: torch.nn.Module, x:torch.Tensor):\n if isinstance(m, torch.nn.Module):\n device = next(m.parameters()).device\n elif isinstance(m, torch.Tensor):\n device = m.device\n else:\n raise TypeError(\n \"Expected torch.nn.Module or torch.tensor, \" f\"bot got: {type(m)}\"\n )\n return x.to(device)", "def CASE2( self, main ):\n import json\n from tests.CHOTestMonkey.dependencies.elements.NetworkElement import Device, Link\n\n main.log.report( \"Collect and Store topology details from ONOS\" )\n main.log.report( \"____________________________________________________________________\" )\n main.case( \"Collect and Store Topology Details from ONOS\" )\n topoResult = main.TRUE\n topologyOutput = main.Cluster.active( 0 ).CLI.topology()\n topologyResult = main.Cluster.active( 0 ).CLI.getTopology( topologyOutput )\n ONOSDeviceNum = int( topologyResult[ 'devices' ] )\n ONOSLinkNum = int( topologyResult[ 'links' ] )\n mininetSwitchNum = len( main.mininetSwitches )\n mininetLinkNum = ( len( main.mininetLinks ) - len( main.mininetHosts ) ) * 2\n if mininetSwitchNum == ONOSDeviceNum and mininetLinkNum == ONOSLinkNum:\n main.step( \"Collect and store device data\" )\n stepResult = 
main.TRUE\n dpidToName = {}\n for key, value in main.mininetSwitches.items():\n dpidToName[ 'of:' + str( value[ 'dpid' ] ) ] = key\n devicesRaw = main.Cluster.active( 0 ).CLI.devices()\n devices = json.loads( devicesRaw )\n deviceInitIndex = 0\n for device in devices:\n name = dpidToName[ device[ 'id' ] ]\n newDevice = Device( deviceInitIndex, name, device[ 'id' ] )\n print newDevice\n main.devices.append( newDevice )\n deviceInitIndex += 1\n utilities.assert_equals( expect=main.TRUE,\n actual=stepResult,\n onpass=\"Successfully collected and stored device data\",\n onfail=\"Failed to collect and store device data\" )\n\n main.step( \"Collect and store link data\" )\n stepResult = main.TRUE\n linksRaw = main.Cluster.active( 0 ).CLI.links()\n links = json.loads( linksRaw )\n linkInitIndex = 0\n for link in links:\n for device in main.devices:\n if device.dpid == link[ 'src' ][ 'device' ]:\n deviceA = device\n elif device.dpid == link[ 'dst' ][ 'device' ]:\n deviceB = device\n assert deviceA is not None and deviceB is not None\n newLink = Link( linkInitIndex, deviceA, link[ 'src' ][ 'port' ], deviceB, link[ 'dst' ][ 'port' ] )\n print newLink\n main.links.append( newLink )\n linkInitIndex += 1\n # Set backward links and outgoing links of devices\n for linkA in main.links:\n linkA.deviceA.outgoingLinks.append( linkA )\n if linkA.backwardLink is not None:\n continue\n for linkB in main.links:\n if linkB.backwardLink is not None:\n continue\n if linkA.deviceA == linkB.deviceB and\\\n linkA.deviceB == linkB.deviceA and\\\n linkA.portA == linkB.portB and\\\n linkA.portB == linkB.portA:\n linkA.setBackwardLink( linkB )\n linkB.setBackwardLink( linkA )\n utilities.assert_equals( expect=main.TRUE,\n actual=stepResult,\n onpass=\"Successfully collected and stored link data\",\n onfail=\"Failed to collect and store link data\" )\n else:\n main.log.info( \"Devices (expected): %s, Links (expected): %s\" % ( mininetSwitchNum, mininetLinkNum ) )\n main.log.info( \"Devices (actual): %s, Links (actual): %s\" % ( ONOSDeviceNum, ONOSLinkNum ) )\n topoResult = main.FALSE\n\n caseResult = topoResult\n utilities.assert_equals( expect=main.TRUE,\n actual=caseResult,\n onpass=\"Saving ONOS topology data test PASS\",\n onfail=\"Saving ONOS topology data test FAIL\" )\n\n if not caseResult:\n main.log.info( \"Topology does not match, exiting test...\" )\n main.cleanAndExit()", "def process_device(spc, device):\n try:\n d = device.get();\n print(\"Processing device: \", device.name)\n me_href = d['managed-elements']['managed-element'].get('href')\n me = factory.fetch_resource(spc, me_href)\n\n # Fetch Physical Termination Points\n ptps = me.ptps.get()\n for p in ptps:\n p.get()\n\n # Fetch equipment inventory\n ehs = me.equipment_holders.get()\n for eh in ehs:\n eh.get()\n\n # Fetch software inventory\n me.software_identities.get()\n\n # Fetch relevant configuration\n try:\n device.configurations.expanded.post(xpaths=[\n '/configuration/version',\n '/configuration/routing-instances',\n '/configuration/access/radius-server',\n '/configuration/system/domain-name',\n '/configuration/routing-options/router-id',\n '/configuration/interfaces/interface[name=\"lo0\"]'])\n except:\n pass\n\n return device.name\n except:\n raise Exception(\"Failed to process %s due to %s\" % (device.name, sys.exc_info()[1]))", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n 
else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def __init__(self, module: torch.nn.Module, loss: torch.nn.Module,\n input_node_name='0',\n output_node_name='output', label_node_name='label',\n loss_node_name='loss',\n events: List[d5.ExecutorEvent] = [],\n device: d5.DeviceType = None, with_outputs = False):\n # Do not call super() here!\n self.network = PyTorchNativeNetwork(module)\n self.devname = 'cuda' if device is None or device.is_gpu() else 'cpu'\n self.events = events\n self.model = module.to(self.devname)\n self.is_training = True\n self.loss = loss.to(self.devname) if loss is not None else None\n self.innode = input_node_name\n self.outnode = output_node_name\n self.labelnode = label_node_name\n self.lossnode = loss_node_name\n self.with_outputs = with_outputs", "def skel_model(action, install_path_mp, install_path_zfs, jname):\n # init vars\n # mp - mount point, zfs - zfs point\n skel_path_mp = '%s-SKELETON' % install_path_mp\n skel_path_zfs = '%s-SKELETON' % install_path_zfs\n rw_path_mp = '%s-RW' % install_path_mp\n rw_path_zfs = '%s-RW' % install_path_zfs\n \n if action == 'init':\n# create SKELETON MODEL\n# http://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails-application.html\n log(\" INFO: Init BASE-SKELETON zfs START\")\n# Create a skeleton for the read-write portion of the system\n os.system('zfs create %s' % skel_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (skel_path_mp, skel_path_zfs))\n os.system('zfs create %s' % rw_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (rw_path_mp, rw_path_zfs))\n\n os.system('mkdir -p %s/home %s/usr-X11R6 %s/distfiles %s/usr-share-keys/pkg' % (skel_path_mp, skel_path_mp, skel_path_mp, skel_path_mp))\n os.system('mv %s/etc %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/usr/local %s/usr-local' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/tmp %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/var %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/root %s' % (install_path_mp, skel_path_mp ))\n# mergemaster to install missing configuration files. Then, remove the the extra directories that mergemaster creates:\n# os.system('mergemaster -t %s/var/tmp/temproot -D %s -i' % (skel_path, skel_path))\n# os.system('rm -R %(key)s/bin %(key)s/boot %(key)s/lib %(key)s/libexec %(key)s/mnt %(key)s/proc %(key)s/rescue %(key)s/sbin %(key)s/sys %(key)s/usr %(key)s/dev' % {'key': skel_path})\n# Now, symlink the read-write file system to the read-only file system. 
Ensure that the symlinks are created in the correct s/ locations as the creation of directories in the wrong locations will cause the installation to fail.\n os.chdir('%s' % install_path_mp)\n os.system('mkdir SROOT')\n os.system('ln -s SROOT/etc etc')\n os.system('ln -s SROOT/home home')\n os.system('ln -s SROOT/root root')\n os.system('ln -s /SROOT/usr-local usr/local')\n os.system('ln -s /SROOT/usr-share-keys usr/share/keys')\n os.system('ln -s /SROOT/usr-X11R6 usr/X11R6')\n os.system('ln -s /SROOT/distfiles usr/ports/distfiles')\n os.system('ln -s SROOT/tmp tmp')\n os.system('ln -s SROOT/var var')\n# Create a generic /home/j/skel/etc/make.conf containing this line\n os.system('echo \\\"WRKDIRPREFIX?= /SROOT/portbuild\\\" > %s/etc/make.conf' % skel_path_mp )\n# Create zfs BASE-SKELETON snapshot which will be used for installation \n os.system('zfs snapshot %s@install' % skel_path_zfs)\n log(\" INFO: Init BASE-SKELETON zfs FINISH\")\n \n# install SKELETON jail \n if action == 'install':\n# install RW fs for jail\n os.system('zfs send %s/BASE-SKELETON@install | zfs receive -F %s/BASE-RW/%s' % (jzfs, jzfs, jname))\n# remove receive snapshot \n os.system('zfs destroy %s/BASE-RW/%s@install' % (jzfs, jname))\n# create jail local config - mount skel model for jail hosme dir\n if jname == 'BASE-update':\n os.system('echo \\\"%sBASE %s%s nullfs rw 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n else:\n os.system('echo \\\"%sBASE %s%s nullfs ro 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n \n os.system('echo \\\"%sBASE-RW/%s %s%s/SROOT nullfs rw 0 0\\\" >> %sBASE-RW/%s/etc/fstab' % (jpath, jname, jpath, jname, jpath, jname))\n temp_add_cfg = ['### BASE mount settings ###', 'mount.fstab=\"%sBASE-RW/%s/etc/fstab\";' % (jpath, jname), 'mount.devfs;']\n return temp_add_cfg", "def __init__(self):\n self.base_dir = '/sys/bus/w1/devices/'\n self.device_folder = glob.glob(self.base_dir + '28*')[0]\n self.device_file = self.device_folder + '/w1_slave'", "def test_change_name_of_the_devicefalse():", "def device_placement(self):\n if is_tf_available():\n import tensorflow as tf\n with tf.device('/CPU:0' if self.device == -1 else '/device:GPU:{}'.format(self.device)):\n yield\n else:\n import torch\n if self.device >= 0:\n torch.cuda.set_device(self.device)\n\n yield", "def setup_device(device):\n try:\n # Gets around \"Resource busy\" errors\n device.detach_kernel_driver(0)\n except Exception:\n pass\n device.set_configuration()", "def setup_device(gpuid=None):\n\n if gpuid is not None and not isinstance(gpuid, str):\n gpuid = str(gpuid)\n\n if gpuid is not None:\n nb_devices = len(gpuid.split(','))\n else:\n nb_devices = 1\n\n if gpuid is not None and (gpuid != '-1'):\n device = '/gpu:' + gpuid\n os.environ['CUDA_VISIBLE_DEVICES'] = gpuid\n\n # GPU memory configuration differs between TF 1 and 2\n if hasattr(tf, 'ConfigProto'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n tf.keras.backend.set_session(tf.Session(config=config))\n else:\n tf.config.set_soft_device_placement(True)\n for pd in tf.config.list_physical_devices('GPU'):\n tf.config.experimental.set_memory_growth(pd, True)\n else:\n device = '/cpu:0'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n return device, nb_devices", "def get_device(args: dict) -> torch.device:\n\n if is_distributed(args):\n device = torch.device(\"cuda\", args.local_rank)\n else:\n if torch.cuda.is_available():\n device = torch.device(\"cuda\", 0)\n 
else:\n device = torch.device(\"cpu\")\n return device", "def mount_loop_device(image_file):\n\n image_file = os.path.abspath(os.path.expanduser(image_file))\n free_proc = subprocess.Popen([\"losetup\", \"-f\"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n free_output, free_error = free_proc.communicate()\n if free_proc.returncode != 0:\n raise DeviceError(image_file, \"Error finding free loop device.\", str(free_output, \"utf-8\"))\n\n device_name = str(free_output, \"utf-8\").strip()\n mount_proc = subprocess.Popen([\"losetup\", device_name, image_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n mount_output, mount_error = mount_proc.communicate()\n if mount_proc.returncode != 0:\n raise DeviceError(image_file, \"Error mounting image on {0}\".format(device_name),\n str(mount_output, \"utf-8\"))\n subprocess.call([\"partprobe\", device_name])\n return device_name", "def to(self, device) -> None:\n self.obs_buffer = self.obs_buffer.to(device)\n self.hid_buffer = self.hid_buffer.to(device)\n self.rew_buffer = self.rew_buffer.to(device)\n self.act_buffer = self.act_buffer.to(device)\n self.don_buffer = self.don_buffer.to(device)\n self.true_termin = self.true_termin.to(device)\n\n self.device = device", "def send_node(self) -> str:\n node = self.current_node\n MDI_Send(node, MDI_COMMAND_LENGTH, MDI_CHAR, self.comm)\n return node", "def dev_node(self, dev, fake=False):\n if fake:\n return self.__FAKE_DEV.get(dev, -1)\n filename = '/sys/class/net/{0}/device/numa_node'.format(dev)\n if not os.path.isfile(filename):\n return -1\n with open(filename) as fd:\n return int(fd.read().strip())", "def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")", "def zero_node(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Zeroing local node statistics\"\n return ret\n\n __salt__[\"trafficserver.zero_node\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Zeroed local node statistics\"\n return ret" ]
[ "0.6119266", "0.5911754", "0.59111613", "0.5813898", "0.5722772", "0.56492436", "0.56458735", "0.5626259", "0.5560795", "0.55487764", "0.5547376", "0.5536614", "0.55227935", "0.549715", "0.545163", "0.5447037", "0.5447037", "0.542734", "0.5422956", "0.54173166", "0.54056215", "0.53873694", "0.53772646", "0.5373456", "0.5373296", "0.5337485", "0.5336505", "0.5336247", "0.5311643", "0.526948", "0.5245394", "0.5243126", "0.52348953", "0.5234177", "0.5225729", "0.52201277", "0.5204971", "0.51906216", "0.518797", "0.5172429", "0.51651996", "0.516327", "0.516327", "0.51522505", "0.51398766", "0.5138543", "0.5110604", "0.51045394", "0.5100814", "0.5088317", "0.50864494", "0.5079251", "0.5076472", "0.50655377", "0.50618166", "0.50589377", "0.5054255", "0.5035128", "0.5025925", "0.50197065", "0.5016402", "0.50147957", "0.50129455", "0.50049484", "0.50049484", "0.50014824", "0.4997349", "0.49944583", "0.49909636", "0.49879524", "0.4987454", "0.4980059", "0.49724257", "0.49714935", "0.49713668", "0.49704984", "0.49694303", "0.49659443", "0.4962283", "0.49604103", "0.49561095", "0.49484032", "0.49481118", "0.49428445", "0.49428445", "0.49428445", "0.49428445", "0.493855", "0.49385107", "0.49383086", "0.49311796", "0.49270377", "0.49232727", "0.49187192", "0.49160877", "0.49152875", "0.49133337", "0.49106687", "0.49049443", "0.4904942", "0.4904239" ]
0.0
-1
Change the network an OneFS node is connected to
def modify_network(self, username, machine_name, new_network, txn_id):
    logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())
    resp = {'content' : {}, 'error': None, 'params': {}}
    logger.info('Task starting')
    try:
        vmware.update_network(username, machine_name, new_network)
    except ValueError as doh:
        logger.error('Task failed: {}'.format(doh))
        resp['error'] = '{}'.format(doh)
    logger.info('Task complete')
    return resp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def set_network(self, network: str) -> None:\n return self.add_value(self._network_attribute, network)", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def switch_network(self,type = None):\n network_type = self.appconfig(type,\"Settings\")\n self.logger.debug(\"Switch network to %s:%s.\" % (type,network_type))\n if self.enter_settings(u\"More…\"):\n if self.device(text=\"Mobile networks\").exists:\n self.device(text=\"Mobile networks\").click()\n if self.device(text=\"Preferred network mode\").wait.exists(timeout=self.timeout):\n self.device(text=\"Preferred network mode\").click()\n if self.device(resourceId=\"android:id/buttonPanel\").wait.exists(timeout=self.timeout):\n self.device(text=network_type).click()\n print self._is_connected(type)\n self.back_to_home()", "def replace_node(self, network_node: Node, node: Node) -> None:\n index = self.network.index(network_node)\n self.network[index] = node", "def setNetGroup(addr): #status: Done, not tested\r\n pass", "def setNetwork(self, network):\n # type: (str)->None\n\n self._validator.validate_one(\n 'network', VALID_OPTS['network'], network)\n self._ifAttributes['network'] = network", "def init_network(session: \"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def test_replace_cluster_network(self):\n pass", "def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)", "def setup_net(self):\n pass", "def onRegisterNetworkNode(self):\n pass", "def network(self) -> str:\n return pulumi.get(self, \"network\")", "def _start_oef_node(self, network_node):", "def reset_network(self, instance):\n LOG.debug(\"reset_network\")\n return", "def changeNodeLib(ned, createNodeWin):\n pass", "def connect_dc_network(self, dc_network):\n self.manage.net = dc_network\n self.compute.nets[self.manage.floating_network.id] = self.manage.floating_network\n logging.info(\"Connected DCNetwork to API endpoint %s(%s:%d)\" % (\n self.__class__.__name__, self.ip, self.port))", "def set_network(self, network: str = \"d\", pretrained=False,\n px_coordinates=True):\n # Set up the different networks\n if network == \"d\":\n network = CurbNetD(pretrained=pretrained,\n px_coordinates=px_coordinates)\n elif network == \"e\":\n 
network = CurbNetE()\n elif network == \"f\":\n network = CurbNetF()\n elif network == \"g\":\n network = CurbNetG()\n\n # Initialize the network as a parallelized network\n self.network = Network(network)\n\n self.network = self.network.to(device=self.device)\n\n # Set the network to train or to validation\n self.network.train(not self.validation)\n\n if not self.validation:\n # Set the optimizer according to the arguments if not validating\n if self.optimizer == \"adam\":\n self.optimizer = torch.optim.Adam(self.network.parameters(),\n lr=self.lr, eps=0.1)\n elif self.optimizer == \"sgd\":\n self.optimizer = torch.optim.SGD(self.network.parameters(),\n lr=self.lr)\n else:\n raise ValueError(\"Illegal optimizer value: only SGD and Adam \"\n \"optimizers are currently supported.\")", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def switch_network(self):\n if self.next_network != self.current_network:\n job = self.jobmanagers[self.next_network].latest_job\n if job is None:\n self.logger.error(\n \"Tried to switch network to {} that has no job!\"\n .format(self.next_network))\n return\n if self.current_network:\n self.logger.info(\n \"Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW\"\n .format(self.current_network, self.profit_data[self.current_network],\n self.next_network, self.profit_data[self.next_network]))\n self.current_network = self.next_network\n job.type = 0\n self.new_job.job = job\n self.new_job.set()\n self.new_job.clear()\n return True\n return False", "def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)", "def connect(self, netid):\n if netid in _k.networks:\n pagename = \"network%d\" % self._page_serial\n self._page_serial += 1\n net = _k.networks[netid]\n page = self._notebook.add(pagename, label=net.name)\n page._netframe = NetworkFrame(page, pagename=pagename, network=net,\n netid=netid)\n page._netframe.pack(fill=Tix.BOTH, expand=True)", "def network_node_changed(self, node=None, value=None, args=None):\n if node and node.node_id != self.node_id:\n return\n if args is not None and \"nodeId\" in args and args[\"nodeId\"] != self.node_id:\n return\n\n # Process central scene activation\n if value is not None and value.command_class == COMMAND_CLASS_CENTRAL_SCENE:\n self.central_scene_activated(value.index, value.data)\n\n self.maybe_update_application_version(value)\n\n self.node_changed()", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def update_policy_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()", "def test_networking_project_network_update(self):\n pass", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def network(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"network\")", "def join_network(self):\n connect_nodes_bi(self.nodes, 1, 2)\n self.sync_all()", "def direct_network(self):\n #print list(self.get_subgraphs())\n graphs = [self._depth_first_directed(g) for g in self.get_subgraphs()]\n self._network = reduce(lambda a, b: nx.union(a, b), graphs)", "def connect(self, node1, node2):\n self.neighbour1 = node1\n self.neighbour2 = node2", "def configure_net(self):\n try:\n transport_type = Conf.get(self._index,\n f'cluster>{self._server_id}')['network']['data']['transport_type']\n except:\n raise MotrError(errno.EINVAL, \"transport_type not found\")\n check_type(transport_type, str, \"transport_type\")\n\n if transport_type == \"lnet\":\n configure_lnet(self)\n elif transport_type == \"libfabric\":\n configure_libfabric(self)\n else:\n raise MotrError(errno.EINVAL, \"Unknown data transport type\\n\")", "def set_network_type(self, nNetworkType):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkType', self.handle, nNetworkType)", "def connect_one_way(node1, node2, weight):\n node1.add_or_update_neighbour(node2, weight)", "def network(self):\n return self.__network", "def split_network(self):\n disconnect_nodes(self.nodes[1], 2)\n disconnect_nodes(self.nodes[2], 1)\n self.sync_all([self.nodes[:2], self.nodes[2:]])", "def migrate_contract(network):\n print(network)", "def test_patch_cluster_network(self):\n pass", "def is_network_node():\n return config.NODE_IP == config.NETWORK_NODE_IP", "def set_network(self, addr, netmask, value):\n\n if len(addr) == 4:\n ipset.ipmap_ipv4_set_network(self.map, addr, netmask, value)\n return\n\n elif len(addr) == 16:\n ipset.ipmap_ipv6_set_network(self.map, addr, netmask, value)\n return\n\n else:\n raise ValueError(\"Invalid address\")", "def test_get_default_network(self):\n pass", "def connect(self):\n self.net.active(True)\n self.net.config(essid=self.ssid, password=self.pwd, channel=3)\n\n while not self.net.active():\n pass\n\n self.net.ifconfig((\"192.168.4.5\", \"255.255.255.0\", \"192.168.4.1\", \"208.67.222.222\"))", "def macro_network():\n # fmt: off\n tpm = np.array([\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 1.0, 1.0],\n ])\n # fmt: on\n return Network(tpm, node_labels=LABELS[:tpm.shape[1]])", "def add_node (self, node):\n self.network.add_node(node.id)\n 
self.network.node[node.id] = node", "def change_adp(self, network: str):\r\n self.ip = network\r\n self.adp = self.ipv4_adp[network]\r\n self.mac = self.ipv4_mac[network].replace('-', ':')\r\n # print(self.adp, self.ip, self.mac)\r", "def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def change_member(self,name,name_ch=None,neighbours_ch=None,weight_ch=None):\n if type(neighbours_ch) != list:\n if neighbours_ch:\n raise TypeError(\"Wrong datatype for neighbours change, input needs to be a list.\")\n\n if type(name) == int or type(name) == str or type(name) == float:\n pass\n else:\n raise TypeError(\"Wrong datatype for name. Only int, float and string accepted.\")\n\n node_to_ch = self._get(name)\n if node_to_ch:\n if name_ch:\n node_to_ch.alias = name_ch\n if weight_ch:\n node_to_ch.weight = weight_ch\n if neighbours_ch:\n if node_to_ch.neighbours:\n for neigh in node_to_ch.neighbours:\n # Before doing below we need to remove the node to change from its neighbours\n neigh.neighbours.remove(node_to_ch)\n if len(neigh.neighbours) == 0:\n neigh.neighbours = None\n # If the the node we want to change has neighbours we need to clear its neighbour list\n node_to_ch.neighbours.clear()\n else:\n node_to_ch.neighbours = []\n\n for neigh_ch in neighbours_ch:\n node_neigh_ch = self._get(neigh_ch) # OBS! 
This is the private get method (_get)!\n if node_neigh_ch: # If one of the neighbours we want to change to exists as a node\n node_to_ch.neighbours.append(node_neigh_ch)\n if node_neigh_ch.neighbours is None: #Remember we set it to None above if len is 0\n node_neigh_ch.neighbours = [node_to_ch]\n else:\n node_neigh_ch.neighbours.append(node_to_ch)\n # Above we re-add the node to change to an old neighbour\n elif node_neigh_ch is None:\n self.add(neigh_ch,[name_ch])\n\n elif node_to_ch is None:\n raise NameError(\"No such member exists!\")\n\n self._updated = True", "def net(self):\n if self._net is None:\n self._net = Net(name=self.name)\n return self._net", "def update_node(self, old_node: 'GraphNode', new_node: 'GraphNode'):\n\n self.operator.update_node(old_node, new_node)", "def visit_node(self, node: OnnxNode, network: Network):\n pass", "def networkMode(self, networkMode):\n\n # Setting the network mode can take a bit of time, so give it 10 seconds\n # to finish\n response = self.at.sendCommand(f\"AT+CFUN={networkMode}\", timeout = 10)\n\n if not response:\n raise modem.AtError(response, \"Failed to set network mode\")", "def restore_network(self):\n Blockade.blockade_join()", "def set_both_connections(self, new_node):\n distance_to_new = self.current_node.distance_between(new_node.location)\n self.current_node.set_adjacent_from_direction(distance_to_new, new_node)\n reverse_distance = new_node.distance_between(self.current_node.location)\n new_node.set_adjacent_from_direction(reverse_distance, self.current_node)", "def initialise_network(self):\n raise NotImplementedError", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! 
Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def set_nodes(self, nodes):\n self._drv_nodes = nodes", "def lab_network(self) -> None:\n self.host = getattr(self, \"host\")\n try:\n getattr(self.host, \"uboot_network_setup\")(self)\n except AttributeError:\n raise Exception(\n f\"The lab-host {self.host!r} does not seem to support uboot network setup!\"\n )", "def create_platform_network(enode, category, config):\n # Check if this category has a defined netns\n netns = config.get('netns', None)\n if netns is None:\n return\n\n # Create the given network namespace\n enode._docker_exec('ip netns add {}'.format(netns))\n\n # lo should always be up\n enode._docker_exec('ip netns exec {} ip link set dev lo up'.format(netns))", "def update_target_network(self):\n variables = self.online_network.trainable_variables\n variables_copy = [tf.Variable(v) for v in variables]\n self.target_network.trainable_variables = variables_copy", "def moveIntf(intf, node):\n intf = str(intf)\n cmd = 'ip link set %s netns %s' % (intf, node.pid)\n result = node.rcmd(cmd)\n if result:\n raise Exception('error executing command %s' % cmd)\n return True", "def network(self):\n return self._network", "def network(self):\n return self._network", "def network(self):\n return self._network", "def assign_networks(cls, instance, networks):\n instance.assigned_networks_list = networks\n db().flush()", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def test_change_server(self):\n networktables_mock = unittest.mock.Mock()\n\n network_instance = network.Network(networktables_mock, None, None)\n network_instance.change_server(\"localhost\")\n\n # Make sure Networktables was shutdown before network change\n self.assertTrue(networktables_mock.shutdown.called)\n # Make sure new network server ip is correct\n networktables_mock.initialize.assert_called_with(server=\"localhost\")", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n 
dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id", "def fusion_api_edit_fc_network(self, body, uri, api=None, headers=None):\n return self.fc_network.update(body, uri, api, headers)", "def save(self):\n self.network.save()", "def set_network(self, path, ip=\"\", netmask=\"255.255.255.0\", gateway=\"\"):\n\n with open(os.path.join(path, 'etc', 'network', 'interfaces'), 'w') \\\n as f:\n f.write(\"auto lo\\niface lo inet loopback\\n\\n\")\n\n if len(ip) <= 0:\n f.write(\"auto eth0\\niface eth0 inet dhcp\\n\")\n else:\n f.write(\"auto eth0\\niface eth0 inet static\\n\")\n f.write(\"\\taddress {0}\\n\\tnetmask {1}\\n\\tgateway {2}\\n\".\\\n format(ip, netmask, gateway))", "def returnNetworkNode(self):\n\n networkNodes = cmds.ls(type=\"network\")\n for node in networkNodes:\n attrs = cmds.listAttr(node)\n if \"moduleName\" in attrs:\n if cmds.getAttr(node + \".moduleName\") == self.name:\n networkNode = node\n\n return networkNode", "def __init__(self, network: Network):\n self.graph = network.graph", "def createNet(self):\n\n sw = OVSKernelSwitch\n topo = G2Topo(self.config.topoData)\n ctrl = RemoteController('c', ip=REMOTE_CONTROLLER_IP, port=CONTROLLER_PORT)\n\n # Default link parameters.\n # HTB: Hierarchical Token Bucket rate limiter.\n spec = self.config.topoData['defaultLinkInfo']\n if spec:\n mybw = float(spec['bw'])\n mydelay = spec['delay']\n myloss = float(spec['loss'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] == 'N/A':\n myqueue = int(spec['max_queue_size'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue)\n if spec['max_queue_size'] == 'N/A' and spec['use_htb'] != 'N/A':\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, use_htb=myhtb)\n if spec['max_queue_size'] != 'N/A' and spec['use_htb'] != 'N/A':\n myqueue = 
int(spec['max_queue_size'])\n myhtb = bool(spec['use_htb'])\n link = partial(TCLink, delay=mydelay, bw=mybw, loss=myloss, max_queue_size=myqueue, use_htb=myhtb)\n else:\n # No spec for default parameters, using Mininet defaults.\n info(\"**** [G2]: using Mininet default parameters for links other than those configured in link_info \\n\")\n link = TCLink\n\n # Configure bw, delay, loss, etc. for some links that are specified in config file.\n for spec in self.config.topoData['linkInfos']:\n src = spec['src']\n dst = spec['dst']\n try:\n linkInfo = topo.linkInfo(src, dst)\n if spec['bw'] != 'N/A':\n linkInfo['bw'] = float(spec['bw']) # Mbit\n if spec['delay'] != 'N/A':\n linkInfo['delay'] = spec['delay'] # ms\n if spec['loss'] != 'N/A':\n linkInfo['loss'] = float(spec['loss']) # Percentage\n if spec['max_queue_size'] != 'N/A':\n linkInfo['max_queue_size'] = int(spec['max_queue_size'])\n if spec['use_htb'] != 'N/A':\n linkInfo['use_htb'] = bool(spec['use_htb'])\n\n topo.setlinkInfo(src,dst,linkInfo)\n except KeyError:\n info(\"**** [G2]: no link exists between switch pair (%s, %s) \\n\" %(src, dst))\n\n # Assign a fraction of overall CPU time to Mininet hosts.\n nHosts = float(len(self.config.topoData['hosts']))\n cpuHostFrac = 0.50/nHosts\n # 'cpu' is the fraction of CPU that each host would get.\n # Indirectly, it sets 'cpu.cfs_quota_us': the total available run-time within a period (in microseconds).\n # Mininet uses the following scheme: cfs_quota_us = (cpuHostFrac * nCPU * period_us) microseconds.\n # 'period_us' sets cpu.cfs_period_us.\n # Larger period would allow for increased burst capacity.\n host = custom(CPULimitedHost, cpu=cpuHostFrac, period_us=100000)\n\n net = Mininet(topo=topo,\n host=host,\n switch=sw,\n controller=ctrl,\n waitConnected=True,\n autoStaticArp=True,\n link=link)\n\n # Create a default route for each host.\n # Turn on tcpdump on each host if debug mode is on.\n for hs in topo.hosts():\n net.getNodeByName(hs).setDefaultRoute(intf='%s-eth0' %hs) # 1st interface on hosts is hi-eth0\n if self.config.isDebug:\n net.getNodeByName(hs).cmd('tcpdump -w %s.pcap -i %s-eth0 &' %(hs,hs))\n return net", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def _switch_nick(self):\n self.nickname = self.firstnick + str(random.randint(1000, 9999))\n self._log(self.botlog, 'Switching to nick %s' % self.nickname)\n self._send('NICK %s' % self.nickname)", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def connect_to(self, inf1, router2, inf2):\n self.interfaces[inf1]['connect'] = [router2.hostname, inf2]\n router2.interfaces[inf2]['connect'] = [self.hostname, inf1]", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def set_network_id(self, sNetworkId):\n\t\tcall_sdk_function('PrlVirtNet_SetNetworkId', self.handle, sNetworkId)", "def toggleCamGeoDisplay():\n\n sel = nuke.selectedNodes()\n\n # on a selection\n good = []\n goodCam = [\"Camera2\",\"Camera\", \"hubCamera\"]\n goodGeo = 
[\"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\", \"Axis\", \"Axis2\"]\n if (int(str(len(sel))))>0:\n nodes = nuke.selectedNodes()\n for node in nodes:\n if node.Class() in goodCam+goodGeo:\n if node['display'].value() == \"off\" :\n if node.Class() in goodCam:\n node['display'].setValue('wireframe')\n if node.Class() in goodGeo:\n node['display'].setValue('textured')\n #node['label'].setValue(\"\")\n node['note_font_color'].setValue(0)\n node['tile_color'].setValue(0)\n print node.name()+\" display on\"\n else:\n node['display'].setValue('off')\n #node['label'].setValue(\"DISPLAY OFF !!!\")\n node['note_font_color'].setValue(4120346367)\n node['tile_color'].setValue(573912575)\n print node.name()+\" display off\"\n\n # fill good[] if there is good nodes in the selection\n\n if node.Class() in goodCam:\n good.append(node.name())\n if node.Class() in goodGeo:\n good.append(node.name())\n if not good:\n nuke.message(\"there is no camera or readGeo in the selection\")\n\n # on all the readGeos and Cameras\n\n else:\n nodeL = []\n all = nuke.allNodes()\n for node in all:\n if node.Class() in goodCam+goodGeo:\n nodeL.append(node.name())\n for node in nodeL:\n if nuke.toNode(node)['display'].value() == \"off\":\n if nuke.toNode(node).Class() in goodCam:\n nuke.toNode(node)['display'].setValue('wireframe')\n if nuke.toNode(node).Class() in goodGeo:\n nuke.toNode(node)['display'].setValue('textured')\n nuke.toNode(node)['label'].setValue(\"\")\n nuke.toNode(node)['note_font_color'].setValue(0)\n nuke.toNode(node)['tile_color'].setValue(0)\n print nuke.toNode(node).name()+\" display on\"\n else:\n nuke.toNode(node)['display'].setValue('off')\n nuke.toNode(node)['label'].setValue(\"DISPLAY OFF !!!\")\n nuke.toNode(node)['note_font_color'].setValue(4120346367)\n nuke.toNode(node)['tile_color'].setValue(573912575)\n print nuke.toNode(node).name()+\" display off\"\n \n if not nodeL:\n nuke.message(\"there is no cameras or readGeos in this scene\")", "def network(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"network\")", "def to_simple_edge_network(network):\n simple_network = nx.DiGraph(crs=network.graph[\"crs\"])\n for node in network.nodes:\n simple_network.add_node(node, **network.nodes[node])\n for node, neighbors in network.adjacency():\n for neighbor in neighbors:\n simple_network.add_edge(node, neighbor)\n return simple_network", "def update_network(self, context, net_id, network):\n LOG.debug(_(\"NeutronRestProxyV2.update_network() called\"))\n\n self._warn_on_state_status(network['network'])\n\n session = context.session\n with session.begin(subtransactions=True):\n new_net = super(NeutronRestProxyV2, self).update_network(\n context, net_id, network)\n self._process_l3_update(context, new_net, network['network'])\n\n # update network on network controller\n self._send_update_network(new_net, context)\n return new_net", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def getNodeNetworks(self,node):\n data = self.connect('get','nodes/%s/network' % 
(node),None)\n return data", "def replace_node(old_node: Node, new_node: Node):\n assert old_node.graph is new_node.graph\n graph = old_node.graph\n # save output edges and reconnect them to new node\n for i in range(len(old_node.out_nodes())):\n graph.add_edge(new_node.id, old_node.out_node(i).id, **old_node.out_edge(i))\n # TODO Need to check if there are other users for this node\n graph.remove_node(old_node.id)", "def update_node(self, node, updating_node):\n out_edges = list(self.source_net.edges(node, data=True))\n self.remove_node(node)\n self.source_net.add_node(node, attr_dict=self.source_net.nodes[updating_node]['attr_dict'])\n self.source_net.add_edges_from(out_edges)\n\n # Transfer incoming edges\n for u, v, data in self.source_net.in_edges(updating_node, data=True):\n self.source_net.add_edge(u, node, **data)\n\n self.remove_node(updating_node)", "def current_carrier_network(self, current_carrier_network):\n\n self._current_carrier_network = current_carrier_network", "def network(ip):\n ip, prefix = netParse(ip)\n return \"{}/{}\".format(\n ipStr(ip & (0xffffffff << (32 - prefix))),\n prefix\n )", "def simulate_network(network: Network, bipartite=True, source_val=\"0\") -> None:\n protocols = []\n if bipartite:\n for node in network.nodes.values():\n logger.debug(\"Adding protocol to node %s\", node.name)\n if node.name == source_val:\n protocols.append(BipartiteProtocol(node, source=True, receiver=False))\n else:\n protocols.append(BipartiteProtocol(node))\n else:\n for node in network.nodes.values():\n logger.debug(\"Adding protocol to node %s\", node.name)\n if node.name == source_val:\n protocols.append(\n MultipartiteProtocol(node, source=True, receiver=False)\n )\n else:\n protocols.append(MultipartiteProtocol(node))\n\n for protocol in protocols:\n protocol.start()\n\n logger.debug(\"Running sim.\")\n ns.sim_run()\n ns.sim_reset()", "def _build_network(self):\n pass", "def on_the_network_global_configuration_page_change_the_first_nameserver_to_nameserver1(driver, nameserver1):\n global nameserver_1\n nameserver_1 = nameserver1\n assert wait_on_element(driver, 7, '//h4[contains(.,\"Hostname and Domain\")]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Nameserver 1\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Nameserver 1\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Nameserver 1\"]').send_keys(nameserver1)", "def set_nodes(self, ndict):\n self.inode_ref = ndict[self.inode]\n self.jnode_ref = ndict[self.jnode]", "def network_access(self, network_access):\n\n self._network_access = network_access" ]
[ "0.6587989", "0.63514805", "0.63323206", "0.61741155", "0.61734724", "0.6134296", "0.6105218", "0.6065056", "0.60462487", "0.59799916", "0.5911158", "0.5871496", "0.583898", "0.58377373", "0.5814114", "0.580636", "0.5799185", "0.5790336", "0.5773519", "0.57696205", "0.5767448", "0.574858", "0.5746398", "0.57435846", "0.5742533", "0.5740542", "0.571508", "0.57084846", "0.5702369", "0.56924295", "0.5671329", "0.5646419", "0.56449", "0.563583", "0.5634873", "0.5623016", "0.561476", "0.55857605", "0.55772495", "0.55649173", "0.5563326", "0.5557135", "0.55257684", "0.5520315", "0.5513615", "0.55024797", "0.54857045", "0.54796654", "0.54723126", "0.5471508", "0.5469044", "0.5462027", "0.54509044", "0.544611", "0.54441917", "0.5436173", "0.542679", "0.5424063", "0.54232275", "0.54185134", "0.54175746", "0.54165435", "0.5408118", "0.5408118", "0.5408118", "0.5401954", "0.5400439", "0.5396678", "0.53905123", "0.5381923", "0.5381923", "0.5375483", "0.5360728", "0.53450626", "0.5338463", "0.5334848", "0.5318696", "0.5311656", "0.53085357", "0.5306663", "0.53059196", "0.5293808", "0.5293794", "0.52923006", "0.5259048", "0.52562326", "0.5252378", "0.52512693", "0.5246161", "0.5237392", "0.52370596", "0.52357143", "0.52345604", "0.5234199", "0.5229854", "0.5226998", "0.5225559", "0.5222369", "0.5221958", "0.5219796" ]
0.5633962
35
Loads a checkpoint of a model, which may be the model or the mean teacher model. Assumes the model has already been created and the checkpoint exists. This does not set the checkpoint epoch. This method should not be called externally. Use instead try_load_checkpoint_for_model or try_load_checkpoint_for_mean_teacher_model.
def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path, key_in_state_dict: str, use_gpu: bool) -> int:
    logging.info(f"Loading checkpoint {checkpoint_path}")
    checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)
    try:
        state_dict = checkpoint[key_in_state_dict]
    except KeyError:
        logging.error(f"Key {key_in_state_dict} not found in checkpoint")
        return False
    if isinstance(model, torch.nn.DataParallel):
        result = model.module.load_state_dict(state_dict, strict=False)
    else:
        result = model.load_state_dict(state_dict, strict=False)
    if result.missing_keys:
        logging.warning(f"Missing keys in model checkpoint: {result.missing_keys}")
    if result.unexpected_keys:
        logging.warning(f"Unexpected keys in model checkpoint: {result.unexpected_keys}")
    return checkpoint[ModelAndInfo.EPOCH_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def try_load_checkpoint_for_model(self) -> bool:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = 
os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def try_load_checkpoint_for_mean_teacher_model(self) -> bool:\n if self._mean_teacher_model is None:\n raise ValueError(\"Mean teacher model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._mean_teacher_model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded mean teacher model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = 
os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def try_create_mean_teacher_model_and_load_from_checkpoint(self) -> bool:\n self.create_mean_teacher_model()\n if self.checkpoint_path:\n # Load the stored model. If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_mean_teacher_model()\n return True", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? 
Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 
compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n 
shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = 
checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_model(model):\n # Check if the model is a model directory (containing a metagraph and a checkpoint file)\n # or if it is a protobuf file with a frozen graph\n model_exp = os.path.expanduser(model)\n if os.path.isfile(model_exp):\n print('Model filename: %s' % model_exp)\n with tf.gfile.FastGFile(model_exp, 'rb') as f_l:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_l.read())\n tf.import_graph_def(graph_def, name='')\n else:\n print('Model directory: %s' % model_exp)\n meta_file, ckpt_file = get_model_filenames(model_exp)\n\n print('Metagraph file: %s' % meta_file)\n print('Checkpoint file: %s' % ckpt_file)\n\n saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))\n saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))", "def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)", "def get_model(self, model: Optional[torch.nn.Module] = None) -> torch.nn.Module:\n with self.as_directory() as tempdir:\n model_path = 
os.path.join(tempdir, self.MODEL_FILENAME)\n if not os.path.exists(model_path):\n raise RuntimeError(\n \"`model.pt` not found within this checkpoint. Make sure you \"\n \"created this `TorchCheckpoint` from one of its public \"\n \"constructors (`from_state_dict` or `from_model`).\"\n )\n model_or_state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if isinstance(model_or_state_dict, torch.nn.Module):\n if model:\n warnings.warn(\n \"TorchCheckpoint already contains all information needed. \"\n \"Discarding provided `model` argument. This means: \"\n \"If you are using BatchPredictor, you should do \"\n \"`BatchPredictor.from_checkpoint(checkpoint, TorchPredictor)` by\"\n \"removing kwargs `model=`. \"\n \"If you are using TorchPredictor directly, you should do \"\n \"`TorchPredictor.from_checkpoint(checkpoint)` by removing kwargs \"\n \"`model=`.\"\n )\n model = load_torch_model(\n saved_model=model_or_state_dict, model_definition=model\n )\n return model", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx 
= checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def _resume_from_checkpoint(model: tf.keras.Model,\n model_dir: str,\n train_steps: int) -> int:\n logging.info('Load from checkpoint is enabled.')\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n logging.info('latest_checkpoint: %s', latest_checkpoint)\n if not latest_checkpoint:\n logging.info('No checkpoint detected.')\n return 0\n\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint)\n model.load_weights(latest_checkpoint)\n initial_epoch = model.optimizer.iterations // train_steps\n logging.info('Completed loading from checkpoint.')\n logging.info('Resuming from epoch %d', initial_epoch)\n return int(initial_epoch)", "def get_model(self, model: Optional[torch.nn.Module] = None) -> torch.nn.Module:\n saved_model, _ = _load_checkpoint_dict(self, \"TorchTrainer\")\n\n if isinstance(saved_model, torch.nn.Module):\n if model:\n warnings.warn(\n \"TorchCheckpoint already contains all information needed. \"\n \"Discarding provided `model` argument. 
This means \"\n \"If you are using TorchPredictor directly, you should do \"\n \"`TorchPredictor.from_checkpoint(checkpoint)` by removing kwargs \"\n \"`model=`.\"\n )\n model = load_torch_model(saved_model=saved_model, model_definition=model)\n return model", "def load_model(model):\n fin = False\n backup1 = False\n backup2 = False\n\n if os.path.exists(\"TrainedModel/finalModel.pth\"):\n fin = True\n elif os.path.exists(\"TrainedModel/modelBackup.pth\"):\n backup1 = True\n elif os.path.exists(\"TrainedModel/modelBackupBackup.pth\"):\n backup2 = True\n\n if fin:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/finalModel.pth\"))\n return model\n except:\n print(\"finalModel seems to be corrupted, trying a backup...\")\n \n if fin or backup1:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackup.pth\"))\n return model\n except:\n print(\"modelBackup seems to be corrupted, trying a backup...\")\n\n if fin or backup1 or backup2:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackupBackup.pth\"))\n return model\n except:\n print(\"modelBackupBackup seems to be corrupted, you're at the end of the line.\")\n\n print(\"There doesn't seem to be anything to load.\")\n return model", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def try_create_mean_teacher_model_load_from_checkpoint_and_adjust(self) -> bool:\n success = self.try_create_mean_teacher_model_and_load_from_checkpoint()\n self.create_summary_and_adjust_mean_teacher_model_for_gpus()\n return success", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n 
checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = self._counter\n\n model_dict['metrics'] 
= self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_model(model, transfer_from, sess):\n param_path = final_param_path(model.name, transfer_from)\n step_to_load = FINAL_PARAM_STEPS[model.name][transfer_from]\n util.load_checkpoint_at_step(\n model_name=model.name,\n global_step=step_to_load,\n saver=tf.train.Saver(),\n sess=sess,\n path=param_path)", "def 
train(model, infer_train, infer_val, load_checkpoint=None):\n\n global checkpoint_name\n print('Initialising {}'.format(cfg['experiment_name']))\n checkpoint_folder = 'checkpoints/{}/'.format(cfg['experiment_name'])\n\n if not os.path.exists(checkpoint_folder):\n os.makedirs(checkpoint_folder)\n\n tb_folder = 'tb/{}/'.format(cfg['experiment_name'])\n if not os.path.exists(tb_folder):\n os.makedirs(tb_folder)\n\n writer = SummaryWriter(logdir=tb_folder, flush_secs=30)\n optimiser = Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=cfg['weight_decay'])\n\n train_dataset = TweetDataset(dataset_type='train')\n train_loader = DataLoader(train_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=True, pin_memory=True)\n\n val_dataset = TweetDataset(dataset_type='val')\n val_loader = DataLoader(val_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=False, pin_memory=True)\n\n if load_checkpoint:\n checkpoint = torch.load(load_checkpoint)\n assert model.config == checkpoint['net_config'], \\\n \"The provided checkpoint has a different configuration, loading is impossible\"\n start_epoch = checkpoint['epoch'] + 1\n epochs = cfg['epochs'] + start_epoch\n step = checkpoint['step']\n model.load_state_dict(checkpoint['model'])\n optimiser.load_state_dict(checkpoint['optimiser'])\n print(\"Loaded the checkpoint at {}\".format(load_checkpoint))\n else:\n start_epoch, step = 0, 0\n epochs = cfg['epochs']\n\n init_loss = 0.\n avg_loss = AverageMeter()\n best_mae = 1e10\n\n print('Sanity val')\n val(model, val_loader, writer, 0, infer_val)\n model.train()\n\n print('Starting training')\n for epoch in range(start_epoch, epochs):\n loader_length = len(train_loader)\n epoch_start = time.time()\n\n for batch_idx, batch in enumerate(train_loader):\n optimiser.zero_grad()\n\n loss = infer_train(model, batch)\n loss.backward()\n\n if epoch == 0 and batch_idx == 0:\n init_loss = loss\n\n # logging\n elapsed = time.time() - epoch_start\n progress = batch_idx / loader_length\n est = datetime.timedelta(seconds=int(elapsed / progress)) if progress > 0.001 else '-'\n avg_loss.update(loss)\n suffix = '\\tloss {:.4f}/{:.4f}\\tETA [{}/{}]'.format(avg_loss.avg, init_loss,\n datetime.timedelta(seconds=int(elapsed)), est)\n printProgressBar(batch_idx, loader_length, suffix=suffix,\n prefix='Epoch [{}/{}]\\tStep [{}/{}]'.format(epoch, epochs - 1, batch_idx, loader_length))\n\n writer.add_scalar('Steps/train_loss', loss, step)\n\n # saving the model\n if step % cfg['checkpoint_every'] == 0:\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n step += 1\n optimiser.step()\n\n # validating\n if step % cfg['val_every'] == 0:\n mae = val(model, val_loader, writer, step, infer_val)\n if mae < best_mae:\n best_mae = mae\n print('Best model with V{:.2f}'.format(best_mae))\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n '{}/best.pth'.format(checkpoint_folder))\n model.train()\n\n # end of epoch\n print('')\n writer.add_scalar('Epochs/train_loss', avg_loss.avg, epoch)\n avg_loss.reset()\n 
checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': loader_length, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n\n # finished training\n writer.close()\n print('Training finished :)')", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def load_weights_infer(checkpoint_path, model):\n try:\n # catalyst weights\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")[\"model_state_dict\"]\n except:\n # anything else\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(state_dict, strict=True)\n except:\n # for clf + seg for seg only prediction\n print(f\"Non-strict loading of weights from {checkpoint_path}\")\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n return model", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_model(self, tmp_dir):\n if self.inf_learner is None:\n self.log_options()\n model_uri = self.backend_opts.model_uri\n model_path = download_if_needed(model_uri, tmp_dir)\n self.inf_learner = load_learner(\n dirname(model_path), basename(model_path))", "def load_checkpoint_train(cpdir, model, optimizer):\n start_epoch = 0\n start_global_step = 0\n if cpdir is not None:\n start_global_step, start_epoch = load_checkpoint(\n cpdir, model, optimizer)\n start_global_step += 1\n start_epoch += 1\n return start_global_step, start_epoch", "def load_model_saved_with_module(model, checkpoint_path, logger):\n checkpoint = torch.load(checkpoint_path)\n new_state_dict = dict()\n for k, v in checkpoint[\"model_state_dict\"].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n logger.info(f\"Already restored model from checkpoint: {checkpoint_path}\")\n return model", "def 
try_create_model_load_from_checkpoint_and_adjust(self) -> bool:\n success = self.try_create_model_and_load_from_checkpoint()\n self.create_summary_and_adjust_model_for_gpus()\n return success", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load_model(session: tf.Session, model_dir: Text) -> None:\n saver = tf.train.Saver()\n saver.restore(session, model_dir)", "def load_model(model, device, model_path):\n if os.path.exists(model_path):\n print(\"Reading model from \", model_path)\n checkpoint = torch.load(model_path, map_location=torch.device(device))\n model.load_state_dict(checkpoint['state_dict'])\n return model\n else:\n raise RuntimeError('Model does not exist!')", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_checkpoint_ram(self, checkpoint, train=True):\n # -- For all tasks, create a corresponding head, otherwise the restoring would not work due to mismatching weights -- #\n self.mh_network.add_n_tasks_and_activate(self.already_trained_on[str(self.fold)]['tasks_at_time_of_checkpoint'],\n self.already_trained_on[str(self.fold)]['active_task_at_time_of_checkpoint'])\n \n # -- Set the network to the full MultiHead_Module network to restore everything -- #\n self.network = self.mh_network\n \n # -- Use parent class to save checkpoint for MultiHead_Module model consisting of self.model, self.body and self.heads -- #\n super().load_checkpoint_ram(checkpoint, train)\n\n # -- Reset network to the assembled model to continue training -- #\n self.network = self.mh_network.model", "def load_model(model_name):\n model = get_model(training = False)\n checkpoint = torch.load('../models/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model", "def save_checkpoint(self, epoch: int) -> Path:\n logging.getLogger().disabled = True\n model_state_dict = self.model.module.state_dict() \\\n if isinstance(self.model, torch.nn.DataParallel) else self.model.state_dict()\n checkpoint_file_path = self.config.get_path_to_checkpoint(epoch)\n checkpoint_file_path.parent.mkdir(exist_ok=True, parents=True)\n 
info_to_store = {\n ModelAndInfo.EPOCH_KEY: epoch,\n ModelAndInfo.MODEL_STATE_DICT_KEY: model_state_dict,\n ModelAndInfo.OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict()\n }\n if self.config.compute_mean_teacher_model:\n assert self.mean_teacher_model is not None # for mypy, getter has this built in\n mean_teacher_model_state_dict = self.mean_teacher_model.module.state_dict() \\\n if isinstance(self.mean_teacher_model, torch.nn.DataParallel) \\\n else self.mean_teacher_model.state_dict()\n info_to_store[ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY] = mean_teacher_model_state_dict\n\n torch.save(info_to_store, checkpoint_file_path)\n logging.getLogger().disabled = False\n logging.info(f\"Saved model checkpoint for epoch {epoch} to {checkpoint_file_path}\")\n return checkpoint_file_path", "def _resume_checkpoint(self, resume_path, model, optimizer):\n if not resume_path:\n return model, optimizer\n\n self.logger.info(f'Loading checkpoint: {resume_path}')\n checkpoint = torch.load(resume_path)\n model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from \"\n \"that of checkpoint. Optimizer parameters not being resumed.\")\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(f'Checkpoint \"{resume_path}\" loaded')\n return model, optimizer", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)", "def create_checkpoint(model, save_dir, train_data):\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {\n 'model': model.name,\n 'classifier': model.classifier,\n 'class_to_idx': model.class_to_idx,\n 'state_dict': model.state_dict()\n }\n\n if save_dir and isdir(save_dir):\n torch.save(checkpoint, save_dir + 'checkpoint.pth')\n print('checkpoint created')\n else: \n print(\"Directory not found. 
Saving at current directory in checkpoint.pth\")\n torch.save(checkpoint, 'checkpoint.pth')", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def load_model(fn, model):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n log(\"Loading model from {}\".format(fn))\n model.saver.restore(model.session, fn)\n log(\"Done loading!\")", "def __init__(self, saved_model=None, serialize_input=True):\n assert saved_model\n self.saved_model_path = saved_model\n self.serialize_input = serialize_input\n logging.info(\"Reading checkpoint {}.\".format(saved_model))\n imported_model = tf.saved_model.load(saved_model)\n self.bleurt_model_ops = imported_model.signatures[\"serving_default\"]\n logging.info(\"BLEURT initialized.\")", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def _load_checkpoint(filename, map_location=None):\n if filename.startswith('modelzoo://'):\n warnings.warn('The URL scheme of \"modelzoo://\" is deprecated, please '\n 'use \"torchvision://\" instead')\n model_urls = get_torchvision_models()\n model_name = filename[11:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('torchvision://'):\n model_urls = get_torchvision_models()\n model_name = filename[14:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('open-mmlab://'):\n model_urls = get_external_models()\n model_name = filename[13:]\n deprecated_urls = get_deprecated_model_names()\n if model_name in deprecated_urls:\n warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '\n f'of open-mmlab://{deprecated_urls[model_name]}')\n model_name = deprecated_urls[model_name]\n model_url = model_urls[model_name]\n # check if is url\n if model_url.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(model_url)\n else:\n filename = osp.join(_get_mmcv_home(), model_url)\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n elif filename.startswith('mmcls://'):\n model_urls = get_mmcls_models()\n model_name = filename[8:]\n checkpoint = load_url_dist(model_urls[model_name])\n checkpoint = _process_mmcls_checkpoint(checkpoint)\n elif filename.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(filename)\n elif filename.startswith('pavi://'):\n model_path = filename[7:]\n checkpoint = load_pavimodel_dist(model_path, map_location=map_location)\n elif filename.startswith('s3://'):\n checkpoint = load_fileclient_dist(\n filename, backend='ceph', map_location=map_location)\n else:\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, 
map_location=map_location)\n return checkpoint", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] Failed to find a checkpoint, Exception!\")\n return False, 0", "def resume_model(self, resume_path):\n if resume_path:\n resume_path = f'{self.model_dir}/{resume_path}.pth.tar'\n if os.path.isfile(resume_path):\n print(f\"=> loading checkpoint '{resume_path}'\")\n checkpoint = torch.load(resume_path, map_location=self.device)\n self.best_acc = checkpoint['best_acc']\n self.model.load_state_dict(checkpoint['state_dict'])\n print(f\"=> loaded checkpoint '{resume_path}' (Round {checkpoint['rnd']})\")\n del checkpoint\n else:\n print(f\"=> no checkpoint found at '{resume_path}'\")" ]
[ "0.7765723", "0.77516997", "0.7738326", "0.77286375", "0.7725346", "0.76465744", "0.7636256", "0.75987977", "0.75818604", "0.75716907", "0.75123113", "0.74969316", "0.73805517", "0.7359161", "0.7350395", "0.7348898", "0.73487484", "0.73225814", "0.7317315", "0.72933567", "0.7282364", "0.7272194", "0.7224731", "0.7216087", "0.7209707", "0.7166834", "0.7161973", "0.7130044", "0.7125879", "0.71061075", "0.71061075", "0.70817846", "0.7079612", "0.70787436", "0.70482695", "0.6974577", "0.69675547", "0.69033134", "0.68997", "0.6894964", "0.6891786", "0.68894124", "0.68505085", "0.683359", "0.683359", "0.6831248", "0.6826624", "0.67750007", "0.677139", "0.6760273", "0.67435586", "0.6735815", "0.67355144", "0.6729151", "0.6721847", "0.671401", "0.6713823", "0.6709707", "0.6682865", "0.6669053", "0.6668227", "0.66617495", "0.6651775", "0.66398656", "0.6637274", "0.6635226", "0.66345394", "0.66221344", "0.6614112", "0.6610171", "0.6609997", "0.6604011", "0.6595632", "0.6591981", "0.65674734", "0.6556434", "0.65520513", "0.65443593", "0.65433335", "0.6521348", "0.6501942", "0.650097", "0.6500878", "0.64889354", "0.6488736", "0.6487856", "0.6486586", "0.64778787", "0.6474291", "0.64527774", "0.64332503", "0.6426515", "0.64191526", "0.6415524", "0.64048445", "0.64030457", "0.63860357", "0.63737947", "0.6369424", "0.63636583" ]
0.68640363
42
Updates a torch model so that input minibatches are parallelized across the batch dimension to utilise multiple gpus. If model parallel is set to True and execution is in test mode, then the model is partitioned to perform full volume inference. This assumes that the model has been created, that the optimizer has not yet been created, and that the model has not been adjusted twice. This method should not be called externally. Use instead adjust_model_for_gpus or adjust_mean_teacher_model_for_gpus.
def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,
                     model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:
    if config.use_gpu:
        model = model.cuda()
        logging.info("Adjusting the model to use mixed precision training.")
        # If model parallel is set to True, then partition the network across all available gpus.
        if config.use_model_parallel:
            devices = config.get_cuda_devices()
            assert devices is not None  # for mypy
            model.partition_model(devices=devices)  # type: ignore
    else:
        logging.info("Making no adjustments to the model because no GPU was found.")

    # Update model related config attributes (After Model Parallel Activated)
    config.adjust_after_mixed_precision_and_parallel(model)

    # DataParallel enables running the model with multiple gpus by splitting samples across GPUs
    # If the model is used in training mode, data parallel is activated by default.
    # Similarly, if model parallel is not activated, data parallel is used as a backup option
    use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)
    if config.use_gpu and use_data_parallel:
        logging.info("Adjusting the model to use DataParallel")
        # Move all layers to the default GPU before activating data parallel.
        # This needs to happen even though we put the model to the GPU at the beginning of the method,
        # but we may have spread it across multiple GPUs later.
        model = model.cuda()
        model = DataParallelModel(model, device_ids=config.get_cuda_devices())
    return model
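For orientation, the sketch below restates the core of the adjustment above using only stock PyTorch. The helper name adjust_for_gpus, its arguments, and the explicit device list are illustrative assumptions; DataParallelModel and the config object in the snippet above belong to its own codebase.

import logging

import torch
import torch.nn as nn


def adjust_for_gpus(model: nn.Module, is_training: bool, use_model_parallel: bool = False) -> nn.Module:
    # Move the model to the default GPU when one is available.
    if torch.cuda.is_available():
        model = model.cuda()
        # Mirror the logic above: data parallelism is used during training,
        # or whenever model parallelism was not requested.
        if is_training or not use_model_parallel:
            logging.info("Wrapping the model in torch.nn.DataParallel")
            model = nn.DataParallel(model, device_ids=list(range(torch.cuda.device_count())))
    else:
        logging.info("No GPU found; leaving the model unchanged.")
    return model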
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_model_parallel(self, global_rank: int, world_size: int) -> None:\n app_state = AppState()\n\n # we initialize megatron-lm model parallel and data parallel groups\n # after initializing DDP with PTL.\n if app_state.model_parallel_size is not None:\n # destroy groups in case they have already been created\n # this happens with multiple calls to trainer.test for example\n parallel_state.destroy_model_parallel()\n if torch.distributed.is_initialized():\n parallel_state.initialize_model_parallel(\n tensor_model_parallel_size=app_state.tensor_model_parallel_size,\n pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,\n virtual_pipeline_model_parallel_size=app_state.virtual_pipeline_model_parallel_size,\n pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,\n use_fp8=app_state.use_fp8,\n )\n\n # assert that fake tp and pp rank match after model parallel init\n assert app_state.tensor_model_parallel_rank == parallel_state.get_tensor_model_parallel_rank()\n assert app_state.pipeline_model_parallel_rank == parallel_state.get_pipeline_model_parallel_rank()\n\n app_state.tensor_model_parallel_group = parallel_state.get_tensor_model_parallel_group()\n app_state.data_parallel_group = parallel_state.get_data_parallel_group()\n app_state.data_parallel_rank = parallel_state.get_data_parallel_rank()\n app_state.data_parallel_size = parallel_state.get_data_parallel_world_size()\n app_state.pipeline_model_parallel_group = parallel_state.get_pipeline_model_parallel_group()\n\n # create MPI process group for UCX-based communication APIs\n if app_state.init_mpi_proc_group:\n torch.distributed.new_group(backend='mpi')", "def adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n # Adjusting twice causes an error.\n if self.is_model_adjusted:\n logging.debug(\"model_and_info.is_model_adjusted is already True\")\n\n if self._optimizer:\n raise ValueError(\"Create an optimizer only after creating and adjusting the model.\")\n\n self._model = ModelAndInfo._adjust_for_gpus(model=self._model,\n config=self.config,\n model_execution_mode=self.model_execution_mode)\n\n self.is_model_adjusted = True\n logging.debug(\"model_and_info.is_model_adjusted set to True\")", "def partition_data_parallel(\n graph: GraphModule,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer],\n params_buffers: Dict[str, torch.Tensor],\n named_states: Dict[str, Any],\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n mesh: DeviceMesh,\n parallel_style: DataParallelStyle,\n input_batch_dim: int,\n) -> GraphModule:\n num_params_buffers = len(params_buffers)\n flattened_states = pytree.tree_flatten(named_states)[0]\n num_states = len(flattened_states)\n\n changed = graph.graph.eliminate_dead_code()\n if changed:\n graph.recompile()\n\n # 1. First build up data parallel strategies for the whole graph\n strategy_map = build_data_parallel_strategies(\n graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim\n )\n\n # 2. Next we mark the data parallel strategy for each node base on\n # the parallel_style\n mark_data_parallel_shardings(\n graph,\n num_parameters=num_params_buffers,\n num_states=num_states,\n dp_strategy_map=strategy_map,\n parallel_mode=parallel_style,\n )\n\n # 3. 
Partition the single machine graph to the distribute graph\n partitioned_graph = partitioner(graph)\n\n # preserve node types for the expanded graph\n for node in partitioned_graph.graph.nodes:\n if node in strategy_map:\n node_strategy = strategy_map[node]\n if isinstance(node_strategy, DataParallelStrategy):\n node.meta[\"node_type\"] = node_strategy.node_type\n elif isinstance(node_strategy, TupleStrategy):\n node.meta[\"node_type\"] = NodeType.NON_TENSOR\n else:\n raise RuntimeError(f\"Unknown node strategy {node_strategy}\")\n else:\n # if the nodes are expanded nodes (collectives), we mark them\n # the same type as the input node.\n input_node = node.all_input_nodes[0]\n node.meta[\"node_type\"] = input_node.meta[\"node_type\"]\n\n # 4. Last, inplace partition the weights and optim states to\n # DTensors base on the parallel style\n accessor = NamedMemberAccessor(model)\n for param_key, param in params_buffers.items():\n placement: Placement = Replicate()\n if parallel_style == DataParallelStyle.FULLY_SHARD:\n placement = Shard(0)\n elif parallel_style != DataParallelStyle.REPLICATE:\n raise RuntimeError(f\"parallel style {parallel_style} not supported yet\")\n\n dtensor_param = distribute_tensor(param, mesh, [placement])\n # update re-parameterized module param dict and optim states dict to DTensor\n params_buffers[param_key] = dtensor_param.to_local()\n # update module parameters to DTensor\n accessor.set_tensor(param_key, dtensor_param)\n\n # update the optimizer state key and values to DTensor\n if optimizer is not None and param in optimizer.state:\n param_states = named_states[param_key]\n param_dtensor_states = {}\n for state_key, state_val in param_states.items():\n if isinstance(state_val, torch.Tensor) and state_val.ndim > 0:\n # shard/replicate non-scalar tensors, for scalar tensor, we\n # don't do anything\n dtensor_state = distribute_tensor(state_val, mesh, [placement])\n param_dtensor_states[state_key] = dtensor_state\n param_states[state_key] = dtensor_state.to_local()\n else:\n param_dtensor_states[state_key] = state_val\n\n optimizer.state.pop(param) # type: ignore[call-overload]\n optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index]\n\n return partitioned_graph", "def optimize_model(input,\n model_type='bert',\n num_heads=0,\n hidden_size=0,\n optimization_options=None,\n opt_level=0,\n use_gpu=False,\n only_onnxruntime=False):\n (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]\n\n temp_model_path = None\n if opt_level > 1: # Optimization specified for an execution provider.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, opt_level=opt_level)\n elif run_onnxruntime:\n # Use Onnxruntime to do optimizations (like constant folding and cast elimation) that is not specified to exection provider.\n # CPU provider is used here so that there is no extra node for GPU memory copy.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)\n\n model = load_model(temp_model_path or input, format=None, load_external_data=True)\n\n if model.producer_name and producer != model.producer_name:\n logger.warning(\n f\"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. 
Please specify correct --model_type parameter.\"\n )\n\n if optimization_options is None:\n optimization_options = BertOptimizationOptions(model_type)\n\n optimizer = optimizer_class(model, num_heads, hidden_size)\n\n if not only_onnxruntime:\n optimizer.optimize(optimization_options)\n\n # Remove the temporary model.\n if temp_model_path:\n os.remove(temp_model_path)\n logger.debug(\"Remove tempoary model: {}\".format(temp_model_path))\n\n optimizer.model.producer_name = \"onnxruntime.transformers\"\n from onnxruntime import __version__ as onnxruntime_version\n optimizer.model.producer_version = onnxruntime_version\n\n return optimizer", "def configure_ddp(self):\n\n if (hasattr(self.model, 'megatron_amp_o2') and self.model.megatron_amp_o2) or (\n hasattr(self.model, 'with_distributed_adam') and self.model.with_distributed_adam\n ):\n # do not use DDP if using megatron amp O2 or distributed optimizer\n self._model = _LightningModuleWrapperBase(self.model)\n else:\n app_state = AppState()\n\n if app_state.model_parallel_size is not None:\n\n logging.info(f\"Configuring DDP for model parallelism.\")\n\n # With model parallelism, multiple GPUs form a large \"logical GPU\"\n # this means that data parallel groups span multiple GPUs\n # and are non-trivial\n # TODO: for megatron-lm self.model is a list\n # Removing self.pre_configure_ddp() as DDP's 'find_unused_parameters' now defaults\n # to False in PTL 2.0 and hence pre_configure_ddp() is removed in ddp.py\n # self.pre_configure_ddp()\n # device_ids = self.determine_ddp_device_ids()\n self._model = DistributedDataParallel(\n _LightningModuleWrapperBase(self.model),\n process_group=parallel_state.get_data_parallel_group(),\n **self._ddp_kwargs,\n )\n\n if self.no_ddp_communication_hook:\n # When using custom gradient accumulation and allreduce, disable\n # DDP communication hook that works on the gradient bucket.\n # Instead, use the custom gradient function and communication hook,\n # which is defined in the master optimizer wrapper.\n self._model.require_backward_grad_sync = False\n self._model.register_comm_hook(None, noop_hook)\n\n else:\n super().configure_ddp()", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def may_data_parallel(model):\n if torch.cuda.device_count() > 1:\n model = TransparentDataParallel(model)\n return model", "def parallelize(self):\n self.parallel = True\n self.network = torch.nn.DataParallel(self.network)", "def parallelize(self):\r\n self.parallel = True\r\n self.network = torch.nn.DataParallel(self.network)", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + 
'.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def initialize_multitask_model(\n *,\n model_def: nn.Module,\n input_spec: Dict[Tuple[Tuple[str, str], ...],\n Sequence[Union[Tuple[Tuple[int, ...], jnp.dtype],\n Tuple[int, ...]]]],\n config: ml_collections.ConfigDict,\n rngs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]],\n) -> Tuple[PyTree, PyTree, int, Optional[Dict[str, float]]]:\n\n def init_fn(model_def):\n for kwargs, in_spec in input_spec.items():\n\n if config.get('batch_sizes') is not None:\n batch_size = config.batch_sizes.get(dict(kwargs)['dataset'])\n else:\n batch_size = config.batch_size\n\n batch_size = (batch_size // jax.device_count()) if batch_size else None\n\n input_shapetype = [\n debug_utils.input_spec_to_jax_shape_dtype_struct(\n spec, batch_size=batch_size) for spec in in_spec\n ]\n dummy_input = []\n for in_st in input_shapetype:\n dummy_input.append(jnp.zeros(in_st.shape, in_st.dtype))\n model_def(\n *dummy_input, train=False, debug=False, **dict(kwargs))\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def _initialize_model(rngs):\n \"\"\"Initialization function to be jitted.\"\"\"\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n 
init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state\n\n if not isinstance(rngs, dict):\n rngs = {'params': rngs}\n init_params, init_model_state = _initialize_model(rngs)\n # Pop out params rng:\n rngs.pop('params')\n\n # Count number of trainable parameters:\n num_trainable_params = debug_utils.log_param_shapes(init_params)\n\n # Count gflops:\n count_flops = config.get('count_flops',\n ml_collections.ConfigDict({'count_flops': True}))\n if count_flops:\n variables = {'params': init_params, **init_model_state}\n gflops_dict = {}\n gflops_all = 0\n for kwargs, in_spec in input_spec.items():\n flops = debug_utils.compute_flops(\n flax_model_apply_fn=functools.partial(\n model_def.apply,\n variables,\n train=False,\n debug=False,\n rngs=rngs,\n **dict(kwargs)),\n input_spec=count_flops.get('input_spec', in_spec),\n fuse_multiply_add=count_flops.get('fuse_multiply_add', True))\n gflops = flops / (10**9)\n gflops_key = 'gflops/' + '/'.join(f'{x}={y}' for x, y in kwargs)\n gflops_dict[gflops_key] = gflops\n gflops_all += gflops\n gflops_dict['gflops'] = gflops_all\n else:\n gflops_dict = None\n\n return init_params, init_model_state, num_trainable_params, gflops_dict", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. 
\"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if 
self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def train_parallel(config):\n _setup_parallel_env()\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n 
rank_id=MultiDevice.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)", "def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model", "def fit(\n self,\n lr: float,\n epochs: int,\n model_dir: str = \"checkpoints\",\n model_name: str = None,\n momentum: float = 0.95,\n weight_decay: float = 0.0001,\n mixed_prec: bool = False,\n use_one_cycle_policy: bool = False,\n warmup_pct: float = 0.3,\n lr_gamma: float = 0.1,\n lr_step_size: float = None,\n grad_steps: int = 2,\n save_model: bool = False,\n ) -> None:\n # set epochs\n self.epochs = epochs\n\n # set lr_step_size based on epochs\n if lr_step_size is None:\n lr_step_size = np.ceil(2 / 3 * self.epochs)\n\n # set model name\n if model_name is None:\n model_name = self.model_name\n\n os.makedirs(model_dir, exist_ok=True)\n\n data_loaders = {}\n data_loaders[\"train\"] = self.dataset.train_dl\n data_loaders[\"valid\"] = self.dataset.test_dl\n\n # Move model to gpu before constructing optimizers and amp.initialize\n device = torch_device()\n self.model.to(device)\n count_devices = num_devices()\n torch.backends.cudnn.benchmark = True\n\n named_params_to_update = {}\n total_params = 0\n for name, param in self.model.named_parameters():\n total_params += 1\n if param.requires_grad:\n named_params_to_update[name] = param\n\n print(\"Params to learn:\")\n if len(named_params_to_update) == total_params:\n print(\"\\tfull network\")\n else:\n for name in named_params_to_update:\n print(f\"\\t{name}\")\n\n # create optimizer\n optimizer = optim.SGD(\n list(named_params_to_update.values()),\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n )\n\n # Use mixed-precision if available\n # Currently, only O1 works with DataParallel: See issues https://github.com/NVIDIA/apex/issues/227\n if mixed_prec:\n # break if not AMP_AVAILABLE\n assert AMP_AVAILABLE\n # 'O0': Full FP32, 'O1': Conservative, 'O2': Standard, 'O3': Full FP16\n self.model, optimizer = amp.initialize(\n self.model,\n optimizer,\n 
opt_level=\"O1\",\n loss_scale=\"dynamic\",\n # keep_batchnorm_fp32=True doesn't work on 'O1'\n )\n\n # Learning rate scheduler\n if use_one_cycle_policy:\n # Use warmup with the one-cycle policy\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=lr,\n total_steps=self.epochs,\n pct_start=warmup_pct,\n base_momentum=0.9 * momentum,\n max_momentum=momentum,\n )\n else:\n # Simple step-decay\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=lr_step_size, gamma=lr_gamma,\n )\n\n # DataParallel after amp.initialize\n model = (\n nn.DataParallel(self.model) if count_devices > 1 else self.model\n )\n\n criterion = nn.CrossEntropyLoss().to(device)\n\n # set num classes\n topk = 5\n if topk >= self.num_classes:\n topk = self.num_classes\n\n for e in range(1, self.epochs + 1):\n print(\n f\"Epoch {e} =========================================================\"\n )\n print(f\"lr={scheduler.get_lr()}\")\n\n self.results.append(\n self.train_an_epoch(\n model,\n data_loaders,\n device,\n criterion,\n optimizer,\n grad_steps=grad_steps,\n mixed_prec=mixed_prec,\n topk=topk,\n )\n )\n\n scheduler.step()\n\n if save_model:\n self.save(\n os.path.join(\n model_dir,\n \"{model_name}_{epoch}.pt\".format(\n model_name=model_name, epoch=str(e).zfill(3),\n ),\n )\n )\n self.plot_precision_loss_curves()", "def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == 
\"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, 
last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n return model", "def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model", "def run_training(self, schema_params, export_model=False, output_model_dir=None):\n # Log distributed execution context, which includes cluster configuration\n logger.info(f\"Commencing {self.effect_name} training\")\n logger.info(f\"Execution context : {self.execution_context}\")\n\n # Create partition_index_list\n partition_index_list = self._get_partition_list()\n logger.info(f\"This worker on work on the following list of partitions : {partition_index_list}\")\n\n # Sequentially train model on partitions\n for partition_index in partition_index_list:\n logger.info(f\"Commencing {self.effect_name} training for partition index : {partition_index}\")\n\n # Resolve partitioned data directory from raw path params from user\n checkpoint_path = self._anchor_directory(\n self.model.checkpoint_path,\n partition_index)\n training_data_dir = self._anchor_directory(self.model.training_data_dir,\n partition_index)\n validation_data_dir = self._anchor_directory(self.model.validation_data_dir,\n partition_index) if self.model.validation_data_dir else None\n\n if is_empty_directory(training_data_dir):\n logger.info(f\"{training_data_dir} is empty, no dataset to train on.\")\n continue\n # Train model\n self.execution_context[constants.PARTITION_INDEX] = partition_index\n self.model.train(training_data_dir=training_data_dir,\n validation_data_dir=validation_data_dir,\n 
metadata_file=self.model.metadata_file,\n checkpoint_path=checkpoint_path,\n execution_context=self._prepare_training_context(partition_index),\n schema_params=schema_params)\n\n # Chief should export model\n is_chief = self.execution_context[constants.IS_CHIEF]\n if export_model and is_chief:\n logger.info(f\"Exporting model to directory : {output_model_dir}\")\n self.model.export(output_model_dir=output_model_dir)", "def create_summary_and_adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_model_for_gpus()", "def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))", "def build_model(cfg, gpu_id=None):\n # Construct the model\n if MODEL_REGISTRY.get(cfg.MODEL.NAME) == None:\n # attempt to find standard models\n model = BaseVideoModel(cfg)\n else:\n # if the model is explicitly defined,\n # it is directly constructed from the model pool\n model = MODEL_REGISTRY.get(cfg.MODEL.NAME)(cfg)\n\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n model = model.cuda(device=cur_device)\n \n model_ema = None\n if cfg.MODEL.EMA.ENABLE:\n model_ema = ModelEmaV2(model, decay=cfg.MODEL.EMA.DECAY)\n\n try:\n # convert batchnorm to be synchronized across \n # different GPUs if needed\n sync_bn = cfg.BN.SYNC_BN\n if sync_bn == True and cfg.NUM_GPUS * cfg.NUM_SHARDS > 1:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n except:\n sync_bn = None\n\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS*cfg.NUM_SHARDS > 1:\n # Make model replica operate on the current device\n if cfg.PAI:\n # Support distributed training on the cluster\n model = torch.nn.parallel.DistributedDataParallel(\n module=model\n )\n else:\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n\n return model, model_ema", "def _update_model(self, X_all, Y_all):\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.set_XY(X_all, Y_all)\n\n # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...\n if self.max_iters > 0:\n # --- update the model maximizing the marginal likelihood.\n if self.optimize_restarts==1:\n self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)\n else:\n self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)", "def update_model(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_params = ['optimizer_params', 'optimizer']\n if any(item in kwargs.keys() for item in opt_params):\n 
self.get_unet_model()", "def enable_model_cpu_offload(self, gpu_id=0):\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def initialize_model(self):\n args = self.args\n\n if self.args.search_space == 'nasbench':\n self.model_fn = NasBenchNetSearchDarts\n self.fixmodel_fn = NasBenchNet\n model = self.model_fn(args)\n utils = darts_nasbench_utils\n else:\n raise NotImplementedError(\"Not supported\")\n # finialize model update\n if args.gpus > 0:\n if self.args.gpus == 1:\n model = model.cuda()\n self.parallel_model = model\n else:\n self.model = model\n self.parallel_model = nn.DataParallel(self.model).cuda()\n # IPython.embed(header='checking replicas and others.')\n else:\n self.parallel_model = model\n\n darts = DartsArchitect(model, args=args)\n model = self.parallel_model\n # logging.info(\"DARTS param size = %fMB\", utils.count_parameters_in_MB(darts))\n self.train_fn = partial(darts_train_model, args=args, architect=darts, sampler=None)\n self.eval_fn = partial(darts_model_validation, args=args, verbose=True)\n self.controller = darts\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n\n # scheduler as Cosine.\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.learning_rate_min)\n return model, optimizer, scheduler, darts, None", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = 
input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def freeze_model(model):\n for param in model.parameters():\n param.requires_grad = False", "def sync_model(model):\n size = float(dist.get_world_size())\n\n for param in model.parameters():\n dist.broadcast(param.data, 0)", "def test_auto_scale_batch_size_set_model_attribute(tmpdir, use_hparams):\n tutils.reset_seed()\n\n hparams = EvalModelTemplate.get_default_hparams()\n before_batch_size = hparams.get('batch_size')\n\n class HparamsEvalModelTemplate(EvalModelTemplate):\n\n def dataloader(self, *args, **kwargs):\n # artificially set batch_size so we can get a dataloader\n # remove it immediately after, because we want only self.hparams.batch_size\n setattr(self, \"batch_size\", before_batch_size)\n dataloader = super().dataloader(*args, **kwargs)\n del self.batch_size\n return dataloader\n\n datamodule_model = MNISTDataModule(data_dir=tmpdir, batch_size=111) # this datamodule should get ignored!\n datamodule_fit = MNISTDataModule(data_dir=tmpdir, batch_size=before_batch_size)\n\n model_class = HparamsEvalModelTemplate if use_hparams else EvalModelTemplate\n model = model_class(**hparams)\n model.datamodule = datamodule_model # unused when another module gets passed to .tune() / .fit()\n\n trainer = Trainer(default_root_dir=tmpdir,\n max_epochs=1,\n auto_scale_batch_size=True,\n gpus=1)\n trainer.tune(model, datamodule_fit)\n after_batch_size = model.hparams.batch_size if use_hparams else model.batch_size\n assert trainer.datamodule == datamodule_fit\n assert before_batch_size != after_batch_size\n assert after_batch_size <= len(trainer.train_dataloader.dataset)\n assert datamodule_fit.batch_size == after_batch_size\n # should be left unchanged, since it was not passed to .tune()\n assert datamodule_model.batch_size == 111", "def make_non_parallel_copy(model):\n def replace_data_parallel(container):\n for name, module in container.named_children():\n if isinstance(module, nn.DataParallel):\n setattr(container, name, module.module)\n if has_children(module):\n replace_data_parallel(module)\n\n # Make a copy of the model, because we're going to change it\n new_model = deepcopy(model)\n if isinstance(new_model, nn.DataParallel):\n new_model = new_model.module\n replace_data_parallel(new_model)\n\n return new_model", "def _assign_model_params(self, sess):\n with self.graph.as_default():\n for nn in range(self.num_networks):\n self.networks[nn].assign_model_params(sess)", "def train(model, config, logger, record): \n # initialize userIDs\n users_to_sample = config.users\n userIDs = np.arange(config.users) \n\n # initialize the optimizer for the server model\n dataset = assign_user_data(config, logger)\n\n # initialize the delta offset buffers and local residual buffers\n offset_buffers = []\n residual_buffers = []\n for user in range(users_to_sample):\n offset_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n residual_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n\n global_updater = GlobalUpdater(config, model.state_dict()) \n\n # before optimization, report the result first\n validate_and_log(model, dataset, config, record, logger)\n \n for comm_round in range(config.rounds):\n userIDs_candidates = userIDs[:users_to_sample]\n \n # Wait for all users updating 
locally\n local_packages = []\n for i, user_id in enumerate(userIDs_candidates):\n user_resource = assign_user_resource(config, user_id, \n dataset[\"train_data\"], dataset[\"user_with_data\"])\n updater = LocalUpdater(user_resource, config)\n updater.local_step(model, offset_buffers[user_id])\n local_package = updater.uplink_transmit()\n local_packages.append(local_package)\n\n # Update the global model\n global_updater.global_step(model, local_packages, residual_buffers)\n\n # Update local offsets\n update_offset_buffers(offset_buffers, \n residual_buffers,\n global_updater.accumulated_delta, \n config.tau) \n\n # log and record\n logger.info(\"Round {:d}\".format(comm_round))\n validate_and_log(model, dataset, config, record, logger)\n\n # if comm_round == config.scheduler[0]:\n # config.lr *= config.lr_scaler\n # config.scheduler.pop(0)", "def update(self, task_model):\n raise NotImplementedError()", "def enable_model_cpu_offload(self, gpu_id=0):\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n hook = None\n for cpu_offloaded_model in [self.vae, self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook", "def train(self, mode=True):\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n # print('the type train model : {}'.format(type(m)))\n if isinstance(m, torch.nn.BatchNorm2d) or \\\n isinstance(m, linklink.nn.syncbn_layer.SyncBatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # print('the freeze module: {} of {}th'.format(type(m), count))\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def set_model(self, model):\r\n self.model = model.model\r\n with context.eager_mode():\r\n self._close_writers()\r\n if self.write_graph:\r\n with self._get_writer(self._train_run_name).as_default():\r\n with summary_ops_v2.always_record_summaries():\r\n if not self.model.run_eagerly:\r\n summary_ops_v2.graph(K.get_graph(), step=0)\r\n\r\n summary_writable = (\r\n self.model._is_graph_network or # pylint: disable=protected-access\r\n self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access\r\n if summary_writable:\r\n summary_ops_v2.keras_model('keras', self.model, step=0)\r\n\r\n if self.embeddings_freq:\r\n self._configure_embeddings()", "def create_model(model_class, model_params=None, model_name='model'):\n\n model_params = {} if model_params is None else model_params\n\n model = model_class(**model_params)\n\n if special_parameters.load_model: # recover from checkpoint\n _load_model(model, model_name)\n\n # configure usage on GPU\n if use_gpu():\n model.to(first_device())\n model = torch.nn.DataParallel(model, device_ids=all_devices())\n\n # print info about devices\n print_info('Device(s)): ' + str(device_description()))\n\n return model", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise 
NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n if is_deepspeed_available():\n options += (DeepSpeedEngine,)\n\n while isinstance(model, options):\n model = model.module\n\n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n original_forward = model.__dict__.pop(\"_original_forward\", None)\n if original_forward is not None:\n while hasattr(forward, \"__wrapped__\"):\n forward = forward.__wrapped__\n if forward == original_forward:\n break\n model.forward = forward\n if getattr(model, \"_converted_to_transformer_engine\", False):\n convert_model(model, to_transformer_engine=False)\n return model", "def post_training_model_optimization(model_path: str, config_path: str) -> bool:\n # Load the model and its parameters from the given paths\n main_dict = load_model(model_path, \"cpu\")\n parameters = main_dict.get(\"parameters\", None)\n\n # If parameters are not available in the model file, parse them from the config file\n parameters = (\n parseConfig(config_path, version_check_flag=False)\n if parameters is None\n else parameters\n )\n\n # Create PyTorch objects and set onnx_export to True for optimization\n model, _, _, _, _, parameters = create_pytorch_objects(parameters, device=\"cpu\")\n parameters[\"model\"][\"onnx_export\"] = True\n\n # Perform version check and load the model's state dictionary\n version_check(parameters[\"version\"], version_to_check=main_dict[\"version\"])\n model.load_state_dict(main_dict[\"model_state_dict\"])\n\n # Optimize the model and save it to an ONNX file\n optimize_and_save_model(model, parameters, model_path, onnx_export=True)\n\n # Check if the optimized model file exists\n optimized_model_path = model_path.replace(\"pth.tar\", \"onnx\")\n if not os.path.exists(optimized_model_path):\n print(\"Error while optimizing the model.\")\n return False\n\n return True", "def TrainStudent(self, model_name, teacher_model_name, **kwargs):\n batch_size = kwargs.pop(\"batch_size\", 64)\n model_save_path = kwargs.pop('model_save_path', \"./checkpoints/student/\")\n teacher_model_path = kwargs.pop(\"teacher_model_path\", \"./checkpoints/teacher/\")\n temp = kwargs.pop(\"temp\", 10)\n num_epoch = kwargs.pop(\"num_epoch\", 20)\n basic_learning_rate = kwargs.pop(\"basic_learning_rate\", 5e-4)\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/student\")\n is_dev = kwargs.pop(\"dev_mode\", False)\n learning_rate_decay = kwargs.pop(\"learning_rate_decay\", 0.01)\n reg_scale = kwargs.pop(\"reg_scale\", 1e-1)\n soft_target_scale = kwargs.pop(\"soft_target_scale\", 1)\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(teacher_model_path):\n raise RuntimeError(\"Cannot find pretrained teacher model in '{}'\".format(teacher_model_path))\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n teacher_model_path = os.path.join(teacher_model_path, \"{}.ckpt\".format(teacher_model_name))\n\n tf.reset_default_graph()\n \n # Get 
training dataset\n if is_dev:\n train_data, train_label = self.data_manager.dev_data, self.data_manager.dev_label\n else:\n train_data, train_label = self.data_manager.train_data, self.data_manager.train_label\n \n num_train_data = train_data.shape[0]\n\n # The input of model\n X = tf.placeholder(train_data.dtype, [None]+list(train_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(train_label.dtype, [None]+list(train_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n \n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n dataset = dataset.shuffle(buffer_size=8000)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher and student model\n regularizer = tf.contrib.layers.l2_regularizer(scale=reg_scale)\n with tf.variable_scope('student_model', regularizer=regularizer):\n logits, probs = self.student_model(batch_data, is_train=is_train)\n\n with tf.variable_scope('teacher_model'):\n teacher_logits, teacher_probs = self.teacher_model(batch_data, is_train=False, trainable=False, temp=temp)\n\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n\n teacher_variabels = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"teacher_model\")\n student_variabels = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"student_model\")\n teacher_loader = tf.train.Saver(teacher_variabels)\n student_saver = tf.train.Saver(student_variabels)\n \n # Training part\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_label, name=\"hard_loss\"))\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'teacher_model'))\n loss += reg_loss\n soft_target_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=teacher_probs, name=\"soft_loss\"))\n loss += soft_target_scale * soft_target_loss\n \n global_step = tf.get_variable('global_step', initializer=0.0, trainable=False)\n learning_rate = tf.train.natural_exp_decay(\n basic_learning_rate, global_step,\n decay_rate=learning_rate_decay,\n name='learning_rate', decay_steps=1\n )\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n global_step_add = tf.assign_add(global_step, 1)\n\n train_acc_hist = []\n val_acc_hist = []\n train_loss_hist = []\n best_acc = 0.0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n teacher_loader.restore(sess, teacher_model_path)\n for i in range(num_epoch):\n sess.run(iterator.initializer, feed_dict={X:train_data, y:train_label})\n cnt = 0\n total_correct_cnt = 0\n total_loss, acc = 0.0, 0.0\n while True:\n try:\n curr_loss, train, right_num, curr_result = sess.run(\n [loss, train_op, correct_num, result],\n feed_dict={is_train: True}\n )\n total_correct_cnt += right_num\n total_loss += curr_loss\n cnt += 1\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_train_data\n last_loss = total_loss / cnt \n if verbose:\n div = \"===========================\"\n print(\"{}\\nEpoch {}/{}\\t\\tloss: {}\\t\\tacc: {}\".format(div, i+1, num_epoch, last_loss, acc))\n train_acc_hist.append(acc)\n train_loss_hist.append(last_loss)\n sess.run([global_step_add])\n if verbose:\n last_global_step, last_learning_rate = sess.run([global_step, learning_rate])\n print(\"learning_rate: 
{}\".format(last_learning_rate))\n break\n \n # Validation\n sess.run(iterator.initializer, feed_dict={X:self.data_manager.val_data, y:self.data_manager.val_label})\n acc = 0.0\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / self.data_manager.val_data.shape[0]\n if verbose:\n print(\"Validation acc: {}\".format(acc))\n val_acc_hist.append(acc)\n if acc > best_acc:\n best_acc = acc\n student_saver.save(sess, model_save_path)\n break\n # Write train process record\n self._writeRecord(record_save_path, \"{}_train_accuracy\".format(model_name), train_acc_hist)\n self._writeRecord(record_save_path, \"{}_validation_accuracy\".format(model_name), val_acc_hist)\n self._writeRecord(record_save_path, \"{}_train_loss\".format(model_name), train_loss_hist)\n if verbose:\n print(\"Finish Training Student Model! The Best Validation Accuracy is: {}\".format(best_acc))", "def prepare(self, n_cores=1, ipp_client=None):\n if len(self.shape_parameters):\n self.morpher = MORPHERS[self.config['morpher']](self.config.get('morpher_config', {}),\n self.shape_parameters)\n zs_list = self.morpher.get_anchor_points(bounds=self.get_bounds())\n\n # Create the configs for each new model\n configs = []\n for zs in zs_list:\n config = deepcopy(self.pdf_base_config)\n for i, (setting_name, (anchors, _, _)) in enumerate(self.shape_parameters.items()):\n # Translate from zs to settings using the anchors dict. Maybe not all settings are numerical.\n config[setting_name] = anchors[zs[i]]\n if ipp_client is None and n_cores != 1:\n # We have to compute in parallel: must have delayed computation on\n config['delay_pdf_computation'] = True\n configs.append(config)\n\n # Create the new models\n if n_cores == 1:\n models = [Model(c) for c in tqdm(configs, desc=\"Computing/loading models on one core\")]\n\n elif ipp_client is not None:\n models = create_models_ipyparallel(configs, ipp_client,\n block=self.config.get('block_during_paralellization', False))\n\n else:\n models = [Model(c) for c in tqdm(configs, desc=\"Preparing model computation tasks\")]\n\n hashes = set()\n for m in models:\n for s in m.sources:\n hashes.add(s.hash)\n\n compute_many(hashes, n_cores)\n\n # Reload models so computation takes effect\n models = [Model(c) for c in tqdm(configs, desc=\"Loading computed models\")]\n\n # Add the new models to the anchor_models dict\n for zs, model in zip(zs_list, models):\n self.anchor_models[tuple(zs)] = model\n\n # Build the interpolator for the rates of each source.\n self.mus_interpolator = self.morpher.make_interpolator(f=lambda m: m.expected_events(),\n extra_dims=[len(self.source_name_list)],\n anchor_models=self.anchor_models)\n\n self.is_data_set = False\n self.is_prepared = True", "def freeze_model(self):\n # BN layers need to be freezed explicitly since they cannot be freezed via '.requires_grad=False'\n for module in self.modules():\n if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):\n module.eval()\n \n # freeze all parameters\n for param in self.parameters():\n param.requires_grad = False", "def mutate(self, model: GraphModelSpace) -> None:\n raise NotImplementedError()", "def dist_setting(current_gpu, model, args):\n print(\"channels_last : {}\".format(args.channels_last))\n if args.channels_last:\n args.memory_format = torch.channels_last\n else:\n args.memory_format = torch.contiguous_format\n\n if args.apex:\n args.lr = 
args.lr*float(args.batch_size*args.world_size)/256.\n args.current_gpu = current_gpu\n if args.current_gpu is not None:\n print(\"Use GPU: {} for training\".format(args.current_gpu))\n\n if args.multigpus_distributed:\n args.rank = args.num_gpus * args.host_num + args.current_gpu\n dist.init_process_group(backend=args.backend,\n rank=args.rank, world_size=args.world_size)\n logger.info('Initialized the distributed environment: \\'{}\\' backend on {} nodes. '.format(\n args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(\n dist.get_rank(), args.num_gpus))\n else:\n args.rank = 0\n\n if args.sync_bn:\n import apex\n print(\"using apex synced BN\")\n model = apex.parallel.convert_syncbn_model(model)\n\n if args.multigpus_distributed:\n if args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n args.batch_size = int(args.batch_size / args.num_gpus)\n logger.info(\"Batch size for each GPU: {}\".format(args.batch_size))\n if not args.apex:\n model.cuda(args.current_gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.current_gpu])\n else:\n if not args.apex:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n if not args.apex:\n model = model.cuda(args.current_gpu)\n else:\n if not args.apex:\n model = torch.nn.DataParallel(model).cuda()\n\n return model, args", "def _update_model(self, normalization_type='stats'):\n if self.num_acquisitions % self.model_update_interval == 0:\n\n # input that goes into the model (is unziped in case there are categorical variables)\n X_inmodel = self.space.unzip_inputs(self.X)\n\n # Y_inmodel is the output that goes into the model\n if self.normalize_Y:\n Y_inmodel = normalize(self.Y, normalization_type)\n else:\n Y_inmodel = self.Y\n\n self.model.updateModel(X_inmodel, Y_inmodel, None, None)", "def worker(self, gpu_id: int):\n if self.seed is not None:\n make_deterministic(self.seed)\n self.current_rank = self.rank\n if self.distributed:\n if self.multiprocessing:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n self.current_rank = self.rank * self.ngpus_per_node + gpu_id\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=self.dist_url,\n world_size=self.world_size,\n rank=self.current_rank\n )\n # set up process logger\n self.logger = logging.getLogger(\"worker_rank_{}\".format(self.current_rank))\n self.logger.propagate = False\n handler = QueueHandler(self.logger_queue)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n\n # only write in master process\n if self.current_rank == 0:\n self.tb_writer = self.tb_writer_constructor()\n\n self.logger.info(\n \"Use GPU: %d for training, current rank: %d\",\n gpu_id,\n self.current_rank\n )\n # get dataset\n train_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"train\"\n )\n val_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"val\"\n )\n # create model\n self.model = get_model(\n model_name=self.global_cfg[\"model\"][\"name\"],\n num_classes=self.global_cfg[\"dataset\"][\"n_classes\"]\n )\n\n self.device = torch.device(\"cuda:{}\".format(gpu_id))\n self.model.to(self.device)\n\n batch_size = self.global_cfg[\"training\"][\"batch_size\"]\n n_workers = 
self.global_cfg[\"training\"][\"num_workers\"]\n if self.distributed:\n batch_size = int(batch_size / self.ngpus_per_node)\n n_workers = int((n_workers + self.ngpus_per_node - 1) / self.ngpus_per_node)\n if self.global_cfg[\"training\"][\"sync_bn\"]:\n self.model = SyncBatchNorm.convert_sync_batchnorm(self.model)\n self.model = DistributedDataParallel(self.model, device_ids=[gpu_id])\n self.logger.info(\"batch_size: {}, workers: {}\".format(batch_size, n_workers))\n\n # define loss function (criterion) and optimizer\n self.loss_fn = CrossEntropyLoss().to(self.device)\n\n optimizer_cls = get_optimizer(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params = copy.deepcopy(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params.pop(\"name\")\n self.optimizer: Optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n self.logger.info(\"Loaded optimizer:\\n%s\", self.optimizer)\n\n # scheduler\n self.scheduler = get_scheduler(self.optimizer, self.global_cfg[\"training\"][\"lr_schedule\"])\n\n if self.distributed:\n train_sampler = DistributedSampler(\n train_dataset,\n shuffle=True,\n drop_last=True\n )\n val_sampler = DistributedSampler(\n val_dataset,\n shuffle=False\n )\n else:\n train_sampler = RandomSampler(train_dataset)\n val_sampler = SequentialSampler(val_dataset)\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=train_sampler\n )\n\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=val_sampler\n )\n self.logger.info(\n \"Load dataset done\\nTraining: %d imgs, %d batchs\\nEval: %d imgs, %d batchs\",\n len(train_dataset),\n len(train_loader),\n len(val_dataset),\n len(self.val_loader)\n )\n iter_generator = make_iter_dataloader(train_loader)\n\n while self.iter < self.global_cfg[\"training\"][\"train_iters\"]:\n img, label = next(iter_generator)\n self.train_iter(img, label)\n\n def is_val():\n p1 = self.iter != 0\n p2 = (self.iter + 1) % self.global_cfg[\"training\"][\"val_interval\"] == 0\n p3 = self.iter == self.global_cfg[\"training\"][\"train_iters\"] - 1\n return (p1 and p2) or p3\n\n # have a validation\n if is_val():\n self.validate()\n # end one iteration\n self.iter += 1", "def enable_parallel_projection(self):\n # Fix the 'reset camera' effect produced by the VTK when parallel\n # projection is enabled.\n angle = np.radians(self.camera.view_angle)\n self.camera.parallel_scale = self.camera.distance * np.sin(0.5 * angle)\n\n self.camera.enable_parallel_projection()\n self.Modified()", "def update_model(engine, batch):\n\t\tengine.model.train()\n\t\tengine.model.rpn.nms_thresh = 0.7\n\t\timg, target = prepare_batch(batch, device=get_device(engine.model))\n\t\tengine.optimizer.zero_grad()\n\t\tloss = engine.model(img, target)\n\t\tlosses = sum(l for l in loss.values())\n\t\tlosses.backward()\n\t\tengine.optimizer.step()\n\t\treturn loss", "def setup_model(self,\n model_weights_path: Optional[str] = None,\n model_def_path: Optional[str] = None) -> None:\n if self.model is not None:\n self.model.to(self.device)\n return\n\n self._onnx_mode = (model_weights_path is not None\n and model_weights_path.lower().endswith('.onnx'))\n if self._onnx_mode:\n model = self.load_onnx_model(model_weights_path)\n else:\n model = self.build_model(model_def_path)\n\n if self.cfg.model.external_def is not None:\n # this model will have 1 extra output classes that we will ignore\n self.model = 
TorchVisionODAdapter(model, ignored_output_inds=[0])\n else:\n # this model will have 2 extra output classes that we will ignore\n num_classes = self.cfg.data.num_classes\n self.model = TorchVisionODAdapter(\n model, ignored_output_inds=[0, num_classes + 1])\n\n if not self._onnx_mode:\n self.model.to(self.device)\n self.load_init_weights(model_weights_path)", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return", "def test_no_model_parallel(self):\n for m in ['transformer/generator', 'transformer/ranker']:\n try:\n _ = self._distributed_train_model(model=m, model_parallel=True)\n except RuntimeError:\n pass\n else:\n self.fail('Did not raise RuntimeError')", "def mount(xpu, model):\n # Unwrap the core model if necessary\n model = xpu.raw(model)\n model = xpu.move(model)\n if xpu._device_ids and len(xpu._device_ids) > 1:\n model = ContainerDataParallel(\n model, device_ids=xpu._device_ids,\n output_device=xpu._main_device_id)\n else:\n model = DataSerial(model)\n return model", "def apply(self, model: GraphModelSpace) -> GraphModelSpace:\n assert self.sampler is not None\n copy = model.fork()\n copy.status = ModelStatus.Mutating\n self._cur_model = copy\n self._cur_choice_idx = 0\n self._cur_samples = []\n\n # Some mutate() requires a full mutation history of the model.\n # Therefore, parent needs to be set before the mutation.\n copy.parent = Mutation(self, self._cur_samples, model, copy)\n self.sampler.mutation_start(self, copy)\n self.mutate(copy)\n self.sampler.mutation_end(self, copy)\n self._cur_model = None\n self._cur_choice_idx = None\n return copy", "def run(\n cls,\n model: AbsESPnetModel,\n optimizers: Sequence[torch.optim.Optimizer],\n schedulers: Sequence[Optional[AbsScheduler]],\n train_iter_factory: AbsIterFactory,\n valid_iter_factory: AbsIterFactory,\n plot_attention_iter_factory: Optional[AbsIterFactory],\n reporter: Reporter,\n scaler: Optional[GradScaler],\n output_dir: Path,\n max_epoch: int,\n seed: int,\n patience: Optional[int],\n keep_nbest_models: int,\n early_stopping_criterion: Sequence[str],\n best_model_criterion: Sequence[Sequence[str]],\n val_scheduler_criterion: Sequence[str],\n trainer_options,\n distributed_option: DistributedOption,\n find_unused_parameters: bool = False,\n ) -> None:\n assert check_argument_types()\n assert is_dataclass(trainer_options), type(trainer_options)\n\n start_epoch = reporter.get_epoch() + 1\n if start_epoch == max_epoch + 1:\n logging.warning(\n f\"The training has already reached at max_epoch: {start_epoch}\"\n )\n\n if distributed_option.distributed:\n dp_model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=(\n # Perform multi-Process with multi-GPUs\n [torch.cuda.current_device()]\n if distributed_option.ngpu == 1\n # Perform single-Process with multi-GPUs\n else None\n ),\n 
output_device=(\n torch.cuda.current_device()\n if distributed_option.ngpu == 1\n else None\n ),\n find_unused_parameters=find_unused_parameters,\n )\n elif distributed_option.ngpu > 1:\n dp_model = torch.nn.parallel.DataParallel(\n model,\n device_ids=list(range(distributed_option.ngpu)),\n find_unused_parameters=find_unused_parameters,\n )\n else:\n # NOTE(kamo): DataParallel also should work with ngpu=1,\n # but for debuggability it's better to keep this block.\n dp_model = model\n\n if trainer_options.use_tensorboard and (\n not distributed_option.distributed or distributed_option.dist_rank == 0\n ):\n summary_writer = SummaryWriter(str(output_dir / \"tensorboard\"))\n else:\n summary_writer = None\n\n start_time = time.perf_counter()\n for iepoch in range(start_epoch, max_epoch + 1):\n if iepoch != start_epoch:\n logging.info(\n \"{}/{}epoch started. Estimated time to finish: {}\".format(\n iepoch,\n max_epoch,\n humanfriendly.format_timespan(\n (time.perf_counter() - start_time)\n / (iepoch - start_epoch)\n * (max_epoch - iepoch + 1)\n ),\n )\n )\n else:\n logging.info(f\"{iepoch}/{max_epoch}epoch started\")\n set_all_random_seed(seed + iepoch)\n\n reporter.set_epoch(iepoch)\n # 1. Train and validation for one-epoch\n with reporter.observe(\"train\") as sub_reporter:\n all_steps_are_invalid = cls.train_one_epoch(\n model=dp_model,\n optimizers=optimizers,\n schedulers=schedulers,\n iterator=train_iter_factory.build_iter(iepoch),\n reporter=sub_reporter,\n scaler=scaler,\n summary_writer=summary_writer,\n options=trainer_options,\n )\n\n with reporter.observe(\"valid\") as sub_reporter:\n cls.validate_one_epoch(\n model=dp_model,\n iterator=valid_iter_factory.build_iter(iepoch),\n reporter=sub_reporter,\n options=trainer_options\n )\n\n # 2. LR Scheduler step\n for scheduler in schedulers:\n if isinstance(scheduler, AbsValEpochStepScheduler):\n scheduler.step(reporter.get_value(*val_scheduler_criterion))\n elif isinstance(scheduler, AbsEpochStepScheduler):\n scheduler.step()\n\n if not distributed_option.distributed or distributed_option.dist_rank == 0:\n # 3. Report the results\n logging.info(reporter.log_message())\n reporter.matplotlib_plot(output_dir / \"images\")\n if summary_writer is not None:\n reporter.tensorboard_add_scalar(summary_writer)\n if trainer_options.use_wandb:\n reporter.wandb_log()\n\n # 4. Save/Update the checkpoint\n torch.save(\n {\n \"model\": model.state_dict(),\n \"reporter\": reporter.state_dict(),\n \"optimizers\": [o.state_dict() for o in optimizers],\n \"schedulers\": [\n s.state_dict() if s is not None else None\n for s in schedulers\n ],\n \"scaler\": scaler.state_dict() if scaler is not None else None,\n },\n output_dir / \"checkpoint.pth\",\n )\n\n # 5. Save the model and update the link to the best model\n torch.save(model.state_dict(), output_dir / f\"{iepoch}epoch.pth\")\n\n # Creates a sym link latest.pth -> {iepoch}epoch.pth\n p = output_dir / \"latest.pth\"\n if p.is_symlink() or p.exists():\n p.unlink()\n p.symlink_to(f\"{iepoch}epoch.pth\")\n\n _improved = []\n for _phase, k, _mode in best_model_criterion:\n # e.g. 
_phase, k, _mode = \"train\", \"loss\", \"min\"\n if reporter.has(_phase, k):\n best_epoch = reporter.get_best_epoch(_phase, k, _mode)\n # Creates sym links if it's the best result\n if best_epoch == iepoch:\n p = output_dir / f\"{_phase}.{k}.best.pth\"\n if p.is_symlink() or p.exists():\n p.unlink()\n p.symlink_to(f\"{iepoch}epoch.pth\")\n _improved.append(f\"{_phase}.{k}\")\n if len(_improved) == 0:\n logging.info(\"There are no improvements in this epoch\")\n else:\n logging.info(\n \"The best model has been updated: \" + \", \".join(_improved)\n )\n\n # 6. Remove the model files excluding n-best epoch and latest epoch\n _removed = []\n # Get the union set of the n-best among multiple criterion\n nbests = set().union(\n *[\n set(reporter.sort_epochs(ph, k, m)[:keep_nbest_models])\n for ph, k, m in best_model_criterion\n if reporter.has(ph, k)\n ]\n )\n for e in range(1, iepoch):\n p = output_dir / f\"{e}epoch.pth\"\n if p.exists() and e not in nbests:\n p.unlink()\n _removed.append(str(p))\n if len(_removed) != 0:\n logging.info(\"The model files were removed: \" + \", \".join(_removed))\n\n # 7. If any updating haven't happened, stops the training\n if all_steps_are_invalid:\n logging.warning(\n f\"The gradients at all steps are invalid in this epoch. \"\n f\"Something seems wrong. This training was stopped at {iepoch}epoch\"\n )\n break\n\n # 8. Check early stopping\n if patience is not None:\n if reporter.check_early_stopping(patience, *early_stopping_criterion):\n break\n\n else:\n logging.info(f\"The training was finished at {max_epoch} epochs \")", "def train_model_batch(model, config, test, resume=None):\n\n if config['optimizer']['method'] == 'adagrad':\n optimizer = Adagrad()\n elif config['optimizer']['method'] == 'adadelta':\n optimizer = Adadelta()\n elif config['optimizer']['method'] == 'adam':\n optimizer = Adam()\n else: # default SGD\n params = config['optimizer']['params']\n if resume is None: # New experiment\n optimizer = SGD(lr=params['lrate'], momentum=params['momentum'], decay=params['decay'],\n nesterov=params['nesterov'])\n iepoch = 0\n else: # Resume training\n nlrate = params['lrate'] - ((params['lrate'] / config['train']['epochs']) * params['epochs_trained'])\n\n optimizer = SGD(lr=nlrate, momentum=params['momentum'], decay=params['decay'],\n nesterov=params['nesterov'])\n iepoch = config['train']['epochs_trained']\n\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n classweight = detransweights(config['train']['classweight'])\n if 'log' not in config or config['log'] == 'db':\n dblog = DBLog(database=mongoconnection, config=config, model=model, modelj=model.to_json(), resume=resume)\n else:\n dblog = FileLog(config=config, modelj=model.to_json())\n\n recode = None if 'recode' not in config else recoding_dictionary(config['recode'])\n\n train = Dataset(config['datapath'], config['traindata'], config['zfactor'], imgord=config['imgord'],\n nclasses=test.nclasses, recode=recode)\n\n # Train Epochs\n logs = {'loss': 0.0, 'acc': 0.0, 'val_loss': 0.0, 'val_acc': 0.0}\n train.open()\n chunks, _ = train.chunks_list()\n\n for epoch in range(iepoch, config['train']['epochs']):\n\n shuffle(chunks)\n\n # Train Batches\n lloss = []\n lacc = []\n for chunk in chunks:\n train.load_chunk(chunk, config['train']['batchsize'])\n\n for p in train.perm:\n loss, acc = model.train_on_batch(train.X_train[p], train.y_train[p], class_weight=classweight)\n lloss.append(loss)\n lacc.append(acc)\n\n logs['loss'] = float(np.mean(lloss))\n 
logs['acc'] = float(np.mean(lacc))\n\n logs['val_loss'], logs['val_acc'] = model.evaluate(test.X_train, test.y_train, verbose=0)\n\n force_stop = dblog.force_stop()\n dblog.on_epoch_end(epoch, logs=logs)\n\n if config['savepath']:\n model.save(config['savepath'] + '/' + str(dblog.id) + '.h5')\n\n # If the training is stopped remotely training stops\n if force_stop:\n break\n train.close()\n\n scores = model.evaluate(test.X_train, test.y_train, verbose=0)\n dblog.on_train_end(logs={'acc': logs['acc'], 'val_acc': scores[1]})\n y_pred = model.predict_classes(test.X_train, verbose=0)\n dblog.save_final_results(scores, confusion_matrix(test.y_labels, y_pred),\n classification_report(test.y_labels, y_pred))", "def data_parallel(self, batch_size, inputs):\n inputs = list(inputs)\n\n # quick path: only one device, do not slice\n if len(self.work_devices) == 1:\n assert(self.main_device == self.work_devices[0])\n yield self.main_device, False, tuple(inputs)\n\n # slow path: multi-GPUs\n else:\n # the GPUs are not in the same group, place variables on CPU\n if self.main_device not in self.work_devices:\n yield self.main_device, True, tuple(inputs)\n\n # build the paralleled computation graph for each device\n with tf.name_scope('data_parallel') as ns:\n pass # generate a name scope to place our data slicing ops\n\n k = len(self.work_devices)\n for i, device in enumerate(self.work_devices):\n dev_inputs = []\n with tf.name_scope(ns + 'tower_gpu_{}'.format(i)):\n for inp in inputs:\n slice_len = (batch_size + k - 1) // k\n low, high = slice_len * i, slice_len * (i + 1)\n dev_inputs.append(inp[low: high])\n yield device, False, tuple(dev_inputs)", "async def fit_model_on_worker(\n worker,\n built_model: sy.Plan,\n built_loss_fn: sy.Plan,\n encrypters,\n batch_size: int,\n curr_round: int,\n max_nr_batches: int,\n lr: float,\n):\n num_of_parameters = len(built_model.parameters())\n built_model.id = \"GlobalModel\"\n # built_loss_fn.id = \"LossFunc\"\n # model_config = sy.ModelConfig(model=built_model,\n # loss_fn=built_loss_fn,\n # optimizer=\"SGD\",\n # batch_size=batch_size,\n # optimizer_args={\"lr\": lr},\n # epochs=1,\n # max_nr_batches=max_nr_batches)\n # model_config_send_start = time.time()\n built_model.send(worker)\n # model_config_send_end = time.time()\n print(\"[trace] GlobalInformationSend duration\", worker.id, model_config_send_end - model_config_send_start)\n\n return_ids = [0, 1]\n for i in range(num_of_parameters):\n return_ids.append(\"p\" + str(i))\n\n fit_sagg_start = time.time()\n result_list = await worker.async_fit_sagg_mc(dataset_key=\"mnist\", encrypters=encrypters, return_ids=return_ids)\n fit_sagg_end = time.time()\n print(\"[trace] FitSagg\", \"duration\", worker.id, fit_sagg_end - fit_sagg_start)\n\n loss = result_list[0]\n num_of_training_data = result_list[1]\n enc_params = result_list[2:]\n\n print(\"Iteration %s: %s loss: %s\" % (curr_round, worker.id, loss))\n\n return worker.id, enc_params, loss, num_of_training_data", "def __init__(self,\n names,\n data,\n embedding_fns,\n encoder_fns_1,\n encoder_fns_2,\n logits_fns,\n evaluation_fns,\n # MTL\n mixing_ratios,\n L2_coefficient=None,\n is_distill=False,\n distill_coefficient_loc=None,\n distill_coefficient_scale=None,\n distill_temperature=1.0,\n # optimization\n optimizer=\"Adam\",\n learning_rate=0.001,\n gradient_clipping_norm=2.0,\n # misc\n graph=None,\n logdir=None,\n main_model_index=0,\n debug_mode=False):\n \n super(MultitaskBaseModel, self).__init__(\n logdir=logdir, graph=graph,\n 
saver_max_to_keep=MAX_CHECKPOINTS_TO_KEEP)\n\n num_models = len(names)\n _check_list_compatability(data, num_models)\n _check_fn_list_compatability(embedding_fns, num_models, True)\n _check_fn_list_compatability(encoder_fns_1, num_models, True)\n _check_fn_list_compatability(encoder_fns_2, num_models, True)\n _check_fn_list_compatability(logits_fns, num_models, False)\n _check_fn_list_compatability(evaluation_fns, num_models, False)\n\n # check mixing ratios and MTL\n if len(names) == 1:\n raise ValueError(\"Not supported\")\n _mr_compatible(mixing_ratios, num_models, print_out=True)\n if main_model_index != 0:\n raise ValueError(\"`main_model_index` must be set to `0`\")\n\n self._names = names\n self._data = data\n self._embedding_fns = embedding_fns\n self._encoder_fns_1 = encoder_fns_1\n self._encoder_fns_2 = encoder_fns_2\n self._logits_fns = logits_fns\n self._evaluation_fns = evaluation_fns\n\n # MTL\n self._mixing_ratios = mixing_ratios\n self._L2_coefficient = L2_coefficient\n self._is_disill = is_distill\n self._distill_temperature = distill_temperature\n self._distill_coefficient_loc = distill_coefficient_loc\n self._distill_coefficient_scale = distill_coefficient_scale\n\n self._optimizer = optimizer\n self._learning_rate = learning_rate\n self._gradient_clipping_norm = gradient_clipping_norm\n\n self._main_model_index = main_model_index\n self._debug = collections.defaultdict(list)\n self._debug_mode = debug_mode", "def _get_nonwrapped_model(\n self, group, **model_kwargs,\n ) -> torch.nn.Module:\n return TransformerWithSharedParams(group, **model_kwargs).cuda()", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + 
\".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def preprocess(\n model_proto, disable_fuse_bn=False, duplicate_shared_weights=True\n):\n logger.info(\"Preprocessing the model...\")\n helper.setup_current_opset_version(model_proto)\n eliminating.eliminate_empty_value_infos(model_proto.graph)\n other.add_name_to_node(model_proto.graph)\n other.rename_all_node_name(model_proto.graph)\n replacing.replace_initializer_with_Constant(model_proto.graph)\n other.topological_sort(model_proto.graph)\n m = other.polish_model(model_proto)\n passes = [\n \"extract_constant_to_initializer\",\n \"eliminate_nop_dropout\",\n \"eliminate_deadend\",\n \"fuse_matmul_add_bias_into_gemm\",\n \"fuse_pad_into_conv\",\n ]\n if not disable_fuse_bn:\n passes.append(\"fuse_bn_into_conv\")\n m = optimizer.optimize(m, passes)\n g = m.graph\n # Add name again since onnx optimizer higher than 1.7 may remove node names\n other.add_name_to_node(g)\n if duplicate_shared_weights:\n replacing.replace_initializer_with_Constant(\n g, duplicate_shared_weights=True\n )\n other.duplicate_param_shared_constant(g)\n else:\n replacing.replace_initializer_with_Constant(\n g, duplicate_shared_weights=False\n )\n other.topological_sort(g)\n m = other.polish_model(m)\n g = m.graph\n eliminating.eliminate_consecutive_Cast(m.graph)\n eliminating.eliminate_Cast_after_input(m.graph)\n eliminating.eliminate_nop_pads(g)\n eliminating.eliminate_nop_cast(g)\n eliminating.eliminate_Identify_and_Dropout(g)\n eliminating.eliminate_trivial_maxpool(g)\n eliminating.eliminate_no_children_input(g)\n other.format_value_info_shape(g)\n other.topological_sort(g)\n m = other.inference_shapes(m)\n g = m.graph\n replacing.replace_split_with_slices(g)\n other.topological_sort(g)\n\n return m", "def __init__(\n self,\n model: nn.Module,\n input_path: Union[Path, str],\n out_activations: Dict[str, str],\n out_boundary_weights: Dict[str, bool],\n stride: int,\n patch_size: Tuple[int, int],\n instance_postproc: str,\n padding: int = None,\n batch_size: int = 8,\n normalization: str = None,\n device: str = \"cuda\",\n n_devices: int = 1,\n save_intermediate: bool = False,\n save_dir: Union[Path, str] = None,\n save_format: str = \".mat\",\n checkpoint_path: Union[Path, str] = None,\n n_images: int = None,\n type_post_proc: Callable = None,\n sem_post_proc: Callable = None,\n **kwargs,\n ) -> None:\n 
super().__init__(\n model=model,\n input_path=input_path,\n out_activations=out_activations,\n out_boundary_weights=out_boundary_weights,\n patch_size=patch_size,\n padding=padding,\n batch_size=batch_size,\n normalization=normalization,\n instance_postproc=instance_postproc,\n device=device,\n save_intermediate=save_intermediate,\n save_dir=save_dir,\n save_format=save_format,\n checkpoint_path=checkpoint_path,\n n_images=n_images,\n n_devices=n_devices,\n type_post_proc=type_post_proc,\n sem_post_proc=sem_post_proc,\n **kwargs,\n )\n\n self.stride = stride", "def apply_model_params(train_model,\n ridge=False,\n lasso=False,\n lasso_penalty=None,\n model='lr'):\n if model == 'lr':\n if ridge:\n return partial(\n train_model,\n ridge=ridge,\n c_values=cfg.ridge_c_values\n )\n elif lasso:\n return partial(\n train_model,\n lasso=lasso,\n lasso_penalty=lasso_penalty\n )\n else:\n # elastic net is the default\n return partial(\n train_model,\n alphas=cfg.alphas,\n l1_ratios=cfg.l1_ratios\n )\n elif model == 'mlp':\n return partial(\n train_model,\n search_n_iter=cfg.mlp_search_n_iter\n )\n else:\n raise NotImplementedError(f'model {model} not implemented')", "def adjust_mean_teacher_model_for_gpus(self) -> None:\n if self._mean_teacher_model is None:\n raise ValueError(\"Mean teacher model must be created before it can be adjusted.\")\n\n # Adjusting twice causes an error.\n if self.is_mean_teacher_model_adjusted:\n logging.debug(\"model_and_info.is_mean_teacher_model_adjusted is already True\")\n\n self._mean_teacher_model = ModelAndInfo._adjust_for_gpus(model=self._mean_teacher_model,\n config=self.config,\n model_execution_mode=self.model_execution_mode)\n\n self.is_mean_teacher_model_adjusted = True\n logging.debug(\"model_and_info.is_mean_teacher_model_adjusted set to True\")", "def tune(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n ):\n self.tuner.tune(model, train_dataloader, val_dataloaders, datamodule)", "def _build_model(self):\n tf.set_random_seed(self.params.tf_random_seed)\n np.random.seed(4321)\n phase_train = not (self.params.eval or self.params.forward_only)\n\n log_fn('Generating model')\n losses = []\n device_grads = []\n all_logits = []\n all_top_1_ops = []\n all_top_5_ops = []\n enqueue_ops = []\n gpu_compute_stage_ops = []\n gpu_grad_stage_ops = []\n\n with tf.device(self.global_step_device):\n global_step = tf.train.get_or_create_global_step()\n \n # Build the processing and model for the worker.\n (image_producer_ops,\n image_producer_stages) = self._build_image_processing(shift_ratio=0)\n image_producer_ops = tf.group(*image_producer_ops)\n update_ops = None\n staging_delta_ops = []\n\n for device_num in range(len(self.devices)):\n with self.variable_mgr.create_outer_variable_scope(\n device_num), tf.name_scope('tower_%i' % device_num) as name_scope:\n results = self.add_forward_pass_and_gradients(\n phase_train, device_num, device_num,\n image_producer_stages[device_num], gpu_compute_stage_ops,\n gpu_grad_stage_ops)\n if phase_train:\n losses.append(results['loss'])\n device_grads.append(results['gradvars'])\n \n\n if device_num == 0:\n # Retain the Batch Normalization updates operations only from the\n # first tower. These operations update the moving mean and moving\n # variance variables, which are updated (but not used) during\n # training, and used during evaluation. 
The moving mean and variance\n # approximate the true mean and variance across all images in the\n # dataset. Therefore, in replicated mode, these moving averages would\n # be almost identical for each tower, and so we only update and save\n # the moving averages for one tower. In parameter server mode, all\n # towers share a copy of the variables so we also only need to update\n # and save the moving averages once.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)\n staging_delta_ops = list(self.variable_mgr.staging_delta_ops)\n \n enqueue_ops.append(tf.group(*gpu_compute_stage_ops))\n\n fetches = self._build_fetches(global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops,\n all_top_5_ops, phase_train)\n return (image_producer_ops, enqueue_ops, fetches)", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def to_multi_gpu(model, n_gpus=4):\n\n with tf.device('/cpu:0'):\n x = Input(model.input_shape[1:], name=model.input_names[0])\n towers = []\n device=[0,1,2,3]\n for g in range(n_gpus):\n with tf.device('/gpu:' + str(device[g])):\n slice_g = Lambda(slice_batch, lambda shape: shape,\n arguments={'n_gpus':n_gpus, 'part':g})(x)\n towers.append(model(slice_g))\n\n with tf.device('/cpu:0'):\n merged = merge(towers, mode='concat', concat_axis=0)\n\n return Model(inputs=[x], outputs=merged)", "def eval_model(\n self,\n model: nn.Module,\n batch_size: int = 32,\n data: Union[str, th.utils.data.Dataset] = \"test\",\n collate_fn: Optional[Callable] = None,\n by_example: bool = False,\n label_map: Optional[Callable] = None,\n nll: bool = False,\n ):\n # Set model to test mode\n mode = model.training\n model.train(mode=False)\n # Select dataset for evaluation\n dataset = data\n if isinstance(data, str):\n dataset = self.get_split(data)\n elif not isinstance(dataset, th.utils.data.Dataset):\n raise ValueError(\n \"`data` must be a pytorch dataset or one of 'dev'/'valid'\"\n f\"/'test/'train', got {dataset.__class__.__name__} instead\"\n )\n # Dataloader\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n collate_fn=self.collate_fn if collate_fn is None else collate_fn,\n )\n y, y_hat, all_nlls = [], [], []\n for batch in data_loader:\n # Get model predictions\n with th.no_grad():\n nlls, _, predicted = self.nll(\n model,\n batch,\n reduction=\"none\",\n predict=True,\n )\n # Track predictions and reference\n y.append(batch[-1])\n y_hat.append(predicted)\n all_nlls.append(nlls)\n # Concatenate\n y = th.cat(y, dim=0).cpu()\n y_hat = th.cat(y_hat, dim=0).cpu()\n all_nlls = th.cat(all_nlls, dim=0).cpu()\n # Map predictions to labels (this is useful for single\n # head model evaluated on multiple tasks)\n if label_map:\n y_hat = th.tensor([label_map(y_hat_i.item()) for y_hat_i in y_hat])\n # Task specific score\n if by_example:\n score = (y == y_hat).float()\n else:\n score = self.score(y_hat, y)\n nlls = nlls.mean()\n # Reset model to the original mode\n model.train(mode=mode)\n\n result = score\n if nll:\n result = (score, all_nlls)\n return result", "def freeze_keras_model(model):\n model.trainable = True\n for layer in model.layers[::-1]:\n if \"input_calibration\" not in layer.name:\n layer.trainable = False # freeze this layer", "def attach_model(self, model: nn.Module) -> None:\n\n assert isinstance(model, 
nn.Module)\n\n # Move model to correct device\n model.to(self._device)\n\n # Attach model\n self._model = model", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n 
val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. 
Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def parallelWrapper(target_file):\n # for readability, specifying the globals used\n global model\n global all_files\n global train_file_count\n \n # open the training files\n train_file_names = genTrainingSet(all_files, target_file,\n train_size = train_file_count)\n \n # return the results of running our single driver through the model\n return singleDriverTrainer(target_file, train_file_names, model1, model2,\n weight_1 = 0.7875,\n file_subset_size=num_samples_per_training_file)", "def _freeze_base_model(self, modality, freeze_mode):\n\n if freeze_mode == \"all\":\n print(\"Freezing the Base model.\")\n for param in getattr(self, \"Base_{}\".format(modality)).parameters():\n param.requires_grad = False\n elif freeze_mode == \"partialbn\" and self.base_model_name == \"bninception\":\n print(\n \"Freezing the batchnorms of Base Model {} except first or new layers.\".format(\n modality\n )\n )\n for mod_no, mod in enumerate(\n getattr(self, \"Base_{}\".format(modality)).children()\n ):\n if isinstance(mod, torch.nn.BatchNorm2d):\n if (modality == \"Audio\" and mod_no > 6) or mod_no > 1:\n mod.weight.requires_grad = False\n mod.bias.requires_grad = False", "def execute(\n self,\n model: Module,\n input_tfms: MultiStageTransformation = None,\n metric_drop_ths: float = None,\n quantization_type: QuantizationType = None,\n input_data: DataManager = None,\n **kwargs,\n ):\n\n if quantization_type not in self.supported_ops[self.device.type.value]:\n self.compiled_model = None\n return\n\n if quantization_type is QuantizationType.STATIC and 
input_data is None:\n raise ValueError(\"Input data is required for static quantization.\")\n\n self.logger.info(\n f\"Optimizing with {self.__class__.__name__} and \"\n f\"q_type: {quantization_type}.\"\n )\n\n check_quantization(quantization_type, metric_drop_ths)\n train_input_data = input_data.get_split(\"train\")\n\n self.model_orig = model\n\n if quantization_type is not None:\n quantized_model = self._quantize_model(\n model, quantization_type, input_tfms, train_input_data\n )\n self.compiled_model = self._compile_model(quantized_model)", "def config_training_instance(self):\n # Compute the average of the gradients main_train_device\n tower_grads = []\n\n # Distribute the model onto available GPUs\n for i in range(self.num_gpus):\n with tf.device(\"/gpu:{}\".format(i)):\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate)\n batch_size_instance = self.batch_size // self.num_gpus\n\n # Split data between GPUs\n inputs_instance = self.inputs[i * batch_size_instance:(i + 1) *\n batch_size_instance]\n labels_instance = self.labels[i * batch_size_instance:(i + 1) *\n batch_size_instance]\n\n logits = self.model(inputs_instance)\n trainable_variables = self.model.trainable_variables\n model_loss = self.construct_loss(logits=logits,\n labels=labels_instance)\n network_regularizers, regularizer_loss, exporter, costs = self.embed_morphnet(\n input_boundary=[inputs_instance.op],\n output_boundary=[logits.op],\n morphnet_regularization_strength=self.\n morphnet_regularization_strength_placeholder,\n morphnet_cost_thresholds=self.morphnet_target_cost_thresholds)\n total_loss = model_loss + regularizer_loss\n\n grads = optimizer.compute_gradients(\n total_loss, var_list=trainable_variables)\n tower_grads.append(grads)\n\n # Usually we would use the first GPU\n if i == 0:\n # Evaluate model (with test logits, for dropout to be disabled)\n self.logits_train_instance = logits\n self.model_loss_train_instance = model_loss\n self.probs_train_instance = tf.nn.softmax(logits)\n self.correct_pred_train_instance = tf.equal(\n tf.argmax(logits, 1), tf.argmax(labels_instance, 1))\n self.accuracy_train_instance = tf.reduce_mean(\n tf.cast(self.correct_pred_train_instance, tf.float32))\n\n self.network_regularizer_train_instance = network_regularizers\n self.regularizer_loss_train_instance = regularizer_loss\n self.total_loss_train_instance = total_loss\n self.exporter_train_instance = exporter\n self.cost_train_instance = costs\n\n # Compute the average of the gradients main_train_device\n with tf.device(self.main_train_device):\n grads = self.average_gradients(tower_grads)\n self.train_op = optimizer.apply_gradients(grads, global_step=None)", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train(model, data_loader, optimizer, epoch, train_mloss, train_rloss, train_acc, learning_rate, lr_wr, output_tensor):\r\n print('===> Training mode')\r\n\r\n num_batches = len(data_loader) # iteration per epoch. 
e.g: 469\r\n total_step = args.epochs * num_batches\r\n epoch_tot_acc = 0\r\n\r\n # Switch to train mode\r\n model.train()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n start_time = timer()\r\n\r\n for batch_idx, (data, target) in enumerate(tqdm(data_loader, unit='batch')):\r\n batch_size = data.size(0)\r\n global_step = batch_idx + (epoch * num_batches) - num_batches\r\n\r\n labels = target\r\n target_one_hot = utils.one_hot_encode(target, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n data, target = Variable(data), Variable(target_one_hot)\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n labels = labels.to(args.device)\r\n\r\n # Train step - forward, backward and optimize\r\n optimizer.zero_grad()\r\n #utils.exponential_decay_LRR(optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n # learning rate policies\r\n if args.find_lr:\r\n utils.find_lr(optimizer, global_step)\r\n\r\n elif args.exp_decay_lr:\r\n utils.exponential_decay_LRR(\r\n optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n\r\n elif args.one_cycle_policy:\r\n utils.one_cycle_policy(optimizer, args.lr, global_step, total_step)\r\n\r\n elif args.warm_restarts:\r\n # lr_wr.update_lr(optimizer, num_batches)\r\n lr_wr.update_lr(optimizer)\r\n\r\n output, reconstruction = model(data, labels, True)\r\n # utils.write_tensor(output, output_tensor)\r\n loss, margin_loss, recon_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n for param_group in optimizer.param_groups:\r\n lr_temp = param_group['lr']\r\n learning_rate.write('%.10f \\n' % lr_temp)\r\n\r\n # Calculate accuracy for each step and average accuracy for each epoch\r\n acc = utils.accuracy(output, labels, args.cuda)\r\n epoch_tot_acc += acc\r\n epoch_avg_acc = epoch_tot_acc / (batch_idx + 1)\r\n\r\n train_mloss.write('%.6f \\n' % margin_loss)\r\n train_rloss.write('%.6f \\n' % recon_loss)\r\n train_acc.write('%.6f \\n' % acc)\r\n\r\n # Print losses\r\n if batch_idx % args.log_interval == 0:\r\n template = 'Epoch {}/{}, ' \\\r\n 'Step {}/{}: ' \\\r\n '[Total loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f},' \\\r\n '\\tBatch accuracy: {:.6f},' \\\r\n '\\tAccuracy: {:.6f}]'\r\n tqdm.write(template.format(\r\n epoch,\r\n args.epochs,\r\n global_step,\r\n total_step,\r\n loss.data.item(),\r\n margin_loss.data.item(),\r\n recon_loss.data.item() if args.use_reconstruction_loss else 0,\r\n acc,\r\n epoch_avg_acc))\r\n\r\n # Print time elapsed for an epoch\r\n end_time = timer()\r\n\r\n global avg_training_time_per_epoch\r\n\r\n avg_training_time_per_epoch = (avg_training_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n print('Time elapsed for epoch {}: {:.0f}s.'.format(epoch, end_time - start_time))", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n pass", "def update_model(train_dir, image_size = 224, batch_size = 8, epochs = 2):\n \n # Create a data generator and specify\n # the parameters for augmentation\n train_datagen 
= ImageDataGenerator(\n rescale=1./255,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n \n # create an iterator for data generator\n # and autment the images\n \n train_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=(image_size, image_size),\n batch_size= batch_size,\n class_mode='categorical')\n \n #load pretrained model\n model = models.load_model('vgg16_finetuned.h5')\n \n # Compile the pretrained model in order to update its weight\n model.compile(loss='categorical_crossentropy',\n optimizer = optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n \n # use keras checkpoint to update the model weight\n file_path = 'vgg16_finetuned.h5'\n checkpoint = ModelCheckpoint(file_path)\n callbacks_list = [checkpoint]\n \n # Train the model to update model weight\n history = model.fit_generator(\n train_generator,\n steps_per_epoch = train_generator.samples/train_generator.batch_size,\n epochs = epochs,\n callbacks = callbacks_list)", "def run_pytorch_model(model_name, create_model, X_train, y_train, cohorts_train,\n X_val, y_val, cohorts_val,\n X_test, y_test, cohorts_test,\n all_tasks, FLAGS, samp_weights):\n model_fname_parts = get_model_fname_parts(model_name, FLAGS)\n if FLAGS.viz_time:\n model_path = '{}/logs'.format(FLAGS.result_dir) + \\\n '/models/' + \"_\".join(model_fname_parts) + \\\n FLAGS.result_suffix + '.m' # secondary mark for future change\n model = torch.load(model_path)\n\n # save output\n save_output(model, model_name, X_val, y_val, cohorts_val, all_tasks, \"output\", FLAGS)\n return\n \n if FLAGS.test_time:\n model_path = '{}/logs'.format(FLAGS.result_dir) + \\\n '/models/' + \"_\".join(model_fname_parts) + \\\n FLAGS.result_suffix + '.m' # secondary mark for future change\n model = torch.load(model_path)\n\n # print('testing on validation set')\n # cohort_aucs = evaluation(model, model_name, X_val, y_val, cohorts_val, all_tasks, FLAGS)\n # save_cohort_aucs(cohort_aucs, model_name, 'val_auc_on', FLAGS)\n\n # test\n print('testing on test set')\n cohort_aucs = evaluation(model, model_name, X_test, y_test, cohorts_test, all_tasks, FLAGS)\n save_cohort_aucs(cohort_aucs, model_name, 'test_auc_on', FLAGS)\n return\n\n batch_size = 100\n if 'mtl' in model_name:\n criterion = mtl_loss\n train_loader = create_mtl_loader(X_train, y_train, cohorts_train,\n samp_weights=samp_weights,\n batch_size=batch_size, shuffle=True)\n # no samp_weights for val; samp_weights is only for train\n val_loader = create_mtl_loader(X_val, y_val, cohorts_val,\n batch_size=batch_size, shuffle=False)\n else:\n criterion = sample_weighted_bce_loss\n train_loader = create_loader(X_train, y_train, samp_weights=samp_weights,\n batch_size=batch_size, shuffle=True)\n # no samp_weights for val; samp_weights is only for train\n val_loader = create_loader(X_val, y_val, batch_size=batch_size, shuffle=False)\n\n # secondary mark\n if FLAGS.global_model_fn is None:\n global_model_dir = '{}/logs'.format(FLAGS.result_dir) + \\\n '/checkpoints/global_pytorch_' + \\\n \"_\".join(model_fname_parts[1:]) + \\\n FLAGS.result_suffix\n global_model_fn = '{}/logs'.format(FLAGS.result_dir) + \\\n '/models/global_pytorch_' +\\\n \"_\".join(model_fname_parts[1:]) + \\\n FLAGS.result_suffix + \".m\",\n else:\n global_model_dir = '{}/logs'.format(FLAGS.result_dir) + \\\n '/checkpoints/' + \\\n FLAGS.global_model_fn[:-2] # drop \".m\"\n global_model_fn = '{}/logs'.format(FLAGS.result_dir) + \\\n '/models/' +\\\n FLAGS.global_model_fn\n \n 
model_args = {\n 'n_layers': FLAGS.num_lstm_layers,\n 'units': FLAGS.lstm_layer_size,\n 'num_dense_shared_layers': FLAGS.num_dense_shared_layers,\n 'dense_shared_layer_size': FLAGS.dense_shared_layer_size,\n 'input_dim': X_train[0][0].shape[1], # (bs, T, d); note X_train is a dataset\n 'output_dim': 1,\n 'n_multi_layers': FLAGS.num_multi_layers, # mtl layers\n 'multi_units': FLAGS.multi_layer_size,\n 'tasks': all_tasks,\n 'global_model_dir': global_model_dir,\n 'global_model_fn': global_model_fn,\n 'X_val': X_val,\n 'y_val': y_val,\n 'cohorts_val': cohorts_val,\n 'FLAGS': FLAGS,\n }\n\n model = create_model(model_args)\n model = model.cuda()\n\n # secondary mark for change later\n model_dir = '{}/logs'.format(FLAGS.result_dir) + \\\n '/checkpoints/' + \"_\".join(model_fname_parts) + FLAGS.result_suffix\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n \n optimizer = torch.optim.Adam(model.parameters(), lr=FLAGS.lr, weight_decay=FLAGS.wd)\n get_c = partial(get_criterion, criterion=criterion)\n model, train_log = train(model, train_loader, criterion, optimizer, FLAGS.epochs,\n savename = model_dir,\n val_loader = val_loader,\n es_named_criterion = ('loss', get_c, True),\n verbose=True)\n\n joblib.dump(train_log, '{}/log'.format(model_dir))\n # secondary mark for change\n torch.save(model, '{}/logs'.format(FLAGS.result_dir) + '/models/' +\n \"_\".join(model_fname_parts) + FLAGS.result_suffix + '.m')\n\n ############### evaluation ###########\n # test\n print('testing on test set')\n cohort_aucs = evaluation(model, model_name, X_test, y_test, cohorts_test, all_tasks, FLAGS)\n save_cohort_aucs(cohort_aucs, model_name, 'test_auc_on', FLAGS)\n\n # validation\n print('testing on validation set')\n cohort_aucs = evaluation(model, model_name, X_val, y_val, cohorts_val, all_tasks, FLAGS)\n save_cohort_aucs(cohort_aucs, model_name, 'val_auc_on', FLAGS)\n \n print('Saved {} results.'.format(model_name))", "def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y", "def compute_models_parallel(data, varying_parameters=None, constant_parameters=None, n_max_processes=None):\n mp_models = MultiprocModelsRunner(MultiprocModelsWorkerLDA, data, varying_parameters, constant_parameters,\n n_max_processes=n_max_processes)\n\n return mp_models.run()", "def update_model(self):\n _start0 = time()\n model_name = self.agents[0].sync_model() # fixme: async alg dummy\n self.ag_stats.wait_model_time = time() - _start0\n\n # fixme: unify model type\n # 1) don't restore model before data 
meet minimum data set. likes qmix.\n # 2) don't restore with special policy, likes IMPALA.\n if model_name:\n _start1 = time()\n self.restore(model_name)\n self.ag_stats.restore_model_time = time() - _start1\n return type(model_name)", "def keras_multitask(self, args):\n start_time = time.time()\n\n # if self.args.log_metrics:\n # utils.wandb_init_logs(self.config[\"multitask_trainer\"])\n\n embedding_type = self.config[\"multitask_trainer\"][\"embedding_type\"]\n max_len = int(self.config[\"multitask_trainer\"][\"max_len\"])\n\n reader = SciciteReader(self.config[\"preprocessor\"])\n print(\"Loading data...\")\n text, labels, sections, worthiness = reader.load_data(\n _type=\"train\", multitask=True\n )\n text_dev, labels_dev, _, _ = reader.load_data(_type=\"dev\", multitask=False)\n text_test, labels_test, _, _ = reader.load_data(_type=\"test\", multitask=False)\n\n keras_model = MultitaskLearner(self.config)\n\n if embedding_type == \"bert\" or embedding_type == \"albert\":\n input_ids, input_masks, input_segments = keras_model.prepare_input_data(\n text\n )\n (\n dev_input_ids,\n dev_input_masks,\n dev_input_segments,\n ) = keras_model.prepare_input_data(text_dev)\n (\n test_input_ids,\n test_input_masks,\n test_input_segments,\n ) = keras_model.prepare_input_data(text_test)\n\n print(\"Preparing data...\")\n text_tensor, text_tokenizer = keras_model.prepare_data(text, max_len=max_len)\n labels_tensor, labels_tokenizer = keras_model.prepare_data(labels)\n sections_tensor, sections_tokenizer = keras_model.prepare_data(sections)\n worthiness_tensor, worthiness_tokenizer = keras_model.prepare_data(worthiness)\n\n text_tensor_dev = keras_model.prepare_dev_data(\n text_dev, text_tokenizer, max_len=max_len\n )\n labels_tensor_dev = keras_model.prepare_dev_data(labels_dev, labels_tokenizer)\n text_tensor_test = keras_model.prepare_dev_data(\n text_test, text_tokenizer, max_len=max_len\n )\n labels_tensor_test = keras_model.prepare_dev_data(labels_test, labels_tokenizer)\n\n print(\"Creating datasets...\")\n if embedding_type == \"lstm\":\n dataset = keras_model.create_dataset(\n text=text_tensor,\n labels=labels_tensor,\n sections=sections_tensor,\n worthiness=worthiness_tensor,\n ids=None,\n mask=None,\n segments=None,\n )\n dev_dataset = keras_model.create_dev_dataset(\n text=text_tensor_dev,\n ids=None,\n mask=None,\n segments=None,\n labels=labels_tensor_dev,\n )\n test_dataset = keras_model.create_dev_dataset(\n text=text_tensor_test,\n ids=None,\n mask=None,\n segments=None,\n labels=labels_tensor_test,\n )\n elif embedding_type == \"bert\" or embedding_type == \"albert\":\n dataset = keras_model.create_dataset(\n text=None,\n labels=labels_tensor,\n sections=sections_tensor,\n worthiness=worthiness_tensor,\n ids=input_ids,\n mask=input_masks,\n segments=input_segments,\n )\n dev_dataset = keras_model.create_dev_dataset(\n text=None,\n ids=dev_input_ids,\n mask=dev_input_masks,\n segments=dev_input_segments,\n labels=labels_tensor_dev,\n )\n test_dataset = keras_model.create_dev_dataset(\n text=None,\n ids=test_input_ids,\n mask=test_input_masks,\n segments=test_input_segments,\n labels=labels_tensor_test,\n )\n\n vocab_size = len(text_tokenizer.word_index.keys()) + 1\n labels_size = len(labels_tokenizer.word_index.keys())\n section_size = len(sections_tokenizer.word_index.keys())\n worthiness_size = len(worthiness_tokenizer.word_index.keys())\n\n print(\"Creating model...\")\n keras_model.create_model(vocab_size, labels_size, section_size, worthiness_size)\n print(\"Fitting 
model...\")\n keras_model.fit_model(dataset, dev_dataset)\n\n print(\"Saving model...\")\n keras_model.save_model()\n\n print(\"Evaluating...\")\n keras_model.eval(test_dataset, save_output=True)\n keras_model.eval(test_dataset, save_output=False)\n\n end_time = time.time()\n total_time = end_time - start_time\n print(\"Execution time:\", str(datetime.timedelta(seconds=total_time)))", "def test_torch_prepare_model(ray_start_4_cpus_2_gpus):\n\n def train_fn():\n model = torch.nn.Linear(1, 1)\n\n # Wrap in DDP.\n model = train.torch.prepare_model(model)\n\n # Make sure model is wrapped in DDP.\n assert isinstance(model, DistributedDataParallel)\n\n # Make sure model is on cuda.\n assert next(model.parameters()).is_cuda\n\n trainer = Trainer(\"torch\", num_workers=2, use_gpu=True)\n trainer.start()\n trainer.run(train_fn)\n trainer.shutdown()", "def main(model_arch: str, images: List, batch_size: int,\n batches_per_step: int, loop: bool, num_iterations: int, num_ipus: int, mode: str, data: str,\n available_memory_proportion: float, gen_report: bool, save_graph_pb: bool, use_ipu_model: bool) -> None:\n\n if (available_memory_proportion <= 0.05) or (available_memory_proportion > 1):\n raise ValueError('Invalid \"availableMemoryProportion\" value: must be a float >=0.05'\n ' and <=1 (default value is 0.6)')\n\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --log_cycle_count=0\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--log_cycle_count=0\"\n\n if data == \"synthetic\":\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"\"\n\n if use_ipu_model:\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_ipu_model\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_ipu_model\"\n\n # Select model architecture\n model_cls = model_dict[model_arch]\n if model_arch == 'googlenet':\n model_arch = 'inceptionv1'\n config = Path(f'configs/{model_arch}.yml')\n\n # Create graph and data iterator\n loop_op, infeed_initializer, outfeed_op = construct_graph(model_cls, config,\n f\"./checkpoints/{model_arch}/\",\n batch_size, batches_per_step,\n images, loop,\n model_cls.preprocess_method(), num_ipus,\n mode, save_graph_pb)\n # Run on model or device\n if gen_report:\n get_report(loop_op, infeed_initializer, outfeed_op, f\"{config.stem}_report.txt\",\n available_memory_proportion=available_memory_proportion)\n else:\n ground_truth = tuple([Path(filename).stem for filename in images])\n run_inference(loop_op, infeed_initializer, outfeed_op, batch_size, batches_per_step, config.stem,\n model_cls.decode_method(), ground_truth, num_iterations, num_ipus, mode, data,\n available_memory_proportion=available_memory_proportion)", "def prepare_model_(model, *data, device='cpu'):\n _auto_name('', model)\n set_default_parent(model)\n def _prep_data(d):\n if isinstance(d, (np.ndarray, torch.Tensor)):\n return torch.as_tensor(d).to(device)\n elif isinstance(d, (list, tuple)):\n if all(isinstance(x, int) for x in d):\n return torch.randn(*d, device=device)\n return [_prep_data(x) for x in d]\n elif isinstance(d, dict):\n return {k:_prep_data(v) for k, v in d.items()}\n with torch.no_grad():\n is_training = model.training\n data = [_prep_data(d) for d in data]\n model.eval()\n model.to(device)\n model(*data)\n 
model.train(is_training)\n return model", "def do_train_job(self):\n # get the initial tensor dict\n # initial_tensor_dict = self.wrapped_model.get_tensor_dict()\n\n # get the training data size\n data_size = self.wrapped_model.get_training_data_size()\n\n # train the model\n # FIXME: model header \"version\" needs to be changed to \"rounds_trained\"\n # FIXME: We assume the models allow training on partial batches.\n # FIXME: Currently, num_batches_per_round overrides epochs per round. Is this the correct behavior?\n if self.num_batches_per_round is not None:\n num_batches = self.num_batches_per_round\n else:\n batches_per_epoch = int(np.ceil(data_size/self.wrapped_model.data.batch_size))\n num_batches = int(np.floor(batches_per_epoch * self.epochs_per_round))\n loss = self.wrapped_model.train_batches(num_batches=num_batches)\n self.logger.debug(\"{} Completed the training job for {} batches.\".format(self, num_batches))\n\n # get the trained tensor dict and store any designated to be held out from aggregation\n shared_tensors = self._remove_and_save_holdout_tensors(self.wrapped_model.get_tensor_dict(with_opt_vars=self._with_opt_vars()))\n\n # create the model proto\n if self.send_model_deltas:\n deltas = self.create_deltas(tensor_dict=shared_tensors)\n model_proto = construct_proto(tensor_dict=deltas[\"tensor_dict\"],\n model_id=self.model_header.id,\n model_version=self.model_header.version,\n compression_pipeline=self.compression_pipeline,\n is_delta=True,\n delta_from_version=deltas[\"delta_from_version\"])\n else:\n model_proto = construct_proto(tensor_dict=shared_tensors,\n model_id=self.model_header.id,\n model_version=self.model_header.version,\n compression_pipeline=self.compression_pipeline,\n is_delta=False,\n delta_from_version=-1)\n\n self.logger.debug(\"{} - Sending the model to the aggregator.\".format(self))\n\n reply = self.channel.UploadLocalModelUpdate(LocalModelUpdate(header=self.create_message_header(), model=model_proto, data_size=data_size, loss=loss))\n self.validate_header(reply)\n check_type(reply, LocalModelUpdateAck, self.logger)\n self.logger.info(\"{} - Model update succesfully sent to aggregator\".format(self))", "def train(config, model, train_iterator, criterion, optimizer, scheduler=None):\n if isinstance(model, collections.Iterable) or isinstance(\n optimizer, collections.Iterable) or isinstance(\n scheduler, collections.Iterable):\n raise ValueError(\n \"Need to provide custom training function if using multi-model \"\n \"or multi-scheduler or multi-optimizer training.\")\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n timers = {k: TimerStat() for k in [\"h2d\", \"fwd\", \"grad\", \"apply\"]}\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n for batch_idx, (features, target) in enumerate(train_iterator):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Create non_blocking tensors for distributed training\n with timers[\"h2d\"]:\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n with timers[\"fwd\"]:\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n with timers[\"grad\"]:\n # compute gradients in a backward pass\n optimizer.zero_grad()\n\n if config.get(USE_FP16):\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n with 
timers[\"apply\"]:\n # Call step of optimizer to update model params\n optimizer.step()\n\n if scheduler and config.get(SCHEDULER_STEP) == SCHEDULER_STEP_BATCH:\n scheduler.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if config.get(TEST_MODE) and batch_idx == 0:\n break\n\n if scheduler and config.get(SCHEDULER_STEP) == SCHEDULER_STEP_EPOCH:\n scheduler.step()\n\n stats = {\n \"batch_time\": batch_time.avg,\n BATCH_COUNT: batch_idx + 1,\n \"train_loss\": losses.avg,\n \"data_time\": data_time.avg,\n }\n stats.update({k: t.mean for k, t in timers.items()})\n return stats", "def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):\n\n # Build embeddings.\n if model_opt.model_type == \"text\":\n src_field = fields[\"src\"]\n src_emb = build_embeddings(model_opt, src_field)\n else:\n src_emb = None\n\n # Build encoder.\n encoder = build_encoder(model_opt, src_emb)\n\n # Build decoder.\n tgt_field = fields[\"tgt\"]\n tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src/tgt vocab should be the same if `-share_vocab` is specified.\n assert src_field.base_field.vocab == tgt_field.base_field.vocab, \\\n \"preprocess with -share_vocab if you use share_embeddings\"\n\n tgt_emb.word_lut.weight = src_emb.word_lut.weight\n\n if model_opt.share_position_embeddings:\n tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight\n\n decoder = build_decoder(model_opt, tgt_emb)\n\n # Build NMTModel(= encoder + decoder).\n if gpu and gpu_id is not None:\n device = torch.device(\"cuda\", gpu_id)\n elif gpu and not gpu_id:\n device = torch.device(\"cuda\")\n elif not gpu:\n device = torch.device(\"cpu\")\n\n # Build separate LM if doing simple fusion\n if model_opt.simple_fusion:\n layers = 12\n size = 768\n heads = 12\n\n lm_decoder_opt = copy.deepcopy(model_opt)\n lm_decoder_opt.dec_layers = layers\n lm_decoder_opt.use_GPT_version_ctxattn = False\n lm_decoder_opt.use_GPT_version_psa = False\n lm_decoder_opt.use_GPT_version_unconditional = True\n lm_decoder_opt.tgt_word_vec_size = size\n lm_decoder_opt.rnn_size = size\n lm_decoder_opt.dec_rnn_size = size\n lm_decoder_opt.transformer_ff = size*4\n lm_decoder_opt.dec_heads = heads\n lm_decoder_opt.position_encoding_learned_dec = True\n lm_decoder_opt.share_decoder_embeddings = True\n lm_decoder_opt.dropout = 0\n\n lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)\n logger.info(lm_decoder_emb)\n\n lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)\n load_decoder = lm_decoder\n\n model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)\n\n generator = SimpleFusionGenerator(model_opt.dec_rnn_size,\n lm_decoder_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab))\n generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight\n\n if model_opt.share_decoder_embeddings:\n generator.decoder_linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.lm_linear\n else:\n load_decoder = decoder\n if model_opt.unconditional:\n model = onmt.models.UncondModel(decoder)\n else:\n model = onmt.models.NMTModel(encoder, decoder)\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n\n if model_opt.padded_vocab_fix_me_later:\n gen_func = 
nn.Sequential(PadGen(), gen_func)\n\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab)),\n Cast(torch.float32),\n gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n gen_linear = generator[0]\n else:\n tgt_base_field = fields[\"tgt\"].base_field\n vocab_size = len(tgt_base_field.vocab)\n pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]\n generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)\n if model_opt.share_decoder_embeddings:\n generator.linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.linear\n\n if model_opt.encdec_share_params:\n for name, p in decoder.named_parameters():\n if 'ctx' in name or 'context' in name:\n continue\n pointer = encoder\n attrs = name.split('.')\n for attr_name in attrs[:-1]:\n pointer = getattr(pointer, attr_name)\n\n # pointer now has the encoder version of the parameter parent\n setattr(pointer, attrs[-1], p)\n\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n # Normally, just load the model parameters from checkpoint\n if 'gpt2_params' not in checkpoint and 'enc_model' not in checkpoint:\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.b_2',\n r'\\1.layer_norm\\2.bias', s)\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.a_2',\n r'\\1.layer_norm\\2.weight', s)\n return s\n \n checkpoint['model'] = {fix_key(k): v\n for k, v in checkpoint['model'].items()}\n # end of patch for backward compatibility\n\n # Initialize rest of parameters normally\n if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n\n\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'], strict=False)\n else:\n # load the gpt parameters\n if 'gpt2_params' in checkpoint:\n init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or model_opt.GPT_representation_mode != 'none'\n \n if init_something:\n # Initialize all the weights first\n if model_opt.gpt2_init_zero:\n for p in decoder.parameters():\n p.data.zero_()\n if model_opt.simple_fusion:\n generator.decoder_linear.weight.data.zero_()\n generator.decoder_linear.bias.data.zero_()\n else:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n if encoder is not None:\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n if model_opt.zero_bias_init:\n gen_linear.bias.data.zero_()\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n gen_linear.bias.data.zero_()\n\n load_models = []\n if model_opt.GPT_representation_mode != 'none':\n load_embs = []\n if model_opt.GPT_representation_loc in ['both', 'src']:\n load_models.append(src_emb.gpt_model)\n load_embs.append(src_emb)\n if 
model_opt.GPT_representation_loc in ['both', 'tgt']:\n load_models.append(tgt_emb.gpt_model)\n load_embs.append(tgt_emb)\n \n else:\n if model_opt.gpt2_init_embanddec or model_opt.simple_fusion:\n load_models = [load_decoder]\n elif model_opt.gpt2_init_embandenc:\n load_models = [encoder]\n \n it_list = list(checkpoint['gpt2_params'])\n for lm_idx, load_model in enumerate(load_models):\n #print(lm_idx, load_model)\n for name, array in it_list:\n name = name[12:] # skip \"transformer.\"\n name = name.split('.')\n\n assigned = False\n if name[0] == 'wpe':\n if model_opt.GPT_representation_mode != 'none':\n pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n else:\n pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n elif name[0] == 'wte':\n if model_opt.GPT_representation_mode != 'none':\n pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n else:\n pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n if not model_opt.nopretrain_decemb:\n pointer.append(gen_linear.weight)\n if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n elif name[0] == 'ln_f':\n if name[1] == 'weight':\n pointer = load_model.layer_norm.weight\n elif name[1] == 'bias':\n pointer = load_model.layer_norm.bias\n else:\n raise ValueError('I am missing something here!')\n\n elif name[0] == 'h':\n layer_num = name[1]\n pointer = getattr(load_model.transformer_layers, layer_num)\n if name[2] == 'attn':\n assigned = True\n pointer = pointer.self_attn\n full_data = torch.from_numpy(array)\n if name[3] == 'c_attn':\n end_size = full_data.shape[-1]//3\n assert full_data.shape[-1] % 3 == 0\n if name[4] == 'bias':\n if init_something:\n pointer.linear_query.bias.data = full_data[:end_size]\n pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n pointer.linear_values.bias.data = full_data[end_size*2:]\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.bias.orig = full_data[:end_size].clone()\n pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n else:\n raise ValueError('I am missing something here!')\n elif name[3] == 'c_proj':\n if name[4] == 'bias':\n if init_something:\n pointer.final_linear.bias.data = full_data\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.bias.orig = full_data.clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.final_linear.weight.data = full_data.t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n else:\n raise ValueError('I am missing something here!')\n\n elif name[2] == 'ln_1' or name[2] == 'ln_2':\n num = name[2][3]\n pointer = getattr(pointer, 'layer_norm_'+num)\n if name[2] == 'bias':\n pointer = pointer.bias\n elif name[2] == 'weight':\n pointer = 
pointer.weight\n else:\n raise ValueError('I am missing something here!')\n elif name[2] == 'mlp':\n pointer = pointer.feed_forward\n pointer = getattr(pointer, name[2])\n if name[3] == 'bias':\n pointer = pointer.bias\n elif name[3] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n \n if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n if name[-1] == 'weight':\n array = array.T\n\n if not isinstance(pointer, list):\n pointer = [pointer]\n for pointer_i in pointer:\n target_size = int(math.ceil(array.shape[0]/8))*8\n padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n try:\n assert pointer_i.shape == array.shape or padded_vocab\n except AssertionError as e:\n \n e.args += (pointer_i.shape, array.shape)\n raise\n if init_something:\n print(\"Initialize PyTorch weight {}\".format(name))\n if padded_vocab:\n pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n else:\n pointer_i.data = torch.from_numpy(array)\n if model_opt.gpt2_params_std > 0:\n if padded_vocab:\n raise NotImplementedError\n else:\n pointer_i.orig = torch.from_numpy(array).clone()\n # name = name[6:] # skip \"model/\"\n # name = name.split('/')\n\n # assigned = False\n # if name[0] == 'wpe':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n # else:\n # pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n # elif name[0] == 'wte':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n # else:\n # pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n # if not model_opt.nopretrain_decemb:\n # pointer.append(gen_linear.weight)\n # if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n # pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n # elif name[0] == 'ln_f':\n # if name[1] == 'g':\n # pointer = load_model.layer_norm.weight\n # elif name[1] == 'b':\n # pointer = load_model.layer_norm.bias\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[0][0] == 'h':\n # layer_num = name[0][1:]\n # pointer = getattr(load_model.transformer_layers, layer_num)\n # if name[1] == 'attn':\n # assigned = True\n # pointer = pointer.self_attn\n # full_data = torch.from_numpy(array)\n # if name[2] == 'c_attn':\n # end_size = full_data.shape[-1]//3\n # assert full_data.shape[-1] % 3 == 0\n # if name[3] == 'b':\n # if init_something:\n # pointer.linear_query.bias.data = full_data[:end_size]\n # pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n # pointer.linear_values.bias.data = full_data[end_size*2:]\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.bias.orig = full_data[:end_size].clone()\n # pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n # pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n # pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n # pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.weight.orig = 
full_data[:, :end_size].t().contiguous().clone()\n # pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n # pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[2] == 'c_proj':\n # if name[3] == 'b':\n # if init_something:\n # pointer.final_linear.bias.data = full_data\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.bias.orig = full_data.clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.final_linear.weight.data = full_data.t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[1] == 'ln_1' or name[1] == 'ln_2':\n # num = name[1][3]\n # pointer = getattr(pointer, 'layer_norm_'+num)\n # if name[2] == 'b':\n # pointer = pointer.bias\n # elif name[2] == 'g':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[1] == 'mlp':\n # pointer = pointer.feed_forward\n # pointer = getattr(pointer, name[2])\n # if name[3] == 'b':\n # pointer = pointer.bias\n # elif name[3] == 'w':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n \n # if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n # if name[-1] == 'w' or name[-1] == 'g':\n # array = array.T\n\n # if not isinstance(pointer, list):\n # pointer = [pointer]\n # for pointer_i in pointer:\n # target_size = int(math.ceil(array.shape[0]/8))*8\n # padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n # padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n # try:\n # assert pointer_i.shape == array.shape or padded_vocab\n # except AssertionError as e:\n \n # e.args += (pointer_i.shape, array.shape)\n # raise\n # if init_something:\n # print(\"Initialize PyTorch weight {}\".format(name))\n # if padded_vocab:\n # pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n # else:\n # pointer_i.data = torch.from_numpy(array)\n # if model_opt.gpt2_params_std > 0:\n # if padded_vocab:\n # raise NotImplementedError\n # else:\n # pointer_i.orig = torch.from_numpy(array).clone()\n if 'enc_model' in checkpoint:\n load_dict = {k[8:]: v for k, v in checkpoint['enc_model'] if 'encoder' in k}\n encoder.load_state_dict(load_dict, strict=True)\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') \\\n and model.encoder.embeddings is not None:\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec)\n\n # remove requires_grad from params that are not trained:\n if model_opt.notrain_emb or model_opt.notrain_embanddec:\n if model_opt.position_encoding_learned_enc and 
model_opt.share_position_embeddings:\n model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n if model_opt.share_embeddings:\n model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n generator[0].weight.requires_grad = False\n\n if model_opt.notrain_genbias:\n generator[0].bias.requires_grad = False\n\n if model_opt.notrain_embanddec:\n for name, p in load_decoder.layer_norm.named_parameters():\n p.requires_grad = False\n for name, p in load_decoder.transformer_layers.named_parameters():\n if 'context' not in name and 'ctx' not in name: # Takes care of normal and psa versions\n p.requires_grad = False\n \n if model_opt.onlytrainln:\n for name, p in model.decoder.named_parameters():\n if 'layer_norm' not in name:\n p.requires_grad = False\n for p in generator.parameters():\n p.requires_grad = False\n\n if model_opt.onlytrainoutp:\n if model_opt.share_decoder_embeddings:\n raise ValueError\n\n for p in model.decoder.parameters():\n p.requires_grad = False\n\n if model_opt.simple_fusion:\n for p in lm_decoder.parameters():\n p.requires_grad = False\n for p in generator.lm_linear.parameters():\n p.requires_grad = False\n\n model.generator = generator\n model.to(device)\n if model_opt.model_dtype == 'fp16':\n model.half()\n\n for p in model.parameters():\n if hasattr(p, 'orig'):\n p.orig = p.orig.to(device)\n if model_opt.model_dtype == 'fp16':\n p.orig = p.orig.half()\n\n return model", "def train_model_4(model, X_train, y_train, image_name):\n # Train the model\n model.fit(X_train, y_train)\n \n # Save the model\n model_file = os.path.join(OUTPUT_DIR,\n \"{}_model.joblib\".format(image_name))\n joblib.dump(model, model_file)\n \n return model, model" ]
[ "0.5936191", "0.56602675", "0.56224686", "0.5621566", "0.55595684", "0.54760945", "0.54551095", "0.54427326", "0.54231954", "0.54035556", "0.5396976", "0.53836715", "0.5365184", "0.53514045", "0.5305904", "0.5280321", "0.5267586", "0.5260385", "0.525179", "0.5249681", "0.51886994", "0.51437724", "0.5140816", "0.5139759", "0.5129505", "0.5104898", "0.51026154", "0.5098515", "0.50848323", "0.5082763", "0.50805205", "0.50545144", "0.5051393", "0.50468993", "0.5040596", "0.5036791", "0.5004153", "0.49996224", "0.49989206", "0.4954121", "0.49332497", "0.49283913", "0.48869812", "0.48860398", "0.48857573", "0.48800972", "0.48795557", "0.48710334", "0.4866137", "0.48609635", "0.4858296", "0.48539436", "0.4848432", "0.48478758", "0.48432678", "0.48405415", "0.48317084", "0.48316294", "0.4829575", "0.48273927", "0.48261818", "0.48224288", "0.48219284", "0.4799558", "0.47928345", "0.47913942", "0.47834387", "0.47799772", "0.47722653", "0.47678247", "0.4757102", "0.47549942", "0.47549942", "0.47549942", "0.475177", "0.47367325", "0.47366682", "0.473542", "0.4723429", "0.472212", "0.47195935", "0.47148797", "0.47042862", "0.4703259", "0.4702526", "0.46922937", "0.46754417", "0.46744716", "0.4674057", "0.46731102", "0.467095", "0.467093", "0.46656948", "0.46651128", "0.46563908", "0.46491802", "0.46449488", "0.4641726", "0.46398598", "0.46393114" ]
0.70964164
0
Creates a model (with temperature scaling) according to the config given.
def create_model(self) -> None: self._model = create_model_with_temperature_scaling(self.config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:\n # wrap the model around a temperature scaling model if required\n model = config.create_model()\n if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:\n model = ModelWithTemperature(model, config.temperature_scaling_config)\n return model", "def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def model_creator(config):\n return nn.Linear(1, 1)", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def model(self, **config_kwargs):\n measurement = self.get_measurement(**config_kwargs)\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.get('patches', [])\n\n modelspec = {\n 'channels': self.spec['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poiname=measurement['config']['poi'], **config_kwargs)", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n 
override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def run_model(config_file):\n config_file = os.path.join(os.getcwd(), config_file)\n result = Tethys(config_file=config_file)\n result.run_model()\n return result", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))", "def do_create_model(**kwargs):\n model_params = {\n 'name': kwargs['dag_run'].conf.get('model_name'),\n 'description': 'A custom DNN regressor model',\n 'regions': [REGION]\n }\n\n ti = 
kwargs['ti']\n\n is_model = ti.xcom_pull(key='is_project', task_ids='check_model')\n if not is_model:\n mle = MLEngineHook()\n mle.create_model(PROJECT, model_params)", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def config_task(self) -> None:\n if self.hparams[\"model\"] == \"resnet18\":\n self.model = models.resnet18(pretrained=True)\n in_features = self.model.fc.in_features\n self.model.fc = nn.Linear( # type: ignore[attr-defined]\n in_features, out_features=1\n )\n else:\n raise ValueError(f\"Model type '{self.hparams['model']}' is not valid.\")", "def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)", "def create_model(model_class, model_params=None, model_name='model'):\n\n model_params = {} if model_params is None else model_params\n\n model = model_class(**model_params)\n\n if special_parameters.load_model: # recover from checkpoint\n _load_model(model, model_name)\n\n # configure usage on GPU\n if use_gpu():\n model.to(first_device())\n model = torch.nn.DataParallel(model, device_ids=all_devices())\n\n # print info about devices\n print_info('Device(s)): ' + str(device_description()))\n\n return model", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def _random_model(self, input_size, output_size, task, config: dict) -> AbstractModel:\n return create_random_model(input_size, output_size, config, task)", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 
'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model", "def from_config(cls,config):\n ## find labels in list\n label_list = load_label_list(config.label_list)\n use_cuda = True if torch.cuda.is_available() else False\n\n global_args = {\n \"fp16\" : False,\n \"classification_report\" : True,\n \"tensorboard_dir\" : config.tensorboard_dir,\n \"wandb_project\" : config.wandb_project,\n \"wandb_kwargs\" : {\n \"name\" : config.wandb_name,\n \"entity\" : config.wandb_entity,\n }\n }\n\n model = NERModel(\n config.model_name,\n config.model_type,\n use_cuda=use_cuda,\n labels=label_list,\n args=global_args,\n )\n return cls(model,config)", "def template_model():\n model_type = 'continuous' # either 'discrete' or 
'continuous'\n model = do_mpc.model.Model(model_type)\n\n # Model variables:\n var1 = model.set_variable(var_type='_x', var_name='var1')\n var2 = model.set_variable(var_type='_x', var_name='var2')\n\n state = vertcat(var1,var2)\n state_dot = model.set_variable(var_type='_x', var_name='state_dot', shape=(2.1))\n\n input1 = model.set_variable(var_type='_u', var_name='input1')\n\n\n # Parameters:\n # define Parameters\n\n model.set_rhs('var1',state_dot[0])\n model.set_rhs('var2',state_dot[1])\n\n state_dot_rhs = vertcat(\n # rhs1,\n # rhs2)\n model.set_rhs('state_dot',state_dot_rhs)\n\n model.setup()\n\n return model", "def create(self, req, body):\n context = req.environ['meteos.context']\n\n if not self.is_valid_body(body, 'model'):\n raise exc.HTTPUnprocessableEntity()\n\n model = body['model']\n\n LOG.debug(\"Create model with request: %s\", model)\n\n try:\n experiment = self.engine_api.get_experiment(\n context, model['experiment_id'])\n utils.is_valid_status(experiment.__class__.__name__,\n experiment.status,\n constants.STATUS_AVAILABLE)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n except exception.InvalidStatus:\n raise\n\n display_name = model.get('display_name')\n display_description = model.get('display_description')\n experiment_id = model.get('experiment_id')\n source_dataset_url = model.get('source_dataset_url')\n dataset_format = model.get('dataset_format', 'csv')\n model_type = model.get('model_type')\n model_params = model.get('model_params')\n swift_tenant = model.get('swift_tenant')\n swift_username = model.get('swift_username')\n swift_password = model.get('swift_password')\n\n new_model = self.engine_api.create_model(context,\n display_name,\n display_description,\n source_dataset_url,\n dataset_format,\n model_type,\n model_params,\n template.id,\n template.job_template_id,\n experiment_id,\n experiment.cluster_id,\n swift_tenant,\n swift_username,\n swift_password)\n\n return self._view_builder.detail(req, new_model)", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def from_config(cls, *args, **kwargs):\n _config = args\n\n if isinstance(args, tuple): # multiple non-keyword arguments were provided\n if len(args) > 0:\n _config = args[0]\n\n else:\n _config = kwargs['config_path']\n kwargs.pop('config_path')\n\n local = False\n if 'make_new_path' in kwargs:\n local = True\n elif isinstance(_config, str) and os.path.isfile(_config):\n local = True\n elif isinstance(_config, dict) and \"category\" in _config:\n local = True\n\n if local:\n config = None\n config_path = None\n\n # we need to build ai4water's Model class\n if isinstance(_config, dict):\n config = _config\n else:\n config_path = _config\n return BaseModel._get_config_and_path(\n cls,\n config=config,\n config_path=config_path,\n **kwargs\n )\n\n # tf1.15 has from_config so call it\n return super().from_config(*args, **kwargs)", "def create_model(self):\n pass", "def create_model(self):\n pass", "def create_mean_teacher_model(self) -> None:\n self._mean_teacher_model = create_model_with_temperature_scaling(self.config)", "def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = 
helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)", "def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = 
kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def init_model(model):\n model(tf.random.uniform((1, 512, 512, 3)))", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def create_model(sess, FLAGS, mode):\n if FLAGS.model == \"vallina\":\n model = LinearModel(FLAGS, mode)\n model.build()\n else:\n pass\n # other model \n\n # create task file\n model_path = os.path.join(FLAGS.logdir, FLAGS.task_name)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n print (\"Save model to {}\".format(model_path))\n elif (FLAGS.reset):\n shutil.rmtree(model_path)\n os.makedirs(model_path)\n print (\"Remove existing model at {} and restart.\".format(model_path))\n else:\n raise ValueError(\"Fail to create the new model.\")\n\n # Save the current configurations\n config = dict(FLAGS.__flags.items())\n with open(\"/\".join([model_path, \"config.json\"]), \"w\") as file:\n json.dump(config, file)\n\n # initialize variables\n sess.run(tf.global_variables_initializer())\n\n return model", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = 
[tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", 
cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def get_model(model_name: str = \"\", cfg={}) -> torch.nn.Module:\n if model_name == \"default\":\n model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)\n\n elif model_name == \"resnetish34\":\n model = resnetish34()\n\n elif model_name == \"clstm\":\n model = CLSTM()\n\n elif model_name == \"cvt\":\n s1_depth, s2_depth, s3_depth = cfg.depths\n s1_emb_dim, s2_emb_dim, s3_emb_dim = cfg.embed_dims\n s1_mlp_mult, s2_mlp_mult, s3_mlp_mult = cfg.mlp_mults\n\n model = CvT(\n s1_emb_dim=s1_emb_dim,\n s1_depth=s1_depth,\n s1_mlp_mult=s1_mlp_mult,\n s2_emb_dim=s2_emb_dim,\n s2_depth=s2_depth,\n s2_mlp_mult=s2_mlp_mult,\n s3_emb_dim=s3_emb_dim,\n s3_depth=s3_depth,\n s3_mlp_mult=s3_mlp_mult,\n pool=cfg.cvt_pool,\n )\n else:\n raise ValueError(\"Model not found.\")\n return model", "def __init__(self, config):\n self.model = None\n self.config = config\n self.batch_size = config.get('batch_size')\n self.epochs = config.get('epochs')\n self.steps_per_epoch = config.get('steps_per_epoch')\n self.validation_steps = config.get('validation_steps')\n self.distributed = config.get('distributed', False)\n \n # init model\n self.init()", "def config_task(self) -> None:\n weights = self.hyperparams[\"weights\"]\n\n if self.hyperparams[\"model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['model']}' is not valid. \"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n\n class_weights = None\n if isinstance(self.class_weights, torch.Tensor):\n class_weights = self.class_weights.to(dtype=torch.float32)\n elif hasattr(self.class_weights, \"__array__\") or self.class_weights:\n class_weights = torch.tensor(self.class_weights, dtype=torch.float32)\n\n self.loss = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=class_weights\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. 
\"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n if self.hyperparams[\"model\"] != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hyperparams.get(\"freeze_backbone\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hyperparams.get(\"freeze_decoder\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False", "def getModel(config: configuration.Configuration) -> torch.nn.Module:\n if config.modelName == ModelName.DENSE:\n return DenseGenerator(1, 1, n_blocks=config.blockCount)\n elif config.modelName == ModelName.SHALLOW:\n return Shallow(1, 1, )\n elif config.modelName == ModelName.TIRAMISU:\n model = Tiramisu(1, 1, structure=(\n config.down, # Down blocks\n config.bottleneck, # bottleneck layers\n config.up, # Up blocks\n ), checkpoint=False)\n\n model.initialize_kernels(torch.nn.init.kaiming_uniform_, conv=True)\n return model\n else:\n return SimpleCNN()", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n\n baseEncoder = self.createEncoder(inputs)\n baseEncoder = Dropout(self.drop)(baseEncoder)\n\n # Instantiate encoder layers\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(baseEncoder)\n z_log_var = Q_z_log_var(baseEncoder)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n \n G_0 = Dense(8*self.kernel_init)(encoding)\n G_0 = Dropout(self.drop)(G_0)\n baseDecoder = self.createDecoder(G_0)\n\n self.model =Model(inputs, baseDecoder)\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n self.model.summary()\n print(\"Completed model setup.\")", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def create_scaling_model(params, experiments, reflections):\n autos = [None, Auto, \"auto\", \"Auto\"]\n use_auto_model = params.model in autos\n # Determine non-auto model to use outside the loop over datasets.\n if not use_auto_model:\n model_class = None\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == params.model:\n model_class = entry_point.load()\n break\n if not model_class:\n raise ValueError(f\"Unable to create scaling model of type {params.model}\")\n\n for expt, refl in zip(experiments, reflections):\n if not expt.scaling_model or params.overwrite_existing_models:\n # need to make a new model\n if use_auto_model:\n if not expt.scan:\n model = KBScalingModel\n else: # set model as physical unless scan < 1.0 degree\n osc_range = expt.scan.get_oscillation_range()\n abs_osc_range = 
abs(osc_range[1] - osc_range[0])\n if abs_osc_range < 1.0:\n model = KBScalingModel\n else:\n model = PhysicalScalingModel\n else:\n model = model_class\n expt.scaling_model = model.from_data(params, expt, refl)\n else:\n # allow for updating of an existing model.\n expt.scaling_model.update(params)\n return experiments", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ###### Temperature in Kelvin\n model.T_kelvin = model.zero_celsius + model.T_celsius*kelvin\n \n ##### Potentials\n # Resting potential (calculated with Goldman equation)\n model.V_res = (model.R*model.T_kelvin)/model.F * np.log((model.P_K*model.n_init**2*model.K_e + model.P_Na*model.h_init*model.m_init**3*model.Na_e)/\\\n (model.P_K*model.n_init**2*model.K_i + model.P_Na*model.h_init*model.m_init**3*model.Na_i))\n \n # Nerst potential for leakage current; leakage chanels were excluded but could be added by using: g_L*(E_L-(v-V_res)) \n model.E_L = (-1/model.g_L)*(model.P_Na*model.m_init**3*model.h_init*(model.V_res*model.F**2)/(model.R*model.T_kelvin) * \\\n (model.Na_e-model.Na_i*exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))) + \\\n model.P_K*model.n_init**2*(model.V_res*model.F**2)/(model.R*model.T_kelvin) *\\\n (model.K_e-model.K_i*np.exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))))\n \n \n ##### structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5)\n model.structure = np.array(list(np.tile([2,1],model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### Compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(model.structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # dendrite\n model.fiber_inner_diameter = 0.7* model.fiber_outer_diameter\n model.compartment_diameters[:] = model.fiber_inner_diameter\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacitivites\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # internodes\n model.c_m[np.where(model.structure == 1)] = 0*uF/cm**2\n # nodes\n model.c_m[np.where(model.structure == 2)] = model.c_m_layer\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2 \n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total 
surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Noise term\n model.P_Na_vector = np.zeros(model.nof_comps)*um/second\n model.P_Na_vector[model.structure == 2] = model.P_Na\n model.noise_term = np.sqrt(model.A_surface*model.P_Na_vector)\n \n ##### Compartments to plot\n model.comps_to_plot = range(1,model.nof_comps)\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.n = model.n_init\n neuron.h = model.h_init\n \n ##### Set parameter values of differential equations\n # conductances active compartments\n neuron.g_Na = model.g_Na\n neuron.g_K = model.g_K\n \n # conductances internodes\n neuron.g_Na[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n neuron.g_K[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.E_L = model.E_L\n neuron.g_L = model.g_L \n \n return neuron, model", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build(model_name):\n return pretrain.factory.create(model_name)", "def create_model(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window):\n model = smooth_tfactor(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window)\n opt = torch.optim.SGD(model.parameters(),lr=0.001)\n return model, opt", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def _initialize_model(self):\n max_value = self.data.max()\n\n if self.model_type == self._GAUSSIAN2D:\n model = models.Gaussian2D(\n x_mean=self.x, y_mean=self.y, x_stddev=1, y_stddev=1\n )\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.x_stddev.bounds = (0, self._box / 4)\n model.y_stddev.bounds = (0, self._box / 4)\n model.x_mean.bounds = (self.x - 5, self.x + 5)\n model.y_mean.bounds = (self.y - 5, self.y + 5)\n\n elif self.model_type == self._MOFFAT2D:\n model = models.Moffat2D()\n model.x_0 = self.x\n model.y_0 = self.y\n model.gamma = 2\n model.alpha = 2\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.alpha.bounds = (1, 6)\n model.gamma.bounds 
= (0, self._box / 4)\n model.x_0.bounds = (self.x - 5, self.x + 5)\n model.y_0.bounds = (self.y - 5, self.y + 5)\n\n model += models.Const2D(self.fit_sky())\n model.amplitude_1.fixed = True\n return model", "def create(model: TModel) -> ModelTransformer:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.graph.model_transformer import ONNXModelTransformer\n\n return ONNXModelTransformer(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.graph.model_transformer import OVModelTransformer\n\n return OVModelTransformer(model)\n if model_backend == BackendType.TORCH:\n from nncf.torch.model_transformer import PTModelTransformer\n\n return PTModelTransformer(model)\n raise RuntimeError(\n \"Cannot create backend-specific model transformer because {} is not supported!\".format(model_backend)\n )", "def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):\n config = copy.deepcopy(config) # ensure the reference model is not passed to tests\n\n save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))\n config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})\n\n trainer = Trainer(**config)\n trainer.fit()\n\n self.reference_model = trainer.state.model\n self.reference_folder = save_folder", "def create_model(config_obj: Union[ModelConfig, dict], random_seed: int = default_random_seed) -> BaseModel:\n if isinstance(config_obj, dict):\n config_obj = ModelConfig.from_dict(config_obj)\n model_type = get_from_registry(config_obj.model_type, model_type_registry)\n return model_type(config_obj, random_seed=random_seed)", "def __init__(self, model, settings):\n super().__init__(model, settings)\n self.model_part = self.model.CreateModelPart(self.settings[\"model_part_name\"].GetString())\n self.model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, self.settings[\"domain_size\"].GetInt())\n self.model_part.ProcessInfo.SetValue(KM.GRAVITY_Z, self.settings[\"gravity\"].GetDouble())\n self.EstimateDeltaTimeUtility = SW.EstimateTimeStepUtility(self.GetComputingModelPart(), self.settings[\"time_stepping\"])", "def make_environment(self):\n\t\tbase_layer = 0\n\t\tself.Gravity = 9.81\n\n\t\t#Private data for to define model\n\t\t__model_max_altitude = 87000\n\t\t__atmosphere_layers = {0:0, 11000:1, 20000:2, 32000:3, 47000:4, 51000:5, 71000:6}\n\t\t__layer_base_data = {\n\t\t\t0:{'temp':288.15, 'lapse':-0.0065, 'press':101325},\n\t\t\t1:{'temp':216.65, 'lapse':0, 'press':22632.1},\n\t\t\t2:{'temp':216.65, 'lapse':0.001, 'press':5474.89},\n\t\t\t3:{'temp':228.65, 'lapse':0.0028, 'press':868.019},\n\t\t\t4:{'temp':270.65, 'lapse':0, 'press':110.906},\n\t\t\t5:{'temp':270.65, 'lapse':-0.0028, 'press':66.9389},\n\t\t\t6:{'temp':214.65, 'lapse':-0.002, 'press':3.95642},\n\t\t\t}\n\t\t__gas_constant = 8.31432#e3\n\t\t__air_molar_mass = 0.0289644\n\t\t__specific_heat_ratio = 1.4\n\t\t__visc_lambda = 1.51204129e-6\n\t\t__visc_sutherland_const = 120.0\n\n\t\tif self.Altitude > __model_max_altitude:\n\t\t\traise helpers.extra_exceptions.ModelExtrapolationException(\n\t\t\t'Exceeded model maximum altitude')\n\n\t\tlayerKeys = __atmosphere_layers.keys()\n\t\tlayerKeys = list(layerKeys)\n\t\tlayerKeys.sort()\n\t\tfor layer in layerKeys:\n\t\t\tif self.Altitude >= layer:\n\t\t\t\tbase_layer = __atmosphere_layers[layer]\n\t\t\t\tbase_alt = layer\n\t\tbase_temp = __layer_base_data[base_layer]['temp']\n\t\tbase_lapse = __layer_base_data[base_layer]['lapse']\n\t\tbase_press 
= __layer_base_data[base_layer]['press']\n\n\t\tself.Temperature = base_temp + base_lapse * (self.Altitude - base_alt)\n\t\t+ self.Temperature_offset\n\n\t\tif base_lapse == 0:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\tnp.exp( (-self.Gravity*__air_molar_mass*(self.Altitude-base_alt)) \\\n\t\t\t\t/(__gas_constant*base_temp))\n\t\telse:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\t(base_temp/self.Temperature) ** \\\n\t\t\t\t(self.Gravity*__air_molar_mass/__gas_constant/base_lapse)\n\n\t\tself.Density = __air_molar_mass*self.Pressure / \\\n\t\t\t__gas_constant/self.Temperature\n\t\tself.Speed_of_sound = np.sqrt(__specific_heat_ratio*__gas_constant* \\\n\t\t\tself.Temperature/__air_molar_mass)\n\t\tself.Dynamic_viscosity = __visc_lambda*self.Temperature**(3.0/2.0)/ \\\n\t\t\t(self.Temperature+__visc_sutherland_const)", "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name = None\n for name in dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model", "def get_model_with_properties():\n \n m = ConcreteModel()\n\n # ------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------\n\n m.np = 25 # Number of possible tays\n m.c = 4 # Number of components\n m.lc = 1 # Light component\n m.hc = 4 # Heavy component\n\n #### Constant parameters\n m.Rgas = 8.314 # Ideal gas constant in J/mol K\n m.Tref = 298.15 # Reference temperature in K\n\n #### Product specifications\n m.xspec_lc = 0.99 # Final liquid composition for methanol (1)\n m.xspec_hc = 0.99 # Fnal liquid composition for butanol (4)\n m.xspec_inter2 = 0.99 # Final liquid composition for ethanol (2)\n m.xspec_inter3 = 0.99 # Final liquid composition for propanol (3)\n 
m.Ddes = 50 # Final flowrate in distillate in mol/s\n m.Bdes = 50 # Final flowrate in bottoms in mol/s\n m.Sdes = 50 # Final flowrate in side product streams in mol/s\n\n # #### Known initial values\n m.Fi = m.Ddes + m.Bdes + 2 * m.Sdes # Side feed flowrate in mol/s\n m.Vi = 400 # Initial value for vapor flowrate in mol/s\n m.Li = 400 # Initial value for liquid flowrate in mol/s\n\n m.Tf = 358 # Side feed temperature in K\n\n m.Preb = 1.2 # Reboiler pressure in bar\n m.Pbot = 1.12 # Bottom-most tray pressure in bar\n m.Ptop = 1.08 # Top-most tray pressure in bar\n m.Pcon = 1.05 # Condenser pressure in bar\n m.Pf = 1.02\n\n m.rr0 = 0.893 # Internal reflux ratio initial value\n m.bu0 = 0.871 # Internal reflux ratio initial value\n\n\n #### Scaling factors\n m.Hscale = 1e3 \n m.Qscale = 1e-3 \n\n \n #### Constants for the calculation of liquid heat capacity\n m.cpc = {} # Constant 1 for liquid heat capacity \n m.cpc2 = {} # Constant 2 for liquid heat capacity \n m.cpc[1] = m.Rgas \n m.cpc[2] = 1\n m.cpc2['A', 1] = 1 / 100\n m.cpc2['B', 1] = 1 / 1e4\n m.cpc2['A', 2] = 1\n m.cpc2['B', 2] = 1\n\n\n # ------------------------------------------------------------------\n # Physical Properties\n #\n # Notation:\n # MW ........................ molecular weight in g/gmol\n # TB ........................ boiling point temperature in K\n # TC ........................ critical temperature in K\n # PC ........................ critical pressure in bar\n # w ........................ acentric factor\n # lden ...................... liquid density g/m3,\n # dHvap ..................... heat of vaporization in J/mol.\n # vpA, vpB, vpC, and vpD .... vapor pressure constants\n # cpA, cpB, cpC, and cpD .... heat capacity constants J/mol:\n # 1 for liq and 2 for vapor phase\n #\n # Reference A: R.C. Reid, J.M. Prausnitz and B.E. 
Poling,\n # \"The Properties of gases and liquids\", 1987 and 2004 Eds.\n #\n # ------------------------------------------------------------------\n\n m.prop = {} # Properties of components:\n cpL = {} # Ruczika-D method for liquid heat capacity calculation\n # (Reference A, page 6.20)\n sumA = {}\n sumB = {}\n sumC = {}\n cpL['a', 'C(H3)(C)'] = 4.19845\n cpL['b', 'C(H3)(C)'] = -0.312709\n cpL['c', 'C(H3)(C)'] = 0.178609\n cpL['a', 'C(H2)(C2)'] = 2.7345\n cpL['b', 'C(H2)(C2)'] = 0.122732\n cpL['c', 'C(H2)(C2)'] = -0.123482\n cpL['a', 'C(H2)(C)(O)'] = 0.517007\n cpL['b', 'C(H2)(C)(O)'] = 1.26631\n cpL['c', 'C(H2)(C)(O)'] = -0.0939713\n cpL['a', 'O(H)(C)'] = 16.1555\n cpL['b', 'O(H)(C)'] = -11.938\n cpL['c', 'O(H)(C)'] = 2.85117\n cpL['a', 'C(H3)(O)'] = 3.70344\n cpL['b', 'C(H3)(O)'] = -1.12884\n cpL['c', 'C(H3)(O)'] = 0.51239\n sumA[1] = (cpL['a', 'C(H3)(O)']\n + cpL['a', 'O(H)(C)']) \n sumB[1] = (cpL['b', 'C(H3)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[1] = (cpL['c', 'C(H3)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[2] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[2] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[2] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[3] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[3] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[3] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[4] = (cpL['a', 'C(H3)(C)']\n + 2 * cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[4] = (cpL['b', 'C(H3)(C)']\n + 2 * cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[4] = (cpL['c', 'C(H3)(C)']\n + 2 * cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n\n ## Methanol: component 1\n m.prop[1, 'MW'] = 32.042\n m.prop[1, 'TB'] = 337.7\n m.prop[1, 'TC'] = 512.6\n m.prop[1, 'PC'] = 80.9\n m.prop[1, 'w'] = 0.556\n m.prop[1, 'lden'] = 792e3\n m.prop[1, 'dHvap'] = 38.376e3\n m.prop[1, 'vpA'] = -8.54796\n m.prop[1, 'vpB'] = 0.76982\n m.prop[1, 'vpC'] = -3.10850\n m.prop[1, 'vpD'] = 1.54481\n m.prop[1, 'cpA', 1] = sumA[1]\n m.prop[1, 'cpB', 1] = sumB[1]\n m.prop[1, 'cpC', 1] = sumC[1]\n m.prop[1, 'cpD', 1] = 0\n m.prop[1, 'cpA', 2] = 2.115e1\n m.prop[1, 'cpB', 2] = 7.092e-2\n m.prop[1, 'cpC', 2] = 2.587e-5\n m.prop[1, 'cpD', 2] = -2.852e-8\n\n\n ## Ethanol: component 2\n m.prop[2, 'MW'] = 46.069\n m.prop[2, 'TB'] = 351.4\n m.prop[2, 'TC'] = 513.9\n m.prop[2, 'PC'] = 61.4\n m.prop[2, 'w'] = 0.644\n m.prop[2, 'lden'] = 789.3e3\n m.prop[2, 'dHvap'] = 42.698e3\n m.prop[2, 'vpA'] = -8.51838\n m.prop[2, 'vpB'] = 0.34163\n m.prop[2, 'vpC'] = -5.73683\n m.prop[2, 'vpD'] = 8.32581\n m.prop[2, 'cpA', 1] = sumA[2]\n m.prop[2, 'cpB', 1] = sumB[2]\n m.prop[2, 'cpC', 1] = sumC[2]\n m.prop[2, 'cpD', 1] = 0\n m.prop[2, 'cpA', 2] = 9.014\n m.prop[2, 'cpB', 2] = 2.141e-1\n m.prop[2, 'cpC', 2] = -8.390e-5\n m.prop[2, 'cpD', 2] = 1.373e-9\n\n\n ## Propanol: component 3\n m.prop[3, 'MW'] = 60.096\n m.prop[3, 'TB'] = 370.3\n m.prop[3, 'TC'] = 536.8\n m.prop[3, 'PC'] = 51.7\n m.prop[3, 'w'] = 0.623\n m.prop[3, 'lden'] = 804e3\n m.prop[3, 'dHvap'] = 47.763e3\n m.prop[3, 'vpA'] = -8.05594\n m.prop[3, 'vpB'] = 4.25183e-2\n m.prop[3, 'vpC'] = -7.51296\n m.prop[3, 'vpD'] = 6.89004\n m.prop[3, 'cpA', 1] = sumA[3]\n m.prop[3, 'cpB', 1] = sumB[3]\n 
m.prop[3, 'cpC', 1] = sumC[3]\n m.prop[3, 'cpD', 1] = 0\n m.prop[3, 'cpA', 2] = 2.47\n m.prop[3, 'cpB', 2] = 3.325e-1\n m.prop[3, 'cpC', 2] = -1.855e-4\n m.prop[3, 'cpD', 2] = 4.296e-8\n\n\n ## Butanol: component 4\n m.prop[4, 'MW'] = 74.123\n m.prop[4, 'TB'] = 390.9\n m.prop[4, 'TC'] = 563.1\n m.prop[4, 'PC'] = 44.2\n m.prop[4, 'w'] = 0.593\n m.prop[4, 'lden'] = 810e3\n m.prop[4, 'dHvap'] = 52.607e3\n m.prop[4, 'vpA'] = -8.00756\n m.prop[4, 'vpB'] = 0.53783\n m.prop[4, 'vpC'] = -9.34240\n m.prop[4, 'vpD'] = 6.68692\n m.prop[4, 'cpA', 1] = sumA[4]\n m.prop[4, 'cpB', 1] = sumB[4]\n m.prop[4, 'cpC', 1] = sumC[4]\n m.prop[4, 'cpD', 1] = 0\n m.prop[4, 'cpA', 2] = 3.266\n m.prop[4, 'cpB', 2] = 4.18e-1\n m.prop[4, 'cpC', 2] = -2.242e-4\n m.prop[4, 'cpD', 2] = 4.685e-8\n\n\n return m", "def __init__(self,\n modeltype='TLusty'):\n if modeltype == 'TLusty':\n self.modtype = 'TLusty_v10'\n self.filebase = 'T*v10_z*.dat'\n self.path = '/home/kgordon/Dust/Ext/Model_Standards_Data/'\n self.read_tlusty_models(self.filebase, self.path)\n else:\n print('model type not supported')\n exit()", "def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def create_model(self, C : float =1):\n self.classifier = RandomForestClassifier(max_depth=5)", "def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)", "def convert_from_config(config):\n\n if isinstance(config, str):\n yamlConfig = parse_yaml_config(config)\n else:\n yamlConfig = config\n\n model = None\n if 'OnnxModel' in yamlConfig:\n if __onnx_enabled__:\n model = onnx_to_hls(yamlConfig)\n else:\n raise Exception(\"ONNX not found. Please install ONNX.\")\n elif 'PytorchModel' in yamlConfig:\n if __pytorch_enabled__:\n model = pytorch_to_hls(yamlConfig)\n else:\n raise Exception(\"PyTorch not found. 
Please install PyTorch.\")\n else:\n model = keras_to_hls(yamlConfig)\n\n return model", "def from_config(cls, config):\n config['posterior'] = tf.keras.layers.deserialize(config['posterior'])\n config['prior'] = tf.keras.layers.deserialize(config['prior'])\n return cls(**config)", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def __init__(self, config=None, class_min=0):\n self.config = self._resolve_config(config)\n self.class_min = self._resolve_class_min(class_min)\n self.model = LogReg(**self.config)\n self.scaler = StandardScaler()", "def __create_model(self, classes):\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained_base=True)\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained=True)\r\n # self._model.reset_class(classes, reuse_weights=[cname for cname in classes if cname in self._model.classes])\r\n if self._model is None or classes != self.classes:\r\n model_name = 'ssd_{}_{}_custom'.format(self.img_size, self.backbone)\r\n self._model = model_zoo.get_model(model_name, classes=classes, pretrained=False, pretrained_base=True,\r\n root=self.temp_path)\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(self.ctx)\r\n _, _, _ = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), self.ctx))\r\n\r\n self._model.reset_class(classes)\r\n self.classes = classes", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def make_objects():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n pmodel.set_temperature(120.)\n\n return pmodel", "def build_model(config):\n # Load the pretrained model\n detr = get_detr_model(config, include_top=True, weights=\"detr\")\n detr.summary()\n return detr", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n 
#modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def _make_model(self):\n self._model = tf.estimator.Estimator(model_fn=self.model_fn,\n model_dir=self.model_dir,\n config=self._config,\n params=self._params,\n )", "def __init__(self, model_name='vgg16'):\n trainer = Trainer(model_name=model_name)\n self.model = trainer.model\n self.model_save_dir = trainer.model_save_dir\n self.model_name = model_name", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def __init__(self, config, xtdim, batch_size):\n self.float_type = 'float32' # This should be the default\n self.config = config\n self.dt = self.config['dt']\n\n self.n_input = self.config['n_input']\n self.n_output = self.config['n_output']\n\n self.batch_size = batch_size\n self.xtdim = xtdim\n\n # time major\n self.x = np.zeros((xtdim, batch_size, self.n_input), dtype=self.float_type)\n self.y = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n self.cost_mask = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n # strength of input noise\n self._sigma_x = config['sigma_x'] * math.sqrt(2./self.config['alpha'])\n\n if config['rule_name'] == 'timed_spatial_reproduction_broad_tuning' \\\n or config['rule_name'] == 'spatial_reproduction_broad_tuning':\n self.n_guassianline = 32 + 12\n self.sd_gaussianline = 4.\n else:\n self.n_guassianline = 32\n self.sd_gaussianline = 2.\n\n self.pref_line_gaussian = np.arange(0, self.n_guassianline)", "def get_model(model_name, model_config, to_cuda,\n uniform_initialize_bn_weight=False, forward_is_infer=False):\n model = None\n if model_name == 'Tacotron2':\n if forward_is_infer:\n class Tacotron2__forward_is_infer(Tacotron2):\n def forward(self, inputs, input_lengths):\n return self.infer(inputs, input_lengths)\n model = Tacotron2__forward_is_infer(**model_config)\n else:\n model = Tacotron2(**model_config)\n elif model_name == 'WaveGlow':\n if forward_is_infer:\n class WaveGlow__forward_is_infer(WaveGlow):\n def forward(self, spect, sigma=1.0):\n return self.infer(spect, sigma)\n model = WaveGlow__forward_is_infer(**model_config)\n else:\n model = WaveGlow(**model_config)\n else:\n raise NotImplementedError(model_name)\n\n if 
uniform_initialize_bn_weight:\n init_bn(model)\n\n if to_cuda:\n model = model.cuda()\n return model", "def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def __init__(self, config_file_name: str):\n configs_trainer = io.read_yaml(PATH_CONFIG, config_file_name)\n configs_model = configs_trainer[configs_trainer['model']]\n\n # Add trainer configs attributes\n horizons = configs_trainer['forecasting_horizons_trainer']\n self.forecasting_horizons_trainer = range(horizons['smallest_horizon'], horizons['largest_horizon'] + 1)\n\n for name, value in configs_trainer.items():\n if name in ['train_date_when_predicing_min', 'train_date_to_predict_max']:\n self.__setattr__(name, value)\n\n # Initiate individual model configs object (replace attributes that were specified in configs_model).\n configs = io.read_yaml(PATH_CONFIG, configs_trainer['file_name_model_configs'])\n configs = configs[configs_trainer['model']]\n Logger.info('Loaded model configs from file',\n os.path.join(PATH_CONFIG, configs_trainer['file_name_model_configs']), self.__class__.__name__)\n configs.update(configs_model)\n\n def update_train_scope(attr, limit, fct):\n if configs.get(attr) is not None and limit in vars(self):\n date = fct(configs.get(attr), self.__getattribute__(limit))\n configs.update({attr: date})\n\n update_train_scope('train_start', 'train_date_when_predicting_min', max)\n update_train_scope('train_end', 'train_date_to_predict_max', min)\n\n self.configs_individual_model = Configs(configs={k: v for k, v in configs.items()\n if k in Configs.__dict__.keys()})\n\n # Update maximum date to predict train to ensure that we don't overlap with the evaluation period\n if self.configs_individual_model.evaluation_start is not None and self.train_date_to_predict_max is not None:\n max_date_to_predict = substract_period(\n self.configs_individual_model.evaluation_start, 1,\n highest_period=52 if self.configs_individual_model.is_weekly_forecast else 12\n )\n self.train_date_to_predict_max = min(self.train_date_to_predict_max, max_date_to_predict)\n\n Logger.info('Loaded trainer configs from file',\n os.path.join(PATH_CONFIG, config_file_name), self.__class__.__name__)", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def make(model: Type[Model], **kwargs: Any) -> Model:\n return 
modelfactory_factory(model)(**kwargs)", "def model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def model():\n return TimeSeriesMultiReg()", "def __init__(self, model_type, model_cfg, training_cfg):\n super().__init__()\n self.save_hyperparameters()\n\n self.model_cfg = model_cfg\n self.training_cfg = training_cfg\n \n if model_type == \"ConvLSTM\":\n self.model = Conv_LSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"AutoencLSTM\":\n self.model = AutoencLSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n 
img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"ConvTransformer\":\n self.model = ENS_Conv_Transformer(num_hidden=self.model_cfg[\"num_hidden\"],\n output_dim=self.model_cfg[\"output_channels\"],\n depth=self.model_cfg[\"depth\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n num_conv_layers=self.model_cfg[\"num_conv_layers\"],\n kernel_size=self.model_cfg[\"kernel_size\"],\n img_width=self.model_cfg[\"img_width\"],\n non_pred_channels=self.model_cfg[\"non_pred_channels\"],\n num_layers_query_feat=self.model_cfg[\"num_layers_query_feat\"],\n in_channels=self.model_cfg[\"in_channels\"],\n baseline=self.training_cfg[\"baseline\"])\n self.baseline = self.training_cfg[\"baseline\"]\n self.future_training = self.training_cfg[\"future_training\"]\n self.learning_rate = self.training_cfg[\"start_learn_rate\"]\n self.training_loss = get_loss_from_name(self.training_cfg[\"training_loss\"])\n self.test_loss = get_loss_from_name(self.training_cfg[\"test_loss\"])", "def new(cls, args, src_meta, trg_meta, waitk_lagging, name=None):\n # build source and target modality\n src_modality, trg_modality = cls.build_modalities(args, src_meta, trg_meta)\n encoder_params, decoder_params = {}, {}\n for f in cls.class_or_method_args():\n if f.name in args:\n if f.name.startswith(\"encoder.\"):\n encoder_params[f.name[8:]] = args[f.name]\n elif f.name.startswith(\"decoder.\"):\n decoder_params[f.name[8:]] = args[f.name]\n # build encoder and decoder\n encoder = build_encoder({\n \"encoder.class\": \"TransformerEncoder\",\n \"encoder.params\": encoder_params})\n decoder = build_decoder({\n \"decoder.class\": \"TransformerDecoder\",\n \"decoder.params\": decoder_params})\n model = cls(args, src_meta, trg_meta, src_modality, trg_modality,\n encoder, decoder, name=name)\n model.wait_k = waitk_lagging\n _ = model({\"src\": tf.convert_to_tensor([[1, 2, 3]], tf.int64),\n \"src_padding\": tf.convert_to_tensor([[0, 0., 0]], tf.float32),\n \"trg_input\": tf.convert_to_tensor([[1, 2, 3]], tf.int64)})\n return model", "def create(name, out_channel, pretrain):\n if out_channel == 10 or out_channel == 100 or out_channel == 200:\n # use custom models\n if name not in custom_factory:\n raise KeyError(\"Unknown model:\", name)\n return custom_factory[name](out_channel)\n elif out_channel == 1000:\n if name not in torchvision_factory:\n raise KeyError(\"Unknown model:\", name)\n return torchvision_factory[name](pretrain)\n else:\n raise Exception", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n 
Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def initialize_thermal_prediction(self, config_file):\n conf_pred = config_file['prediction']['heat']\n conf_powr = config_file['prediction']['power']\n # config_json\n n_day = conf_pred['n_day']\n n_values = conf_pred['n_values_per_day']\n precision_in_h = conf_pred['precision_in_h']\n use_predef_loads = conf_pred['use_predef_loads']\n predef_loads_file_path = conf_pred['path_loads']\n # heating curve\n conf_hk = config_file['components']['heating_curve']\n hk_ta = conf_hk['design_ambient_temperature_oC']\n hk_ti = conf_hk['design_indoor_temperature_oC']\n hk_tv = conf_hk['design_supply_temperature_oC']\n hk_tr = conf_hk['design_return_temperature_oC']\n hk_n = conf_hk['radiator_coefficient_n']\n hk_m = conf_hk['radiator_coefficient_m']\n hk_qn = conf_hk['design_heat_load_in_kW']\n # chp unit\n patm = utils.get_pressure_in_MPa()\n calcopt = utils.get_calc_option()\n eps_el_chp = config_file['components']['chp_unit']['electrical_efficiency']\n eps_th_chp = config_file['components']['chp_unit']['thermal_efficiency']\n qel_n_chp = config_file['components']['chp_unit']['max_electric_power_in_kW']\n chp_tinp = config_file['components']['chp_unit']['design_input_temperature_oC']\n chp_tmax = config_file['components']['chp_unit']['design_output_temperature_oC']\n qth_n_chp = eps_th_chp * qel_n_chp / eps_el_chp # in kW\n mstr_chp = qth_n_chp / (utils.cp_fluid_water(0.5 * (chp_tmax + chp_tinp), patm, calcopt) * (chp_tmax - chp_tinp)) # in kg/s = kW / (kJ/kg/K * K)\n # gas boiler\n qth_n_gb = config_file['components']['gas_boiler']['max_thermal_power_in_kW']\n gb_tinp = config_file['components']['gas_boiler']['design_input_temperature_oC']\n gb_tmax = config_file['components']['gas_boiler']['design_output_temperature_oC']\n mstr_gb = qth_n_gb / (utils.cp_fluid_water(0.5 * (gb_tinp + gb_tmax), patm, calcopt) * (gb_tmax - gb_tinp)) # in kg/s = kW / (kJ/kg/K * K) # in kg/s = kW / (kJ/kg/K * K)\n # storage tank\n effective_height = config_file['components']['storage_tank']['effective_heigth_in_m']\n inner_radius = config_file['components']['storage_tank']['inner_radius_tank_in_m']\n effective_pipe_volume = config_file['components']['storage_tank']['effective_coil_volume_in_m3']\n effective_volume = config_file['components']['storage_tank']['effective_volume_in_m3']\n if (effective_volume <= 0.0):\n effective_volume = math.pi * inner_radius * inner_radius * effective_height - effective_pipe_volume # in m3\n nr_calc = 20\n slice_volume = effective_volume / nr_calc # in m3\n qmax_rod_el = config_file['components']['storage_tank']['power_heating_rod_in_kW']\n open_weather_map_active = config_file['calculation']['platform_mode']['open_weather_map_active']\n # conf_powr\n #print('\\n initialize_thermal_prediction')\n #print('use_predef_loads = {}; {}'.format(use_predef_loads,type(use_predef_loads)))\n #print('predef_loads_file_path = {}; {}'.format(predef_loads_file_path,type(predef_loads_file_path)))\n return predict_thermal.predict_Q(n_day, n_values, precision_in_h, predef_loads_file_path, use_predef_loads, self.output_horizon_in_h, \n self.output_resolution_in_s, conf_powr, hk_tv, hk_tr, hk_ti, hk_ta, hk_qn, 
hk_n, hk_m, chp_tmax, gb_tmax, slice_volume, \n mstr_chp, mstr_gb, qmax_rod_el, eps_th_chp, eps_el_chp, open_weather_map_active)", "def build_model(\n config: Mapping, cardinalities: Mapping[str, int]\n) -> keras.Model:\n\n model_config = config['model']\n if isinstance(model_config, str):\n model = keras.models.load_model(\n model_config, custom_objects={\n 'loss_fn': _create_loss(config['loss'])\n }\n )\n\n return model\n\n features = Features(config['features'])\n inputs_all = []\n\n # Constituents of different types\n constituent_types = [\n key for key in sorted(model_config.keys()) # Ensure order\n if key not in {'head', 'load_weights'}\n ]\n outputs_constituents = []\n for constituent_type in constituent_types:\n inputs_numerical = keras.Input(\n shape=(None, len(features.numerical(constituent_type))),\n ragged=True, name=f'{constituent_type}_numerical'\n )\n inputs_categorical = OrderedDict()\n for feature in features.categorical(constituent_type):\n inputs_categorical[feature] = keras.Input(\n shape=(None,), ragged=True, name=feature\n )\n inputs_all.append(inputs_numerical)\n inputs_all.extend(inputs_categorical.values())\n\n outputs = _apply_deep_set(\n inputs_numerical, inputs_categorical,\n model_config[constituent_type], cardinalities, constituent_type\n )\n outputs_constituents.append(outputs)\n\n # Head\n inputs_global_numerical = keras.Input(\n shape=(len(features.numerical('global')),),\n name='global_numerical'\n )\n inputs_global_categorical = OrderedDict()\n for feature in features.categorical('global'):\n inputs_global_categorical[feature] = keras.Input(\n shape=(None,), name=feature\n )\n embeddings_global = {\n feature: Embedding(\n cardinalities[feature],\n model_config['head']['embeddings'][feature],\n name=feature + '_embeddings'\n )(inputs)\n for feature, inputs in inputs_global_categorical.items()\n }\n inputs_all.append(inputs_global_numerical)\n inputs_all.extend(inputs_global_categorical.values())\n inputs_head = Concatenate(name='head_concatenate')(\n [inputs_global_numerical]\n + [\n embeddings_global[feature]\n for feature in inputs_global_categorical.values()\n ]\n + outputs_constituents\n )\n outputs = _apply_dense_from_config(\n inputs_head, model_config['head'], name_prefix='head_'\n )\n\n outputs = Dense(1, name='head_dense_output')(outputs) # Output unit\n model = keras.Model(inputs=inputs_all, outputs=outputs, name='full')\n\n model.compile(\n optimizer=_create_optimizer(config.get('optimizer', None)),\n loss=_create_loss(config['loss'])\n )\n if 'load_weights' in model_config:\n # Normally, a saved model should be loaded\n # keras.models.load_model at the beginning of thsi function.\n # However, this is currently not supported for models that use\n # ragged tensors [1]. As a workaround, construct the model anew\n # and then load saved weights. The path to weights would\n # usually be \"{model_directory}/variables/variables\", with the\n # \".index\" file extension stripped off. 
This doesn't restore\n # the state of the optimizer.\n # [1] https://github.com/tensorflow/tensorflow/issues/41034\n model.load_weights(model_config['load_weights'])\n return model", "def dynamic_model(self, input_val: float) -> float:\n pass", "def create_regressor(config, parameters):\n\n # Mean and Standard Deviation Constants for normalization.\n with file_io.FileIO(parameters.mean_path, mode='r') as f:\n mean = pickle.load(f)\n with file_io.FileIO(parameters.std_path, mode='r') as f:\n std = pickle.load(f)\n\n # Columns to be used as features.\n hour = tf.feature_column.categorical_column_with_identity(\n 'hour', num_buckets=24)\n hour = tf.feature_column.embedding_column(\n hour, dimension=parameters.hour_embedding)\n\n day = tf.feature_column.categorical_column_with_identity(\n 'day', num_buckets=7)\n day = tf.feature_column.embedding_column(\n day, dimension=parameters.day_embedding)\n\n weather = [tf.feature_column.numeric_column(\n 'weather' + str(i),\n normalizer_fn=(lambda x, i = i: (x - mean[i]) / std[i])\n ) for i in range(constants.WEATHER_SIZE)]\n\n distribution = [tf.feature_column.numeric_column(\n 'distribution' + str(i)\n ) for i in range(constants.DISTRIBUTION_SIZE)]\n\n feature_cols = [hour, day] + weather + distribution\n\n # Evaluation metric.\n def mean_absolute_error(labels, predictions):\n \"\"\"Creates mean absolute error metric.\n\n Metric is used to evaluate the model.\n\n Args:\n labels: Evaluation true labels.\n predictions: Evaluation model predictions.\n\n Returns:\n A dictionary with the evaluation metric\n \"\"\"\n pred_values = predictions['predictions']\n return {'mae': tf.metrics.mean_absolute_error(\n labels, pred_values)}\n\n layer = parameters.first_layer_size\n lfrac = parameters.layer_reduction_fraction\n nlayers = parameters.number_layers\n h_units = [layer]\n for _ in range(nlayers - 1):\n h_units.append(math.ceil(layer * lfrac))\n layer = h_units[-1]\n\n estimator = tf.estimator.DNNRegressor(\n feature_columns=feature_cols,\n hidden_units=h_units,\n optimizer=tf.train.AdagradOptimizer(\n learning_rate=parameters.learning_rate),\n dropout=parameters.dropout, config=config)\n estimator = tf.contrib.estimator.add_metrics(\n estimator, mean_absolute_error)\n estimator = tf.contrib.estimator.forward_features(estimator, 'date')\n return estimator", "def from_config(cls, model_config: Union[dict, ModelConfig]) -> Type[AbstractModel]:\n\n if not (model_config and isinstance(model_config, (ModelConfig, dict))):\n msg = f\"Need a valid model config to create a text/tagger model in AutoModel. \" \\\n f\"Found model_config={model_config} of type({type(model_config)})\"\n raise ValueError(msg)\n\n # get model type upon validation\n model_config = cls._resolve_model_config(model_config)\n model_type = cls._get_model_type(model_config)\n\n # load metadata and return\n if model_type == \"text\":\n model_class = AutoTextModel.get_model_class(model_config)\n elif model_type == \"tagger\":\n model_class = AutoTaggerModel.get_model_class(model_config)\n\n return model_class(model_config)", "def build_model():" ]
[ "0.8444309", "0.65563035", "0.637411", "0.6288527", "0.62518233", "0.622743", "0.622743", "0.6217139", "0.6207316", "0.6149929", "0.6077935", "0.6031471", "0.6030025", "0.6021649", "0.5992696", "0.59753346", "0.5956578", "0.5916777", "0.5907874", "0.5907593", "0.58919555", "0.58832514", "0.58558136", "0.58446854", "0.5827892", "0.58207685", "0.5820042", "0.57981926", "0.5787968", "0.5780455", "0.57591975", "0.57591975", "0.57438856", "0.57430595", "0.57415885", "0.572479", "0.569804", "0.56911707", "0.56747377", "0.566897", "0.5659603", "0.56487644", "0.5647228", "0.5642159", "0.5633685", "0.5633215", "0.562707", "0.56157404", "0.56155676", "0.56127864", "0.56115127", "0.5608163", "0.55991167", "0.55971473", "0.5587398", "0.5578337", "0.55727684", "0.55714613", "0.55606776", "0.5551912", "0.5550151", "0.5536657", "0.55251026", "0.5506968", "0.5505085", "0.5504997", "0.54946184", "0.5487506", "0.54857403", "0.5485332", "0.5481162", "0.5479117", "0.5477883", "0.5465073", "0.5459169", "0.5455871", "0.5450354", "0.5448577", "0.5446346", "0.54409885", "0.544092", "0.54407585", "0.544019", "0.54378474", "0.5437089", "0.5432261", "0.5431375", "0.5428835", "0.5424792", "0.5423592", "0.5416969", "0.5416163", "0.5406413", "0.5404628", "0.540462", "0.5401919", "0.5401023", "0.53982085", "0.539786", "0.53921056" ]
0.8335938
1
Loads a checkpoint of a model. The provided model checkpoint must match the stored model.
def try_load_checkpoint_for_model(self) -> bool: if self._model is None: raise ValueError("Model must be created before it can be adjusted.") if not self.checkpoint_path: raise ValueError("No checkpoint provided") if not self.checkpoint_path.is_file(): logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}') return False epoch = ModelAndInfo._load_checkpoint(model=self._model, checkpoint_path=self.checkpoint_path, key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY, use_gpu=self.config.use_gpu) logging.info(f"Loaded model from checkpoint (epoch: {epoch})") self.checkpoint_epoch = epoch return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not 
model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n 
model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_model(model, transfer_from, sess):\n param_path = final_param_path(model.name, transfer_from)\n step_to_load = FINAL_PARAM_STEPS[model.name][transfer_from]\n util.load_checkpoint_at_step(\n model_name=model.name,\n global_step=step_to_load,\n 
saver=tf.train.Saver(),\n sess=sess,\n path=param_path)", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_model(model):\n # Check if the model is a model directory (containing a metagraph and a checkpoint file)\n # or if it is a protobuf file with a frozen graph\n model_exp = os.path.expanduser(model)\n if os.path.isfile(model_exp):\n print('Model filename: %s' % model_exp)\n with tf.gfile.FastGFile(model_exp, 'rb') as f_l:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_l.read())\n tf.import_graph_def(graph_def, name='')\n else:\n print('Model directory: %s' % model_exp)\n meta_file, ckpt_file = get_model_filenames(model_exp)\n\n print('Metagraph file: %s' % meta_file)\n print('Checkpoint file: %s' % ckpt_file)\n\n saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))\n saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n 
print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def load_checkpoint(model, filename, map_location='cpu', strict=False, logger=None):\n checkpoint = _load_checkpoint(filename, map_location)\n if not isinstance(checkpoint, dict):\n raise RuntimeError(f'No state_dict found in checkpoint file {filename}')\n if 'state_dict' in checkpoint:\n state_dict_tmp = checkpoint['state_dict']\n else:\n state_dict_tmp = checkpoint\n state_dict = OrderedDict()\n for k, v in state_dict_tmp.items():\n if k.startswith('module.backbone.'):\n state_dict[k[16:]] = v\n elif k.startswith('module.'):\n state_dict[k[7:]] = v\n elif k.startswith('backbone.'):\n state_dict[k[9:]] = v\n else:\n state_dict[k] = v\n load_state_dict(model, state_dict, strict, logger)\n return checkpoint", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 
'model!')", "def load_model_saved_with_module(model, checkpoint_path, logger):\n checkpoint = torch.load(checkpoint_path)\n new_state_dict = dict()\n for k, v in checkpoint[\"model_state_dict\"].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n logger.info(f\"Already restored model from checkpoint: {checkpoint_path}\")\n return model", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = 
models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def _load_checkpoint(filename, map_location=None):\n if filename.startswith('modelzoo://'):\n warnings.warn('The URL scheme of \"modelzoo://\" is deprecated, please '\n 'use \"torchvision://\" instead')\n model_urls = get_torchvision_models()\n model_name = filename[11:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('torchvision://'):\n model_urls = get_torchvision_models()\n model_name = filename[14:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('open-mmlab://'):\n model_urls = get_external_models()\n model_name = filename[13:]\n deprecated_urls = get_deprecated_model_names()\n if model_name in deprecated_urls:\n warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '\n f'of open-mmlab://{deprecated_urls[model_name]}')\n model_name = deprecated_urls[model_name]\n model_url = model_urls[model_name]\n # check if is url\n if model_url.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(model_url)\n else:\n filename = osp.join(_get_mmcv_home(), model_url)\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n elif 
filename.startswith('mmcls://'):\n model_urls = get_mmcls_models()\n model_name = filename[8:]\n checkpoint = load_url_dist(model_urls[model_name])\n checkpoint = _process_mmcls_checkpoint(checkpoint)\n elif filename.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(filename)\n elif filename.startswith('pavi://'):\n model_path = filename[7:]\n checkpoint = load_pavimodel_dist(model_path, map_location=map_location)\n elif filename.startswith('s3://'):\n checkpoint = load_fileclient_dist(\n filename, backend='ceph', map_location=map_location)\n else:\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n return checkpoint", "def _resume_from_checkpoint(model: tf.keras.Model,\n model_dir: str,\n train_steps: int) -> int:\n logging.info('Load from checkpoint is enabled.')\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n logging.info('latest_checkpoint: %s', latest_checkpoint)\n if not latest_checkpoint:\n logging.info('No checkpoint detected.')\n return 0\n\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint)\n model.load_weights(latest_checkpoint)\n initial_epoch = model.optimizer.iterations // train_steps\n logging.info('Completed loading from checkpoint.')\n logging.info('Resuming from epoch %d', initial_epoch)\n return int(initial_epoch)", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_model(model_name):\n model = get_model(training = False)\n checkpoint = torch.load('../models/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = 
os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. 
Starting normal.\")\n return checkpoint", "def load_model(model, device, model_path):\n if os.path.exists(model_path):\n print(\"Reading model from \", model_path)\n checkpoint = torch.load(model_path, map_location=torch.device(device))\n model.load_state_dict(checkpoint['state_dict'])\n return model\n else:\n raise RuntimeError('Model does not exist!')", "def load_pretrained_model(self, load_from):\n print(\"loading model from %s\\n\" % (load_from))\n try:\n if self.use_cuda:\n pretrained_dict = torch.load(load_from)\n else:\n pretrained_dict = torch.load(load_from, map_location='cpu')\n\n model_dict = self.online_net.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.online_net.load_state_dict(model_dict)\n print(\"The loaded parameters are:\")\n keys = [key for key in pretrained_dict]\n print(\", \".join(keys))\n print(\"--------------------------\")\n except Exception as e:\n print(\"Failed to load checkpoint...\")\n print(e)", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_model(session: tf.Session, model_dir: Text) -> None:\n saver = tf.train.Saver()\n saver.restore(session, model_dir)", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n 
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def load_checkpoint(model,\n filename,\n map_location='cpu',\n strict=False,\n logger=None):\n checkpoint = _load_checkpoint(filename, map_location)\n # OrderedDict is a subclass of dict\n if not isinstance(checkpoint, dict):\n raise RuntimeError(\n f'No state_dict found in checkpoint file {filename}')\n # get state_dict from checkpoint\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n elif 'model' in checkpoint:\n state_dict = checkpoint['model']\n elif 'module' in checkpoint:\n state_dict = checkpoint['module']\n else:\n state_dict = checkpoint\n # strip prefix of state_dict\n if list(state_dict.keys())[0].startswith('module.'):\n state_dict = {k[7:]: v for k, v in state_dict.items()}\n\n # for MoBY, load model of online branch\n if sorted(list(state_dict.keys()))[0].startswith('encoder'):\n state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}\n\n # reshape absolute position embedding for Swin\n if state_dict.get('absolute_pos_embed') is not None:\n absolute_pos_embed = state_dict['absolute_pos_embed']\n N1, L, C1 = absolute_pos_embed.size()\n N2, C2, H, W = model.absolute_pos_embed.size()\n if N1 != N2 or C1 != C2 or L != H*W:\n logger.warning(\"Error in loading absolute_pos_embed, pass\")\n else:\n state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)\n\n rank, _ = get_dist_info()\n if \"rel_pos_bias.relative_position_bias_table\" in state_dict:\n if rank == 0:\n print(\"Expand the shared relative position embedding to each layers. \")\n num_layers = model.get_num_layers()\n rel_pos_bias = state_dict[\"rel_pos_bias.relative_position_bias_table\"]\n for i in range(num_layers):\n state_dict[\"blocks.%d.attn.relative_position_bias_table\" % i] = rel_pos_bias.clone()\n\n state_dict.pop(\"rel_pos_bias.relative_position_bias_table\")\n\n all_keys = list(state_dict.keys())\n for key in all_keys:\n if \"relative_position_index\" in key:\n state_dict.pop(key)\n\n if \"relative_position_bias_table\" in key:\n rel_pos_bias = state_dict[key]\n src_num_pos, num_attn_heads = rel_pos_bias.size()\n dst_num_pos, _ = model.state_dict()[key].size()\n dst_patch_shape = model.patch_embed.patch_shape\n if dst_patch_shape[0] != dst_patch_shape[1]:\n raise NotImplementedError()\n num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)\n src_size = int((src_num_pos - num_extra_tokens) ** 0.5)\n dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)\n if src_size != dst_size:\n if rank == 0:\n print(\"Position interpolate for %s from %dx%d to %dx%d\" % (\n key, src_size, src_size, dst_size, dst_size))\n extra_tokens = rel_pos_bias[-num_extra_tokens:, :]\n rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]\n\n def geometric_progression(a, r, n):\n return a * (1.0 - r ** n) / (1.0 - r)\n\n left, right = 1.01, 1.5\n while right - left > 1e-6:\n q = (left + right) / 2.0\n gp = geometric_progression(1, q, src_size // 2)\n if gp > dst_size // 2:\n right = q\n else:\n left = q\n\n # if q > 1.13492:\n # q = 1.13492\n\n dis = []\n cur = 1\n for i in range(src_size // 2):\n dis.append(cur)\n cur += q ** (i + 1)\n\n r_ids = [-_ for _ in reversed(dis)]\n\n x = r_ids + [0] + dis\n y = r_ids + [0] + dis\n\n t = dst_size // 2.0\n dx = np.arange(-t, t + 0.1, 1.0)\n dy = 
np.arange(-t, t + 0.1, 1.0)\n if rank == 0:\n print(\"x = {}\".format(x))\n print(\"dx = {}\".format(dx))\n\n all_rel_pos_bias = []\n\n for i in range(num_attn_heads):\n z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()\n f = interpolate.interp2d(x, y, z, kind='cubic')\n all_rel_pos_bias.append(\n torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))\n\n rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)\n new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)\n state_dict[key] = new_rel_pos_bias\n\n if 'pos_embed' in state_dict:\n pos_embed_checkpoint = state_dict['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n if rank == 0:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n state_dict['pos_embed'] = new_pos_embed\n\n # interpolate position bias table if needed\n relative_position_bias_table_keys = [k for k in state_dict.keys() if \"relative_position_bias_table\" in k]\n for table_key in relative_position_bias_table_keys:\n table_pretrained = state_dict[table_key]\n table_current = model.state_dict()[table_key]\n L1, nH1 = table_pretrained.size()\n L2, nH2 = table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {table_key}, pass\")\n else:\n if L1 != L2:\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n table_pretrained_resized = F.interpolate(\n table_pretrained.permute(1, 0).view(1, nH1, S1, S1),\n size=(S2, S2), mode='bicubic')\n state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)\n\n # load state_dict\n load_state_dict(model, state_dict, strict, logger)\n return checkpoint", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def load_model(fn, model):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n log(\"Loading model from {}\".format(fn))\n model.saver.restore(model.session, 
fn)\n log(\"Done loading!\")", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def load_model():\n with open(paths.model('model.pkl'), 'rb') as stream:\n return pickle.load(stream)", "def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def load_model(model):\n fin = False\n backup1 = False\n backup2 = False\n\n if os.path.exists(\"TrainedModel/finalModel.pth\"):\n fin = True\n elif os.path.exists(\"TrainedModel/modelBackup.pth\"):\n backup1 = True\n elif os.path.exists(\"TrainedModel/modelBackupBackup.pth\"):\n backup2 = True\n\n if fin:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/finalModel.pth\"))\n return model\n except:\n print(\"finalModel seems to be corrupted, trying a backup...\")\n \n if fin or backup1:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackup.pth\"))\n return model\n except:\n print(\"modelBackup seems to be corrupted, trying a backup...\")\n\n if fin or backup1 or backup2:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackupBackup.pth\"))\n return model\n except:\n print(\"modelBackupBackup seems to be corrupted, you're at the end of the line.\")\n\n print(\"There doesn't seem to be anything to load.\")\n return model", "def load_actor(self, checkpoint):\n \n model = torch.load(checkpoint)\n self.actor_local.load_state_dict(model)", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", 
ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] Failed to find a checkpoint, Exception!\")\n return False, 0", "def load(self, sess, step=None):\n if step==None:\n ckpt_path = tf.train.latest_checkpoint(self.model.ckpt_dir)\n else:\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model-'+str(step))\n self.saver.restore(sess, ckpt_path)\n step = tf.train.global_step(sess, self.gstep)\n print('Load model at step {} from check point {}.'.format(step, ckpt_path))", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = 
checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def maybe_load_model(savedir, container):\n if savedir is None:\n return\n\n state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))\n if container is not None:\n logger.log(\"Attempting to download model from Azure\")\n found_model = container.get(savedir, 'training_state.pkl.zip')\n else:\n found_model = os.path.exists(state_path)\n if found_model:\n state = pickle_load(state_path, compression=True)\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n if container is not None:\n container.get(savedir, model_dir)\n U.load_state(os.path.join(savedir, model_dir, \"saved\"))\n logger.log(\"Loaded models checkpoint at {} iterations\".format(state[\"num_iters\"]))\n return state", "def load_pretrained_model(model, pretrained_model_path, verbose=False):\n\n if isinstance(pretrained_model_path, str):\n if not os.path.exists(pretrained_model_path):\n raise IOError(\n \"Can't find pretrained model: {}\".format(pretrained_model_path)\n )\n\n print(\"Loading checkpoint from '{}'\".format(pretrained_model_path))\n pretrained_state = torch.load(pretrained_model_path)[\"state_dict\"]\n else:\n # incase pretrained model weights are given\n pretrained_state = pretrained_model_path\n\n print(len(pretrained_state), \" keys in pretrained model\")\n\n current_model_state = model.state_dict()\n print(len(current_model_state), \" keys in current model\")\n pretrained_state = {\n key: val\n for key, val in pretrained_state.items()\n if key in current_model_state and val.size() == current_model_state[key].size()\n }\n\n print(\n len(pretrained_state),\n \" keys in pretrained model are available in current model\",\n )\n current_model_state.update(pretrained_state)\n model.load_state_dict(current_model_state)\n\n if verbose:\n non_available_keys_in_pretrained = [\n key\n for key, val in pretrained_state.items()\n if key not in current_model_state\n or val.size() != current_model_state[key].size()\n ]\n non_available_keys_in_current = [\n key\n for key, val in current_model_state.items()\n if key not in pretrained_state or val.size() != pretrained_state[key].size()\n ]\n\n print(\n \"not 
available keys in pretrained model: \", non_available_keys_in_pretrained\n )\n print(\"not available keys in current model: \", non_available_keys_in_current)\n\n return model", "def load_weights_infer(checkpoint_path, model):\n try:\n # catalyst weights\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")[\"model_state_dict\"]\n except:\n # anything else\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(state_dict, strict=True)\n except:\n # for clf + seg for seg only prediction\n print(f\"Non-strict loading of weights from {checkpoint_path}\")\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n return model", "def get_model(self, model: Optional[torch.nn.Module] = None) -> torch.nn.Module:\n with self.as_directory() as tempdir:\n model_path = os.path.join(tempdir, self.MODEL_FILENAME)\n if not os.path.exists(model_path):\n raise RuntimeError(\n \"`model.pt` not found within this checkpoint. Make sure you \"\n \"created this `TorchCheckpoint` from one of its public \"\n \"constructors (`from_state_dict` or `from_model`).\"\n )\n model_or_state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if isinstance(model_or_state_dict, torch.nn.Module):\n if model:\n warnings.warn(\n \"TorchCheckpoint already contains all information needed. \"\n \"Discarding provided `model` argument. This means: \"\n \"If you are using BatchPredictor, you should do \"\n \"`BatchPredictor.from_checkpoint(checkpoint, TorchPredictor)` by\"\n \"removing kwargs `model=`. \"\n \"If you are using TorchPredictor directly, you should do \"\n \"`TorchPredictor.from_checkpoint(checkpoint)` by removing kwargs \"\n \"`model=`.\"\n )\n model = load_torch_model(\n saved_model=model_or_state_dict, model_definition=model\n )\n return model", "def load_model_from_checkpoint(file, device):\r\n\r\n if device == 'cuda':\r\n # Load all tensors onto GPU\r\n map_location = lambda storage, loc: storage.cuda()\r\n else:\r\n # Load all tensors onto CPU\r\n map_location = lambda storage, loc: storage\r\n\r\n # Assuming model was trained and checkpoint saved on Linux, but predict.py inference is executed using Windows.\r\n # Then, it is required to implement the following quick fix, because otherwise the exception is raised:\r\n # \"NotImplementedError: cannot instantiate 'PosixPath' on your system\"\r\n # Credits to https://stackoverflow.com/questions/57286486/i-cant-load-my-model-because-i-cant-put-a-posixpath\r\n if type(file) == pathlib.WindowsPath:\r\n tmp_PosixPath = pathlib.PosixPath\r\n pathlib.PosixPath = pathlib.WindowsPath\r\n\r\n parameters = torch.load(file, map_location=map_location)\r\n\r\n # Restore default\r\n if type(file) == pathlib.WindowsPath:\r\n pathlib.WindowsPath = pathlib.PosixPath\r\n pathlib.PosixPath = tmp_PosixPath\r\n\r\n model = train.create_model(parameters)\r\n\r\n model.class_to_idx = parameters.get('train_datasets_class_to_idx')\r\n model.load_state_dict(parameters.get('state_dict'), strict=False)\r\n\r\n return model, parameters", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir==\"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load 
checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir == \"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict", "def load_model(self):\n if self.save_path is not None:\n if isfile(self.save_path):\n self.model.load_state_dict(load(self.save_path))\n else:\n raise ValueError(\"Cannot find model save file: \" + self.save_path)", "def parse_checkpoint(checkpoint_path):\n with gfile.Open(checkpoint_path, 'rb') as fp:\n raw_contents = fp.read()\n if raw_contents.startswith(b'model_checkpoint_path'):\n raise ValueError(\n 'Attempting to restore a TensorFlow checkpoint as a native T5X '\n f'checkpoint. Path: {checkpoint_path}')\n return serialization.msgpack_restore(raw_contents)", "def load_ckpt(model,\n weight_path,\n **kargs):\n #model.set_state_dict(state_dict)\n\n if not osp.isfile(weight_path):\n raise IOError(f'{weight_path} is not a checkpoint file')\n #state_dicts = load(weight_path)\n\n logger = get_logger(\"paddlevideo\")\n state_dicts = paddle.load(weight_path)\n if \"VisionTransformer\" in str(model): # For TimeSformer case\n tmp = pretrain_vit_param_trans(model, state_dicts, kargs['num_patches'], kargs['seg_num'], kargs['attention_type'])\n else:\n tmp = {}\n total_len = len(model.state_dict())\n with tqdm(total=total_len, position=1, bar_format='{desc}', desc=\"Loading weights\") as desc:\n for item in tqdm(model.state_dict(), total=total_len, position=0):\n name = item\n desc.set_description('Loading %s' % name)\n if name not in state_dicts: # Convert from non-parallel model\n if str('backbone.' + name) in state_dicts:\n tmp[name] = state_dicts['backbone.' + name]\n else: # Convert from parallel model\n tmp[name] = state_dicts[name]\n time.sleep(0.01)\n ret_str = \"loading {:<20d} weights completed.\".format(len(model.state_dict()))\n desc.set_description(ret_str)\n model.set_state_dict(tmp)" ]
[ "0.8315826", "0.8164179", "0.8112332", "0.8067118", "0.8025133", "0.80095977", "0.79719204", "0.792332", "0.78759766", "0.7870545", "0.7857625", "0.78201485", "0.777726", "0.76251996", "0.7613241", "0.76099503", "0.7592557", "0.75774544", "0.75367856", "0.75308794", "0.75168675", "0.74812114", "0.74299854", "0.7393436", "0.7393114", "0.73721254", "0.73643696", "0.73471385", "0.7321017", "0.72932446", "0.72932446", "0.728845", "0.7267254", "0.72477764", "0.7239574", "0.72306263", "0.7226572", "0.71733505", "0.71688586", "0.7142259", "0.7099037", "0.70949304", "0.704722", "0.70450854", "0.7042497", "0.7031897", "0.702985", "0.70279515", "0.7022414", "0.7019927", "0.699904", "0.6978824", "0.6975688", "0.69705427", "0.6964377", "0.69538903", "0.69510853", "0.6942883", "0.69246125", "0.6895234", "0.68862903", "0.6883913", "0.68792444", "0.68792444", "0.68496513", "0.6846704", "0.68443406", "0.68164563", "0.6801924", "0.68012434", "0.67903143", "0.67903143", "0.67737925", "0.6766907", "0.67509836", "0.67359954", "0.6733248", "0.672762", "0.6722152", "0.67018026", "0.6700658", "0.6699358", "0.66965634", "0.6696061", "0.66934144", "0.6687621", "0.66813284", "0.6652732", "0.66485566", "0.6634807", "0.66253996", "0.6618365", "0.6614342", "0.65795475", "0.65704346", "0.656933", "0.65646607", "0.6561511", "0.655886", "0.6556308" ]
0.7488327
21
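The hard negatives listed above all circle the same checkpoint-restore pattern (torch.load / load_state_dict, plus optimizer and scheduler state). For readers skimming these entries, a minimal self-contained sketch of that pattern follows; the file name model.pt and the checkpoint keys used here are assumptions made for illustration, not values taken from any entry above.

# Illustrative sketch only: a bare-bones save/resume cycle in PyTorch.
# The path "model.pt" and the keys "epoch"/"model_state_dict"/"optimizer_state_dict"
# are assumptions for this example, not taken from the entries above.
import os

import torch
import torch.nn as nn


def save_checkpoint(model: nn.Module, optimizer: torch.optim.Optimizer,
                    epoch: int, path: str = "model.pt") -> None:
    # Persist everything needed to resume training later.
    torch.save({"epoch": epoch,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict()}, path)


def load_checkpoint(model: nn.Module, optimizer: torch.optim.Optimizer,
                    path: str = "model.pt", device: str = "cpu") -> int:
    # Return the epoch to resume from; 0 means no checkpoint was found.
    if not os.path.isfile(path):
        return 0
    checkpoint = torch.load(path, map_location=device)
    model.load_state_dict(checkpoint["model_state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    return checkpoint["epoch"] + 1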
Updates the torch model so that input minibatches are parallelized across the batch dimension to utilise multiple gpus. If model parallel is set to True and execution is in test mode, then model is partitioned to perform full volume inference.
def adjust_model_for_gpus(self) -> None:\n        if self._model is None:\n            raise ValueError("Model must be created before it can be adjusted.")\n        # Adjusting twice causes an error.\n        if self.is_model_adjusted:\n            logging.debug("model_and_info.is_model_adjusted is already True")\n        if self._optimizer:\n            raise ValueError("Create an optimizer only after creating and adjusting the model.")\n        self._model = ModelAndInfo._adjust_for_gpus(model=self._model,\n                                                    config=self.config,\n                                                    model_execution_mode=self.model_execution_mode)\n        self.is_model_adjusted = True\n        logging.debug("model_and_info.is_model_adjusted set to True")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
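Before this entry's hard negatives, a short hedged sketch of the behaviour described by the query and the positive document: wrap the model in torch.nn.DataParallel so training minibatches are split across the batch dimension, and, when model parallelism is requested at test time, partition the network across devices instead. The partition_model hook mirrors the call seen in the first negative below; the function name and flag names here are otherwise assumptions, not the actual InnerEye API.

# Hedged illustration of the idea, not the real ModelAndInfo._adjust_for_gpus.
import torch
import torch.nn as nn


def adjust_for_gpus(model: nn.Module, is_training: bool,
                    use_model_parallel: bool = False) -> nn.Module:
    if not torch.cuda.is_available():
        return model  # nothing to adjust on a CPU-only machine
    devices = list(range(torch.cuda.device_count()))
    model = model.cuda()
    if use_model_parallel and not is_training and hasattr(model, "partition_model"):
        # Test-time model parallelism: spread layers over the GPUs so a full
        # volume fits in memory; the model itself must provide the hook.
        model.partition_model(devices=devices)
        return model
    # Default: data parallelism, minibatches are split across all visible GPUs.
    return nn.DataParallel(model, device_ids=devices)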
[ "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def parallelize(self):\r\n self.parallel = True\r\n self.network = torch.nn.DataParallel(self.network)", "def parallelize(self):\n self.parallel = True\n self.network = torch.nn.DataParallel(self.network)", "def train_parallel(config):\n _setup_parallel_env()\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def init_model_parallel(self, global_rank: int, world_size: int) -> None:\n app_state = AppState()\n\n # we initialize megatron-lm model parallel and data parallel groups\n # after initializing DDP with PTL.\n if app_state.model_parallel_size is not None:\n # destroy groups in case they have already been created\n # this happens with multiple calls to trainer.test for example\n 
parallel_state.destroy_model_parallel()\n if torch.distributed.is_initialized():\n parallel_state.initialize_model_parallel(\n tensor_model_parallel_size=app_state.tensor_model_parallel_size,\n pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,\n virtual_pipeline_model_parallel_size=app_state.virtual_pipeline_model_parallel_size,\n pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,\n use_fp8=app_state.use_fp8,\n )\n\n # assert that fake tp and pp rank match after model parallel init\n assert app_state.tensor_model_parallel_rank == parallel_state.get_tensor_model_parallel_rank()\n assert app_state.pipeline_model_parallel_rank == parallel_state.get_pipeline_model_parallel_rank()\n\n app_state.tensor_model_parallel_group = parallel_state.get_tensor_model_parallel_group()\n app_state.data_parallel_group = parallel_state.get_data_parallel_group()\n app_state.data_parallel_rank = parallel_state.get_data_parallel_rank()\n app_state.data_parallel_size = parallel_state.get_data_parallel_world_size()\n app_state.pipeline_model_parallel_group = parallel_state.get_pipeline_model_parallel_group()\n\n # create MPI process group for UCX-based communication APIs\n if app_state.init_mpi_proc_group:\n torch.distributed.new_group(backend='mpi')", "def partition_data_parallel(\n graph: GraphModule,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer],\n params_buffers: Dict[str, torch.Tensor],\n named_states: Dict[str, Any],\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n mesh: DeviceMesh,\n parallel_style: DataParallelStyle,\n input_batch_dim: int,\n) -> GraphModule:\n num_params_buffers = len(params_buffers)\n flattened_states = pytree.tree_flatten(named_states)[0]\n num_states = len(flattened_states)\n\n changed = graph.graph.eliminate_dead_code()\n if changed:\n graph.recompile()\n\n # 1. First build up data parallel strategies for the whole graph\n strategy_map = build_data_parallel_strategies(\n graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim\n )\n\n # 2. Next we mark the data parallel strategy for each node base on\n # the parallel_style\n mark_data_parallel_shardings(\n graph,\n num_parameters=num_params_buffers,\n num_states=num_states,\n dp_strategy_map=strategy_map,\n parallel_mode=parallel_style,\n )\n\n # 3. Partition the single machine graph to the distribute graph\n partitioned_graph = partitioner(graph)\n\n # preserve node types for the expanded graph\n for node in partitioned_graph.graph.nodes:\n if node in strategy_map:\n node_strategy = strategy_map[node]\n if isinstance(node_strategy, DataParallelStrategy):\n node.meta[\"node_type\"] = node_strategy.node_type\n elif isinstance(node_strategy, TupleStrategy):\n node.meta[\"node_type\"] = NodeType.NON_TENSOR\n else:\n raise RuntimeError(f\"Unknown node strategy {node_strategy}\")\n else:\n # if the nodes are expanded nodes (collectives), we mark them\n # the same type as the input node.\n input_node = node.all_input_nodes[0]\n node.meta[\"node_type\"] = input_node.meta[\"node_type\"]\n\n # 4. 
Last, inplace partition the weights and optim states to\n # DTensors base on the parallel style\n accessor = NamedMemberAccessor(model)\n for param_key, param in params_buffers.items():\n placement: Placement = Replicate()\n if parallel_style == DataParallelStyle.FULLY_SHARD:\n placement = Shard(0)\n elif parallel_style != DataParallelStyle.REPLICATE:\n raise RuntimeError(f\"parallel style {parallel_style} not supported yet\")\n\n dtensor_param = distribute_tensor(param, mesh, [placement])\n # update re-parameterized module param dict and optim states dict to DTensor\n params_buffers[param_key] = dtensor_param.to_local()\n # update module parameters to DTensor\n accessor.set_tensor(param_key, dtensor_param)\n\n # update the optimizer state key and values to DTensor\n if optimizer is not None and param in optimizer.state:\n param_states = named_states[param_key]\n param_dtensor_states = {}\n for state_key, state_val in param_states.items():\n if isinstance(state_val, torch.Tensor) and state_val.ndim > 0:\n # shard/replicate non-scalar tensors, for scalar tensor, we\n # don't do anything\n dtensor_state = distribute_tensor(state_val, mesh, [placement])\n param_dtensor_states[state_key] = dtensor_state\n param_states[state_key] = dtensor_state.to_local()\n else:\n param_dtensor_states[state_key] = state_val\n\n optimizer.state.pop(param) # type: ignore[call-overload]\n optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index]\n\n return partitioned_graph", "def train(self, mode=True):\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n # print('the type train model : {}'.format(type(m)))\n if isinstance(m, torch.nn.BatchNorm2d) or \\\n isinstance(m, linklink.nn.syncbn_layer.SyncBatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # print('the freeze module: {} of {}th'.format(type(m), count))\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == 
\"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, 
last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def initialize_multitask_model(\n *,\n model_def: nn.Module,\n input_spec: Dict[Tuple[Tuple[str, str], ...],\n Sequence[Union[Tuple[Tuple[int, ...], jnp.dtype],\n Tuple[int, ...]]]],\n config: ml_collections.ConfigDict,\n rngs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]],\n) -> Tuple[PyTree, PyTree, int, Optional[Dict[str, float]]]:\n\n def init_fn(model_def):\n for kwargs, in_spec in input_spec.items():\n\n if config.get('batch_sizes') is not None:\n batch_size = config.batch_sizes.get(dict(kwargs)['dataset'])\n else:\n batch_size = config.batch_size\n\n batch_size = (batch_size // jax.device_count()) if batch_size else None\n\n input_shapetype = [\n debug_utils.input_spec_to_jax_shape_dtype_struct(\n spec, batch_size=batch_size) for spec in in_spec\n ]\n dummy_input = []\n for in_st in input_shapetype:\n dummy_input.append(jnp.zeros(in_st.shape, in_st.dtype))\n model_def(\n *dummy_input, train=False, debug=False, **dict(kwargs))\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, 
backend='cpu')\n def _initialize_model(rngs):\n \"\"\"Initialization function to be jitted.\"\"\"\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state\n\n if not isinstance(rngs, dict):\n rngs = {'params': rngs}\n init_params, init_model_state = _initialize_model(rngs)\n # Pop out params rng:\n rngs.pop('params')\n\n # Count number of trainable parameters:\n num_trainable_params = debug_utils.log_param_shapes(init_params)\n\n # Count gflops:\n count_flops = config.get('count_flops',\n ml_collections.ConfigDict({'count_flops': True}))\n if count_flops:\n variables = {'params': init_params, **init_model_state}\n gflops_dict = {}\n gflops_all = 0\n for kwargs, in_spec in input_spec.items():\n flops = debug_utils.compute_flops(\n flax_model_apply_fn=functools.partial(\n model_def.apply,\n variables,\n train=False,\n debug=False,\n rngs=rngs,\n **dict(kwargs)),\n input_spec=count_flops.get('input_spec', in_spec),\n fuse_multiply_add=count_flops.get('fuse_multiply_add', True))\n gflops = flops / (10**9)\n gflops_key = 'gflops/' + '/'.join(f'{x}={y}' for x, y in kwargs)\n gflops_dict[gflops_key] = gflops\n gflops_all += gflops\n gflops_dict['gflops'] = gflops_all\n else:\n gflops_dict = None\n\n return init_params, init_model_state, num_trainable_params, gflops_dict", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train 
loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def run_training(self, schema_params, export_model=False, output_model_dir=None):\n # Log distributed execution context, which includes cluster configuration\n logger.info(f\"Commencing {self.effect_name} training\")\n logger.info(f\"Execution context : {self.execution_context}\")\n\n # Create partition_index_list\n partition_index_list = self._get_partition_list()\n logger.info(f\"This worker on work on the following list of partitions : {partition_index_list}\")\n\n # Sequentially train model on partitions\n for partition_index in partition_index_list:\n logger.info(f\"Commencing {self.effect_name} training for partition index : {partition_index}\")\n\n # Resolve partitioned data directory from raw path params from user\n checkpoint_path = self._anchor_directory(\n self.model.checkpoint_path,\n partition_index)\n training_data_dir = self._anchor_directory(self.model.training_data_dir,\n partition_index)\n validation_data_dir = self._anchor_directory(self.model.validation_data_dir,\n partition_index) if self.model.validation_data_dir else None\n\n if is_empty_directory(training_data_dir):\n logger.info(f\"{training_data_dir} is empty, no dataset to train on.\")\n continue\n # Train model\n self.execution_context[constants.PARTITION_INDEX] = partition_index\n self.model.train(training_data_dir=training_data_dir,\n validation_data_dir=validation_data_dir,\n metadata_file=self.model.metadata_file,\n checkpoint_path=checkpoint_path,\n execution_context=self._prepare_training_context(partition_index),\n schema_params=schema_params)\n\n # Chief should export model\n is_chief = self.execution_context[constants.IS_CHIEF]\n if export_model and is_chief:\n logger.info(f\"Exporting model to directory : {output_model_dir}\")\n self.model.export(output_model_dir=output_model_dir)", "def data_parallel(self, batch_size, inputs):\n inputs = list(inputs)\n\n # quick path: only one device, do not slice\n if len(self.work_devices) == 1:\n assert(self.main_device == self.work_devices[0])\n yield self.main_device, False, tuple(inputs)\n\n # slow path: multi-GPUs\n else:\n # the 
GPUs are not in the same group, place variables on CPU\n if self.main_device not in self.work_devices:\n yield self.main_device, True, tuple(inputs)\n\n # build the paralleled computation graph for each device\n with tf.name_scope('data_parallel') as ns:\n pass # generate a name scope to place our data slicing ops\n\n k = len(self.work_devices)\n for i, device in enumerate(self.work_devices):\n dev_inputs = []\n with tf.name_scope(ns + 'tower_gpu_{}'.format(i)):\n for inp in inputs:\n slice_len = (batch_size + k - 1) // k\n low, high = slice_len * i, slice_len * (i + 1)\n dev_inputs.append(inp[low: high])\n yield device, False, tuple(dev_inputs)", "def run(rank, world_size, config):\n setup(rank, world_size, master_addr=config.neural_network.train.DistributedDataParallel.MASTER_ADDR, master_port=config.neural_network.train.DistributedDataParallel.MASTER_PORT)\n\n torch.manual_seed(int(config.neural_network.train.random_seed))\n training_dataloader, validation_dataloader, batch_size = partition_dataset(rank, world_size, config)\n\n total_epochs = int(config.neural_network.train.epochs)\n learning_rate = float(config.neural_network.train.learning_rate)\n\n n_hourglass = int(config.neural_network.PoseNet.n_hourglass)\n in_channels = int(config.neural_network.PoseNet.in_channels)\n out_channels = int(config.neural_network.PoseNet.out_channels)\n channel_increase = int(config.neural_network.PoseNet.channel_increase)\n model = PoseNet(n_hourglass=n_hourglass, in_channels=in_channels, out_channels=out_channels, channel_increase=channel_increase).to(rank)\n model = DistributedDataParallel(model, device_ids=[rank])\n\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate)\n loss_fn = HeatMapLossBatch()\n\n train_loader = iter(training_dataloader)\n valid_loader = iter(validation_dataloader)\n\n for epoch in range(total_epochs):\n training_dataloader.sampler.set_epoch(epoch)\n epoch_loss = 0.0\n\n train_iters = 0\n while train_iters < int(config.neural_network.train.train_iterations):\n train_iters += 1\n\n try:\n images, heatmaps = next(train_loader)\n except StopIteration:\n train_loader = iter(training_dataloader)\n images, heatmaps = next(train_loader)\n\n images = images.cuda(non_blocking=True)\n heatmaps = heatmaps.cuda(non_blocking=True)\n optimizer.zero_grad()\n output = model(images)\n loss = loss_fn(output, heatmaps)\n epoch_loss += utils.make_output(loss)\n loss.backward()\n average_gradients(model, world_size)\n optimizer.step()\n\n # validation\n with torch.no_grad():\n validation_loss = 0\n validation_dataloader.sampler.set_epoch(epoch)\n\n valid_iters = 0\n while valid_iters < int(config.neural_network.train.valid_iterations):\n valid_iters += 1\n\n try:\n images, heatmaps = next(valid_loader)\n except StopIteration:\n train_loader = iter(validation_dataloader)\n images, heatmaps = next(valid_loader)\n\n output = model(images)\n loss = loss_fn(output, heatmaps)\n validation_loss += utils.make_output(loss)\n\n epoch_train_loss = epoch_loss/config.neural_network.train.train_iterations\n epoch_valid_loss = validation_loss/config.neural_network.train.valid_iterations\n print(f\"rank:{dist.get_rank():2d} epoch:{epoch:3d} epoch_train_loss:{epoch_train_loss:0.4f} epoch_valid_loss:{epoch_valid_loss:0.4f}\")\n\n save_checkpoint = (rank == 0 and epoch > 0 and config.neural_network.train.checkpoint.save and epoch % config.neural_network.train.checkpoint.save_every == 0)\n if save_checkpoint:\n torch.save(model.state_dict(), 
config.neural_network.train.checkpoint.path) # saving it in one process is sufficient.\n dist.barrier()\n\n cleanup()", "def may_data_parallel(model):\n if torch.cuda.device_count() > 1:\n model = TransparentDataParallel(model)\n return model", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def sync_model(model):\n size = float(dist.get_world_size())\n\n for param in model.parameters():\n dist.broadcast(param.data, 0)", "def initialize_model(self):\n args = self.args\n\n if self.args.search_space == 'nasbench':\n self.model_fn = NasBenchNetSearchDarts\n self.fixmodel_fn = NasBenchNet\n model = self.model_fn(args)\n utils = darts_nasbench_utils\n else:\n raise NotImplementedError(\"Not supported\")\n # finialize model update\n if args.gpus > 0:\n if self.args.gpus == 1:\n model = model.cuda()\n self.parallel_model = model\n else:\n self.model = model\n self.parallel_model = nn.DataParallel(self.model).cuda()\n # IPython.embed(header='checking replicas and others.')\n else:\n self.parallel_model = model\n\n darts = DartsArchitect(model, args=args)\n model = self.parallel_model\n # logging.info(\"DARTS param size = %fMB\", utils.count_parameters_in_MB(darts))\n self.train_fn = partial(darts_train_model, args=args, architect=darts, sampler=None)\n self.eval_fn = partial(darts_model_validation, args=args, verbose=True)\n self.controller = darts\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n\n # scheduler as Cosine.\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.learning_rate_min)\n return model, optimizer, scheduler, darts, None", "def train(self):\r\n print(\"Starting training now\")\r\n cuda = True if torch.cuda.is_available() else False\r\n if cuda:\r\n self.model.cuda()\r\n\r\n # Construct optimizer after the model moved to GPU\r\n self.optm = self.make_optimizer()\r\n self.lr_scheduler = self.make_lr_scheduler(self.optm)\r\n\r\n dim_x = self.flags.dim_x\r\n dim_y = self.flags.dim_y\r\n dim_z = self.flags.dim_z\r\n dim_tot = self.flags.dim_tot\r\n\r\n # Time keeping\r\n tk = time_keeper(time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))\r\n\r\n for epoch in range(self.flags.train_step):\r\n # Set to Training Mode\r\n train_loss = 0\r\n self.model.train()\r\n # If MMD on x-space is present from the start, the model can get stuck.\r\n # Instead, ramp it up exponetially.\r\n loss_factor = min(1., 2. * 0.002 ** (1. 
- (float(epoch) / self.flags.train_step)))\r\n\r\n for j, (x, y) in enumerate(self.train_loader):\r\n batch_size = len(x)\r\n\r\n ######################\r\n # Preparing the data #\r\n ######################\r\n # Pad the x, y with zero_noise\r\n y_clean = y.clone() # keep a copy of y for backward\r\n x_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_x)\r\n y_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_y - dim_z)\r\n z = torch.randn(batch_size, dim_z)\r\n if cuda:\r\n x = x.cuda() # Put data onto GPU\r\n y = y.cuda() # Put data onto GPU\r\n x_pad = x_pad.cuda()\r\n y_pad = y_pad.cuda()\r\n y_clean = y_clean.cuda()\r\n z = z.cuda()\r\n\r\n # Concate the x and y with pads and add y with small purtubation\r\n y += self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)\r\n\r\n x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y), dim=1)\r\n\r\n ################\r\n # Forward step #\r\n ################\r\n self.optm.zero_grad() # Zero the gradient first\r\n ypred = self.model(x) # Get the Ypred\r\n\r\n\r\n # Do the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)\r\n MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:], labels=y[:, dim_z:])\r\n\r\n # Use the maximum likelihood method\r\n log_det = self.model.log_jacobian(x=x)\r\n #print(\"The log determinant is\", log_det)\r\n Forward_loss = 0.5 * (MSE_loss_y / self.flags.lambda_mse + torch.mean(torch.pow(z,2))) - torch.mean(log_det)\r\n Forward_loss.backward()\r\n\r\n ######################\r\n # Gradient Clipping #\r\n ######################\r\n for parameter in self.model.parameters():\r\n parameter.grad.data.clamp_(-self.flags.grad_clamp, self.flags.grad_clamp)\r\n\r\n #########################\r\n # Descent your gradient #\r\n #########################\r\n self.optm.step() # Move one step the optimizer\r\n\r\n # MLE training\r\n train_loss += Forward_loss \r\n\r\n # Calculate the avg loss of training\r\n train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)\r\n\r\n if epoch % self.flags.eval_step == 0: # For eval steps, do the evaluations and tensor board\r\n # Record the training loss to the tensorboard\r\n self.log.add_scalar('Loss/total_train', train_avg_loss, epoch)\r\n self.log.add_scalar('Loss/MSE_y_train', MSE_loss_y, epoch)\r\n\r\n # Set to Evaluation Mode\r\n self.model.eval()\r\n print(\"Doing Evaluation on the model now\")\r\n\r\n test_loss = 0\r\n for j, (x, y) in enumerate(self.test_loader): # Loop through the eval set\r\n batch_size = len(x)\r\n\r\n ######################\r\n # Preparing the data #\r\n ######################\r\n # Pad the x, y with zero_noise\r\n y_clean = y.clone() # keep a copy of y for backward\r\n x_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_x)\r\n y_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_y - dim_z)\r\n z = torch.randn(batch_size, dim_z)\r\n if cuda:\r\n x = x.cuda() # Put data onto GPU\r\n y = y.cuda() # Put data onto GPU\r\n x_pad = x_pad.cuda()\r\n y_pad = y_pad.cuda()\r\n y_clean = y_clean.cuda()\r\n z = z.cuda()\r\n\r\n # Concate the x and y with pads and add y with small purtubation\r\n y += self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)\r\n\r\n x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y), dim=1)\r\n\r\n ################\r\n # Forward step #\r\n ################\r\n self.optm.zero_grad() # Zero the gradient first\r\n ypred = self.model(x) # Get the Ypred\r\n # Do 
the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)\r\n MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:], labels=y[:, dim_z:])\r\n\r\n log_det = self.model.log_jacobian(x=x)\r\n #print(\"The log determinant is\", log_det)\r\n Forward_loss = 0.5 * (MSE_loss_y / self.flags.lambda_mse + torch.mean(torch.pow(z,2))) - torch.mean(log_det)\r\n test_loss += Forward_loss\r\n # Aggregate the other loss (in np form)\r\n\r\n # Record the testing loss to the tensorboard\r\n test_avg_loss = test_loss.cpu().data.numpy() / (j+1)\r\n\r\n self.log.add_scalar('Loss/total_test', test_avg_loss, epoch)\r\n self.log.add_scalar('Loss/MSE_y_test', MSE_loss_y, epoch)\r\n\r\n print(\"This is Epoch %d, training loss %.5f, validation loss %.5f\" \\\r\n % (epoch, train_avg_loss, test_avg_loss ))\r\n\r\n # Model improving, save the model down\r\n if test_avg_loss < self.best_validation_loss:\r\n self.best_validation_loss = train_avg_loss\r\n self.save()\r\n print(\"Saving the model down...\")\r\n\r\n if self.best_validation_loss < self.flags.stop_threshold:\r\n print(\"Training finished EARLIER at epoch %d, reaching loss of %.5f\" %\\\r\n (epoch, self.best_validation_loss))\r\n break\r\n\r\n # Learning rate decay upon plateau\r\n self.lr_scheduler.step(train_avg_loss)\r\n tk.record(1) # Record the total time of the training peroid\r", "def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()", "def train(model, data_loader, optimizer, epoch, train_mloss, train_rloss, train_acc, learning_rate, lr_wr, output_tensor):\r\n print('===> Training mode')\r\n\r\n num_batches = len(data_loader) # iteration per epoch. 
e.g: 469\r\n total_step = args.epochs * num_batches\r\n epoch_tot_acc = 0\r\n\r\n # Switch to train mode\r\n model.train()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n start_time = timer()\r\n\r\n for batch_idx, (data, target) in enumerate(tqdm(data_loader, unit='batch')):\r\n batch_size = data.size(0)\r\n global_step = batch_idx + (epoch * num_batches) - num_batches\r\n\r\n labels = target\r\n target_one_hot = utils.one_hot_encode(target, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n data, target = Variable(data), Variable(target_one_hot)\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n labels = labels.to(args.device)\r\n\r\n # Train step - forward, backward and optimize\r\n optimizer.zero_grad()\r\n #utils.exponential_decay_LRR(optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n # learning rate policies\r\n if args.find_lr:\r\n utils.find_lr(optimizer, global_step)\r\n\r\n elif args.exp_decay_lr:\r\n utils.exponential_decay_LRR(\r\n optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n\r\n elif args.one_cycle_policy:\r\n utils.one_cycle_policy(optimizer, args.lr, global_step, total_step)\r\n\r\n elif args.warm_restarts:\r\n # lr_wr.update_lr(optimizer, num_batches)\r\n lr_wr.update_lr(optimizer)\r\n\r\n output, reconstruction = model(data, labels, True)\r\n # utils.write_tensor(output, output_tensor)\r\n loss, margin_loss, recon_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n for param_group in optimizer.param_groups:\r\n lr_temp = param_group['lr']\r\n learning_rate.write('%.10f \\n' % lr_temp)\r\n\r\n # Calculate accuracy for each step and average accuracy for each epoch\r\n acc = utils.accuracy(output, labels, args.cuda)\r\n epoch_tot_acc += acc\r\n epoch_avg_acc = epoch_tot_acc / (batch_idx + 1)\r\n\r\n train_mloss.write('%.6f \\n' % margin_loss)\r\n train_rloss.write('%.6f \\n' % recon_loss)\r\n train_acc.write('%.6f \\n' % acc)\r\n\r\n # Print losses\r\n if batch_idx % args.log_interval == 0:\r\n template = 'Epoch {}/{}, ' \\\r\n 'Step {}/{}: ' \\\r\n '[Total loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f},' \\\r\n '\\tBatch accuracy: {:.6f},' \\\r\n '\\tAccuracy: {:.6f}]'\r\n tqdm.write(template.format(\r\n epoch,\r\n args.epochs,\r\n global_step,\r\n total_step,\r\n loss.data.item(),\r\n margin_loss.data.item(),\r\n recon_loss.data.item() if args.use_reconstruction_loss else 0,\r\n acc,\r\n epoch_avg_acc))\r\n\r\n # Print time elapsed for an epoch\r\n end_time = timer()\r\n\r\n global avg_training_time_per_epoch\r\n\r\n avg_training_time_per_epoch = (avg_training_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n print('Time elapsed for epoch {}: {:.0f}s.'.format(epoch, end_time - start_time))", "def test_auto_scale_batch_size_set_model_attribute(tmpdir, use_hparams):\n tutils.reset_seed()\n\n hparams = EvalModelTemplate.get_default_hparams()\n before_batch_size = hparams.get('batch_size')\n\n class HparamsEvalModelTemplate(EvalModelTemplate):\n\n def dataloader(self, *args, **kwargs):\n # artificially set batch_size so we can get a dataloader\n # remove it immediately after, because we want only self.hparams.batch_size\n setattr(self, \"batch_size\", before_batch_size)\n 
dataloader = super().dataloader(*args, **kwargs)\n del self.batch_size\n return dataloader\n\n datamodule_model = MNISTDataModule(data_dir=tmpdir, batch_size=111) # this datamodule should get ignored!\n datamodule_fit = MNISTDataModule(data_dir=tmpdir, batch_size=before_batch_size)\n\n model_class = HparamsEvalModelTemplate if use_hparams else EvalModelTemplate\n model = model_class(**hparams)\n model.datamodule = datamodule_model # unused when another module gets passed to .tune() / .fit()\n\n trainer = Trainer(default_root_dir=tmpdir,\n max_epochs=1,\n auto_scale_batch_size=True,\n gpus=1)\n trainer.tune(model, datamodule_fit)\n after_batch_size = model.hparams.batch_size if use_hparams else model.batch_size\n assert trainer.datamodule == datamodule_fit\n assert before_batch_size != after_batch_size\n assert after_batch_size <= len(trainer.train_dataloader.dataset)\n assert datamodule_fit.batch_size == after_batch_size\n # should be left unchanged, since it was not passed to .tune()\n assert datamodule_model.batch_size == 111", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n 
self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def configure_ddp(self):\n\n if (hasattr(self.model, 'megatron_amp_o2') and self.model.megatron_amp_o2) or (\n hasattr(self.model, 'with_distributed_adam') and self.model.with_distributed_adam\n ):\n # do not use DDP if using megatron amp O2 or distributed optimizer\n self._model = _LightningModuleWrapperBase(self.model)\n else:\n app_state = AppState()\n\n if app_state.model_parallel_size is not None:\n\n logging.info(f\"Configuring DDP for model parallelism.\")\n\n # With model parallelism, multiple GPUs form a large \"logical 
GPU\"\n # this means that data parallel groups span multiple GPUs\n # and are non-trivial\n # TODO: for megatron-lm self.model is a list\n # Removing self.pre_configure_ddp() as DDP's 'find_unused_parameters' now defaults\n # to False in PTL 2.0 and hence pre_configure_ddp() is removed in ddp.py\n # self.pre_configure_ddp()\n # device_ids = self.determine_ddp_device_ids()\n self._model = DistributedDataParallel(\n _LightningModuleWrapperBase(self.model),\n process_group=parallel_state.get_data_parallel_group(),\n **self._ddp_kwargs,\n )\n\n if self.no_ddp_communication_hook:\n # When using custom gradient accumulation and allreduce, disable\n # DDP communication hook that works on the gradient bucket.\n # Instead, use the custom gradient function and communication hook,\n # which is defined in the master optimizer wrapper.\n self._model.require_backward_grad_sync = False\n self._model.register_comm_hook(None, noop_hook)\n\n else:\n super().configure_ddp()", "def create_embeddings(model_fc, ds, model_name, storage_path, storage_size=1000, parallel=True): \n\n # create folder when doesn't exist yet\n try:\n os.makedirs(storage_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n \n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n feature_extractor = model_fc\n if parallel:\n feature_extractor = nn.DataParallel(model_fc)\n target_dataset = ds\n len_target_dataset = len(target_dataset)\n # save some memory\n\n feature_extractor.eval()\n \n with torch.no_grad():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Moving model to {device}\")\n feature_extractor = feature_extractor.to(device)\n params = {'batch_size': 50,\n 'shuffle': False,\n 'num_workers': 6,\n 'pin_memory': False}\n\n print(f\"Length of dataset is {len_target_dataset}\")\n if (len_target_dataset >= storage_size):\n\n if len_target_dataset % storage_size != 0:\n until_i = (len_target_dataset // storage_size + 1)\n else:\n until_i = (len_target_dataset // storage_size)\n\n for i in range(until_i):\n\n \"\"\"Check if we overshot the entries\"\"\"\n if ((i+1)*storage_size <= len_target_dataset):\n t_dataset = torch.utils.data.Subset(target_dataset, range(i*storage_size, (i+1)*storage_size))\n else:\n remainder = len_target_dataset - i*storage_size\n print(f\"Calculating for remainder: {remainder} because we want to extract {(i+1)*storage_size}\")\n t_dataset = torch.utils.data.Subset(target_dataset, range(i*storage_size, (i*storage_size) + remainder))# use remainder\n\n training_generator = data.DataLoader(t_dataset, **params)\n\n features = torch.Tensor([]).to(device)\n labels = torch.LongTensor([]).to(device)\n\n for local_batch, local_labels in training_generator:\n local_batch = local_batch.to(device)\n local_labels = local_labels.to(device)\n output = feature_extractor(local_batch)\n features = torch.cat([features, output], dim=0)\n labels = torch.cat([labels, local_labels], dim=0)\n\n print(features.size())\n features = features.to(\"cpu\")\n labels = labels.to(\"cpu\")\n\n x = features.detach().numpy()\n y = labels.detach().numpy()\n\n np.savez_compressed(f'{storage_path}/{model_name}_{i}.npz', x=x, y=y)\n\n del features\n del labels\n del local_batch\n del local_labels\n torch.cuda.empty_cache()\n\n if (len_target_dataset < storage_size):\n training_generator = data.DataLoader(target_dataset, **params)\n features = torch.Tensor([]).to(device)\n labels = torch.LongTensor([]).to(device)\n\n for local_batch, local_labels in training_generator:\n local_batch = 
local_batch.to(device)\n local_labels = local_labels.to(device)\n output = feature_extractor(local_batch)\n features = torch.cat([features, output], dim=0)\n labels = torch.cat([labels, local_labels], dim=0)\n\n print(features.size())\n features = features.to(\"cpu\")\n labels = labels.to(\"cpu\")\n\n x = features.detach().numpy()\n y = labels.detach().numpy()\n\n np.savez_compressed(f'{storage_path}/{model_name}_0.npz', x=x, y=y)\n\n del features\n del labels\n del local_batch\n del local_labels\n torch.cuda.empty_cache()", "def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n base_path,\n dataset_param,\n train=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n batch_size,\n drop_last=True)\n train_data_loader = 
torch.utils.data.DataLoader(train_dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n batch_size,\n drop_last=True)\n val_data_loader = torch.utils.data.DataLoader(val_dataset,\n batch_sampler=val_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n # Fastforward the LR decayer\n start_epoch = TRAIN_CFG['start_epoch']\n max_epoch = TRAIN_CFG['max_epoch']\n for _ in range(0, -1):\n scheduler.step()\n\n # Start training\n print(\"======= Training for \" + str(max_epoch) + \"===========\")\n for epoch in range(start_epoch, int(max_epoch) + 1):\n if epoch % TRAIN_CFG['eval_every'] == 0:\n print(\"========= Evaluating Model ==========\")\n result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n if torch.distributed.get_rank() == 0:\n logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n result_dict))\n \n train_one_epoch(model, optimizer, train_data_loader,\n device, epoch, print_freq=1000)\n scheduler.step()\n if torch.distributed.get_rank() == 0:\n print(\"Saving model\")\n torch.save(model.state_dict(), osp.join(save_dir,\n TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))", "def calibrate_model(model, criterion, data_loader, neval_batches):\n model.eval()\n cpu = torch.device(\"cpu\")\n \n cnt = 0\n\n with torch.no_grad():\n for image, target in data_loader:\n image = image.to(cpu)\n target = target.to(cpu)\n output = model(image)\n loss = criterion(output, target)\n cnt += 1\n if cnt >= neval_batches:\n return", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. 
\"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if 
self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def keras_multitask(self, args):\n start_time = time.time()\n\n # if self.args.log_metrics:\n # utils.wandb_init_logs(self.config[\"multitask_trainer\"])\n\n embedding_type = self.config[\"multitask_trainer\"][\"embedding_type\"]\n max_len = int(self.config[\"multitask_trainer\"][\"max_len\"])\n\n reader = 
SciciteReader(self.config[\"preprocessor\"])\n print(\"Loading data...\")\n text, labels, sections, worthiness = reader.load_data(\n _type=\"train\", multitask=True\n )\n text_dev, labels_dev, _, _ = reader.load_data(_type=\"dev\", multitask=False)\n text_test, labels_test, _, _ = reader.load_data(_type=\"test\", multitask=False)\n\n keras_model = MultitaskLearner(self.config)\n\n if embedding_type == \"bert\" or embedding_type == \"albert\":\n input_ids, input_masks, input_segments = keras_model.prepare_input_data(\n text\n )\n (\n dev_input_ids,\n dev_input_masks,\n dev_input_segments,\n ) = keras_model.prepare_input_data(text_dev)\n (\n test_input_ids,\n test_input_masks,\n test_input_segments,\n ) = keras_model.prepare_input_data(text_test)\n\n print(\"Preparing data...\")\n text_tensor, text_tokenizer = keras_model.prepare_data(text, max_len=max_len)\n labels_tensor, labels_tokenizer = keras_model.prepare_data(labels)\n sections_tensor, sections_tokenizer = keras_model.prepare_data(sections)\n worthiness_tensor, worthiness_tokenizer = keras_model.prepare_data(worthiness)\n\n text_tensor_dev = keras_model.prepare_dev_data(\n text_dev, text_tokenizer, max_len=max_len\n )\n labels_tensor_dev = keras_model.prepare_dev_data(labels_dev, labels_tokenizer)\n text_tensor_test = keras_model.prepare_dev_data(\n text_test, text_tokenizer, max_len=max_len\n )\n labels_tensor_test = keras_model.prepare_dev_data(labels_test, labels_tokenizer)\n\n print(\"Creating datasets...\")\n if embedding_type == \"lstm\":\n dataset = keras_model.create_dataset(\n text=text_tensor,\n labels=labels_tensor,\n sections=sections_tensor,\n worthiness=worthiness_tensor,\n ids=None,\n mask=None,\n segments=None,\n )\n dev_dataset = keras_model.create_dev_dataset(\n text=text_tensor_dev,\n ids=None,\n mask=None,\n segments=None,\n labels=labels_tensor_dev,\n )\n test_dataset = keras_model.create_dev_dataset(\n text=text_tensor_test,\n ids=None,\n mask=None,\n segments=None,\n labels=labels_tensor_test,\n )\n elif embedding_type == \"bert\" or embedding_type == \"albert\":\n dataset = keras_model.create_dataset(\n text=None,\n labels=labels_tensor,\n sections=sections_tensor,\n worthiness=worthiness_tensor,\n ids=input_ids,\n mask=input_masks,\n segments=input_segments,\n )\n dev_dataset = keras_model.create_dev_dataset(\n text=None,\n ids=dev_input_ids,\n mask=dev_input_masks,\n segments=dev_input_segments,\n labels=labels_tensor_dev,\n )\n test_dataset = keras_model.create_dev_dataset(\n text=None,\n ids=test_input_ids,\n mask=test_input_masks,\n segments=test_input_segments,\n labels=labels_tensor_test,\n )\n\n vocab_size = len(text_tokenizer.word_index.keys()) + 1\n labels_size = len(labels_tokenizer.word_index.keys())\n section_size = len(sections_tokenizer.word_index.keys())\n worthiness_size = len(worthiness_tokenizer.word_index.keys())\n\n print(\"Creating model...\")\n keras_model.create_model(vocab_size, labels_size, section_size, worthiness_size)\n print(\"Fitting model...\")\n keras_model.fit_model(dataset, dev_dataset)\n\n print(\"Saving model...\")\n keras_model.save_model()\n\n print(\"Evaluating...\")\n keras_model.eval(test_dataset, save_output=True)\n keras_model.eval(test_dataset, save_output=False)\n\n end_time = time.time()\n total_time = end_time - start_time\n print(\"Execution time:\", str(datetime.timedelta(seconds=total_time)))", "def train_model_batch(model, config, test, resume=None):\n\n if config['optimizer']['method'] == 'adagrad':\n optimizer = Adagrad()\n elif 
config['optimizer']['method'] == 'adadelta':\n optimizer = Adadelta()\n elif config['optimizer']['method'] == 'adam':\n optimizer = Adam()\n else: # default SGD\n params = config['optimizer']['params']\n if resume is None: # New experiment\n optimizer = SGD(lr=params['lrate'], momentum=params['momentum'], decay=params['decay'],\n nesterov=params['nesterov'])\n iepoch = 0\n else: # Resume training\n nlrate = params['lrate'] - ((params['lrate'] / config['train']['epochs']) * params['epochs_trained'])\n\n optimizer = SGD(lr=nlrate, momentum=params['momentum'], decay=params['decay'],\n nesterov=params['nesterov'])\n iepoch = config['train']['epochs_trained']\n\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n classweight = detransweights(config['train']['classweight'])\n if 'log' not in config or config['log'] == 'db':\n dblog = DBLog(database=mongoconnection, config=config, model=model, modelj=model.to_json(), resume=resume)\n else:\n dblog = FileLog(config=config, modelj=model.to_json())\n\n recode = None if 'recode' not in config else recoding_dictionary(config['recode'])\n\n train = Dataset(config['datapath'], config['traindata'], config['zfactor'], imgord=config['imgord'],\n nclasses=test.nclasses, recode=recode)\n\n # Train Epochs\n logs = {'loss': 0.0, 'acc': 0.0, 'val_loss': 0.0, 'val_acc': 0.0}\n train.open()\n chunks, _ = train.chunks_list()\n\n for epoch in range(iepoch, config['train']['epochs']):\n\n shuffle(chunks)\n\n # Train Batches\n lloss = []\n lacc = []\n for chunk in chunks:\n train.load_chunk(chunk, config['train']['batchsize'])\n\n for p in train.perm:\n loss, acc = model.train_on_batch(train.X_train[p], train.y_train[p], class_weight=classweight)\n lloss.append(loss)\n lacc.append(acc)\n\n logs['loss'] = float(np.mean(lloss))\n logs['acc'] = float(np.mean(lacc))\n\n logs['val_loss'], logs['val_acc'] = model.evaluate(test.X_train, test.y_train, verbose=0)\n\n force_stop = dblog.force_stop()\n dblog.on_epoch_end(epoch, logs=logs)\n\n if config['savepath']:\n model.save(config['savepath'] + '/' + str(dblog.id) + '.h5')\n\n # If the training is stopped remotely training stops\n if force_stop:\n break\n train.close()\n\n scores = model.evaluate(test.X_train, test.y_train, verbose=0)\n dblog.on_train_end(logs={'acc': logs['acc'], 'val_acc': scores[1]})\n y_pred = model.predict_classes(test.X_train, verbose=0)\n dblog.save_final_results(scores, confusion_matrix(test.y_labels, y_pred),\n classification_report(test.y_labels, y_pred))", "def train_epoch(model, data_loader, data_iter, optimizer, device,\n epoch_size=None, eval_cluster_error=True, core_reset=False,\n eval_rank=False, mc_mode=False, lip_mode=False):\n data_tic = epoch_tic = time.time()\n data_rtime, reset_rtime = 0.0, 0.0\n metrics = None\n conf_mats = ut.AverageMeter() if eval_cluster_error else None\n resets = [] if core_reset else None\n comp_err = ut.AverageMeter() if mc_mode else None\n itr, epochN = 1, 0\n epoch_stop = False\n if data_iter is None:\n data_iter = iter(data_loader)\n model.epoch_init()\n while not epoch_stop:\n try:\n data_tup = next(data_iter)\n except StopIteration:\n if epoch_size is None or epochN >= epoch_size:\n data_iter, epoch_stop = None, True\n break\n else:\n data_iter = iter(data_loader)\n data_tup = next(data_iter)\n if epochN >= epoch_size:\n epoch_stop = True\n\n if len(data_tup) == 3:\n x, groups, x0 = data_tup\n if x0 is not None:\n x0 = x0.to(device)\n else:\n x, groups = data_tup\n x0 = None\n x = x.to(device)\n batch_size = 
x.shape[0]\n epochN += batch_size\n data_rtime += time.time() - data_tic\n\n # opt step\n optimizer.zero_grad()\n (batch_obj_mean, batch_obj, batch_loss,\n batch_reg_in, batch_reg_out) = model.objective(x)\n\n if torch.isnan(batch_obj_mean.data):\n raise RuntimeError('Divergence! NaN objective.')\n\n batch_obj_mean.backward()\n optimizer.step()\n\n batch_metrics = [batch_obj, batch_loss, batch_reg_in, batch_reg_out]\n if metrics is None:\n metrics = [ut.AverageMeter() for _ in range(len(batch_metrics))]\n for kk in range(len(batch_metrics)):\n metrics[kk].update(batch_metrics[kk].cpu(), batch_size)\n\n # eval batch cluster confusion\n if eval_cluster_error:\n batch_conf_mats = torch.stack([\n torch.from_numpy(ut.eval_confusion(model.groups[:, ii], groups,\n model.k, true_classes=data_loader.dataset.classes))\n for ii in range(model.replicates)])\n conf_mats.update(batch_conf_mats, 1)\n\n # eval batch completion if in missing data setting\n if mc_mode and x0 is not None:\n batch_comp_err = model.eval_comp_error(x0)\n comp_err.update(batch_comp_err.cpu(), batch_size)\n\n if core_reset:\n reset_tic = time.time()\n batch_resets = model.core_reset()\n if batch_resets.shape[0] > 0:\n rIdx = np.unique(batch_resets[:, 0].astype(np.int64))\n ut.reset_optimizer_state(model, optimizer, rIdx)\n batch_resets = np.insert(batch_resets, 0, itr, axis=1)\n resets.append(batch_resets)\n reset_rtime += time.time() - reset_tic\n\n itr += 1\n data_tic = time.time()\n\n # evaluate summary metrics\n metrics = torch.stack([met.avg for met in metrics])\n conf_mats, errors, error_stats = _cluster_error_summary(eval_cluster_error,\n conf_mats, model)\n resets, reset_count, rep_reset_counts = _resets_summary(core_reset, resets,\n model)\n svs, rank_stats = _rank_summary(eval_rank, model)\n comp_err, comp_err_stats = _comp_err_summary(mc_mode, comp_err, model)\n lip, lip_stats = _lip_summary(lip_mode, model)\n\n rtime = time.time() - epoch_tic\n sampsec = epochN / rtime\n\n metrics, metrics_summary = _all_metrics_summary(metrics, errors, error_stats,\n reset_count, rep_reset_counts, rank_stats, comp_err, comp_err_stats, lip,\n lip_stats, sampsec, rtime, data_rtime, reset_rtime)\n return metrics_summary, metrics, conf_mats, resets, svs, data_iter", "def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def train_step(args, model: torch.nn.Module, batch: Tuple, meters: AverageMeterSet, epoch: int, batch_idx: int):\n labeled_batch, unlabeled_batch = batch\n labeled, targets = labeled_batch\n unlabeled_k, _ = unlabeled_batch\n\n # One hot labels\n targets = torch.zeros(args.batch_size, args.num_classes).scatter_(\n 1, targets.view(-1, 1), 1\n )\n\n unlabeled_k = [u_k.to(args.device) for u_k in unlabeled_k]\n labeled = labeled.to(args.device)\n targets = targets.to(args.device)\n\n # Disable batch-norm running_mean and running_var updates for pseduo-label forward 
passes\n set_bn_running_updates(model, enable=False)\n with torch.no_grad():\n preds = [\n torch.softmax(model(u_k.to(args.device)), dim=1) for u_k in unlabeled_k\n ]\n avg_preds = torch.stack(preds).mean(dim=0)\n sharpened_preds = torch.pow(avg_preds, 1 / args.temperature)\n unlabeled_targets = sharpened_preds / sharpened_preds.sum(dim=-1, keepdim=True)\n unlabeled_targets = unlabeled_targets.detach()\n\n all_inputs = torch.cat([labeled] + unlabeled_k, dim=0)\n all_targets = torch.cat(\n [targets] + [unlabeled_targets for _ in range(len(unlabeled_k))], dim=0\n )\n\n mixed_input, mixed_targets = mixup(all_inputs, all_targets, args.alpha)\n\n # Interleave labeled and unlabeled samples to avoid biased batch norm calculation\n mixed_input = list(torch.split(mixed_input, args.batch_size))\n mixed_input = interleave(mixed_input, args.batch_size)\n\n # Only update running batch-norm parameters for first batch of mixed batches\n set_bn_running_updates(model, enable=True)\n logits = [model(mixed_input[0])]\n set_bn_running_updates(model, enable=False)\n for input in mixed_input[1:]:\n logits.append(model(input))\n\n # Put interleaved samples back - reverses interleaving applied before\n logits = interleave(logits, args.batch_size)\n logits_x = logits[0]\n logits_u = torch.cat(logits[1:], dim=0)\n\n # Cross entropy loss for labeled samples\n labeled_loss = -torch.sum(\n F.log_softmax(logits_x, dim=1) * mixed_targets[: args.batch_size], dim=1\n )\n # L2-distance loss for unlabeled samples\n unlabeled_loss = torch.mean(\n (torch.softmax(logits_u, dim=1) - mixed_targets[args.batch_size :]) ** 2\n )\n\n # Update unlabeled loss weight based on current step (linear rampup to max. value over first 16 epochs)\n step = epoch * args.iters_per_epoch + (batch_idx + 1)\n wu = (\n args.wu * linear_rampup(step, 16 * args.iters_per_epoch)\n if not args.resume\n else args.wu\n )\n\n # Total loss\n loss = torch.mean(labeled_loss) + wu * unlabeled_loss\n\n meters.update(\"total_loss\", loss.item(), targets.size(0))\n meters.update(\"labeled_loss\", torch.mean(labeled_loss).item(), targets.size(0))\n meters.update(\"unlabeled_loss\", unlabeled_loss.item(), targets.size(0))\n meters.update(\"wu\", wu, 1)\n return loss", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), 
\r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def run(net, loader, edit_set_cmd, model_name):\n answ = []\n accs = []\n ss_vc = []\n image_ids =[]\n ques_ids = []\n softmax = nn.Softmax(dim=1).cuda()\n for v, q, a, idx, img_id, ques_id, q_len in tqdm(loader): # image, ques to vocab mapped , answer, item (sth to help index shuffled data with), len_val\n #ipdb.set_trace()\n var_params = {\n 'volatile': False,\n 'requires_grad': False,\n }\n v = Variable(v.cuda(async=True), **var_params)\n q = Variable(q.cuda(async=True), **var_params)\n a = Variable(a.cuda(async=True), **var_params)\n q_len = Variable(q_len.cuda(async=True), **var_params) ### len of question\n\n with torch.no_grad():\n out = net(v, q, q_len)\n softmax_vc = softmax(out) # torch.size(128,3000)\n #ipdb.set_trace() ## check type of softmax_vc- enforce it to torch16 here itself/ alse see what happens when np.16..\n acc = utils.batch_accuracy(out.data, a.data).cpu() #torch.Size([128, 1]) official vqa acc for every questions\n\n # store information about evaluation of this minibatch\n _, answer = out.data.cpu().max(dim=1) ### torch.Size([128) !!!! 
this is the predicted answer id!!!\n answ.append(answer.view(-1)) # pred_ans_id\n ss_vc.append(softmax_vc) # #torch.Size([128, 3000])\n accs.append(acc.view(-1)) # official vqa accurcay per question\n ques_ids.append(ques_id.view(-1))\n\n if config.vis_attention:\n output_qids_answers = []\n if config.fintuned_model_test:\n model_name = 'finetuned_' + model_name\n if edit_set_cmd:\n saaa_vqa_ans_q_id = '/BS/vedika3/nobackup/pytorch-vqa/cvpr_rebuttal_' + model_name + '_edit_vqa_ans_q_id.pickle'\n print(img_id)\n ipdb.set_trace()\n output_qids_answers += [\n {'ans_id': p, 'ques_id': qid, 'accuracy': acc}\n for p, qid, acc in zip(answ, ques_ids, accs)]\n else:\n saaa_vqa_ans_q_id = '/BS/vedika3/nobackup/pytorch-vqa/cvpr_rebuttal_' + model_name + '_orig_vqa_ans_q_id.pickle'\n print(img_id)\n ipdb.set_trace()\n output_qids_answers += [\n {'ans_id': p, 'ques_id': qid, 'accuracy': acc}\n for p, qid, acc in zip(answ, ques_ids, accs)]\n\n with open(saaa_vqa_ans_q_id, 'wb') as f:\n pickle.dump(output_qids_answers, f, pickle.HIGHEST_PROTOCOL)\n\n exit()\n\n\n\n if edit_set_cmd:\n image_ids.append(img_id)\n else:\n image_ids.append(img_id.view(-1))\n\n\n\n\n ss_vc = torch.cat(ss_vc, dim=0) ## softmax_vectors\n answ = torch.cat(answ, dim=0) ## pred_ans_id\n accs = torch.cat(accs, dim=0) ## official vqa accurcay per question\n\n ques_ids = torch.cat(ques_ids, dim=0)\n if edit_set_cmd:\n image_ids = [item for sublist in image_ids for item in sublist]\n else:\n image_ids = torch.cat(image_ids, dim=0)\n ### might be string in edit config case\n print('the accuracy is:', torch.mean(accs)) ### mean of entire accuracy vector # tensor(0.6015) for val set\n\n\n\n\n\n return answ, image_ids, ques_ids, ss_vc", "def do_train_job(self):\n # get the initial tensor dict\n # initial_tensor_dict = self.wrapped_model.get_tensor_dict()\n\n # get the training data size\n data_size = self.wrapped_model.get_training_data_size()\n\n # train the model\n # FIXME: model header \"version\" needs to be changed to \"rounds_trained\"\n # FIXME: We assume the models allow training on partial batches.\n # FIXME: Currently, num_batches_per_round overrides epochs per round. 
Is this the correct behavior?\n if self.num_batches_per_round is not None:\n num_batches = self.num_batches_per_round\n else:\n batches_per_epoch = int(np.ceil(data_size/self.wrapped_model.data.batch_size))\n num_batches = int(np.floor(batches_per_epoch * self.epochs_per_round))\n loss = self.wrapped_model.train_batches(num_batches=num_batches)\n self.logger.debug(\"{} Completed the training job for {} batches.\".format(self, num_batches))\n\n # get the trained tensor dict and store any designated to be held out from aggregation\n shared_tensors = self._remove_and_save_holdout_tensors(self.wrapped_model.get_tensor_dict(with_opt_vars=self._with_opt_vars()))\n\n # create the model proto\n if self.send_model_deltas:\n deltas = self.create_deltas(tensor_dict=shared_tensors)\n model_proto = construct_proto(tensor_dict=deltas[\"tensor_dict\"],\n model_id=self.model_header.id,\n model_version=self.model_header.version,\n compression_pipeline=self.compression_pipeline,\n is_delta=True,\n delta_from_version=deltas[\"delta_from_version\"])\n else:\n model_proto = construct_proto(tensor_dict=shared_tensors,\n model_id=self.model_header.id,\n model_version=self.model_header.version,\n compression_pipeline=self.compression_pipeline,\n is_delta=False,\n delta_from_version=-1)\n\n self.logger.debug(\"{} - Sending the model to the aggregator.\".format(self))\n\n reply = self.channel.UploadLocalModelUpdate(LocalModelUpdate(header=self.create_message_header(), model=model_proto, data_size=data_size, loss=loss))\n self.validate_header(reply)\n check_type(reply, LocalModelUpdateAck, self.logger)\n self.logger.info(\"{} - Model update succesfully sent to aggregator\".format(self))", "def update_model(engine, batch):\n\t\tengine.model.train()\n\t\tengine.model.rpn.nms_thresh = 0.7\n\t\timg, target = prepare_batch(batch, device=get_device(engine.model))\n\t\tengine.optimizer.zero_grad()\n\t\tloss = engine.model(img, target)\n\t\tlosses = sum(l for l in loss.values())\n\t\tlosses.backward()\n\t\tengine.optimizer.step()\n\t\treturn loss", "def train_shared(self, dag=None):\n model = self.shared\n model.train()\n self.controller.eval()\n raw_total_loss = 0\n total_loss = 0\n for step,batch in enumerate(self.train_data_loader):\n dags = dag if dag else self.controller.sample(\n self.args.shared_num_sample)\n if self.args.use_ref and step<self.args.ref_model_num:\n dags=[self.args.ref_arch]\n\n inputs=torch.from_numpy(batch['data']).cuda()\n targets=torch.from_numpy(batch['seg'].astype(int)).cuda()\n targets=get_multi_class_labels(targets,n_labels=self.args.n_classes)\n \n print('epoch :',self.epoch,'step :', step, 'time:' ,time.time()-self.time)\n print(dags[0])\n #print('momery',torch.cuda.memory_allocated(device=None))\n\n loss = self.get_loss(inputs,targets,dags)\n raw_total_loss += loss.data\n \n #print('after model momery',torch.cuda.memory_allocated(device=None))\n print('loss :', loss.item())\n\n\n # update\n self.shared_optim.zero_grad()\n loss.backward()\n self.shared_optim.step()\n\n total_loss += loss.data\n\n if ((step % self.args.log_step) == 0) and (step > 0):\n self._summarize_shared_train(total_loss, raw_total_loss)\n raw_total_loss = 0\n total_loss = 0\n self._summarize_shared_train(total_loss, raw_total_loss)", "def train(model, config, logger, record): \n # initialize userIDs\n users_to_sample = config.users\n userIDs = np.arange(config.users) \n\n # initialize the optimizer for the server model\n dataset = assign_user_data(config, logger)\n\n # initialize the delta offset buffers and local 
residual buffers\n offset_buffers = []\n residual_buffers = []\n for user in range(users_to_sample):\n offset_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n residual_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n\n global_updater = GlobalUpdater(config, model.state_dict()) \n\n # before optimization, report the result first\n validate_and_log(model, dataset, config, record, logger)\n \n for comm_round in range(config.rounds):\n userIDs_candidates = userIDs[:users_to_sample]\n \n # Wait for all users updating locally\n local_packages = []\n for i, user_id in enumerate(userIDs_candidates):\n user_resource = assign_user_resource(config, user_id, \n dataset[\"train_data\"], dataset[\"user_with_data\"])\n updater = LocalUpdater(user_resource, config)\n updater.local_step(model, offset_buffers[user_id])\n local_package = updater.uplink_transmit()\n local_packages.append(local_package)\n\n # Update the global model\n global_updater.global_step(model, local_packages, residual_buffers)\n\n # Update local offsets\n update_offset_buffers(offset_buffers, \n residual_buffers,\n global_updater.accumulated_delta, \n config.tau) \n\n # log and record\n logger.info(\"Round {:d}\".format(comm_round))\n validate_and_log(model, dataset, config, record, logger)\n\n # if comm_round == config.scheduler[0]:\n # config.lr *= config.lr_scaler\n # config.scheduler.pop(0)", "def _update_model(self, X_all, Y_all):\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.set_XY(X_all, Y_all)\n\n # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...\n if self.max_iters > 0:\n # --- update the model maximizing the marginal likelihood.\n if self.optimize_restarts==1:\n self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)\n else:\n self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)", "def train_step(self, batch: dict, epoch: int):\n\n with torch.cuda.amp.autocast(self.mixed_precision):\n \n # Update momentum {key, pseudo} networks\n with torch.no_grad():\n self._momentum_update_key_net()\n self._momentum_update_pseudo_net()\n\n # Get data (3 views)\n x_q = batch['x1'].to(self.local_rank)\n x_k = batch['x2'].to(self.local_rank)\n x_ps = batch['x3'].to(self.local_rank)\n \n # Compute strong query features; (B, f)\n z_q = F.normalize(self.net_q(x_q), dim=1)\n\n with torch.no_grad():\n \n # Shuffle across nodes (gpus)\n x_k, idx_unshuffle_k = ForMoCo.batch_shuffle_ddp(x_k)\n x_ps, idx_unshuffle_ps = ForMoCo.batch_shuffle_ddp(x_ps)\n \n # Compute {key, pseudo} features; (B, f)\n z_k = F.normalize(self.net_k(x_k), dim=1)\n z_ps = F.normalize(self.net_ps(x_ps), dim=1)\n \n # Restore {key, pseudo} features to their original nodes\n z_k = ForMoCo.batch_unshuffle_ddp(z_k, idx_unshuffle_k)\n z_ps = ForMoCo.batch_unshuffle_ddp(z_ps, idx_unshuffle_ps)\n\n # Compute loss\n loss, logits, labels, loss_pseudo, probs_pseudo_neg = \\\n self.loss_function(z_q, z_ps, z_k, self.queue.buffer, threshold=self.threshold)\n \n # Backpropagate & update\n if loss_pseudo.isnan() or (epoch <= self.ramp_up):\n self.backprop(loss)\n else:\n alpha = 1.0\n self.backprop(loss + alpha * loss_pseudo)\n \n # Compute metrics\n with torch.no_grad():\n \n # Accuracy of true positives against all negatives\n rank_1 = TopKAccuracy(k=1)(logits, labels)\n \n # Accuracy of pseudo positives with ground truth labels\n above_threshold = 
probs_pseudo_neg.ge(self.threshold)\n num_pseudo = above_threshold.sum()\n \n # No pseudo positives may have been selected\n if self.queue.is_reliable and (num_pseudo > 0):\n labels_query = batch['y'].to(self.local_rank) # (B, )\n labels_queue = self.queue.labels # (k, )\n is_correct = labels_query.view(-1, 1).eq(labels_queue.view(1, -1)) # (B, 1) @ (1, k) -> (B, k)\n num_correct = is_correct.masked_select(above_threshold).sum()\n precision = torch.true_divide(num_correct, num_pseudo)\n else:\n num_correct = torch.zeros(1, dtype=torch.long, device=num_pseudo.device)\n precision = torch.zeros(1, dtype=torch.float32, device=num_pseudo.device)\n \n # Update memory queue\n self.queue.update(keys=z_k, labels=batch['y'].to(self.local_rank))\n\n return {\n 'loss': loss.detach(),\n 'loss_pseudo': loss_pseudo.detach(), # (1, ) or tensor(nan)\n 'rank@1': rank_1,\n 'num_correct': num_correct,\n 'num_pseudo': num_pseudo,\n 'precision': precision,\n }", "def train(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Train with config:\")\n print(pprint.pformat(cfg))\n\n # Create train and val loaders.\n train_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.TRAIN_SPLIT, train=True)\n train_loader = DataLoader(\n train_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=(False if cfg.NUM_GPUS > 1 else True),\n sampler=(DistributedSampler(train_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n if cfg.DATA.VAL_SPLIT is not None:\n val_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n val_loader = DataLoader(\n val_dataset,\n batch_size=(1 if cfg.NUM_GPUS > 1 else cfg.TRAIN.BATCH_SIZE),\n shuffle=False,\n sampler=(DistributedSampler(val_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n if cfg.MODEL.BERT_FREEZE:\n if cfg.NUM_GPUS > 1:\n for param in model.module.bert_encoder.model.bert.encoder.layer.parameters():\n param.requires_grad = False\n else:\n for param in model.bert_encoder.model.bert.encoder.layer.parameters():\n param.requires_grad = False\n\n # Construct the optimizer.\n def optimizer_wrapper(Optim, **kwargs):\n def init_func(model):\n return 
Optim(model.parameters(), **kwargs)\n return init_func\n\n optimizers = {\n \"adamax\": (\n optimizer_wrapper(optim.Adamax, lr=cfg.SOLVER.BASE_LR),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n \"adam\": (\n optimizer_wrapper(optim.Adam, lr=cfg.SOLVER.BASE_LR),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n \"sgd\": (\n optimizer_wrapper(optim.SGD, lr=cfg.SOLVER.BASE_LR, momentum=0.9),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n }\n\n if cfg.SOLVER.OPTIMIZING_METHOD not in optimizers:\n cfg.SOLVER.OPTIMIZING_METHOD = 'adam'\n if distributed.is_master_proc():\n print(\"{0} not defined in available optimizer list, fallback to Adam\")\n\n optimizer, _ = optimizers[cfg.SOLVER.OPTIMIZING_METHOD]\n optimizer = optimizer(model)\n if distributed.is_master_proc():\n print('optimizer: {}'.format(optimizer))\n\n # Load a checkpoint to resume training if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'checkpoint.pth')\n if osp.exists(checkpoint_path):\n if distributed.is_master_proc():\n print('Resuming training: loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n start_epoch = checkpoint['epoch'] + 1\n model_final_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if osp.exists(model_final_path):\n model_final = torch.load(model_final_path)\n best_val_score = model_final['accuracy']\n else:\n best_val_score = None\n elif osp.exists(cfg.TRAIN.CHECKPOINT_FILE_PATH):\n if distributed.is_master_proc():\n print('Loading model from: {0}'.format(cfg.TRAIN.CHECKPOINT_FILE_PATH))\n checkpoint = torch.load(cfg.TRAIN.CHECKPOINT_FILE_PATH, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n start_epoch, best_val_score = 0, None\n else: \n start_epoch, best_val_score = 0, None\n\n # Define loss function\n loss_function = nn.BCEWithLogitsLoss()\n\n if distributed.is_master_proc():\n print('Train begins...')\n if cfg.TRAIN.EVAL_FIRST:\n accuracy = evaluate(val_loader, model, -1, cfg)\n if best_val_score is None or accuracy > best_val_score:\n best_val_score = accuracy\n try:\n # Perform the training loop\n for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):\n epoch_start_time = time.time()\n # Shuffle the dataset\n if cfg.NUM_GPUS > 1:\n train_loader.sampler.set_epoch(epoch)\n # Train for one epoch\n train_loss = train_epoch(train_loader, model, optimizer, loss_function, epoch, cfg)\n accuracy = evaluate(val_loader, model, epoch, cfg) \n\n if distributed.is_master_proc():\n # Save best model in the validation set\n if best_val_score is None or accuracy > best_val_score:\n best_val_score = accuracy\n model_final_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n model_final = {\n \"epoch\": epoch,\n \"model_state\": model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n \"accuracy\": accuracy\n }\n torch.save(model_final, model_final_path)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s '\n '| epoch loss {:.6f} |'.format(\n epoch, time.time() - epoch_start_time, train_loss))\n print('-' * 89)\n except KeyboardInterrupt:\n if distributed.is_master_proc():\n print('-' * 89)\n print('Exiting from training early')", "def mount(xpu, model):\n # Unwrap the core model if necessary\n 
model = xpu.raw(model)\n model = xpu.move(model)\n if xpu._device_ids and len(xpu._device_ids) > 1:\n model = ContainerDataParallel(\n model, device_ids=xpu._device_ids,\n output_device=xpu._main_device_id)\n else:\n model = DataSerial(model)\n return model", "def mpirun_pipeline(image=\"uber/horovod:0.13.11-tf1.10.0-torch0.4.0-py3.5\",\n\t\t\t\t\t\t batch_size=\"64\",\n\t\t\t\t\t\t optimizer='momentum',\n sync_source='https://github.com/tensorflow/benchmarks.git',\n git_sync_branch='cnn_tf_v1.9_compatible',\n data='user-susan:/training',\n gpus=1,\n workers=1,\n cpu_limit='2',\n metric='images/sec',\n memory_limit='10Gi'):\n\n env = ['NCCL_DEBUG=INFO','GIT_SYNC_BRANCH={0}'.format(git_sync_branch)]\n\n train=arena.mpi_job_op(\n \tname=\"all-reduce\",\n \timage=image,\n \tenv=env,\n data=[data],\n workers=workers,\n sync_source=sync_source,\n gpus=gpus,\n cpu_limit=cpu_limit,\n memory_limit=memory_limit,\n metrics=[metric],\n \tcommand=\"\"\"\n \tmpirun python code/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --model resnet101 \\\n \t--batch_size {0} --variable_update horovod --optimizer {1}\\\n \t--summary_verbosity=3 --save_summaries_steps=10\n \t\"\"\".format(batch_size, optimizer)\n )", "def train_epoch(self, epoch=None):\n self.model.train()\n loader_iterators = dict([(k, iter(v))\n for k, v in self.train_loaders.items()])\n train_losses_ts = dict(\n [(k, torch.tensor(0.).to(self.device)) for k in self.task_ids])\n train_metrics_ts = dict(\n [(k, torch.tensor(0.).to(self.device)) for k in self.task_ids])\n total_batches = min([len(loader)\n for _, loader in self.train_loaders.items()])\n num_branches = dict()\n for idx, (ctrl, block) in enumerate(self.model.control_blocks()):\n n_branches = max(len(ctrl.serving_tasks), 1.)\n num_branches[idx] = torch.tensor(n_branches, device=self.device)\n\n pbar = tqdm(desc=' train', total=total_batches, ascii=True)\n for batch_idx in range(total_batches):\n self.model.zero_grad()\n\n # for each task, calculate head grads and accumulate body grads\n for task_idx, task_id in enumerate(self.task_ids):\n data, target = loader_iterators[task_id].next()\n data, target = data.to(self.device), target.to(self.device)\n\n # do inference with backward\n output = self.model(data, task_id)\n loss = self.losses[task_id](output, target)\n wloss = self.loss_weights[task_id] * loss\n wloss.backward()\n\n # calculate training metrics\n with torch.no_grad():\n train_losses_ts[task_id] += loss.sum()\n train_metrics_ts[task_id] += \\\n self.metrics[task_id](output, target)\n\n # network slimming\n if self.slimming is not None:\n slim_loss = self.slimming * slimming_loss(self.model)\n if slim_loss > 1e-5:\n slim_loss.backward()\n\n # averaging out body gradients and optimize the body\n for idx, (_, block) in enumerate(self.model.control_blocks()):\n for p in block.parameters():\n p.grad /= num_branches[idx]\n self.optimizers.step()\n pbar.update()\n\n for task_id in self.task_ids:\n train_losses_ts[task_id] /= \\\n len(self.train_loaders[task_id].dataset)\n train_metrics_ts[task_id] /= \\\n len(self.train_loaders[task_id].dataset)\n\n train_losses = dict([(k, v.item())\n for k, v in train_losses_ts.items()])\n train_metrics = dict([(k, v.item())\n for k, v in train_metrics_ts.items()])\n pbar.close()\n return train_losses, train_metrics", "def __init__(\n self,\n model: nn.Module,\n input_path: Union[Path, str],\n out_activations: Dict[str, str],\n out_boundary_weights: Dict[str, bool],\n stride: int,\n patch_size: Tuple[int, int],\n instance_postproc: 
str,\n padding: int = None,\n batch_size: int = 8,\n normalization: str = None,\n device: str = \"cuda\",\n n_devices: int = 1,\n save_intermediate: bool = False,\n save_dir: Union[Path, str] = None,\n save_format: str = \".mat\",\n checkpoint_path: Union[Path, str] = None,\n n_images: int = None,\n type_post_proc: Callable = None,\n sem_post_proc: Callable = None,\n **kwargs,\n ) -> None:\n super().__init__(\n model=model,\n input_path=input_path,\n out_activations=out_activations,\n out_boundary_weights=out_boundary_weights,\n patch_size=patch_size,\n padding=padding,\n batch_size=batch_size,\n normalization=normalization,\n instance_postproc=instance_postproc,\n device=device,\n save_intermediate=save_intermediate,\n save_dir=save_dir,\n save_format=save_format,\n checkpoint_path=checkpoint_path,\n n_images=n_images,\n n_devices=n_devices,\n type_post_proc=type_post_proc,\n sem_post_proc=sem_post_proc,\n **kwargs,\n )\n\n self.stride = stride", "def test_torch_prepare_model(ray_start_4_cpus_2_gpus):\n\n def train_fn():\n model = torch.nn.Linear(1, 1)\n\n # Wrap in DDP.\n model = train.torch.prepare_model(model)\n\n # Make sure model is wrapped in DDP.\n assert isinstance(model, DistributedDataParallel)\n\n # Make sure model is on cuda.\n assert next(model.parameters()).is_cuda\n\n trainer = Trainer(\"torch\", num_workers=2, use_gpu=True)\n trainer.start()\n trainer.run(train_fn)\n trainer.shutdown()", "def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])", "def worker(self, gpu_id: int):\n if self.seed is not None:\n make_deterministic(self.seed)\n self.current_rank = self.rank\n if self.distributed:\n if self.multiprocessing:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n self.current_rank = self.rank * self.ngpus_per_node + gpu_id\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=self.dist_url,\n world_size=self.world_size,\n rank=self.current_rank\n )\n # set up process logger\n self.logger = logging.getLogger(\"worker_rank_{}\".format(self.current_rank))\n self.logger.propagate = False\n handler = QueueHandler(self.logger_queue)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n\n # only write in master process\n if self.current_rank == 0:\n self.tb_writer = self.tb_writer_constructor()\n\n self.logger.info(\n \"Use GPU: %d for training, current rank: %d\",\n gpu_id,\n self.current_rank\n )\n # get dataset\n train_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"train\"\n )\n val_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"val\"\n )\n # create model\n self.model = get_model(\n model_name=self.global_cfg[\"model\"][\"name\"],\n num_classes=self.global_cfg[\"dataset\"][\"n_classes\"]\n )\n\n self.device = torch.device(\"cuda:{}\".format(gpu_id))\n self.model.to(self.device)\n\n batch_size = self.global_cfg[\"training\"][\"batch_size\"]\n n_workers = self.global_cfg[\"training\"][\"num_workers\"]\n if self.distributed:\n batch_size = int(batch_size / self.ngpus_per_node)\n n_workers = int((n_workers + self.ngpus_per_node - 1) / self.ngpus_per_node)\n if self.global_cfg[\"training\"][\"sync_bn\"]:\n self.model = SyncBatchNorm.convert_sync_batchnorm(self.model)\n self.model = DistributedDataParallel(self.model, device_ids=[gpu_id])\n self.logger.info(\"batch_size: {}, workers: 
{}\".format(batch_size, n_workers))\n\n # define loss function (criterion) and optimizer\n self.loss_fn = CrossEntropyLoss().to(self.device)\n\n optimizer_cls = get_optimizer(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params = copy.deepcopy(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params.pop(\"name\")\n self.optimizer: Optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n self.logger.info(\"Loaded optimizer:\\n%s\", self.optimizer)\n\n # scheduler\n self.scheduler = get_scheduler(self.optimizer, self.global_cfg[\"training\"][\"lr_schedule\"])\n\n if self.distributed:\n train_sampler = DistributedSampler(\n train_dataset,\n shuffle=True,\n drop_last=True\n )\n val_sampler = DistributedSampler(\n val_dataset,\n shuffle=False\n )\n else:\n train_sampler = RandomSampler(train_dataset)\n val_sampler = SequentialSampler(val_dataset)\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=train_sampler\n )\n\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=val_sampler\n )\n self.logger.info(\n \"Load dataset done\\nTraining: %d imgs, %d batchs\\nEval: %d imgs, %d batchs\",\n len(train_dataset),\n len(train_loader),\n len(val_dataset),\n len(self.val_loader)\n )\n iter_generator = make_iter_dataloader(train_loader)\n\n while self.iter < self.global_cfg[\"training\"][\"train_iters\"]:\n img, label = next(iter_generator)\n self.train_iter(img, label)\n\n def is_val():\n p1 = self.iter != 0\n p2 = (self.iter + 1) % self.global_cfg[\"training\"][\"val_interval\"] == 0\n p3 = self.iter == self.global_cfg[\"training\"][\"train_iters\"] - 1\n return (p1 and p2) or p3\n\n # have a validation\n if is_val():\n self.validate()\n # end one iteration\n self.iter += 1", "def optimize_model(input,\n model_type='bert',\n num_heads=0,\n hidden_size=0,\n optimization_options=None,\n opt_level=0,\n use_gpu=False,\n only_onnxruntime=False):\n (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]\n\n temp_model_path = None\n if opt_level > 1: # Optimization specified for an execution provider.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, opt_level=opt_level)\n elif run_onnxruntime:\n # Use Onnxruntime to do optimizations (like constant folding and cast elimation) that is not specified to exection provider.\n # CPU provider is used here so that there is no extra node for GPU memory copy.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)\n\n model = load_model(temp_model_path or input, format=None, load_external_data=True)\n\n if model.producer_name and producer != model.producer_name:\n logger.warning(\n f\"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. 
Please specify correct --model_type parameter.\"\n )\n\n if optimization_options is None:\n optimization_options = BertOptimizationOptions(model_type)\n\n optimizer = optimizer_class(model, num_heads, hidden_size)\n\n if not only_onnxruntime:\n optimizer.optimize(optimization_options)\n\n # Remove the temporary model.\n if temp_model_path:\n os.remove(temp_model_path)\n logger.debug(\"Remove tempoary model: {}\".format(temp_model_path))\n\n optimizer.model.producer_name = \"onnxruntime.transformers\"\n from onnxruntime import __version__ as onnxruntime_version\n optimizer.model.producer_version = onnxruntime_version\n\n return optimizer", "def train(self, mode=True):\n super().train(mode)\n if mode and self.freeze_2d and self.backbone is not None:\n self._freeze(self.backbone)\n return self", "def _update_model(self, normalization_type='stats'):\n if self.num_acquisitions % self.model_update_interval == 0:\n\n # input that goes into the model (is unziped in case there are categorical variables)\n X_inmodel = self.space.unzip_inputs(self.X)\n\n # Y_inmodel is the output that goes into the model\n if self.normalize_Y:\n Y_inmodel = normalize(self.Y, normalization_type)\n else:\n Y_inmodel = self.Y\n\n self.model.updateModel(X_inmodel, Y_inmodel, None, None)", "def __init__(self,\n names,\n data,\n embedding_fns,\n encoder_fns_1,\n encoder_fns_2,\n logits_fns,\n evaluation_fns,\n # MTL\n mixing_ratios,\n L2_coefficient=None,\n is_distill=False,\n distill_coefficient_loc=None,\n distill_coefficient_scale=None,\n distill_temperature=1.0,\n # optimization\n optimizer=\"Adam\",\n learning_rate=0.001,\n gradient_clipping_norm=2.0,\n # misc\n graph=None,\n logdir=None,\n main_model_index=0,\n debug_mode=False):\n \n super(MultitaskBaseModel, self).__init__(\n logdir=logdir, graph=graph,\n saver_max_to_keep=MAX_CHECKPOINTS_TO_KEEP)\n\n num_models = len(names)\n _check_list_compatability(data, num_models)\n _check_fn_list_compatability(embedding_fns, num_models, True)\n _check_fn_list_compatability(encoder_fns_1, num_models, True)\n _check_fn_list_compatability(encoder_fns_2, num_models, True)\n _check_fn_list_compatability(logits_fns, num_models, False)\n _check_fn_list_compatability(evaluation_fns, num_models, False)\n\n # check mixing ratios and MTL\n if len(names) == 1:\n raise ValueError(\"Not supported\")\n _mr_compatible(mixing_ratios, num_models, print_out=True)\n if main_model_index != 0:\n raise ValueError(\"`main_model_index` must be set to `0`\")\n\n self._names = names\n self._data = data\n self._embedding_fns = embedding_fns\n self._encoder_fns_1 = encoder_fns_1\n self._encoder_fns_2 = encoder_fns_2\n self._logits_fns = logits_fns\n self._evaluation_fns = evaluation_fns\n\n # MTL\n self._mixing_ratios = mixing_ratios\n self._L2_coefficient = L2_coefficient\n self._is_disill = is_distill\n self._distill_temperature = distill_temperature\n self._distill_coefficient_loc = distill_coefficient_loc\n self._distill_coefficient_scale = distill_coefficient_scale\n\n self._optimizer = optimizer\n self._learning_rate = learning_rate\n self._gradient_clipping_norm = gradient_clipping_norm\n\n self._main_model_index = main_model_index\n self._debug = collections.defaultdict(list)\n self._debug_mode = debug_mode", "def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n 
return model", "def update(self, batch):\n if self.opt['cuda']:\n inputs = [Variable(torch.LongTensor(b).cuda()) for b in batch[:3]]\n subj_start_binary = Variable(torch.LongTensor(batch[5]).cuda()).float()\n subj_end_binary = Variable(torch.LongTensor(batch[6]).cuda()).float()\n obj_start_relation = Variable(torch.LongTensor(batch[7]).cuda())\n obj_end_relation = Variable(torch.LongTensor(batch[8]).cuda())\n subj_start_type = Variable(torch.LongTensor(batch[9]).cuda())\n subj_end_type = Variable(torch.LongTensor(batch[10]).cuda())\n obj_start_type = Variable(torch.LongTensor(batch[11]).cuda())\n obj_end_type = Variable(torch.LongTensor(batch[12]).cuda())\n nearest_subj_start_position_for_each_token = Variable(torch.LongTensor(batch[13]).cuda())\n distance_to_nearest_subj_start = Variable(torch.LongTensor(batch[14]).cuda())\n distance_to_subj = Variable(torch.LongTensor(batch[15]).cuda())\n nearest_obj_start_position_for_each_token = Variable(torch.LongTensor(batch[3]).cuda())\n distance_to_nearest_obj_start = Variable(torch.LongTensor(batch[4]).cuda())\n else:\n inputs = [Variable(torch.LongTensor(b)) for b in batch[:4]]\n subj_start_label = Variable(torch.LongTensor(batch[4])).float()\n subj_end_label = Variable(torch.LongTensor(batch[5])).float()\n obj_start_label = Variable(torch.LongTensor(batch[6]))\n obj_end_label = Variable(torch.LongTensor(batch[7]))\n subj_type_start_label = Variable(torch.LongTensor(batch[8]))\n subj_type_end_label = Variable(torch.LongTensor(batch[9]))\n obj_type_start_label = Variable(torch.LongTensor(batch[10]))\n obj_type_end_label = Variable(torch.LongTensor(batch[11]))\n subj_nearest_start_for_each = Variable(torch.LongTensor(batch[12]))\n subj_distance_to_start = Variable(torch.LongTensor(batch[13]))\n \n \n mask = (inputs[0].data>0).float()\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n\n \n subj_start_logits, subj_end_logits, obj_start_logits, obj_end_logits = self.model(inputs, distance_to_subj)\n\n subj_start_loss = self.obj_criterion(subj_start_logits.view(-1, self.opt['num_subj_type']+1), subj_start_type.view(-1).squeeze()).view_as(mask)\n subj_start_loss = torch.sum(subj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n subj_end_loss = self.obj_criterion(subj_end_logits.view(-1, self.opt['num_subj_type']+1), subj_end_type.view(-1).squeeze()).view_as(mask)\n subj_end_loss = torch.sum(subj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_start_loss = self.obj_criterion(obj_start_logits.view(-1, self.opt['num_class']+1), obj_start_relation.view(-1).squeeze()).view_as(mask)\n obj_start_loss = torch.sum(obj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_end_loss = self.obj_criterion(obj_end_logits.view(-1, self.opt['num_class']+1), obj_end_relation.view(-1).squeeze()).view_as(mask)\n obj_end_loss = torch.sum(obj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n loss = self.opt['subj_loss_weight']*(subj_start_loss + subj_end_loss) + (obj_start_loss + obj_end_loss)\n \n # backward\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data.item()\n return loss_val", "def update(self, batch_size=None, concurrent=False, max_concurrent_workers=None,\n send_signals=True, _use_super=False, return_queryset=False, **kwargs):\n if _use_super:\n return super().update(**kwargs)\n\n if send_signals:\n pre_update.send(sender=self.model, instances = self)\n\n n_concurrent_writers = 
self._get_n_concurrent_workers(max_concurrent_workers)\n concurrent = self._get_concurrent(concurrent)\n\n chunks = self.get_chunks(batch_size, n_concurrent_writers)\n\n n = 0\n\n if concurrent:\n # question: how do you pass arguments in this function?\n jobs = [partial(BulkModelQuerySet._update_chunk, self, chunk, **kwargs) for chunk in chunks if chunk]\n executor = ConcurrentExecutor(jobs)\n results = executor.run_async()\n n = sum(results)\n\n else:\n for chunk in chunks:\n if not chunk:\n # skip empty chunks (only happens in the case of an empty queryset)\n continue\n\n n += self._update_chunk(chunk, **kwargs)\n\n if send_signals:\n post_update.send(sender = self.model, instances = self)\n\n if return_queryset:\n _ids = []\n for obj in self:\n _id = getattr(obj, 'id') or getattr(obj, 'pk')\n if _id:\n _ids.append(_id)\n\n return self.filter(id__in = _ids)\n\n return n", "def dist_setting(current_gpu, model, args):\n print(\"channels_last : {}\".format(args.channels_last))\n if args.channels_last:\n args.memory_format = torch.channels_last\n else:\n args.memory_format = torch.contiguous_format\n\n if args.apex:\n args.lr = args.lr*float(args.batch_size*args.world_size)/256.\n args.current_gpu = current_gpu\n if args.current_gpu is not None:\n print(\"Use GPU: {} for training\".format(args.current_gpu))\n\n if args.multigpus_distributed:\n args.rank = args.num_gpus * args.host_num + args.current_gpu\n dist.init_process_group(backend=args.backend,\n rank=args.rank, world_size=args.world_size)\n logger.info('Initialized the distributed environment: \\'{}\\' backend on {} nodes. '.format(\n args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(\n dist.get_rank(), args.num_gpus))\n else:\n args.rank = 0\n\n if args.sync_bn:\n import apex\n print(\"using apex synced BN\")\n model = apex.parallel.convert_syncbn_model(model)\n\n if args.multigpus_distributed:\n if args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n args.batch_size = int(args.batch_size / args.num_gpus)\n logger.info(\"Batch size for each GPU: {}\".format(args.batch_size))\n if not args.apex:\n model.cuda(args.current_gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.current_gpu])\n else:\n if not args.apex:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n if not args.apex:\n model = model.cuda(args.current_gpu)\n else:\n if not args.apex:\n model = torch.nn.DataParallel(model).cuda()\n\n return model, args", "def update(self, x_train_single, updated_h):\n x_row = x_train_single.toarray()\n for i in range(self.num_models):\n self.models[i].partial_fit(x_row, [updated_h[i]])", "def prepare(self, n_cores=1, ipp_client=None):\n if len(self.shape_parameters):\n self.morpher = MORPHERS[self.config['morpher']](self.config.get('morpher_config', {}),\n self.shape_parameters)\n zs_list = self.morpher.get_anchor_points(bounds=self.get_bounds())\n\n # Create the configs for each new model\n configs = []\n for zs in zs_list:\n config = deepcopy(self.pdf_base_config)\n for i, (setting_name, (anchors, _, _)) in enumerate(self.shape_parameters.items()):\n # Translate from zs to settings using the anchors dict. 
Maybe not all settings are numerical.\n config[setting_name] = anchors[zs[i]]\n if ipp_client is None and n_cores != 1:\n # We have to compute in parallel: must have delayed computation on\n config['delay_pdf_computation'] = True\n configs.append(config)\n\n # Create the new models\n if n_cores == 1:\n models = [Model(c) for c in tqdm(configs, desc=\"Computing/loading models on one core\")]\n\n elif ipp_client is not None:\n models = create_models_ipyparallel(configs, ipp_client,\n block=self.config.get('block_during_paralellization', False))\n\n else:\n models = [Model(c) for c in tqdm(configs, desc=\"Preparing model computation tasks\")]\n\n hashes = set()\n for m in models:\n for s in m.sources:\n hashes.add(s.hash)\n\n compute_many(hashes, n_cores)\n\n # Reload models so computation takes effect\n models = [Model(c) for c in tqdm(configs, desc=\"Loading computed models\")]\n\n # Add the new models to the anchor_models dict\n for zs, model in zip(zs_list, models):\n self.anchor_models[tuple(zs)] = model\n\n # Build the interpolator for the rates of each source.\n self.mus_interpolator = self.morpher.make_interpolator(f=lambda m: m.expected_events(),\n extra_dims=[len(self.source_name_list)],\n anchor_models=self.anchor_models)\n\n self.is_data_set = False\n self.is_prepared = True", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def update_dependencies():\r\n\r\n if par['task'] == 'mnist':\r\n par['n_tasks'] = 100\r\n par['input_shape'] = [28, 28]\r\n par['n_input'] = np.product(par['input_shape'])\r\n par['n_output'] = 10\r\n elif par['task'] == 'omniglot':\r\n par['input_shape'] = [26, 26]\r\n par['n_input'] = 256 if par['conv_input'] else np.product(par['input_shape'])\r\n par['n_output'] = par['n_ways'] #par['n_meta_tasks'] + par['n_test_tasks']\r\n\r\n par['layer_dims'] = [par['n_input']] + par['hidden_layers'] + [par['n_output']]\r\n\r\n\r\n par['n_layers'] = len(par['layer_dims'])\r\n if par['task'] == 'mnist' or par['task'] == 'imagenet':\r\n par['labels_per_task'] = 10\r\n elif par['task'] == 'cifar':\r\n par['labels_per_task'] = 5", "def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def train(world_rank=0, world_size=4, train_data=None, train_target=None, do_log=False, comms=None):\n torch.manual_seed(1234)\n model = Net()\n optimizer = optim.SGD(model.parameters(),\n lr=0.01, momentum=0.5)\n\n num_batches = train_data.shape[1]\n\n if (world_rank == 0 and do_log):\n print(\"Started Training\")\n total_data = len(train_data)\n epochs = 1\n total_steps = epochs * total_data\n local_time_communication = 0\n local_total_time_communication = 0\n\n for epoch in range(epochs):\n epoch_loss = 0.0\n count = 0\n for data, target in zip(train_data, train_target):\n data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2])) / 128.0\n count = count + 1\n result = '{0:.4g}'.format((count / float(total_steps)) * 100.0)\n if (world_rank == 0):\n print(\"Progress {}% \\r\".format(result), end='\\r')\n optimizer.zero_grad()\n output = model(data)\n # this comes with data loading mechanism use target or 
target.long()\n # depending on network specifications.\n target = target.long()\n loss = F.nll_loss(output, target)\n epoch_loss += loss.item()\n # print(epoch_loss)\n loss.backward()\n if (world_rank == 0):\n local_time_communication = time.time()\n average_gradients_mpi(model, comm=comms, world_size=4)\n if (world_rank == 0):\n local_time_communication = time.time() - local_time_communication\n local_total_time_communication = local_total_time_communication + local_time_communication\n optimizer.step()\n if (world_rank == 0):\n print('Rank ', world_rank, ', epoch ',\n epoch, ': ', epoch_loss / num_batches)\n return model, local_total_time_communication", "def train(self, mode=True):\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()", "def enable_model_cpu_offload(self, gpu_id=0):\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook", "def train_model(self\n\t\t, epochs=100\n\t\t, minibatch_size=20\n\t\t, yield_every_iteration=False):\n\n\t\tif self.input_batch is None:\n\t\t\traise ValueError(\"Denoising autoencoder must be initialised with \"\n\t\t\t\t\"input data to train model independently.\")\n\t\tif self.output_batch is None:\n\t\t\traise ValueError(\"RMI denoising autoencoder must be initialised \"\n\t\t\t\t\"with output data to train model independently.\")\n\n\t\tbatch_count = self.input_batch.get_value(\n\t\t\tborrow=True).shape[0]//minibatch_size\n\n\t\tfor epoch in xrange(epochs):\n\t\t\tcosts = []\n\t\t\tfor index in xrange(batch_count):\n\t\t\t\tcost = self.train_model_once(index, minibatch_size)\n\t\t\t\tcosts.append(cost)\n\t\t\t\tif yield_every_iteration:\n\t\t\t\t\tyield (index, cost)\n\n\t\t\tif not yield_every_iteration:\n\t\t\t\tyield (epoch, numpy.mean(costs))", "async def fit_model_on_worker(\n worker,\n built_model: sy.Plan,\n built_loss_fn: sy.Plan,\n encrypters,\n batch_size: int,\n curr_round: int,\n max_nr_batches: int,\n lr: float,\n):\n num_of_parameters = len(built_model.parameters())\n built_model.id = \"GlobalModel\"\n # built_loss_fn.id = \"LossFunc\"\n # model_config = sy.ModelConfig(model=built_model,\n # loss_fn=built_loss_fn,\n # optimizer=\"SGD\",\n # batch_size=batch_size,\n # optimizer_args={\"lr\": lr},\n # epochs=1,\n # max_nr_batches=max_nr_batches)\n # model_config_send_start = time.time()\n built_model.send(worker)\n # model_config_send_end = time.time()\n print(\"[trace] GlobalInformationSend duration\", worker.id, model_config_send_end - model_config_send_start)\n\n return_ids = [0, 1]\n for i in range(num_of_parameters):\n return_ids.append(\"p\" + str(i))\n\n fit_sagg_start = time.time()\n result_list = await 
worker.async_fit_sagg_mc(dataset_key=\"mnist\", encrypters=encrypters, return_ids=return_ids)\n fit_sagg_end = time.time()\n print(\"[trace] FitSagg\", \"duration\", worker.id, fit_sagg_end - fit_sagg_start)\n\n loss = result_list[0]\n num_of_training_data = result_list[1]\n enc_params = result_list[2:]\n\n print(\"Iteration %s: %s loss: %s\" % (curr_round, worker.id, loss))\n\n return worker.id, enc_params, loss, num_of_training_data", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))", "def main(batch_size, saves_dir=TENSORFLOW_SAVES_DIR):\n batches = [1, 8, 16, 32, 64]\n if batch_size:\n batches = [batch_size]\n\n for batch_size in batches:\n print(\"Batch size: {}\".format(batch_size))\n batch = np.random.random((batch_size, 224, 224, 3))\n\n # our default model\n tf.reset_default_graph()\n usual_model = Model()\n measure_model(usual_model, \"Usual model\", batch)\n usual_model.sess.close()\n\n # our binary file\n tf.reset_default_graph()\n frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='constant_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(frozen_model, \"Frozen model\", batch)\n frozen_model.sess.close()\n\n # binary file with some constant operations\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='optimized_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, \"Optimized frozen model\", batch)\n optimized_frozen_model.sess.close()\n\n # model quantized with python\n model_name = \"Quantized with python\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_python.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)\n\n # model quantized with bazel\n model_name = \"Quantized with bazel\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_bazel.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)", "def _train_task(self, train_loader, val_loader):\n if self._task == 0:\n epochs = 90\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.1, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [50, 60], gamma=0.1)\n self._train(train_loader, val_loader, epochs, optimizer, scheduler)\n return\n\n # Training on all new + examplars\n print(\"Training\")\n self._finetuning = False\n epochs = 60\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.1, 0.001)\n scheduler = 
torch.optim.lr_scheduler.MultiStepLR(optimizer, [40, 50], gamma=0.1)\n self._train(train_loader, val_loader, epochs, optimizer, scheduler)\n\n # Fine-tuning on sub-set new + examplars\n print(\"Fine-tuning\")\n self._old_model = self._network.copy().freeze()\n\n self._finetuning = True\n self._build_examplars(train_loader,\n n_examplars=self._k // (self._n_classes - self._task_size))\n train_loader.dataset.set_idxes(self.examplars) # Fine-tuning only on balanced dataset\n\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.01, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20], gamma=0.1)\n self._train(train_loader, val_loader, 40, optimizer, scheduler)", "def train(self, mode=True):\n super(CRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n 
model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def train_next_model(self, wait=True, input_data_s3_prefix=None, input_model_id=None):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n # use 'last_trained_model_id' by default as input model for next training\n if input_model_id is None and self.experiment_record._last_trained_model_id is not None:\n logger.info(\n f\"Use last trained model {self.experiment_record._last_trained_model_id} \"\n \"as pre-trained model for training\"\n )\n\n input_model_id = self.experiment_record._last_trained_model_id\n\n if input_model_id != self.experiment_record._last_trained_model_id:\n # No deployment if the given model is not ready\n if not self._check_if_model_ready(input_model_id):\n return\n\n # experiment only allows one training job at a time,\n # validate no other training request is in progress\n if (\n self.experiment_record._training_state is not None\n and self.experiment_record._training_state.endswith(\"ING\")\n ):\n logger.error(\n f\"A training request with model id '{self.experiment_record._next_model_to_train_id}' \"\n f\"was in the state of '{self.experiment_record._training_state}'. \"\n \"Please wait until the training job is finished.\"\n )\n raise InvalidUsageException(\n \"Please wait for old Training Job to Complete before requesting a new one!\"\n )\n else:\n # update next_model_to_train_id and training state\n next_model_to_train_id = ModelManager.name_next_model(experiment_id=self.experiment_id)\n\n logger.info(f\"Starting training job for ModelId '{next_model_to_train_id}''\")\n\n self.exp_db_client.update_experiment_next_model_to_train_id(\n self.experiment_id, next_model_to_train_id\n )\n self.exp_db_client.update_experiment_training_state(\n self.experiment_id, TrainingState.PENDING\n )\n\n manifest_file_path = None\n if isinstance(input_data_s3_prefix, list):\n # generate manifest file and upload to s3 when having multiple inputs\n manifest_file_path = self._generate_manifest(input_data_s3_prefix)\n\n try:\n self.next_model_to_train = ModelManager(\n model_db_client=self.model_db_client,\n experiment_id=self.experiment_id,\n model_id=next_model_to_train_id,\n image=self.image,\n role=self.resource_manager.iam_role_arn,\n instance_config=self.resource_manager.training_fleet_config,\n boto_session=self.boto_session,\n algor_config=self.algor_config,\n )\n self.next_model_to_train.fit(\n wait=wait,\n input_model_id=input_model_id,\n input_data_s3_prefix=input_data_s3_prefix,\n manifest_file_path=manifest_file_path,\n logs=wait,\n )\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n trained_state = (\n self.experiment_record._training_state == TrainingState.TRAINED\n and self.experiment_record._last_trained_model_id == next_model_to_train_id\n and self.experiment_record._next_model_to_train_id is None\n )\n num_retries = 0\n\n while not trained_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table training status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n trained_state = (\n self.experiment_record._training_state == TrainingState.TRAINED\n and self.experiment_record._last_trained_model_id == next_model_to_train_id\n and self.experiment_record._next_model_to_train_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise 
UnhandledWorkflowException(\n f\"Training job '{self.experiment_record._next_model_to_train_id}' \"\n f\"was in state of '{self.experiment_record._training_state}'. Expected it to be TRAINED.\"\n )\n if (\n self.experiment_record._training_state == TrainingState.FAILED\n or self.experiment_record._training_state == TrainingState.STOPPED\n ):\n raise SageMakerTrainingJobException(\n f\"Training job '{self.experiment_record._next_model_to_train_id}' \"\n f\"ended in state of '{self.experiment_record._training_state}'. Please check Sagemaker logs for \"\n \"more information.\"\n )", "def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model", "def train(self, mode=True, freeze_bn=False):\n super(NetFeat, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train(config, model, train_iterator, criterion, optimizer, scheduler=None):\n if isinstance(model, collections.Iterable) or isinstance(\n optimizer, collections.Iterable) or isinstance(\n scheduler, collections.Iterable):\n raise ValueError(\n \"Need to provide custom training function if using multi-model \"\n \"or multi-scheduler or multi-optimizer training.\")\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n timers = {k: TimerStat() for k in [\"h2d\", \"fwd\", \"grad\", \"apply\"]}\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n for batch_idx, (features, target) in enumerate(train_iterator):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Create non_blocking tensors for distributed training\n with timers[\"h2d\"]:\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n with timers[\"fwd\"]:\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n with timers[\"grad\"]:\n # compute gradients in a backward pass\n optimizer.zero_grad()\n\n if config.get(USE_FP16):\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n with timers[\"apply\"]:\n # Call step of optimizer to update model params\n optimizer.step()\n\n if scheduler and config.get(SCHEDULER_STEP) == SCHEDULER_STEP_BATCH:\n scheduler.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if config.get(TEST_MODE) and batch_idx == 0:\n break\n\n if scheduler and config.get(SCHEDULER_STEP) == SCHEDULER_STEP_EPOCH:\n 
scheduler.step()\n\n stats = {\n \"batch_time\": batch_time.avg,\n BATCH_COUNT: batch_idx + 1,\n \"train_loss\": losses.avg,\n \"data_time\": data_time.avg,\n }\n stats.update({k: t.mean for k, t in timers.items()})\n return stats", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def grid_search(train_loader, val_loader, criterion, alpha, beta):\n\n # Initializing training variables\n best_acc = 0\n all_losses = []\n\n # Initializing log file\n logfile = open('./model_compound_scaling/logfiles/logfile.txt', 'a+')\n logfile.write('depth multiplier: {}, width multiplier: {}\\n'.format(alpha, beta))\n\n # Building the model\n if args.dataset == 'CIFAR100' or args.dataset == 'CIFAR10':\n model = micronet(d_multiplier=alpha, w_multiplier=beta)\n\n elif args.dataset == 'ImageNet':\n model = image_micronet(d_multiplier=alpha, w_multiplier=beta)\n\n # If multipile GPUs are used\n if use_cuda and torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n\n # Transfers model to device (GPU/CPU). 
Device is globally initialized.\n model.to(device)\n\n # Defining the optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=True)\n\n # KERAS like summary of the model architecture\n # summary(your_model, input_size=(channels, H, W), batch_size=-1, device=\"cuda\")\n if use_cuda:\n if args.dataset == 'CIFAR100' or args.dataset == 'CIFAR10':\n summary(model, (3, 32, 32), batch_size=args.batch_size)\n print(model)\n\n elif args.dataset == 'ImageNet':\n summary(model, (3, args.image_size, args.image_size), batch_size=args.batch_size)\n print(model)\n\n # Optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc = checkpoint['acc']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n load_last_epoch = checkpoint['epoch']-1\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n load_last_epoch = -1\n\n # Learning rate schedulers for cifar_micronet and imagenet_micronet\n if args.dataset == 'CIFAR100' or args.data == 'CIFAR10':\n lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,\n T_max = args.epochs,\n eta_min = 0,\n last_epoch = load_last_epoch)\n\n elif args.dataset == 'ImageNet':\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=[30, 60, 90],\n gamma=0.1,\n last_epoch = load_last_epoch)\n\n # START TRAINING\n start_time = time.time()\n model.train()\n\n for epoch in range(args.start_epoch, args.epochs):\n\n print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))\n\n # Executing training process\n running_loss, running_accuracy = train(train_loader, model, criterion, optimizer, epoch)\n\n # Evaluation\n model.eval()\n val_loss, val_accuracy = evaluate(model, criterion, val_loader)\n\n # Logging the accuracies\n all_losses += [(epoch, running_loss, val_loss, running_accuracy, val_accuracy)]\n print('Epoch {0} running loss {1:.3f} val loss {2:.3f} running acc {3:.3f} '\n 'val acc{4:.3f} time {5:.3f}'.format(*all_losses[-1], time.time() - start_time))\n logfile.write('Epoch {0} running loss {1:.3f} val loss {2:.3f} running acc {3:.3f} '\n 'val acc{4:.3f} time {5:.3f}\\n'.format(*all_losses[-1], time.time() - start_time))\n\n # Saving checkpoint\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'acc': val_accuracy,\n 'lr': optimizer.param_groups[0]['lr']\n }, args.resume)\n\n # Make a lr scheduler step\n lr_scheduler.step()\n\n # Checking if current epoch yielded best validation accuracy\n is_best = val_accuracy > best_acc\n best_acc = max(val_accuracy, best_acc)\n\n # If so, saving best model state_dict\n if is_best and epoch > 0:\n torch.save(model.state_dict(), './model_compound_scaling/saved_models/best_model.pt')\n\n # Switch back to train mode\n model.train()\n start_time = time.time()", "def run_mlp_experiment(args, device):\n validation_ratio, record_train_acc, record_val_acc, record_test_acc = utils.configure_training_mode(args)\n\n train_loader, validation_loader, test_loader = datasets.build_loaders_by_dataset(\n args.dataset, args.batch_size, validation_ratio=validation_ratio, train_validation_split_seed=0)\n local_loss_list = 
utils.get_loss(args)\n nonlinearity = utils.get_nonlinearity(args)\n\n optimizer_local, local_opt_arguments_dict, local_scheduler_arguments_dict, \\\n optimizer_final, final_opt_arguments_dict, final_scheduler_arguments_dict = \\\n utils.choose_optimizers_and_parameters(args)\n\n conv_sizes = []\n do_pooling = []\n kernel_sizes = []\n\n fc_layers = [args.mlp_layer_size, args.mlp_layer_size, args.mlp_layer_size]\n\n if args.divisive_norm_fc:\n divisive_norm_list = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim,\n args.grouped_var_delta)\n for i in range(len(fc_layers))]\n else:\n divisive_norm_list = None\n\n alt_feedback_type = None\n if args.feedback_alignment:\n alt_feedback_type = 'feedback_alignment'\n elif args.sign_symmetry:\n alt_feedback_type = 'sign_symmetry'\n\n net = networks.Network(nonlinearity, local_loss_list, optimizer_local,\n torch.optim.lr_scheduler.MultiStepLR, conv_sizes, kernel_sizes,\n do_pooling, fc_layers, 'max', args.dataset, bias=False,\n local_opt_arguments_dict=local_opt_arguments_dict,\n local_scheduler_arguments_dict=local_scheduler_arguments_dict,\n dropout_p=args.dropout_p, batch_norm=args.batch_norm,\n divisive_norm_list_conv=None, divisive_norm_list_fc=divisive_norm_list,\n spatial_dropout=args.spatial_dropout, alt_feedback_type=alt_feedback_type)\n\n net = net.to(device)\n print(net)\n\n final_loss = nn.CrossEntropyLoss()\n\n if args.backprop:\n final_opt = optimizer_final(net.parameters(), **final_opt_arguments_dict)\n compute_local_loss = False\n update_local_loss = False\n else:\n final_opt = optimizer_final(net.softmax_layer.parameters(), **final_opt_arguments_dict)\n compute_local_loss = True\n update_local_loss = True\n\n final_scheduler = torch.optim.lr_scheduler.MultiStepLR(final_opt, **final_scheduler_arguments_dict)\n\n train_acc, val_acc, test_acc = utils.train_network(\n net, device, final_loss, final_opt, final_scheduler, args.n_epochs, train_loader, validation_loader,\n test_loader, compute_local_loss=compute_local_loss, update_local_loss=update_local_loss,\n record_train_acc=record_train_acc, record_val_acc=record_val_acc, record_test_acc=record_test_acc,\n print_results=True, backprop_batch_manhattan=args.backprop_batch_manhattan)\n\n return train_acc, val_acc, test_acc", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def just_clml(model):\n logging.info(f\"just_clml | {model['name']}\")\n logging.info(\"-------------- BEGIN ORIGINAL --------------\")\n logging.info(model[\"mod\"])\n logging.info(\"-------------- END ORIGINAL ----------------\")\n tmp_dir = tempfile.mkdtemp()\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n logging.info(\"Partitioning for CLML...\")\n mod = tvm.relay.op.contrib.clml.partition_for_clml(model[\"mod\"], model[\"params\"])\n partitioned_model = model.copy()\n partitioned_model[\"mod\"] = mod\n logging.info(\"-------------- BEGIN PARTITIONED --------------\")\n logging.info(partitioned_model[\"mod\"])\n logging.info(\"-------------- END PARTITIONED ----------------\")\n targets = []\n targets.append(OPENCL)\n targets.append(tvm.target.Target(\"clml\", HOST))\n compile_and_benchmark(\"just_clml\", partitioned_model, targets, tmp_dir)", "def test_no_model_parallel(self):\n for m in ['transformer/generator', 'transformer/ranker']:\n try:\n _ = self._distributed_train_model(model=m, model_parallel=True)\n except RuntimeError:\n pass\n else:\n self.fail('Did not raise RuntimeError')", "def pretrained():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def update_model(train_dir, image_size = 224, batch_size = 8, epochs = 2):\n \n # Create a data generator and specify\n # the parameters for augmentation\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n \n # create an iterator for data generator\n # and autment the images\n \n train_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=(image_size, image_size),\n batch_size= batch_size,\n class_mode='categorical')\n \n #load pretrained model\n model = models.load_model('vgg16_finetuned.h5')\n \n # Compile the pretrained model in order to update its weight\n model.compile(loss='categorical_crossentropy',\n 
optimizer = optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n \n # use keras checkpoint to update the model weight\n file_path = 'vgg16_finetuned.h5'\n checkpoint = ModelCheckpoint(file_path)\n callbacks_list = [checkpoint]\n \n # Train the model to update model weight\n history = model.fit_generator(\n train_generator,\n steps_per_epoch = train_generator.samples/train_generator.batch_size,\n epochs = epochs,\n callbacks = callbacks_list)", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def fit(\n self,\n lr: float,\n epochs: int,\n model_dir: str = \"checkpoints\",\n model_name: str = None,\n momentum: float = 0.95,\n weight_decay: float = 0.0001,\n mixed_prec: bool = False,\n use_one_cycle_policy: bool = False,\n warmup_pct: float = 0.3,\n lr_gamma: float = 0.1,\n lr_step_size: float = None,\n grad_steps: int = 2,\n save_model: bool = False,\n ) -> None:\n # set epochs\n self.epochs = epochs\n\n # set lr_step_size based on epochs\n if lr_step_size is None:\n lr_step_size = np.ceil(2 / 3 * self.epochs)\n\n # set model name\n if model_name is None:\n model_name = self.model_name\n\n os.makedirs(model_dir, 
exist_ok=True)\n\n data_loaders = {}\n data_loaders[\"train\"] = self.dataset.train_dl\n data_loaders[\"valid\"] = self.dataset.test_dl\n\n # Move model to gpu before constructing optimizers and amp.initialize\n device = torch_device()\n self.model.to(device)\n count_devices = num_devices()\n torch.backends.cudnn.benchmark = True\n\n named_params_to_update = {}\n total_params = 0\n for name, param in self.model.named_parameters():\n total_params += 1\n if param.requires_grad:\n named_params_to_update[name] = param\n\n print(\"Params to learn:\")\n if len(named_params_to_update) == total_params:\n print(\"\\tfull network\")\n else:\n for name in named_params_to_update:\n print(f\"\\t{name}\")\n\n # create optimizer\n optimizer = optim.SGD(\n list(named_params_to_update.values()),\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n )\n\n # Use mixed-precision if available\n # Currently, only O1 works with DataParallel: See issues https://github.com/NVIDIA/apex/issues/227\n if mixed_prec:\n # break if not AMP_AVAILABLE\n assert AMP_AVAILABLE\n # 'O0': Full FP32, 'O1': Conservative, 'O2': Standard, 'O3': Full FP16\n self.model, optimizer = amp.initialize(\n self.model,\n optimizer,\n opt_level=\"O1\",\n loss_scale=\"dynamic\",\n # keep_batchnorm_fp32=True doesn't work on 'O1'\n )\n\n # Learning rate scheduler\n if use_one_cycle_policy:\n # Use warmup with the one-cycle policy\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=lr,\n total_steps=self.epochs,\n pct_start=warmup_pct,\n base_momentum=0.9 * momentum,\n max_momentum=momentum,\n )\n else:\n # Simple step-decay\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=lr_step_size, gamma=lr_gamma,\n )\n\n # DataParallel after amp.initialize\n model = (\n nn.DataParallel(self.model) if count_devices > 1 else self.model\n )\n\n criterion = nn.CrossEntropyLoss().to(device)\n\n # set num classes\n topk = 5\n if topk >= self.num_classes:\n topk = self.num_classes\n\n for e in range(1, self.epochs + 1):\n print(\n f\"Epoch {e} =========================================================\"\n )\n print(f\"lr={scheduler.get_lr()}\")\n\n self.results.append(\n self.train_an_epoch(\n model,\n data_loaders,\n device,\n criterion,\n optimizer,\n grad_steps=grad_steps,\n mixed_prec=mixed_prec,\n topk=topk,\n )\n )\n\n scheduler.step()\n\n if save_model:\n self.save(\n os.path.join(\n model_dir,\n \"{model_name}_{epoch}.pt\".format(\n model_name=model_name, epoch=str(e).zfill(3),\n ),\n )\n )\n self.plot_precision_loss_curves()", "def train_epoch(loader, model, criterion, optimizer, cuda=True, verbose=False, subset=None,\n ia_model=None, ia_batch_c=64, ):\n loss_sum = 0.0\n stats_sum = defaultdict(float)\n correct_1 = 0.0\n correct_5 = 0.0\n verb_stage = 0\n\n num_objects_current = 0\n num_batches = len(loader)\n\n model.train()\n\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for i, (input, target) in enumerate(loader):\n if cuda:\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n loss, output, stats = criterion(model, input, target)\n\n optimizer.zero_grad()\n loss.backward()\n\n optimizer.step()\n loss_sum += loss.data.item() * input.size(0)\n for key, value in stats.items():\n stats_sum[key] += value * input.size(0)\n\n #pred = output.data.argmax(1, keepdim=True)\n #correct += pred.eq(target.data.view_as(pred)).sum().item()\n _, pred = 
output.topk(5, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n correct_1 += correct[0].view(-1).float().sum(0)\n correct_5 += correct[:5].view(-1).float().sum(0)\n\n num_objects_current += input.size(0)\n\n if verbose and 10 * (i + 1) / num_batches >= verb_stage + 1:\n print('Stage %d/10. Loss: %12.4f. Acc: %6.2f. Top 5 Acc: %6.2f' % (\n verb_stage + 1, loss_sum / num_objects_current,\n correct_1 / num_objects_current * 100.0,\n correct_5 / num_objects_current * 100.0\n ))\n verb_stage += 1\n # print(loss_sum / num_objects_current)\n if ia_model is not None and i % ia_batch_c == 0:\n ia_model.collect_model(model)\n\n correct_5 = correct_5.cpu()\n correct_1 = correct_1.cpu()\n return {\n 'loss': loss_sum / num_objects_current,\n 'accuracy': correct_1 / num_objects_current * 100.0,\n 'top5_accuracy': correct_5 / num_objects_current * 100.0,\n 'stats': {key: value / num_objects_current for key, value in stats_sum.items()}\n }", "def train(molecule: Sequence[system.Atom],\n spins: Tuple[int, int],\n batch_size: int,\n network_config: Optional[NetworkConfig] = None,\n pretrain_config: Optional[PretrainConfig] = None,\n optim_config: Optional[OptimConfig] = None,\n kfac_config: Optional[KfacConfig] = None,\n mcmc_config: Optional[MCMCConfig] = None,\n logging_config: Optional[LoggingConfig] = None,\n multi_gpu: bool = False,\n double_precision: bool = False,\n graph_path: Optional[str] = None):\n\n if not mcmc_config:\n mcmc_config = MCMCConfig()\n if not logging_config:\n logging_config = LoggingConfig()\n if not pretrain_config:\n pretrain_config = PretrainConfig()\n if not optim_config:\n optim_config = OptimConfig()\n if not kfac_config:\n kfac_config = KfacConfig()\n if not network_config:\n network_config = NetworkConfig()\n\n nelectrons = sum(spins)\n precision = tf.float64 if double_precision else tf.float32\n\n if multi_gpu:\n strategy = tf.distribute.MirroredStrategy()\n else:\n # Get the default (single-device) strategy.\n strategy = tf.distribute.get_strategy()\n if multi_gpu:\n batch_size = batch_size // strategy.num_replicas_in_sync\n logging.info('Setting per-GPU batch size to %s.', batch_size)\n logging_config.replicas = strategy.num_replicas_in_sync\n logging.info('Running on %s replicas.', strategy.num_replicas_in_sync)\n\n # Create a re-entrant variable scope for network.\n with tf.variable_scope('model') as model:\n pass\n\n with strategy.scope():\n with tf.variable_scope(model, auxiliary_name_scope=False) as model1:\n with tf.name_scope(model1.original_name_scope):\n fermi_net = networks.FermiNet(\n atoms=molecule,\n nelectrons=spins,\n slater_dets=network_config.determinants,\n hidden_units=network_config.hidden_units,\n after_det=network_config.after_det,\n architecture=network_config.architecture,\n r12_ee_features=network_config.r12_ee_features,\n r12_en_features=network_config.r12_en_features,\n pos_ee_features=network_config.pos_ee_features,\n build_backflow=network_config.build_backflow,\n use_backflow=network_config.backflow,\n jastrow_en=network_config.jastrow_en,\n jastrow_ee=network_config.jastrow_ee,\n jastrow_een=network_config.jastrow_een,\n logdet=True,\n envelope=network_config.use_envelope,\n residual=network_config.residual,\n pretrain_iterations=pretrain_config.iterations)\n\n scf_approx = scf.Scf(\n molecule,\n nelectrons=spins,\n restricted=False,\n basis=pretrain_config.basis)\n if pretrain_config.iterations > 0:\n scf_approx.run()\n\n hamiltonian_ops = hamiltonian.operators(molecule, nelectrons)\n if 
mcmc_config.init_means:\n if len(mcmc_config.init_means) != 3 * nelectrons:\n raise RuntimeError('Initial electron positions of incorrect shape. '\n '({} not {})'.format(\n len(mcmc_config.init_means), 3 * nelectrons))\n init_means = [float(x) for x in mcmc_config.init_means]\n else:\n init_means = assign_electrons(molecule, spins)\n\n # Build the MCMC state inside the same variable scope as the network.\n with tf.variable_scope(model, auxiliary_name_scope=False) as model1:\n with tf.name_scope(model1.original_name_scope):\n data_gen = mcmc.MCMC(\n fermi_net,\n batch_size,\n init_mu=init_means,\n init_sigma=mcmc_config.init_width,\n move_sigma=mcmc_config.move_width,\n dtype=precision)\n with tf.variable_scope('HF_data_gen'):\n hf_data_gen = mcmc.MCMC(\n scf_approx.tf_eval_slog_hartree_product,\n batch_size,\n init_mu=init_means,\n init_sigma=mcmc_config.init_width,\n move_sigma=mcmc_config.move_width,\n dtype=precision)\n\n with tf.name_scope('learning_rate_schedule'):\n global_step = tf.train.get_or_create_global_step()\n lr = optim_config.learning_rate * tf.pow(\n (1.0 / (1.0 + (tf.cast(global_step, tf.float32) /\n optim_config.learning_rate_delay))),\n optim_config.learning_rate_decay)\n\n if optim_config.learning_rate < 1.e-10:\n logging.warning('Learning rate less than 10^-10. Not using an optimiser.')\n optim_fn = lambda _: None\n update_cached_data = None\n elif optim_config.use_kfac:\n cached_data = tf.get_variable(\n 'MCMC_cache',\n initializer=tf.zeros(shape=data_gen.walkers.shape, dtype=precision),\n use_resource=True,\n trainable=False,\n dtype=precision,\n )\n if kfac_config.adapt_damping:\n update_cached_data = tf.assign(cached_data, data_gen.walkers)\n else:\n update_cached_data = None\n optim_fn = lambda layer_collection: mean_corrected_kfac_opt.MeanCorrectedKfacOpt( # pylint: disable=g-long-lambda\n invert_every=kfac_config.invert_every,\n cov_update_every=kfac_config.cov_update_every,\n learning_rate=lr,\n norm_constraint=kfac_config.norm_constraint,\n damping=kfac_config.damping,\n cov_ema_decay=kfac_config.cov_ema_decay,\n momentum=kfac_config.momentum,\n momentum_type=kfac_config.momentum_type,\n loss_fn=lambda x: tf.nn.l2_loss(fermi_net(x)[0]),\n train_batch=data_gen.walkers,\n prev_train_batch=cached_data,\n layer_collection=layer_collection,\n batch_size=batch_size,\n adapt_damping=kfac_config.adapt_damping,\n is_chief=True,\n damping_adaptation_decay=kfac_config.damping_adaptation_decay,\n damping_adaptation_interval=kfac_config.damping_adaptation_interval,\n min_damping=kfac_config.min_damping,\n use_passed_loss=False,\n estimation_mode='exact',\n )\n else:\n adam = tf.train.AdamOptimizer(lr)\n optim_fn = lambda _: adam\n update_cached_data = None\n\n qmc_net = qmc.QMC(\n hamiltonian_ops,\n fermi_net,\n data_gen,\n hf_data_gen,\n clip_el=optim_config.clip_el,\n check_loss=optim_config.check_loss,\n )\n\n qmc_net.train(\n optim_fn,\n optim_config.iterations,\n logging_config,\n using_kfac=optim_config.use_kfac,\n strategy=strategy,\n scf_approx=scf_approx,\n global_step=global_step,\n determinism_mode=optim_config.deterministic,\n cached_data_op=update_cached_data,\n write_graph=os.path.abspath(graph_path) if graph_path else None,\n burn_in=mcmc_config.burn_in,\n mcmc_steps=mcmc_config.steps,\n )", "def _local_train(self, dataloader_with_memory, num_updates):\n # Local train\n _size = len(dataloader_with_memory)\n self.model = self.model.train()\n for _batch in range(num_updates):\n X, y = dataloader_with_memory.get_samples()\n X, y = X.to(self._device), 
y.to(self._device)\n if _batch == 0:\n # Initialize the batch-size using the first batch to avoid\n # edge cases with drop_last=False\n _batch_size = X.shape[0]\n _num_batches_per_epoch = (_size // _batch_size) + int(\n (_size % _batch_size) != 0\n )\n # Compute prediction and loss\n _pred = self.model(X)\n _loss = self._loss(_pred, y)\n\n # Backpropagation\n _loss.backward()\n self._optimizer.step()\n self._optimizer.zero_grad()\n self.num_batches_seen += 1\n _loss, _current_epoch = (\n _loss.item(),\n self.num_batches_seen // _num_batches_per_epoch,\n )\n\n if self.log:\n if _batch % self.log_period == 0:\n print(\n f\"loss: {_loss:>7f} after {self.num_batches_seen:>5d}\"\n f\" batches of data amounting to {_current_epoch:>5d}\"\n \" epochs.\"\n )\n self.writer.add_scalar(\n f\"client{self.client_id}/train/Loss\",\n _loss,\n self.num_batches_seen,\n )\n\n if _current_epoch > self.current_epoch:\n # At each epoch we look at the histograms of all the\n # network's parameters\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(\n f\"client{self.client_id}/{name}\", p, _current_epoch\n )\n\n self.current_epoch = _current_epoch", "def __init__(\n self,\n config: ModelParallelConfig,\n encoder_type: enum,\n total_virtual_tokens: int,\n token_dim: int,\n hidden_size,\n lstm_dropout: float,\n num_layers: int,\n init_std: float,\n taskname: str = \"taskname\",\n ):\n super().__init__()\n self.token_dim = token_dim\n self.input_size = token_dim\n self.output_size = token_dim\n self.hidden_size = hidden_size\n self.total_virtual_tokens = total_virtual_tokens\n self.encoder_type = encoder_type\n self.activation = \"gelu\"\n self.init_std = init_std\n self.taskname = taskname\n\n # Set fixed indicies for forward pass\n self.register_buffer(\"indices\", torch.LongTensor(list(range(self.total_virtual_tokens))))\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n self.inference_table = InferenceTable(taskname, self.token_dim, self.total_virtual_tokens)\n\n if self.encoder_type == PromptEncoderType.EMBEDDING:\n init.xavier_normal_(self.embedding.weight)\n elif self.encoder_type == PromptEncoderType.LSTM:\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = nn.Sequential(\n nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n nn.ReLU(),\n nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderType.MLP:\n if num_layers <= 1:\n raise ValueError(\n \"The MLP prompt encoder must have at least 2 layers, and exactly 2 layers is recommended.\"\n )\n\n layers = [nn.Linear(self.input_size, self.hidden_size), nn.ReLU()]\n for _ in range(num_layers - 2):\n layers.extend([nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU()])\n\n layers.append(nn.Linear(self.hidden_size, self.output_size))\n self.mlp_head = nn.Sequential(*layers)\n\n elif self.encoder_type == PromptEncoderType.TPMLP:\n self.tpmlp = TPMLP(config, self.total_virtual_tokens, self.hidden_size, self.output_size, self.init_std,)\n else:\n raise ValueError(\"Prompt encoder type not recognized. 
Please use one of MLP (recommended) or LSTM.\")", "def freeze_model(self):\n # BN layers need to be freezed explicitly since they cannot be freezed via '.requires_grad=False'\n for module in self.modules():\n if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):\n module.eval()\n \n # freeze all parameters\n for param in self.parameters():\n param.requires_grad = False" ]
[ "0.63490736", "0.6151383", "0.6141063", "0.610996", "0.6103388", "0.60952216", "0.59621423", "0.5786127", "0.5765342", "0.5759161", "0.57430667", "0.56928396", "0.56227285", "0.55986506", "0.55617046", "0.5559076", "0.55540496", "0.54423124", "0.5432483", "0.5429761", "0.53854805", "0.5384869", "0.53817934", "0.53600156", "0.53561264", "0.5350233", "0.5349301", "0.53333473", "0.53306377", "0.5314398", "0.53087825", "0.5301014", "0.52993435", "0.52933156", "0.52720755", "0.52712536", "0.5258186", "0.5258186", "0.5251674", "0.52379596", "0.52373564", "0.52313924", "0.5228925", "0.522534", "0.5220899", "0.5212246", "0.51980895", "0.51866823", "0.51849663", "0.5177942", "0.5170435", "0.51685554", "0.5165454", "0.5159893", "0.51560956", "0.51559895", "0.5139509", "0.51391095", "0.5136114", "0.512678", "0.51242214", "0.5118667", "0.5118409", "0.51033586", "0.50989825", "0.50975966", "0.50948894", "0.50940615", "0.50862724", "0.5077368", "0.507667", "0.5076242", "0.50735146", "0.50717145", "0.50702214", "0.50615233", "0.50608146", "0.5060788", "0.5054207", "0.50515693", "0.5049052", "0.5048281", "0.5047195", "0.5044287", "0.50442797", "0.50406003", "0.5039876", "0.5039842", "0.5033337", "0.503278", "0.50327045", "0.5030533", "0.5029399", "0.50265086", "0.5020748", "0.50137746", "0.50134945", "0.49994838", "0.49953678", "0.49927482", "0.49920082" ]
0.0
-1
Generates the model summary, which is required for model partitioning across GPUs, and then moves the model to GPU with data parallel/model parallel by calling adjust_model_for_gpus.
def create_summary_and_adjust_model_for_gpus(self) -> None:
        if self._model is None:
            raise ValueError("Model must be created before it can be adjusted.")
        if self.config.is_segmentation_model:
            summary_for_segmentation_models(self.config, self._model)
        # Prepare for mixed precision training and data parallelization (no-op if already done).
        # This relies on the information generated in the model summary.
        self.adjust_model_for_gpus()
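The adjustment step called at the end of this snippet, `self.adjust_model_for_gpus()`, is not shown here. The surrounding examples suggest it moves the model to CUDA and then applies either model parallelism or `DataParallel`. The sketch below is a minimal, hypothetical reconstruction of that pattern, not the actual implementation: the helper name `adjust_model_for_gpus_sketch`, the `config.use_model_parallel` flag, and the model's `partition_model()` method are assumptions for illustration.

import torch
from torch.nn import DataParallel


def adjust_model_for_gpus_sketch(model, config):
    """Hypothetical helper mirroring the GPU adjustment step."""
    if not torch.cuda.is_available():
        # Nothing to adjust when running on CPU only.
        return model
    model = model.cuda()
    if getattr(config, "use_model_parallel", False):
        # Partition the network across all visible GPUs (model parallel).
        devices = [torch.device(f"cuda:{i}") for i in range(torch.cuda.device_count())]
        model.partition_model(devices=devices)  # assumed method on the model
    else:
        # Replicate the model and split each batch across GPUs (data parallel).
        model = DataParallel(model, device_ids=list(range(torch.cuda.device_count())))
    return model

Generating the summary first matters because the per-layer shape information it records is what a partitioning step can use to balance the network across devices, which is why the snippet runs `summary_for_segmentation_models` before the adjustment.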
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n random_state = RandomStateSnapshot.snapshot_random_state()\n # There appears to be a bug in apex, where previous use (in training for example) causes problems\n # when another model is later built on the CPU (for example, before loading from a checkpoint)\n # https://github.com/NVIDIA/apex/issues/694\n # Hence, move the model to the GPU before doing model summary.\n if config.use_gpu:\n model = model.cuda()\n if isinstance(config, ScalarModelBase):\n # To generate the model summary, read the first item of the dataset. Then use the model's own\n # get_model_input function to convert the dataset item to input tensors, and feed them through the model.\n train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)\n train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))\n model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs\n # The model inputs may already be converted to float16, assuming that we would do mixed precision.\n # However, the model is not yet converted to float16 when this function is called, hence convert back to float32\n summary = ModelSummary(model)\n summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)\n elif config.is_segmentation_model:\n summary_for_segmentation_models(config, model)\n assert model.summarizer\n summary = model.summarizer # type: ignore\n else:\n raise ValueError(\"Don't know how to generate a summary for this type of model?\")\n RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)\n random_state.restore_random_state()", "def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")", "def _build_model(self):\n tf.set_random_seed(self.params.tf_random_seed)\n np.random.seed(4321)\n phase_train = not (self.params.eval or self.params.forward_only)\n\n log_fn('Generating model')\n losses = []\n device_grads = []\n all_logits = []\n all_top_1_ops = []\n all_top_5_ops = []\n enqueue_ops = []\n gpu_compute_stage_ops = []\n gpu_grad_stage_ops = []\n\n with tf.device(self.global_step_device):\n global_step = tf.train.get_or_create_global_step()\n \n # Build the processing and model for the worker.\n (image_producer_ops,\n image_producer_stages) = self._build_image_processing(shift_ratio=0)\n image_producer_ops = tf.group(*image_producer_ops)\n update_ops = None\n staging_delta_ops = []\n\n for device_num in range(len(self.devices)):\n with self.variable_mgr.create_outer_variable_scope(\n device_num), tf.name_scope('tower_%i' % device_num) as name_scope:\n results = self.add_forward_pass_and_gradients(\n phase_train, device_num, device_num,\n image_producer_stages[device_num], gpu_compute_stage_ops,\n gpu_grad_stage_ops)\n if phase_train:\n losses.append(results['loss'])\n device_grads.append(results['gradvars'])\n \n\n if device_num == 0:\n # Retain the Batch Normalization updates operations only from the\n # first tower. These operations update the moving mean and moving\n # variance variables, which are updated (but not used) during\n # training, and used during evaluation. The moving mean and variance\n # approximate the true mean and variance across all images in the\n # dataset. 
Therefore, in replicated mode, these moving averages would\n # be almost identical for each tower, and so we only update and save\n # the moving averages for one tower. In parameter server mode, all\n # towers share a copy of the variables so we also only need to update\n # and save the moving averages once.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)\n staging_delta_ops = list(self.variable_mgr.staging_delta_ops)\n \n enqueue_ops.append(tf.group(*gpu_compute_stage_ops))\n\n fetches = self._build_fetches(global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops,\n all_top_5_ops, phase_train)\n return (image_producer_ops, enqueue_ops, fetches)", "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def _regular_build(self):\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n 
tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')", "def _build_all_models(self):\r\n self.output_tensors = {}\r\n self.loss_terms = {}\r\n self.metrics = {}\r\n\r\n def _build_datasource_summaries(data_sources, mode):\r\n \"\"\"Register summary operations for input data from given data sources.\"\"\"\r\n with tf.variable_scope('%s_data' % mode):\r\n for data_source_name, data_source in data_sources.items():\r\n tensors = data_source.output_tensors\r\n for key, tensor in tensors.items():\r\n summary_name = '%s/%s' % (data_source_name, key)\r\n shape = tensor.shape.as_list()\r\n num_dims = len(shape)\r\n if num_dims == 4: # Image data\r\n if shape[1] == 1 or shape[1] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_first')\r\n elif shape[3] == 1 or shape[3] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_last')\r\n # TODO: fix issue with no summary otherwise\r\n elif num_dims == 2:\r\n self.summary.histogram(summary_name, tensor)\r\n else:\r\n logger.debug('I do not know how to create a summary for %s (%s)' %\r\n (summary_name, tensor.shape.as_list()))\r\n\r\n def _build_train_or_test(mode):\r\n data_sources = self._train_data if mode == 'train' else self._test_data\r\n\r\n # Build model\r\n output_tensors, loss_terms, metrics = self.build_model(data_sources, mode=mode)\r\n\r\n # Record important tensors\r\n self.output_tensors[mode] = output_tensors\r\n self.loss_terms[mode] = loss_terms\r\n self.metrics[mode] = metrics\r\n\r\n # Create summaries for scalars\r\n if mode == 'train':\r\n for name, loss_term in loss_terms.items():\r\n self.summary.scalar('loss/%s/%s' % (mode, name), loss_term)\r\n for name, metric in metrics.items():\r\n self.summary.scalar('metric/%s/%s' % (mode, name), metric)\r\n\r\n # Build the main model\r\n if len(self._train_data) > 0:\r\n _build_datasource_summaries(self._train_data, mode='train')\r\n _build_train_or_test(mode='train')\r\n logger.info('Built model.')\r\n\r\n # Print no. 
of parameters and lops\r\n flops = tf.profiler.profile(\r\n options=tf.profiler.ProfileOptionBuilder(\r\n tf.profiler.ProfileOptionBuilder.float_operation()\r\n ).with_empty_output().build())\r\n logger.info('------------------------------')\r\n logger.info(' Approximate Model Statistics ')\r\n logger.info('------------------------------')\r\n logger.info('FLOPS per input: {:,}'.format(flops.total_float_ops / self._batch_size))\r\n logger.info(\r\n 'Trainable Parameters: {:,}'.format(\r\n np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])\r\n )\r\n )\r\n logger.info('------------------------------')\r\n\r\n # If there are any test data streams, build same model with different scope\r\n # Trainable parameters will be copied at test time\r\n if len(self._test_data) > 0:\r\n _build_datasource_summaries(self._test_data, mode='test')\r\n with tf.variable_scope('test'):\r\n _build_train_or_test(mode='test')\r\n logger.info('Built model for live testing.')\r\n\r\n if self._enable_live_testing:\r\n self._tester._post_model_build() # Create copy ops to be run before every test run\r", "def main(model_arch: str, images: List, batch_size: int,\n batches_per_step: int, loop: bool, num_iterations: int, num_ipus: int, mode: str, data: str,\n available_memory_proportion: float, gen_report: bool, save_graph_pb: bool, use_ipu_model: bool) -> None:\n\n if (available_memory_proportion <= 0.05) or (available_memory_proportion > 1):\n raise ValueError('Invalid \"availableMemoryProportion\" value: must be a float >=0.05'\n ' and <=1 (default value is 0.6)')\n\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --log_cycle_count=0\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--log_cycle_count=0\"\n\n if data == \"synthetic\":\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"\"\n\n if use_ipu_model:\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_ipu_model\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_ipu_model\"\n\n # Select model architecture\n model_cls = model_dict[model_arch]\n if model_arch == 'googlenet':\n model_arch = 'inceptionv1'\n config = Path(f'configs/{model_arch}.yml')\n\n # Create graph and data iterator\n loop_op, infeed_initializer, outfeed_op = construct_graph(model_cls, config,\n f\"./checkpoints/{model_arch}/\",\n batch_size, batches_per_step,\n images, loop,\n model_cls.preprocess_method(), num_ipus,\n mode, save_graph_pb)\n # Run on model or device\n if gen_report:\n get_report(loop_op, infeed_initializer, outfeed_op, f\"{config.stem}_report.txt\",\n available_memory_proportion=available_memory_proportion)\n else:\n ground_truth = tuple([Path(filename).stem for filename in images])\n run_inference(loop_op, infeed_initializer, outfeed_op, batch_size, batches_per_step, config.stem,\n model_cls.decode_method(), ground_truth, num_iterations, num_ipus, mode, data,\n available_memory_proportion=available_memory_proportion)", "def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\n data_dir, num_targets, dataset, validate, seed, shuffled, shift,\n pad, batch_size=128):\n summary_dir += '/train/'\n with tf.Graph().as_default():\n # Build model\n features = get_features('train', batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, 
evaluate=False,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n # Print stats\n param_stats = tf.profiler.profile(\n tf.get_default_graph(),\n options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n writer = tf.summary.FileWriter(summary_dir)\n run_experiment(load_training, summary_dir, writer, train_experiment,\n model, result, max_steps, save_step)\n writer.close()", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n assert isinstance(model, BaseModel)\n crop_size = config.crop_size\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size, crop_size)\n try:\n 
model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)\n except AttributeError as e:\n logging.warning(f\"summary_for_segmentation_models failed with exception {e}\")", "def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. 
\"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if 
self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import 
model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n 
sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def create_summary_and_adjust_mean_teacher_model_for_gpus(self) -> None:\n if self._mean_teacher_model is None:\n raise ValueError(\"Mean teacher model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._mean_teacher_model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_mean_teacher_model_for_gpus()", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def summarize_model(\n model: keras.Model, fig_dir: Union[str, None] = None\n) -> None:\n\n submodels = []\n for layer in model.layers:\n if isinstance(layer, TimeDistributed):\n submodels.append(layer.layer)\n\n for submodel in submodels:\n submodel.summary()\n model.summary()\n\n if fig_dir is not None:\n for submodel in submodels:\n keras.utils.plot_model(\n submodel, os.path.join(fig_dir, f'model_{submodel.name}.png'),\n dpi=300\n )\n keras.utils.plot_model(\n model, os.path.join(fig_dir, 'model_full.png'), dpi=300\n )", "def main():\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n 
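The training loop just above saves a checkpoint only when the new validation score beats `val_max`, and keeps training otherwise. A minimal, framework-agnostic sketch of that best-checkpoint bookkeeping; `BestCheckpointSaver` and `save_fn` are illustrative names, not taken from the snippet:

```python
class BestCheckpointSaver:
    """Trigger a save only when a monitored validation metric improves."""

    def __init__(self, save_fn, mode="max"):
        # save_fn(step) performs the actual serialization (e.g. saver.save / torch.save)
        self.save_fn = save_fn
        self.sign = 1.0 if mode == "max" else -1.0
        self.best = float("-inf")

    def update(self, step, metric):
        """Save at `step` if `metric` beats the best value seen so far."""
        if self.sign * metric > self.best:
            self.best = self.sign * metric
            self.save_fn(step)
            return True
        return False


if __name__ == "__main__":
    saved_steps = []
    saver = BestCheckpointSaver(save_fn=saved_steps.append, mode="max")
    for step, val_acc in enumerate([0.2, 0.5, 0.4, 0.7]):
        saver.update(step, val_acc)
    print(saved_steps)  # -> [0, 1, 3]: only improving steps trigger a save
```

Centralising the comparison this way keeps the "update the best value only after the save" ordering in one place instead of repeating it inside the loop body.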
print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = 
output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save", "def create_snapshot_model(model_args):\n # similar to create_separate_model but with experts pretrained\n # 1. get model directory path with models at each epoch for a global model\n # 2. choose the model at epochs that gives best validation performance for each cohort\n # as starting point\n # 3. 
finetune the resulting model\n tasks = model_args['tasks']\n X_val, y_val, cohorts_val = model_args['X_val'], model_args['y_val'], model_args['cohorts_val']\n val_loader = create_loader(X_val, y_val, batch_size=100, shuffle=False) \n # convert y_val and cohorts_val to numpy\n y_val, cohorts_val = dataset2numpy(y_val).astype(int), dataset2numpy(cohorts_val).astype(int)\n\n experts_auc = [(None, 0) for _ in range(len(tasks))] # init to (n model, 0 auc)\n for fn in glob.glob(model_args['global_model_dir'] + \"/epoch*.m\"):\n net = torch.load(fn)\n y_pred = get_output(net, val_loader).ravel()\n for i, task in enumerate(tasks):\n y_val_in_task = y_val[cohorts_val == task]\n y_pred_in_task = y_pred[cohorts_val == task]\n try:\n auc = roc_auc_score(y_val_in_task, y_pred_in_task)\n except:\n auc = 0.1 # slightly larger than 0 but shouldn't be selected\n if auc > experts_auc[i][1]:\n experts_auc[i] = (net, auc)\n\n experts = nn.ModuleList([expert for expert, auc in experts_auc])\n # currently is inefficient by running all models for all tasks\n # I should be able to just run the required expert\n model = Separate_MIMIC_Model(experts)\n return model", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def main(batch_size, saves_dir=TENSORFLOW_SAVES_DIR):\n batches = [1, 8, 16, 32, 64]\n if batch_size:\n batches = [batch_size]\n\n for batch_size in batches:\n print(\"Batch size: {}\".format(batch_size))\n batch = np.random.random((batch_size, 224, 224, 3))\n\n # our default model\n tf.reset_default_graph()\n usual_model = Model()\n measure_model(usual_model, \"Usual model\", batch)\n usual_model.sess.close()\n\n # our binary file\n tf.reset_default_graph()\n frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='constant_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(frozen_model, \"Frozen model\", batch)\n frozen_model.sess.close()\n\n # binary file with some constant operations\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='optimized_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, \"Optimized frozen model\", batch)\n optimized_frozen_model.sess.close()\n\n # model quantized with python\n model_name = \"Quantized with python\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_python.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, 
model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)\n\n # model quantized with bazel\n model_name = \"Quantized with bazel\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_bazel.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n 
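The batch-size benchmark above times the default, frozen, and quantized graphs one after another and simply skips variants whose files are missing. A rough timing sketch in the same spirit, with warm-up iterations excluded from the measurement; `time_inference` and `predict_fn` are illustrative names rather than the snippet's `measure_model`:

```python
import time

def time_inference(predict_fn, batch, n_warmup=3, n_runs=10):
    """Average per-run latency of `predict_fn(batch)` after a few warm-up runs.

    `predict_fn` is any callable doing one forward pass (e.g. a wrapped
    session.run or model.predict); warm-up runs absorb one-off setup cost.
    """
    for _ in range(n_warmup):
        predict_fn(batch)
    start = time.perf_counter()
    for _ in range(n_runs):
        predict_fn(batch)
    return (time.perf_counter() - start) / n_runs


if __name__ == "__main__":
    for batch_size in (1, 8, 16, 32, 64):
        batch = [[0.0] * 32] * batch_size          # dummy input of the requested size
        avg = time_inference(lambda b: [sum(row) for row in b], batch)
        print(f"batch {batch_size:2d}: {avg * 1e6:.1f} µs/run")
```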
acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, 
model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()", "def _benchmark_cnn(self):\n self.single_session = False\n (image_producer_ops, enqueue_ops, fetches) = self._build_model()\n fetches_list = nest.flatten(list(fetches.values()))\n main_fetch_group = tf.group(*fetches_list)\n execution_barrier = None\n \n\n global_step = tf.train.get_global_step()\n with tf.device(self.global_step_device):\n with tf.control_dependencies([main_fetch_group]):\n fetches['inc_global_step'] = global_step.assign_add(1)\n\n\n local_var_init_op = tf.local_variables_initializer()\n variable_mgr_init_ops = [local_var_init_op]\n with tf.control_dependencies([local_var_init_op]):\n variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())\n local_var_init_op_group = tf.group(*variable_mgr_init_ops)\n\n summary_op = tf.summary.merge_all()\n is_chief = (not self.job_name or self.task_index == 0)\n summary_writer = None\n \n # We run the summaries in the same thread as the training operations by\n # passing in None for summary_op to avoid a summary_thread being started.\n # Running summaries and training operations in parallel could run out of\n # GPU memory.\n saver = tf.train.Saver(\n self.variable_mgr.savable_variables(), save_relative_paths=True)\n ready_for_local_init_op = None\n \n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=self.params.train_dir,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_var_init_op_group,\n saver=saver,\n global_step=global_step,\n summary_op=None,\n save_model_secs=self.params.save_model_secs,\n summary_writer=summary_writer)\n\n step_train_times = []\n start_standard_services = (\n self.params.summary_verbosity >= 1 or\n self.dataset.queue_runner_required())\n target = self.cluster_manager.get_target() if self.cluster_manager else ''\n with sv.managed_session(\n master=target,\n config=create_config_proto(self.params),\n start_standard_services=start_standard_services) as sess:\n image_producer = cnn_util.ImageProducer(sess, image_producer_ops,\n self.batch_group_size)\n image_producer.start()\n for i in xrange(len(enqueue_ops)):\n sess.run(enqueue_ops[:(i + 1)])\n image_producer.notify_image_consumption()\n self.init_global_step, = sess.run([global_step])\n if not self.single_session:\n global_step_watcher = GlobalStepWatcher(\n sess, global_step,\n self.num_workers * self.num_warmup_batches +\n self.init_global_step,\n self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)\n global_step_watcher.start()\n \n\n log_fn('Running warm up')\n local_step = -1 * self.num_warmup_batches\n done_fn = global_step_watcher.done\n loop_start_time = time.time()\n while not done_fn():\n if 
local_step == 0:\n log_fn('Done warm up')\n \n header_str = 'Step\\tImg/sec\\tloss'\n \n log_fn(header_str)\n \n # reset times to ignore warm up batch\n step_train_times = []\n loop_start_time = time.time()\n \n fetch_summary = None\n summary_str = benchmark_one_step(\n sess, fetches, local_step,\n self.batch_size * (self.num_workers if self.single_session else 1),\n step_train_times, self.trace_filename, image_producer, self.params,\n fetch_summary)\n \n local_step += 1\n loop_end_time = time.time()\n # Waits for the global step to be done, regardless of done_fn.\n \n num_steps = global_step_watcher.num_steps()\n elapsed_time = global_step_watcher.elapsed_time()\n\n average_wall_time = elapsed_time / num_steps if num_steps > 0 else 0\n images_per_sec = ((self.num_workers * self.batch_size) / average_wall_time\n if average_wall_time > 0 else 0)\n\n log_fn('-' * 64)\n log_fn('total images/sec: %.2f' % images_per_sec)\n log_fn('-' * 64)\n image_producer.done()\n #if is_chief:\n # store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)\n # Save the model checkpoint.\n \n sv.stop()\n return {\n 'num_workers': self.num_workers,\n 'num_steps': num_steps,\n 'average_wall_time': average_wall_time,\n 'images_per_sec': images_per_sec\n }", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)", "def main():\n args = arg_parser()\n if(args.gpu):\n gpu = args.gpu\n else:\n print(\"GPU mode not specified, will use the default value - Use GPU\")\n gpu = \"Y\"\n # Device setting:\n device = device_setting(gpu)\n \n # Prepare the datasets and dataloaders:\n print(\"\\nPreparing dataset for train/valid/test ...\")\n train_loader, valid_loader, test_loader, train_data, valid_data, test_data = load_dataset()\n \n # Model architects, criterion and optimizer:\n print(\"\\nNetwork archetecture building ...\")\n model, criterion, optimizer = network(device=device,\n architecture=args.architecture,\n learning_rate=args.learning_rate,\n hidden_size=args.hidden_size,\n dropout=args.dropout,\n output_size=args.output_size)\n \n # Train the model:\n print(\"\\n\")\n model = train(model=model,\n epochs=5,\n learning_rate=args.learning_rate,\n criterion=criterion,\n optimizer=optimizer,\n train_loader=train_loader,\n valid_loader=valid_loader,\n device=device)\n \n # Validate the model performance on the test set:\n print(\"\\nValidate model performance on test set ...\")\n test(model=model, test_loader=test_loader, device=device)\n \n # Save model checkpoint:\n print(\"\\nSave model checkpoint ...\")\n save(model=model, train_data=train_data, epochs=args.epochs, architecture=args.architecture)", "def auto_model_profiling(model_info, server_name, device_util_thd=0.01, device_memory_thd=0.01, period=10):\n\n different_kind_devices = collections.OrderedDict()\n for gpu in GPUtil.getGPUs():\n if gpu.name not in different_kind_devices:\n different_kind_devices[gpu.name] = gpu\n\n for device in list(different_kind_devices.values()):\n profiler = Profiler(model_info=model_info, server_name=server_name)\n monitor = UtilMonitor(device, profiler, period, device_util_thd, device_memory_thd)\n monitor.start()", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = 
os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' + args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n \n print('#'*78,'\\nprinting model variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape().as_list()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim\n print('{:}: shape={:}, variable_parameters={:}'.format(\n variable.name, shape, variable_parameters))\n total_parameters += variable_parameters\n print('total model parameters: {:}'.format(total_parameters))\n print('#'*78)", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def build_model(cfg, gpu_id=None):\n # Construct the model\n if MODEL_REGISTRY.get(cfg.MODEL.NAME) == None:\n # attempt to find standard models\n model = BaseVideoModel(cfg)\n else:\n # if the model is explicitly defined,\n # it is directly constructed from the model pool\n model = MODEL_REGISTRY.get(cfg.MODEL.NAME)(cfg)\n\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. 
Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n model = model.cuda(device=cur_device)\n \n model_ema = None\n if cfg.MODEL.EMA.ENABLE:\n model_ema = ModelEmaV2(model, decay=cfg.MODEL.EMA.DECAY)\n\n try:\n # convert batchnorm to be synchronized across \n # different GPUs if needed\n sync_bn = cfg.BN.SYNC_BN\n if sync_bn == True and cfg.NUM_GPUS * cfg.NUM_SHARDS > 1:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n except:\n sync_bn = None\n\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS*cfg.NUM_SHARDS > 1:\n # Make model replica operate on the current device\n if cfg.PAI:\n # Support distributed training on the cluster\n model = torch.nn.parallel.DistributedDataParallel(\n module=model\n )\n else:\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n\n return model, model_ema", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def run_model(model, data, inst, cfg, *, var='gender'):\n\n seed = stan_seed(inst, var)\n\n users = data.profiles.assign(unum=np.arange(len(data.profiles), dtype='i4') + 1)\n\n lists = data.lists.reset_index()\n lists['Algorithm'] = lists['Algorithm'].astype('category')\n algos = lists['Algorithm'].cat.categories\n\n lists = lists.join(users[['unum']], on='user')\n\n _log.info('running full model on %d profiles and %d lists (%d algorithms) for %s',\n len(data.profiles), len(data.lists), len(algos), inst)\n timer = Stopwatch()\n\n stan_data = {\n 'A': len(algos),\n 'J': len(users),\n 'NL': len(lists),\n 'ru': lists['unum'],\n 'ra': lists['Algorithm'].cat.codes + 1,\n }\n if var == 'gender':\n stan_data['n'] = users['Known']\n stan_data['y'] = users['female']\n stan_data['rn'] = lists['Known']\n stan_data['ry'] = lists['female']\n out_pfx = 'full'\n elif var == 'dcode':\n stan_data['n'] = users['dcknown']\n stan_data['y'] = users['dcyes']\n stan_data['rn'] = lists['dcknown']\n stan_data['ry'] = lists['dcyes']\n out_pfx = 'full-dcode'\n else:\n raise ValueError(f'unknown variant {var}')\n\n fit = model.sampling(stan_data, seed=seed, check_hmc_diagnostics=True, **cfg)\n _log.info('full-model sampling for %s finished in %s', inst, timer)\n summary = fit.stansummary(pars=[\"mu\", \"sigma\", \"nMean\", \"nDisp\", \"recB\", \"recS\", \"recV\"])\n print(summary)\n (data_dir / inst / f'{out_pfx}-model.txt').write_text(summary)\n\n 
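One of the builders above keeps an exponential-moving-average copy of the weights via `ModelEmaV2(model, decay=cfg.MODEL.EMA.DECAY)`. A compact sketch of the core update rule such a helper applies after each optimizer step (an assumption about the general technique, not that helper's actual implementation):

```python
import copy
import torch

class EmaModel:
    """Minimal exponential-moving-average shadow of a model's parameters."""

    def __init__(self, model, decay=0.999):
        self.decay = decay
        self.module = copy.deepcopy(model).eval()
        for p in self.module.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        # ema <- decay * ema + (1 - decay) * current, applied tensor by tensor
        for ema_p, p in zip(self.module.state_dict().values(),
                            model.state_dict().values()):
            if ema_p.dtype.is_floating_point:
                ema_p.mul_(self.decay).add_(p.detach(), alpha=1.0 - self.decay)
            else:
                ema_p.copy_(p)


if __name__ == "__main__":
    net = torch.nn.Linear(4, 2)
    ema = EmaModel(net, decay=0.9)
    with torch.no_grad():
        net.weight.add_(1.0)       # pretend an optimizer step changed the weights
    ema.update(net)
    print(torch.allclose(ema.module.weight, net.weight))  # False: the EMA lags behind
```

Evaluating with the EMA weights, as the builder's returned `model_ema` suggests, typically smooths out step-to-step noise late in training.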
_log.info('extracting samples')\n samples = fit.extract(permuted=True)\n write_samples(data_dir / inst / f'{out_pfx}-samples.h5', samples, algo_names=list(algos))", "def compute(self):\n first_row = True\n assert (self.num_samples_batch <= 64), \"Batch sizes must be small, <= 64\"\n image_names_list, labels = read_img_names_labels_csv(self.image_vs_labels_csv)\n if len(image_names_list) < 64:\n self.num_samples_batch = len(image_names_list)\n samples_path_list = [os.path.join(self.image_samples_folder, img_name)\n for img_name in image_names_list[0:self.num_samples_batch]]\n batch_ndarray = read_images(samples_path_list, self.image_height, self.image_width)\n model_load_time = self.metric_load_model()\n pre_process_time = self.metric_pre_process_time(batch_ndarray)\n inf_time = self.metric_inference(batch_ndarray)\n col1 = \"Time taken to load the model(in sec)\"\n col2 = \"Time taken to pre_process a batch of \" + str(self.num_samples_batch) + \" images(in sec)\"\n col3 = \"Time taken to predict a batch of \" + str(self.num_samples_batch) + \" images(in sec)\"\n\n if not os.path.isdir(self.root_folder_to_save_results):\n os.makedirs(self.root_folder_to_save_results, exist_ok=True)\n with open(os.path.join(self.root_folder_to_save_results, \"model_inference.csv\"), \"w\", newline='') as \\\n model_inference_file:\n headers = [col1, col2, col3]\n model_inference_writer = DictWriter(model_inference_file, delimiter=',', fieldnames=headers)\n if first_row:\n model_inference_writer.writeheader()\n first_row = False\n model_inference_writer.writerow({col1: model_load_time, col2: pre_process_time, col3: inf_time})\n print(\"model_inference.csv file generated successfully..\")\n\n self.metric_layers(batch_ndarray, self.input_placeholder_tensor_name)\n # Check if the metrics result files have been generated successfully and give a success/failure message\n return result_message_performance_metrics(self.root_folder_to_save_results)", "def multi_gpu_online_evaluation(\n model: Module,\n data_loader: DataLoader,\n metric: Union[str, Sequence[str]] = 'EPE',\n tmpdir: Optional[str] = None,\n gpu_collect: bool = False) -> Dict[str, np.ndarray]:\n\n model.eval()\n metrics = metric if isinstance(metric, (type, list)) else [metric]\n result_metrics = []\n\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n\n for data in data_loader:\n with torch.no_grad():\n batch_results = model(test_mode=True, **data)\n # data['img_metas'] is Datacontainer\n img_metas = data['img_metas'].data[0]\n batch_flow = []\n batch_flow_gt = []\n batch_valid = []\n\n # a batch of result and a batch of img_metas\n for i in range(len(batch_results)):\n result = batch_results[i]\n img_meta = img_metas[i]\n # result.keys() is 'flow' or ['flow_fw','flow_bw']\n # img_meta.keys() is 'flow_gt' or ['flow_fw_gt','flow_bw_gt']\n for k in result.keys():\n\n if img_meta.get(k + '_gt', None) is None:\n # img_meta does not have flow_bw_gt, so just check\n # the forward predicted.\n if k == 'flow_bw':\n continue\n elif k == 'flow_fw':\n batch_flow_gt.append(img_meta['flow_gt'])\n else:\n batch_flow_gt.append(img_meta[k + '_gt'])\n\n batch_flow.append(result[k])\n batch_valid.append(\n img_meta.get('valid', np.ones_like(result[k][..., 0])))\n\n batch_results_metrics = eval_metrics(batch_flow, batch_flow_gt,\n batch_valid, metrics)\n # result_metrics is different from result_metrics in\n # `single_gpu_online_evaluation`\n # result_metrics is Sequence[Dict[str,ndarray]]\n 
result_metrics.append(batch_results_metrics)\n\n if rank == 0:\n batch_size = len(batch_results)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n from mmflow.apis.test import collect_results_cpu, collect_results_gpu\n if gpu_collect:\n result_metrics = collect_results_gpu(result_metrics, len(dataset))\n else:\n result_metrics = collect_results_cpu(result_metrics, len(dataset),\n tmpdir)\n rank, world_size = get_dist_info()\n if rank == 0:\n sys.stdout.write('\\n')\n # result_metrics_ is final result of evaluation with type\n # dict(metric_name=metric)\n result_metrics_ = dict()\n\n for sample_result_metrics in result_metrics:\n for k in sample_result_metrics.keys():\n if result_metrics_.get(k, None) is None:\n result_metrics_[k] = sample_result_metrics[k] / len(\n result_metrics)\n else:\n result_metrics_[k] += sample_result_metrics[k] / len(\n result_metrics)\n\n return result_metrics_", "def main(gpu_device='/gpu:0', cpu_device='/cpu:0'):\n config = Config()\n params = experiment_params()\n model_tools.model_builder(\n params=params,\n config=config,\n model_spec=build_model,\n gpu_device=gpu_device,\n cpu_device=cpu_device)", "def main(gpu_device='/gpu:0', cpu_device='/cpu:0'):\n config = Config()\n params = experiment_params()\n model_tools.model_builder(\n params=params,\n config=config,\n model_spec=build_model,\n gpu_device=gpu_device,\n cpu_device=cpu_device)", "def optimize_models_asr(args, models):\n for model in models:\n model.make_generation_fast_(\n beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n need_attn=args.print_alignment,\n )\n\n model.to(dev)", "def _export_model_representations(self, config):\n\n self.logger.msg1(\"Preparing model representations\")\n modelsets = get_modelsets(self.dbpath, self.obo, config.partition_size)\n prefix = self.rootpath + \"-models-\"\n for i, refset in enumerate(modelsets):\n progress = str(i+1) + \"/\" + str(len(modelsets))\n self.logger.msg1(\"Saving model representations: \"+progress)\n refset.save(prefix + str(i+1), \"phenotype\", what=(\"data\",))", "def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector", "def cli(ctx: click.Context,\n experiment: str,\n devices: List[int],\n ) -> None:\n f = EXPERIMENTS[experiment]\n try:\n model, B, C, _devices = f(devices)\n except ValueError as exc:\n # Examples:\n # ValueError: too few devices to hold given partitions (devices: 1, paritions: 2)\n ctx.fail(str(exc))\n\n optimizer = SGD(model.parameters(), lr=0.1)\n\n in_device = _devices[0]\n out_device = _devices[-1]\n torch.cuda.set_device(in_device)\n\n input = torch.rand(32, 3, 192, 192, device=in_device)\n target = torch.rand(32, 1, 192, 192, device=out_device)\n\n # HEADER ======================================================================================\n\n title = f'{experiment}, U-Net ({B}, {C})'\n click.echo(title)\n\n if isinstance(model, GPipe):\n click.echo(f'balance: {model.balance}')\n\n click.echo('torchgpipe: %s, python: %s, torch: %s, cudnn: %s, cuda: %s, gpu: %s' % (\n torchgpipe.__version__,\n platform.python_version(),\n torch.__version__,\n torch.backends.cudnn.version(),\n torch.version.cuda,\n torch.cuda.get_device_name(in_device)))\n\n hr()\n\n # PARAMETERS 
==================================================================================\n\n param_count = sum(p.storage().size() for p in model.parameters())\n param_size = sum(p.storage().size() * p.storage().element_size() for p in model.parameters())\n param_scale = 2 # param + grad\n\n click.echo(f'# of Model Parameters: {param_count:,}')\n click.echo(f'Total Model Parameter Memory: {param_size*param_scale:,} Bytes')\n\n # ACTIVATIONS =================================================================================\n\n try:\n torch.cuda.empty_cache()\n for d in _devices:\n torch.cuda.reset_max_memory_cached(d)\n\n for _ in range(2):\n output = model(input)\n output = cast(Tensor, output)\n loss = F.binary_cross_entropy_with_logits(output, target)\n loss.backward()\n optimizer.step()\n\n max_memory = 0\n for d in _devices:\n torch.cuda.synchronize(d)\n max_memory += torch.cuda.max_memory_cached(d)\n\n latent_size = max_memory - param_size*param_scale\n click.echo(f'Peak Activation Memory: {latent_size:,} Bytes')\n click.echo(f'Total Memory: {max_memory:,} Bytes')\n\n # MAX MEMORY PER DEVICE =======================================================================\n\n finally:\n hr()\n\n for d in _devices:\n memory_usage = torch.cuda.memory_cached(d)\n click.echo(f'{d!s}: {memory_usage:,} Bytes')", "def model_fn(features, labels, mode, params):\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec", "def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):\n\n # Build embeddings.\n if model_opt.model_type == \"text\":\n src_field = fields[\"src\"]\n src_emb = build_embeddings(model_opt, src_field)\n else:\n src_emb = None\n\n # Build encoder.\n encoder = build_encoder(model_opt, src_emb)\n\n # Build decoder.\n tgt_field = fields[\"tgt\"]\n tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src/tgt vocab should be the same if `-share_vocab` 
is specified.\n assert src_field.base_field.vocab == tgt_field.base_field.vocab, \\\n \"preprocess with -share_vocab if you use share_embeddings\"\n\n tgt_emb.word_lut.weight = src_emb.word_lut.weight\n\n if model_opt.share_position_embeddings:\n tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight\n\n decoder = build_decoder(model_opt, tgt_emb)\n\n # Build NMTModel(= encoder + decoder).\n if gpu and gpu_id is not None:\n device = torch.device(\"cuda\", gpu_id)\n elif gpu and not gpu_id:\n device = torch.device(\"cuda\")\n elif not gpu:\n device = torch.device(\"cpu\")\n\n # Build separate LM if doing simple fusion\n if model_opt.simple_fusion:\n layers = 12\n size = 768\n heads = 12\n\n lm_decoder_opt = copy.deepcopy(model_opt)\n lm_decoder_opt.dec_layers = layers\n lm_decoder_opt.use_GPT_version_ctxattn = False\n lm_decoder_opt.use_GPT_version_psa = False\n lm_decoder_opt.use_GPT_version_unconditional = True\n lm_decoder_opt.tgt_word_vec_size = size\n lm_decoder_opt.rnn_size = size\n lm_decoder_opt.dec_rnn_size = size\n lm_decoder_opt.transformer_ff = size*4\n lm_decoder_opt.dec_heads = heads\n lm_decoder_opt.position_encoding_learned_dec = True\n lm_decoder_opt.share_decoder_embeddings = True\n lm_decoder_opt.dropout = 0\n\n lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)\n logger.info(lm_decoder_emb)\n\n lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)\n load_decoder = lm_decoder\n\n model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)\n\n generator = SimpleFusionGenerator(model_opt.dec_rnn_size,\n lm_decoder_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab))\n generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight\n\n if model_opt.share_decoder_embeddings:\n generator.decoder_linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.lm_linear\n else:\n load_decoder = decoder\n if model_opt.unconditional:\n model = onmt.models.UncondModel(decoder)\n else:\n model = onmt.models.NMTModel(encoder, decoder)\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n\n if model_opt.padded_vocab_fix_me_later:\n gen_func = nn.Sequential(PadGen(), gen_func)\n\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab)),\n Cast(torch.float32),\n gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n gen_linear = generator[0]\n else:\n tgt_base_field = fields[\"tgt\"].base_field\n vocab_size = len(tgt_base_field.vocab)\n pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]\n generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)\n if model_opt.share_decoder_embeddings:\n generator.linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.linear\n\n if model_opt.encdec_share_params:\n for name, p in decoder.named_parameters():\n if 'ctx' in name or 'context' in name:\n continue\n pointer = encoder\n attrs = name.split('.')\n for attr_name in attrs[:-1]:\n pointer = getattr(pointer, attr_name)\n\n # pointer now has the encoder version of the parameter parent\n setattr(pointer, attrs[-1], p)\n\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n # Normally, just load the model parameters from checkpoint\n if 'gpt2_params' not in checkpoint and 
'enc_model' not in checkpoint:\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.b_2',\n r'\\1.layer_norm\\2.bias', s)\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.a_2',\n r'\\1.layer_norm\\2.weight', s)\n return s\n \n checkpoint['model'] = {fix_key(k): v\n for k, v in checkpoint['model'].items()}\n # end of patch for backward compatibility\n\n # Initialize rest of parameters normally\n if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n\n\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'], strict=False)\n else:\n # load the gpt parameters\n if 'gpt2_params' in checkpoint:\n init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or model_opt.GPT_representation_mode != 'none'\n \n if init_something:\n # Initialize all the weights first\n if model_opt.gpt2_init_zero:\n for p in decoder.parameters():\n p.data.zero_()\n if model_opt.simple_fusion:\n generator.decoder_linear.weight.data.zero_()\n generator.decoder_linear.bias.data.zero_()\n else:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n if encoder is not None:\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n if model_opt.zero_bias_init:\n gen_linear.bias.data.zero_()\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n gen_linear.bias.data.zero_()\n\n load_models = []\n if model_opt.GPT_representation_mode != 'none':\n load_embs = []\n if model_opt.GPT_representation_loc in ['both', 'src']:\n load_models.append(src_emb.gpt_model)\n load_embs.append(src_emb)\n if model_opt.GPT_representation_loc in ['both', 'tgt']:\n load_models.append(tgt_emb.gpt_model)\n load_embs.append(tgt_emb)\n \n else:\n if model_opt.gpt2_init_embanddec or model_opt.simple_fusion:\n load_models = [load_decoder]\n elif model_opt.gpt2_init_embandenc:\n load_models = [encoder]\n \n it_list = list(checkpoint['gpt2_params'])\n for lm_idx, load_model in enumerate(load_models):\n #print(lm_idx, load_model)\n for name, array in it_list:\n name = name[12:] # skip \"transformer.\"\n name = name.split('.')\n\n assigned = False\n if name[0] == 'wpe':\n if model_opt.GPT_representation_mode != 'none':\n pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n else:\n pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n elif name[0] == 'wte':\n if model_opt.GPT_representation_mode != 'none':\n pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n else:\n pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n if not model_opt.nopretrain_decemb:\n pointer.append(gen_linear.weight)\n if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n elif name[0] == 'ln_f':\n if name[1] == 
'weight':\n pointer = load_model.layer_norm.weight\n elif name[1] == 'bias':\n pointer = load_model.layer_norm.bias\n else:\n raise ValueError('I am missing something here!')\n\n elif name[0] == 'h':\n layer_num = name[1]\n pointer = getattr(load_model.transformer_layers, layer_num)\n if name[2] == 'attn':\n assigned = True\n pointer = pointer.self_attn\n full_data = torch.from_numpy(array)\n if name[3] == 'c_attn':\n end_size = full_data.shape[-1]//3\n assert full_data.shape[-1] % 3 == 0\n if name[4] == 'bias':\n if init_something:\n pointer.linear_query.bias.data = full_data[:end_size]\n pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n pointer.linear_values.bias.data = full_data[end_size*2:]\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.bias.orig = full_data[:end_size].clone()\n pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n else:\n raise ValueError('I am missing something here!')\n elif name[3] == 'c_proj':\n if name[4] == 'bias':\n if init_something:\n pointer.final_linear.bias.data = full_data\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.bias.orig = full_data.clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.final_linear.weight.data = full_data.t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n else:\n raise ValueError('I am missing something here!')\n\n elif name[2] == 'ln_1' or name[2] == 'ln_2':\n num = name[2][3]\n pointer = getattr(pointer, 'layer_norm_'+num)\n if name[2] == 'bias':\n pointer = pointer.bias\n elif name[2] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n elif name[2] == 'mlp':\n pointer = pointer.feed_forward\n pointer = getattr(pointer, name[2])\n if name[3] == 'bias':\n pointer = pointer.bias\n elif name[3] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n \n if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n if name[-1] == 'weight':\n array = array.T\n\n if not isinstance(pointer, list):\n pointer = [pointer]\n for pointer_i in pointer:\n target_size = int(math.ceil(array.shape[0]/8))*8\n padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n try:\n assert pointer_i.shape == array.shape or padded_vocab\n except AssertionError as e:\n \n e.args += (pointer_i.shape, array.shape)\n raise\n if init_something:\n print(\"Initialize PyTorch weight {}\".format(name))\n if padded_vocab:\n pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n else:\n pointer_i.data = torch.from_numpy(array)\n if 
model_opt.gpt2_params_std > 0:\n if padded_vocab:\n raise NotImplementedError\n else:\n pointer_i.orig = torch.from_numpy(array).clone()\n # name = name[6:] # skip \"model/\"\n # name = name.split('/')\n\n # assigned = False\n # if name[0] == 'wpe':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n # else:\n # pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n # elif name[0] == 'wte':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n # else:\n # pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n # if not model_opt.nopretrain_decemb:\n # pointer.append(gen_linear.weight)\n # if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n # pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n # elif name[0] == 'ln_f':\n # if name[1] == 'g':\n # pointer = load_model.layer_norm.weight\n # elif name[1] == 'b':\n # pointer = load_model.layer_norm.bias\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[0][0] == 'h':\n # layer_num = name[0][1:]\n # pointer = getattr(load_model.transformer_layers, layer_num)\n # if name[1] == 'attn':\n # assigned = True\n # pointer = pointer.self_attn\n # full_data = torch.from_numpy(array)\n # if name[2] == 'c_attn':\n # end_size = full_data.shape[-1]//3\n # assert full_data.shape[-1] % 3 == 0\n # if name[3] == 'b':\n # if init_something:\n # pointer.linear_query.bias.data = full_data[:end_size]\n # pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n # pointer.linear_values.bias.data = full_data[end_size*2:]\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.bias.orig = full_data[:end_size].clone()\n # pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n # pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n # pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n # pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n # pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n # pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[2] == 'c_proj':\n # if name[3] == 'b':\n # if init_something:\n # pointer.final_linear.bias.data = full_data\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.bias.orig = full_data.clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.final_linear.weight.data = full_data.t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[1] == 'ln_1' or name[1] == 'ln_2':\n # num = name[1][3]\n # pointer = getattr(pointer, 'layer_norm_'+num)\n # if name[2] == 'b':\n # pointer = pointer.bias\n # elif name[2] == 'g':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[1] == 'mlp':\n # pointer = pointer.feed_forward\n # pointer = getattr(pointer, name[2])\n # if name[3] == 'b':\n 
# pointer = pointer.bias\n # elif name[3] == 'w':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n \n # if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n # if name[-1] == 'w' or name[-1] == 'g':\n # array = array.T\n\n # if not isinstance(pointer, list):\n # pointer = [pointer]\n # for pointer_i in pointer:\n # target_size = int(math.ceil(array.shape[0]/8))*8\n # padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n # padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n # try:\n # assert pointer_i.shape == array.shape or padded_vocab\n # except AssertionError as e:\n \n # e.args += (pointer_i.shape, array.shape)\n # raise\n # if init_something:\n # print(\"Initialize PyTorch weight {}\".format(name))\n # if padded_vocab:\n # pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n # else:\n # pointer_i.data = torch.from_numpy(array)\n # if model_opt.gpt2_params_std > 0:\n # if padded_vocab:\n # raise NotImplementedError\n # else:\n # pointer_i.orig = torch.from_numpy(array).clone()\n if 'enc_model' in checkpoint:\n load_dict = {k[8:]: v for k, v in checkpoint['enc_model'] if 'encoder' in k}\n encoder.load_state_dict(load_dict, strict=True)\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') \\\n and model.encoder.embeddings is not None:\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec)\n\n # remove requires_grad from params that are not trained:\n if model_opt.notrain_emb or model_opt.notrain_embanddec:\n if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:\n model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n if model_opt.share_embeddings:\n model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n generator[0].weight.requires_grad = False\n\n if model_opt.notrain_genbias:\n generator[0].bias.requires_grad = False\n\n if model_opt.notrain_embanddec:\n for name, p in load_decoder.layer_norm.named_parameters():\n p.requires_grad = False\n for name, p in load_decoder.transformer_layers.named_parameters():\n if 'context' not in name and 'ctx' not in name: # Takes care of normal and psa versions\n p.requires_grad = False\n \n if model_opt.onlytrainln:\n for name, p in model.decoder.named_parameters():\n if 'layer_norm' not in name:\n p.requires_grad = False\n for p in generator.parameters():\n p.requires_grad = False\n\n if model_opt.onlytrainoutp:\n if model_opt.share_decoder_embeddings:\n raise ValueError\n\n for p in model.decoder.parameters():\n p.requires_grad = False\n\n if model_opt.simple_fusion:\n for p in 
lm_decoder.parameters():\n p.requires_grad = False\n for p in generator.lm_linear.parameters():\n p.requires_grad = False\n\n model.generator = generator\n model.to(device)\n if model_opt.model_dtype == 'fp16':\n model.half()\n\n for p in model.parameters():\n if hasattr(p, 'orig'):\n p.orig = p.orig.to(device)\n if model_opt.model_dtype == 'fp16':\n p.orig = p.orig.half()\n\n return model", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))", "def train_model(model, data_train, criterion, optimizer, csv_folder, gpu_id=0):\n model.train()\n model.cuda(gpu_id)\n for batch, (_, images, masks) in enumerate(data_train):\n w_prev = get_model_weights(model)\n # if batch%10 == 0:\n #print('Batch:', batch, 'of', len(data_train))\n images = Variable(images.cuda(gpu_id))\n masks = Variable(masks.cuda(gpu_id))\n outputs = model(images)\n #print(masks.shape, outputs.shape)\n loss = criterion(outputs, masks)\n optimizer.zero_grad()\n loss.backward()\n # Update weights\n optimizer.step()\n w_after = get_model_weights(model)\n diff = find_weight_diff(w_after, w_prev)\n export_history(diff, csv_folder, \"weight_difference.csv\")", "def main(opt):\n\n outputDir = \"processedOutput\"\n os.makedirs(outputDir, exist_ok=True)\n\n print(\"-------------------\")\n print(\"Processing results:\")\n print(\"-------------------\")\n \n cuda = torch.cuda.is_available()\n\n hr_shape = (opt.hr_height, opt.hr_width)\n\n # Count the number of unique residual layers mentioned in the generator state dict:\n generatorStateDict = torch.load(GetModelDataPath(\"generator\")) # Load the max trained weights from the /saved_models directory\n resBlocks = {}\n for key in generatorStateDict:\n processedKey = re.split(r'^(res_blocks\\.[0-9].)', key)\n if len(processedKey) > 1:\n resBlocks[processedKey[1]] = processedKey[1] # Insert an arbitrary entry: We just care about counting the unique keys\n\n num_residual_blocks = len(resBlocks)\n print(\"Counted \" + str(num_residual_blocks) + \" residual blocks in loaded generator state dict\")\n\n # Initialize generator and discriminator\n generator = GeneratorResNet(n_residual_blocks=num_residual_blocks)\n \n if cuda:\n print(\"Cuda is supported!!!\")\n torch.cuda.empty_cache()\n\n generator = generator.cuda()\n\n # Load pretrained models\n generator.load_state_dict(generatorStateDict)\n\n Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n\n\n #----------------\n # Process images:\n #----------------\n print(\"Processing images using the trained model:\")\n\n torch.cuda.empty_cache()\n\n testStartTime = time.time()\n totalTestTime = 0\n numTests = 0\n\n with torch.no_grad(): # Prevent OOM errors\n\n # Set models to eval mode, so batchnorm is disabled\n generator.eval()\n\n dataPath = GetDataPath(opt.valid_dataset_name)\n\n dataloader = DataLoader(\n ImageLoader(dataPath),\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_cpu,\n )\n\n # Process:\n for i, imgs in enumerate(dataloader):\n testStartTime = time.time()\n\n # Configure model input\n imgs_lr = Variable(imgs[\"img\"].type(Tensor))\n\n # Generate a high resolution image from low resolution input\n gen_hr = generator(imgs_lr)\n\n # --------------\n # Log Progress\n # --------------\n testTime = 
time.time() - testStartTime\n sys.stdout.write(\n \"[Processed image %d/%d] [Test time: %fs]\\n\"\n % (i, len(dataloader), testTime)\n )\n \n gen_hr = make_grid(gen_hr, nrow=1, normalize=True)\n\n save_image(gen_hr, GetArbitraryPath(outputDir) + (\"0\" if i < 10 else \"\") + \"%d.png\" % (i + 1), normalize=False)\n\n # Record the iteration time:\n totalTestTime = totalTestTime + testTime\n numTests = numTests + 1\n\n\n # ------------\n # Print stats:\n # ------------\n testTime = time.time() - testStartTime\n averageTestTime = totalTestTime / numTests\n\n print(\"\\Processing results:\\n-------------\")\n print(\"Total processing time = \" + str(testTime) + \" (secs) for \" + str(len(dataloader.dataset)) + \" test images\")\n print(\"Average processing time = \" + str(averageTestTime) + \" (secs)\")", "def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()", "def save_model(self, epoch):\n # Set the name for the model\n gen_lungs_filename = 'gen_lungs_model_epoch_{}.h5'.format(epoch + 1)\n disc_lungs_filename = 'disc_lungs_model_epoch_{}.h5'.format(epoch + 1)\n train_summary_lungs_filename = 'train_summary_lungs_epoch_{}.csv'.format(epoch + 1)\n\n gen_organs_filename = 'gen_organs_model_epoch_{}.h5'.format(epoch + 1)\n disc_organs_filename = 'disc_organs_model_epoch_{}.h5'.format(epoch + 1)\n train_summary_organs_filename = 'train_summary_organs_epoch_{}.csv'.format(epoch + 1)\n\n # Save the model and train summary\n self.generator_lungs.save(op.join(self.model_dir, gen_lungs_filename), include_optimizer=True)\n self.disc_lungs.save(op.join(self.model_dir, disc_lungs_filename), include_optimizer=True)\n self.summary_writer_lungs.to_csv(op.join(self.train_summary_dir, train_summary_lungs_filename))\n\n self.generator_organs.save(op.join(self.model_dir, gen_organs_filename), include_optimizer=True)\n self.disc_organs.save(op.join(self.model_dir, disc_organs_filename), include_optimizer=True)\n self.summary_writer_organs.to_csv(op.join(self.train_summary_dir, train_summary_organs_filename))\n return self", "def eval_model(device, model, sampler, loss_compute, logit_modifier_fxn, token_sampler,\n print_every, max_len, user_items_df, max_name_len=15, ingr_map=None, \n base_save_dir='', pad_ingr=None, ppx_only=False, **tensor_kwargs):\n start = datetime.now()\n results_dicts = []\n\n # Extract into tuples and list\n tensor_names, base_tensors = zip(*tensor_kwargs.items())\n\n # Iterate through batches in the epoch\n model.eval()\n with torch.no_grad():\n total_tokens = 0\n total_name_tokens = 0\n total_loss = 0.0\n total_name_loss = 0.0\n print_tokens = 0\n\n for i, batch in enumerate(tqdm(sampler.epoch_batches(), total=sampler.n_batches), 1):\n batch_users, items = [t.to(device) for t in batch]\n\n # Fill out batch information\n batch_map = dict(zip(\n tensor_names,\n get_batch_information_general(items, *base_tensors)\n ))\n use_ingr_embedding = batch_map['ingr_tensor'].size(-1) != MAX_INGR * MAX_INGR_TOK\n\n user_prior_technique_masks = torch.stack([get_user_prior_techniques_mask(\n user_ix=uix.item(), item_ix=iix.item(),\n user_items_df=user_items_df, tech_mask_tensor=tensor_kwargs['tech_mask_tensor'],\n device=device, normalize=True\n ) for uix, iix in zip(batch_users, items)], dim=0)\n\n # Logistics\n this_batch_size = batch_map['steps_tensor'].size(0)\n this_batch_num_tokens = (batch_map['steps_tensor'] != PAD_INDEX).data.sum().item()\n this_batch_num_name_tokens = (batch_map['name_tensor'] != 
PAD_INDEX).data.sum().item()\n name_targets = batch_map['name_tensor'][:, :-1]\n\n '''\n Teacher forcing - evaluate\n '''\n # Comparing out(token[t-1]) to token[t]\n (log_probs, _), (name_log_probs, _) = model.forward(\n device=device, inputs=(\n batch_map['calorie_level_tensor'],\n batch_map['name_tensor'],\n batch_map['ingr_tensor']\n ),\n ingr_masks=batch_map['ingr_mask_tensor'],\n user_prior_technique_masks=user_prior_technique_masks,\n targets=batch_map['steps_tensor'][:, :-1], max_len=max_len-1,\n start_token=START_INDEX, teacher_forcing=True,\n name_targets=name_targets,\n max_name_len=max_name_len-1,\n visualize=False\n )\n loss, name_loss = loss_compute(\n log_probs, batch_map['steps_tensor'][:, 1:],\n name_outputs=name_log_probs,\n name_targets=name_targets,\n norm=this_batch_size,\n model=model,\n clip=None\n )\n\n total_loss += loss\n total_name_loss += name_loss\n\n # Logging\n total_tokens += this_batch_num_tokens\n total_name_tokens += this_batch_num_name_tokens\n print_tokens += this_batch_num_tokens\n\n del log_probs, name_log_probs\n\n # Short-circuit if we only want to calculate test perplexity\n if ppx_only:\n if i % print_every == 0:\n elapsed = datetime.now() - start\n print(\"Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}\".format(\n i, loss / this_batch_size, name_loss / this_batch_size,\n print_tokens / elapsed.seconds\n ))\n start = datetime.now()\n print_tokens = 0\n continue\n\n '''\n Non-teacher-forcing - Generate!\n '''\n # Generates probabilities\n (log_probs, output_tokens, ingr_attns, prior_tech_attns), \\\n (name_log_probs, name_output_tokens) = model.forward(\n device=device, inputs=(\n batch_map['calorie_level_tensor'],\n batch_map['name_tensor'],\n batch_map['ingr_tensor']\n ),\n ingr_masks=batch_map['ingr_mask_tensor'],\n user_prior_technique_masks=user_prior_technique_masks,\n targets=batch_map['steps_tensor'][:, :-1], max_len=max_len-1,\n start_token=START_INDEX, teacher_forcing=False,\n logit_modifier_fxn=logit_modifier_fxn, token_sampler=token_sampler,\n visualize=True, max_name_len=max_name_len-1, name_targets=name_targets,\n )\n\n del log_probs, name_log_probs\n\n # Generated recipe\n calorie_levels, technique_strs, ingredient_strs, gold_strs, generated_strs, \\\n prior_items, recipe_reprs = get_batch_generated_recipes(\n batch_users=batch_users, batch_generated=output_tokens,\n max_ingr=MAX_INGR, max_ingr_tok=MAX_INGR_TOK,\n names_generated=name_output_tokens, ingr_map=ingr_map,\n user_items_df=user_items_df, **batch_map\n )\n\n for ix in range(len(generated_strs)):\n # Create save location: test_i<item>_u<user>\n ii = items[ix].data.item()\n uu = batch_users[ix].data.item()\n sample_id = 'test_i{}_u{}'.format(ii, uu)\n trial_save_dir = os.path.join(base_save_dir, sample_id)\n if not os.path.exists(trial_save_dir):\n os.mkdir(trial_save_dir)\n\n # Output tokens for heatmap axes\n out_indices = output_tokens[ix].detach().cpu().numpy().tolist()\n out_tokens = decode_ids(out_indices)\n trunc_indices = out_indices[:out_indices.index(END_INDEX)] \\\n if END_INDEX in out_indices else out_indices\n output_len = len(trunc_indices)\n output_techniques = [t for t in TECHNIQUES_LIST if t in generated_strs[ix]]\n results_dicts.append({\n 'u': uu,\n 'i': ii,\n 'generated': generated_strs[ix],\n 'n_tokens': output_len,\n 'generated_techniques': output_techniques,\n 'n_techniques': len(output_techniques)\n })\n\n # Save output\n with open(os.path.join(trial_save_dir, 'output.txt'), 'w+', encoding='utf-8') as wf:\n 
wf.write(recipe_reprs[ix])\n\n # Ingredient Attention\n ingr_attentions = np.matrix([\n a.squeeze().detach().cpu().numpy().tolist() for a in ingr_attns[ix]\n ]).T\n ingr_attn_df = pd.DataFrame(\n ingr_attentions[:len(ingredient_strs[ix])],\n index=ingredient_strs[ix], columns=out_tokens\n )\n ingr_attn_df = ingr_attn_df[ingr_attn_df.index != '']\n ingr_attn_df.to_pickle(\n os.path.join(trial_save_dir, 'ingredient_attention.pkl')\n )\n\n # Prior Technique Attention\n prior_tech_attention = np.matrix([\n a.squeeze().detach().cpu().numpy().tolist() for a in prior_tech_attns[ix]\n ]).T\n prior_tech_attn_df = pd.DataFrame(\n prior_tech_attention, index=TECHNIQUES_LIST + ['PAD'], columns=out_tokens\n )\n prior_tech_attn_df = prior_tech_attn_df[(prior_tech_attn_df.T != 0.0).any()]\n prior_tech_attn_df.to_pickle(\n os.path.join(trial_save_dir, 'prior_tech_attention.pkl')\n )\n\n if i % print_every == 0:\n elapsed = datetime.now() - start\n print(\"Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}\".format(\n i, loss / this_batch_size, name_loss / this_batch_size,\n print_tokens / elapsed.seconds\n ))\n print('SAMPLE DECODED RECIPE:\\n\\n{}\\n\\n'.format(recipe_reprs[0]))\n start = datetime.now()\n print_tokens = 0\n\n # Reshuffle the sampler\n sampler.renew_indices()\n\n if total_name_tokens > 0:\n print('\\nName Perplexity: {}'.format(\n np.exp(total_name_loss / float(total_name_tokens))\n ))\n\n # Store perplexity\n ppx = np.exp(total_loss / float(total_tokens))\n with open(os.path.join(base_save_dir, 'ppx.pkl'), 'wb') as wf:\n pickle.dump(ppx, wf)\n print('PERPLEXITY: {:.5f}'.format(\n ppx\n ))\n\n if not ppx_only:\n # Store recipe information -- generated string, # tokens (length), tech, # tech\n gen_df = pd.DataFrame(results_dicts)[[\n 'u', 'i', 'generated', 'n_tokens', 'generated_techniques', 'n_techniques'\n ]]\n df_loc = os.path.join(base_save_dir, 'generated_df.pkl')\n gen_df.to_pickle(df_loc)\n print('Saved generation DF to {}'.format(\n df_loc\n ))\n print(gen_df.head(3))", "def run(opt):\n # logging\n trn_log, val_log = set_logger(opt)\n\n # model related stuff\n device = torch.device(\"cuda\")\n trn_set, val_set, wmp_set = get_dsets(opt)\n model = get_model(opt, device)\n optimizer = getattr(optim, opt.optim.name)(\n model.parameters(), **vars(opt.optim.args)\n )\n # batch_size\n batch_size = opt.trn_loader.batch_size\n\n rlog.info(U.config_to_string(opt))\n rlog.info(\"Model: %s\", str(model))\n rlog.info(\"Optimizer: %s \\n\", str(optimizer))\n\n # Warm-up the mode on a partition of the training dataset\n if wmp_set is not None:\n rlog.info(\"Warming-up on dset of size %d\", len(wmp_set))\n for epoch in range(opt.warmup.epochs):\n # train for one epoch\n trn_loss, trn_acc = train(\n DataLoader(wmp_set, **vars(opt.trn_loader)),\n model,\n optimizer,\n get_criterion(opt, model, len(wmp_set) // batch_size),\n mc_samples=opt.trn_mcs,\n )\n\n val_stats = valid_stats(opt, model, val_set)\n trn_stats = train_stats(opt, model, wmp_set)\n trn_stats[\"loss\"], trn_stats[\"acc\"] = trn_loss, trn_acc\n\n # to pickle and tensorboard\n val_log.trace(step=epoch, **val_stats)\n trn_log.trace(step=epoch, **trn_stats)\n\n # to console\n for log, stats in zip([trn_log, val_log], [trn_stats, val_stats]):\n log.info(log.fmt.format(epoch, stats[\"acc\"], stats[\"loss\"]))\n\n # extra logging\n model_stats(opt, epoch, model)\n\n # maybe reset optimizer after warmup\n if opt.warmup.reset_optim:\n rlog.info(\"\\nWarmup ended. 
Resetting optimizer.\")\n optimizer = getattr(optim, opt.optim.name)(\n model.parameters(), **vars(opt.optim.args)\n )\n\n # Train on the full training dataset\n if wmp_set is not None:\n epochs = range(opt.warmup.epochs, opt.warmup.epochs + opt.epochs)\n else:\n epochs = range(opt.epochs)\n\n rlog.info(\"\\nTraining on dset: %s\", str(trn_set))\n for epoch in epochs:\n trn_loss, trn_acc = train(\n DataLoader(trn_set, **vars(opt.trn_loader)),\n model,\n optimizer,\n get_criterion(opt, model, len(trn_set) // batch_size),\n mc_samples=opt.trn_mcs,\n )\n\n val_stats = valid_stats(opt, model, val_set)\n trn_stats = train_stats(opt, model, trn_set)\n trn_stats[\"loss\"], trn_stats[\"acc\"] = trn_loss, trn_acc\n\n # to pickle and tensorboard\n val_log.trace(step=epoch, **val_stats)\n trn_log.trace(step=epoch, **trn_stats)\n\n # to console\n for log, stats in zip([trn_log, val_log], [trn_stats, val_stats]):\n log.info(log.fmt.format(epoch, stats[\"acc\"], stats[\"loss\"]))\n\n # extra logging\n model_stats(opt, epoch, model)", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):\n with open(filename, \"w\") as file:\n self._write_model(file, specs, experiment = experiment)", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def dist_setting(current_gpu, model, args):\n print(\"channels_last : {}\".format(args.channels_last))\n if args.channels_last:\n args.memory_format = torch.channels_last\n else:\n args.memory_format = torch.contiguous_format\n\n if args.apex:\n args.lr = args.lr*float(args.batch_size*args.world_size)/256.\n args.current_gpu = current_gpu\n if args.current_gpu is not None:\n print(\"Use GPU: {} for training\".format(args.current_gpu))\n\n if args.multigpus_distributed:\n args.rank = args.num_gpus * args.host_num + args.current_gpu\n dist.init_process_group(backend=args.backend,\n rank=args.rank, 
world_size=args.world_size)\n logger.info('Initialized the distributed environment: \\'{}\\' backend on {} nodes. '.format(\n args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(\n dist.get_rank(), args.num_gpus))\n else:\n args.rank = 0\n\n if args.sync_bn:\n import apex\n print(\"using apex synced BN\")\n model = apex.parallel.convert_syncbn_model(model)\n\n if args.multigpus_distributed:\n if args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n args.batch_size = int(args.batch_size / args.num_gpus)\n logger.info(\"Batch size for each GPU: {}\".format(args.batch_size))\n if not args.apex:\n model.cuda(args.current_gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.current_gpu])\n else:\n if not args.apex:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n if not args.apex:\n model = model.cuda(args.current_gpu)\n else:\n if not args.apex:\n model = torch.nn.DataParallel(model).cuda()\n\n return model, args", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n 
data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def train_model(train_generator, validation_generator):\n # we build a test generator to benchmark the model on unseen data\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n test_generator = test_datagen.flow_from_directory(\n test_path,\n target_size=(200, 200),\n color_mode=\"rgb\",\n shuffle=True,\n class_mode='sparse',\n batch_size=batch_size)\n model = build_model()\n filepath = join(save_path, weights_path)\n checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, mode='max')\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=epochs // 5, verbose=1, restore_best_weights=True)\n log_dir = join(home, save_path, 'logs', 'fit_smart', datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)\n callbacks_list = [early_stopping, checkpoint, tensorboard_callback]\n # origin [sessions] models each [epochs] times\n max_acc = 0.0\n for i in range(sessions):\n # model training and evaluation\n history = model.fit(\n train_generator,\n steps_per_epoch=train_generator.samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples // batch_size\n , verbose=2, callbacks=callbacks_list, workers=multiprocessing.cpu_count(),\n use_multiprocessing=False)\n model.load_weights(join(save_path, weights_path))\n test_loss, test_acc = model.evaluate(test_generator, steps=len(test_generator))\n # save model if it performed better\n if test_acc > max_acc:\n max_acc = test_acc\n model.save(join(home, save_path, model_name))\n print(\"accuracy: \", test_acc, \"\\n Loss:\", test_loss)", "def trainer(current_gpu, args):\n\n model_history = train_utils.init_model_history()\n batch_size = args.batch_size\n num_epochs = args.num_epochs\n feature_extract = False\n\n model = train_utils.initialize_ft_model(args.model_name, num_classes=args.num_classes, feature_extract=feature_extract)\n model, args = dist_setting(current_gpu, model, args)\n logger.info(f\"==> Training on rank {args.rank}.\")\n logger.info(args)\n \n dataloaders, transforms, train_sampler = train_utils.create_dataloaders(\n args.train_dir, args.valid_dir, rank=args.rank, \n world_size=args.world_size, batch_size=batch_size,\n num_workers=args.num_workers\n )\n \n optimizer = train_utils.initialize_optimizer(model, feature_extract, lr=args.lr*args.world_size, momentum=0.9) \n criterion = nn.CrossEntropyLoss()\n\n since = time.time()\n val_acc_history = []\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc1 = 0.0\n num_samples = {k: len(dataloaders[k].dataset) for k, v in dataloaders.items()}\n num_steps = {k: int(np.ceil(len(dataloaders[k].dataset) / (batch_size*args.world_size))) for k, v in dataloaders.items()}\n device = torch.device(f'cuda:{current_gpu}') \n\n for epoch in range(1, num_epochs+1):\n \n batch_time = train_utils.AverageMeter('Time', ':6.3f')\n data_time = train_utils.AverageMeter('Data', ':6.3f')\n losses = train_utils.AverageMeter('Loss', ':.4e')\n top1 = train_utils.AverageMeter('Acc@1', ':6.2f')\n \n logger.info('-' * 40)\n logger.info('[Rank {}, Epoch {}/{}] Processing...'.format(args.rank, epoch, num_epochs))\n logger.info('-' * 40)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'valid']: \n\n if phase == 'train':\n model.train() # Set model to training mode\n if args.multigpus_distributed:\n dataloaders[phase].sampler.set_epoch(epoch) # Set epoch count for DistributedSampler \n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n epoch_tic = time.time() \n tic = time.time()\n \n for i, (inputs, labels) in 
enumerate(dataloaders[phase]):\n # measure data loading time\n data_time.update(time.time() - tic) \n \n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase=='train'):\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n probs, preds = torch.max(outputs, 1)\n \n # Compute gradient and do stochastic gradient descent\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n acc1 = train_utils.accuracy(outputs, labels, topk=(1,))\n \n # Average loss and accuracy across processes for logging\n if args.multigpus_distributed:\n reduced_loss = train_utils.reduce_tensor(loss.data, args)\n reduced_acc1 = train_utils.reduce_tensor(acc1[0], args)\n else:\n reduced_loss = loss.data\n reduced_acc1 = acc1[0]\n\n losses.update(train_utils.to_python_float(reduced_loss), inputs.size(0))\n top1.update(train_utils.to_python_float(reduced_acc1), inputs.size(0))\n \n # measure elapsed time\n batch_time.update(time.time() - tic)\n tic = time.time()\n\n if phase == 'train' and i % args.log_interval == 0:\n step_loss = running_loss / ((i+1)*batch_size)\n step_acc = running_corrects.double() / ((i+1)*batch_size)\n logger.info(f'[Rank {args.rank}, Epoch {epoch}/{num_epochs}, Step {i+1}/{num_steps[phase]}] {phase}-acc: {step_acc:.4f}, '\n f'{phase}-loss: {step_loss:.4f}, data-time: {data_time.val:.4f}, batch-time: {batch_time.val:.4f}') \n \n\n ## Waiting until finishing operations on GPU (Pytorch default: async)\n torch.cuda.synchronize()\n \n if current_gpu == 0: \n logger.info(f'[Epoch {epoch}/{num_epochs}] {phase}-acc: {top1.avg:.4f}, '\n f'{phase}-loss: {losses.val:.4f}, time: {time.time()-epoch_tic:.4f}') \n \n model_history['epoch'].append(epoch)\n model_history['batch_idx'].append(i)\n model_history['data_time'].append(data_time.val) \n model_history['batch_time'].append(batch_time.val)\n model_history['losses'].append(losses.val)\n model_history['top1'].append(top1.val)\n\n if phase == 'valid':\n is_best = top1.avg > best_acc1\n best_acc1 = max(top1.avg, best_acc1)\n \n if (args.multigpus_distributed and args.rank % args.num_gpus == 0):\n #train_utils.save_history(os.path.join(args.output_data_dir, 'model_history.p'), model_history) \n train_utils.save_model({\n 'epoch': epoch + 1,\n 'model_name': args.model_name,\n 'state_dict': model.module.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_acc1': best_acc1,\n 'loss': losses\n }, is_best, args.model_chkpt_dir, args.model_dir)\n elif not args.multigpus_distributed:\n #train_utils.save_history(os.path.join(args.output_data_dir, 'model_history.p'), model_history) \n train_utils.save_model({\n 'epoch': epoch + 1,\n 'model_name': args.model_name,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_acc1': best_acc1,\n 'loss': losses\n }, is_best, args.model_chkpt_dir, args.model_dir) \n \n \n time_elapsed = time.time() - since\n if current_gpu == 0:\n logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n logger.info('Best val acc: {:.4f}'.format(best_acc1))\n \n if args.multigpus_distributed:\n dist_cleanup()", "def train_model1(model, criterion, optimizer, scheduler, device, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n model.to(device)\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n 
print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs).squeeze(-1)\n # if outputs.shape != labels.shape:\n # print(outputs.shape)\n # print(outputs.shape)\n loss = criterion(outputs, labels)\n # preds = outputs\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n # print(outputs.device, labels.to('cpu').device)\n running_corrects += acc_calculate(outputs, labels)\n # print(labels.data)\n if phase == 'train':\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model,best_acc", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def main(cfg):\n # logging.info(f'Found base dir {cfg.BASE_DIR}')\n\n rng = cfg.experiment.random_seed\n if rng == -1:\n rng = np.random.randint(0, 1000)\n\n if rng is not False:\n np.random.seed(rng)\n torch.torch.manual_seed(rng)\n\n dcc = cfg.dataset.get('creation_args', dict())\n if dcc.get('dim_normalise_mean', False):\n dim = dcc.dim\n dcc.f_mean = float(dcc.f_mean / np.sqrt(dim))\n dcc.p_mean = float(dcc.p_mean / np.sqrt(dim))\n logging.info(\n f'Updating means in dataset cfg: {cfg.dataset.creation_args}')\n\n stats = dict(\n dir=os.getcwd(),\n host=socket.gethostname(),\n job_id=os.getenv(\"SLURM_JOB_ID\", None),\n random_state=rng)\n STATS_STATUS = False\n\n logging.info(\n f'Logging to {stats[\"dir\"]} on {stats[\"host\"]} '\n f'for id={cfg.get(\"id\", -1)}')\n\n logging.info(f'Slurm job: {stats[\"job_id\"]}.')\n logging.info(f'Setting random seed to {rng}.')\n logging.info(f'Uniform clip val is {cfg.acquisition.uniform_clip}.')\n\n hoover = Hoover(cfg.hoover)\n\n model = None\n\n # Right now this averages over both train and testing!\n for run in range(cfg.experiment.n_runs):\n if run % cfg.experiment.log_every == 0 or cfg.experiment.debug:\n logging.info(f'Run {run} in {os.getcwd()} ****NEW RUN****')\n if cuda := torch.cuda.is_available():\n logging.info(f'Still using cuda: {cuda}.')\n else:\n logging.info('No cuda found!')\n os.system('touch cuda_failure.txt')\n\n dataset = maps.dataset[cfg.dataset.name](\n cfg.dataset, model_cfg=cfg.model)\n\n # Train model on training data.\n if (not cfg.model.get('keep_constant', False)) or (model is 
None):\n # default case\n model = maps.model[cfg.model.name](cfg.model)\n\n # test_data = model.make_loader(dataset.test_data, train=False)\n # loss = model.evaluate(model.model, test_data)\n # logging.info(f'Model test loss is {loss}.')\n\n # train_data = model.make_loader(dataset.train_data, train=False)\n # loss = model.evaluate(model.model, train_data)\n # logging.info(f'Model train loss is {loss}.')\n\n model.fit(*dataset.train_data)\n\n loss = model.performance(\n *dataset.test_data, dataset.cfg['task_type'])\n # logging.info(\n # f'Weights vs 1 : {np.sqrt(np.sum((model.model.coef_ - 1)**2))}')\n\n if cfg.experiment.get('constant_val_set', False):\n add_val_idxs_to_cfg(cfg, model.val_idxs)\n\n if not STATS_STATUS:\n STATS_STATUS = True\n stats['loss'] = loss\n to_json(stats, 'stats.json')\n # test_data = model.make_loader(dataset.test_data, train=False)\n # loss = model.evaluate(model.model, test_data)\n # logging.info(f'Model test loss is {loss}.')\n\n # Always predict on test data again\n # TODO: need to fix this for efficient prediction\n if cfg.model.get('efficient', False):\n logging.debug('Eficient prediction on test set.')\n model = make_efficient(model, dataset)\n\n # if cfg.experiment.debug:\n # Report train error\n # logging.info('Model train error:')\n # model.performance(\n # *dataset.train_data, dataset.cfg.task_type)\n\n # if not check_valid(model, dataset):\n # continue\n\n if run < cfg.experiment.save_data_until:\n hoover.add_data(run, dataset.export())\n\n for acq_dict in cfg.acquisition_functions:\n # Slightly unclean, but could not figure out how to make\n # this work with Hydra otherwise\n acquisition = list(acq_dict.keys())[0]\n acq_cfg_name = list(acq_dict.values())[0]\n\n if cfg.experiment.debug:\n logging.info(f'\\t Acquisition: {acquisition}')\n\n # Reset selected test_indices.\n dataset.restart(acquisition)\n\n if (n := acq_cfg_name) is not None:\n acq_config = cfg['acquisition_configs'][n]\n else:\n acq_config = None\n\n experiment = Experiment(\n run, cfg, dataset, model, acquisition, acq_config)\n\n i = 0\n while not experiment.finished:\n i += 1\n # print('debug', i)\n if cfg.experiment.debug:\n logging.info(\n f'\\t Acquisition: {acquisition} – \\t Step {i}.')\n\n experiment.step(i)\n\n # Add config to name for logging.\n if (n := acq_cfg_name) is not None:\n acquisition = f'{acquisition}_{n}'\n\n # Extract results from acquisition experiment\n hoover.add_results(run, acquisition, experiment.export_data())\n\n if run % cfg.experiment.get('save_every', 1e19) == 0:\n logging.info('Intermediate save.')\n hoover.save()\n\n logging.info('Completed all runs.')\n hoover.save()", "def train():\n\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Get images and labels for blood_model.\n blood_datasets = blood_model.inputs(eval_data=False)\n\n # randomize the inputs look\n x, y_, data, keep_prob = blood_model.prepare_input()\n\n # build the convolution network\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n # Calculate loss.\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n\n train_op = blood_model.train(loss, global_step)\n\n sess = tf.InteractiveSession()\n\n sess.run(tf.initialize_all_variables())\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n saver = tf.train.Saver()\n\n check_filesystem()\n\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)\n validation_writer = 
tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)\n\n _ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print(\"step %d, training accuracy %g, loss %g\" % (step, train_accuracy, loss_output))\n\n if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n validation_writer.add_summary(summary_validation, step)\n print(\"validation accuracy %g\" % accuracy_validation)\n\n # save checkpoint\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print(\"saving checkpoint\")", "def set_model(self, model):\r\n self.model = model.model\r\n with context.eager_mode():\r\n self._close_writers()\r\n if self.write_graph:\r\n with self._get_writer(self._train_run_name).as_default():\r\n with summary_ops_v2.always_record_summaries():\r\n if not self.model.run_eagerly:\r\n summary_ops_v2.graph(K.get_graph(), step=0)\r\n\r\n summary_writable = (\r\n self.model._is_graph_network or # pylint: disable=protected-access\r\n self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access\r\n if summary_writable:\r\n summary_ops_v2.keras_model('keras', self.model, step=0)\r\n\r\n if self.embeddings_freq:\r\n self._configure_embeddings()", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 
minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def model_train(fold_name, model_dir, model_dict, dataset_path, development_subj, mu, sigma):\n valid_frames_before=200\n valid_frames_after=0\n valid_batch_size=8\n generators = TXT_Train_Validation_Generators(dataset_path=dataset_path, subject_list=development_subj, train_size=model_dict[\"train_set_ratio\"], frames_before=model_dict[\n \"frames\"]-model_dict[\"frame_shift\"], frames_after=model_dict[\"frame_shift\"], view_IDs=model_dict[\"view_IDs\"], batch_size=model_dict[\"batch_size\"], mu=mu, sigma=sigma, label_name=model_dict[\"label_name\"], shuffle=True,\n valid_frames_before=valid_frames_before, valid_frames_after=valid_frames_after, valid_batch_size=valid_batch_size)\n train_gen, valid_gen = generators.get_train(), generators.get_valid()\n losses = Losses_Keras(\n frames=model_dict['frames'], frame_shift=model_dict['frame_shift'])\n loss_fnc = losses.get_by_name(model_dict[\"loss_function\"])\n ap_metrics = [AUC_AP(), Accuracy_AP(), Precision_AP(),\n Recall_AP(), PrecisionAtRecall_AP(0.95)]\n fp_hdf5 = os.path.join(model_dir, fold_name+\".hdf5\")\n fp_hdf5 = os.path.join(model_dir, fold_name+\".hdf5\")\n mcp = ModelCheckpoint(fp_hdf5, monitor='val_loss', verbose=True,\n save_best_only=True, save_weights_only=True)\n tbl = tensorflow.keras.callbacks.TensorBoard(os.path.join(model_dir, 'logs{}'.format(fold_name)))\n metrics = ap_metrics\n callbacks = [mcp, tbl]\n optimizer = tensorflow.keras.optimizers.Adam(learning_rate=model_dict['learning_rate'])\n epochs = model_dict[\"epochs\"]\n #### 1\n compile_kwargs = {\"loss\": loss_fnc,\n \"optimizer\": optimizer, \"metrics\": metrics}\n fit_kwargs = {\"x\": train_gen, \"epochs\": epochs,\n \"validation_data\": valid_gen, \"callbacks\": callbacks}\n Setup = SETUP_DIC[model_dict[\"architecture\"]]\n setup = Setup(name=model_dict[\"name\"], compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs,\n 
TPA_view_IDs=model_dict['view_IDs'])\n # setup.delete_existing_model_data_and_output()\n print(setup.model.summary())\n\n setup.train()\n setup.write_architecture()\n # setup.plot_metrics(plot_val_metrics=valid_gen)\n #### /1\n #### 2\n # Get optimal threshold.\n print(\"Getting optimal threshold...\")\n # RELOAD\n data_models_model_path = setup.data_models_model_path\n setup = Model_Evaluation(data_models_model_path, fold_name=fold_name,\n stateful=False, weights_ext=\"hdf5\", load_scaling=False)\n\n # https://support.sas.com/en/books/reference-books/analyzing-receiver-operating-characteristic-curves-with-sas/review.html\n # Gonen, Mithat. 2007. Analyzing Receiver Operating Characteristic Curves with SAS. Cary, NC: SAS Institute Inc.\n preds_list, trues_list = [], []\n # generators = [train_gen, valid_gen] if valid_gen else [train_gen]\n generators = [valid_gen] if valid_gen else [train_gen]\n for generator in generators:\n for i in range(len(generator)):\n x, y = generator[i]\n preds_list.append(setup.model.predict(x))\n trues_list.append(y)\n preds = np.vstack(preds_list)\n trues = np.vstack(trues_list)\n labels_dict, predictions_dict = {}, {}\n for idx, l in enumerate(zip(preds, trues)):\n pred, true = l\n predictions_dict[idx] = pred[:, 1]\n sample_class = true[-1][-1]\n labels_dict[idx] = model_dict[\"frames\"] - \\\n model_dict[\"frame_shift\"] if sample_class else -1\n if valid_gen:\n labels_dict[idx] = valid_frames_before if sample_class else -1\n prc_pre_fpr, prc_pre_tpr, prc_pre_thresholds = plots.prediction_pr_curve(\n labels_dict, predictions_dict)\n # get optimal threshold\n fpr, tpr, thresh = prc_pre_fpr[:-1], prc_pre_tpr[:-1], prc_pre_thresholds\n xy = np.stack([fpr, tpr]).T\n ideal = np.array([1, 1])\n d = ideal-xy\n D = (d*d).sum(axis=-1)\n optimal_threshold = thresh[D.argmin()]\n with open(os.path.join(data_models_model_path, project.THRESHOLD_FILE_PATTERN.format(fold_name)), \"wb\") as f:\n pickle.dump(optimal_threshold, f)\n #### /2\n print(\"Trained {}\".format(model_dict[\"name\"]))\n clear_session()\n return True", "def model(self, hyperparams, test_mode=False):\n run_doc = OrderedDict() # Document important hyperparameters\n run_start_time = time.time()\n run_id = str(uuid4())\n # TODO: Not ideal: Loads from memory every time. 
Use generator?\n train_data, train_targets, test_data, test_targets = \\\n self.data_loader(dataset=hyperparams['dataset'], size=hyperparams['dataset_size'])\n run_doc['dataset'] = hyperparams['dataset']\n run_doc['data_size'] = len(train_targets)\n # Visualization tools\n if config.INPUT_DEBUG:\n image_analysis(image=train_data[0, :, :, :], label=train_targets[0, :])\n # Input shape comes from image shape\n img_width = train_data[0].shape[0]\n img_height = train_data[0].shape[1]\n num_channels = train_data[0].shape[2]\n input_shape = (img_width, img_height, num_channels)\n run_doc['input_shape'] = '(%d, %d, %d)' % input_shape\n input_tensor = Input(shape=input_shape, dtype='float32', name='input_image')\n try: # Model creation is in separate file\n x, run_doc = custom_model(input_tensor, params=hyperparams, run_doc=run_doc)\n except ValueError as e:\n if not test_mode: # If not testing, ignore error causing models\n return {'loss': 100, 'status': STATUS_OK}\n else:\n raise e\n # Final layer classifies into 4 possible actions\n output = layers.Dense(4, activation='softmax')(x)\n # File names for the model and logs\n log_file = os.path.join(self._logs_dir, run_id)\n model_file = os.path.join(self._models_dir, run_id + '.h5')\n # Add some callbacks so we can track progress using Tensorboard\n callbacks = [keras.callbacks.EarlyStopping('val_loss', patience=config.TRAIN_PATIENCE, mode=\"min\")]\n if not test_mode: # Don't save models/logs if in testing mode\n callbacks += [keras.callbacks.TensorBoard(log_dir=log_file),\n keras.callbacks.ModelCheckpoint(model_file, save_best_only=True)]\n # Choice of optimizer and optimization parameters\n if hyperparams['optimizer'] == 'sgd':\n optimizer = optimizers.SGD(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'rmsprop':\n optimizer = optimizers.RMSprop(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'nadam':\n optimizer = optimizers.Nadam(lr=hyperparams[\"learning_rate\"],\n schedule_decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'adam':\n optimizer = optimizers.Adam(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n # Save optimizer parameters to run doc\n run_doc['optimizer'] = hyperparams['optimizer']\n run_doc['opt_learning_rate'] = hyperparams[\"learning_rate\"]\n run_doc['opt_decay'] = hyperparams[\"decay\"]\n run_doc['opt_clipnorm'] = hyperparams[\"clipnorm\"]\n # Create and compile the model\n model = Model(input_tensor, output)\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n # Print out model summary and store inside run documentation as list of strings\n model.summary()\n run_doc['model_summary'] = []\n model.summary(print_fn=(lambda a: run_doc['model_summary'].append(a)))\n # Fit the model to the datasets\n self.log.info(\"Fitting model (eval %d of %d) ...\" % (self._eval_idx + 1, self._max_eval))\n self._eval_idx += 1\n model.fit(x=train_data, y=train_targets,\n batch_size=hyperparams['batch_size'],\n epochs=hyperparams['epochs'],\n validation_data=(test_data, test_targets),\n callbacks=callbacks,\n verbose=1)\n val_loss, val_acc = model.evaluate(x=test_data, y=test_targets, verbose=2)\n self.log.info(\" .... 
Completed!\")\n self.log.info(\" -- Evaluation time %ds\" % (time.time() - run_start_time))\n self.log.info(\" -- Total time %ds\" % (time.time() - self._start_time))\n # Save training parameters to run doc\n run_doc['batch_size'] = hyperparams['batch_size']\n run_doc['epochs'] = hyperparams['epochs']\n run_doc['val_loss'] = val_loss\n run_doc['val_acc'] = val_acc\n # Results are used to pick best pirate\n self._results[run_id] = val_loss\n # Save run_doc to pickle file in model directory\n run_doc_file_name = run_id + '.pickle'\n if not test_mode: # Don't save docs if in testing mode\n with open(os.path.join(self._models_dir, run_doc_file_name), 'wb') as f:\n pickle.dump(run_doc, f)\n self.log.info('Run Dictionary %s' % str(run_doc))\n # Delete the session to prevent GPU memory from getting full\n keras.backend.clear_session()\n # Optimizer minimizes validation loss\n return {'loss': val_loss, 'status': STATUS_OK}", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, 
W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += [mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n 
ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def combineModelsAndExport(builderSpec, nmsSpec, fileName, quantize=False):\n try:\n print(f\"Combine CoreMl model with nms and export model\")\n # Combine models to a single one\n pipeline = ct.models.pipeline.Pipeline(\n input_features=[\n (\"image\", ct.models.datatypes.Array(3, 460, 460)),\n (\"iouThreshold\", ct.models.datatypes.Double()),\n (\"confidenceThreshold\", ct.models.datatypes.Double()),\n ],\n output_features=[\"confidence\", \"coordinates\"],\n )\n\n # Required version (>= ios13) in order for mns to work\n pipeline.spec.specificationVersion = 4\n\n pipeline.add_model(builderSpec)\n pipeline.add_model(nmsSpec)\n\n pipeline.spec.description.input[0].ParseFromString(\n builderSpec.description.input[0].SerializeToString()\n )\n pipeline.spec.description.output[0].ParseFromString(\n nmsSpec.description.output[0].SerializeToString()\n )\n pipeline.spec.description.output[1].ParseFromString(\n nmsSpec.description.output[1].SerializeToString()\n )\n\n # Metadata for the model‚\n pipeline.spec.description.input[\n 1\n ].shortDescription = \"(optional) IOU Threshold override (Default: 0.6)\"\n pipeline.spec.description.input[\n 2\n ].shortDescription = \"(optional) Confidence Threshold override (Default: 0.4)\"\n pipeline.spec.description.output[\n 0\n ].shortDescription = \"Boxes \\xd7 Class confidence\"\n pipeline.spec.description.output[\n 1\n ].shortDescription = \"Boxes \\xd7 [x, y, width, height] (relative to image size)\"\n pipeline.spec.description.metadata.versionString = \"1.0\"\n pipeline.spec.description.metadata.shortDescription = \"yolov5\"\n pipeline.spec.description.metadata.author = \"Leon De Andrade\"\n pipeline.spec.description.metadata.license = 
\"\"\n\n model = ct.models.MLModel(pipeline.spec)\n model.save(fileName)\n\n if quantize:\n fileName16 = fileName.replace(\".mlmodel\", \"_16.mlmodel\")\n modelFp16 = ct.models.neural_network.quantization_utils.quantize_weights(\n model, nbits=16\n )\n modelFp16.save(fileName16)\n\n fileName8 = fileName.replace(\".mlmodel\", \"_8.mlmodel\")\n modelFp8 = ct.models.neural_network.quantization_utils.quantize_weights(\n model, nbits=8\n )\n modelFp8.save(fileName8)\n\n print(f\"CoreML export success, saved as {fileName}\")\n except Exception as e:\n print(f\"CoreML export failure: {e}\")", "def meta_train(tasks, model, args, device, method='random', meta_iters=10000, num_updates=5, meta_batch_size=5):\n # Define logging\n os.makedirs(args.save_path, exist_ok=True)\n writer = SummaryWriter(\n os.path.join(args.save_path, 'runs', '{}'.format(datetime.now()).replace(\":\", \"_\")))\n\n header = ' Time Task Iteration Loss Accuracy'\n log_template = '{:>10} {:>25} {:10.0f} {:10.6f} {:10.6f}'\n test_template = 'Test mean: {}, Test std: {}'\n\n print(header)\n start = time.time()\n\n # Define optimizers, lr schedulers and loss function\n optimizer_bert = AdamW(params=model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n optimizer = optim.Adam(params=chain(model.proto_net.encoder.mlp.parameters(),\n model.output_layer.parameters()),\n lr=args.lr)\n scheduler_bert = get_cosine_schedule_with_warmup(optimizer_bert, 200, meta_iters)\n scheduler = get_cosine_schedule_with_warmup(optimizer, 0, meta_iters)\n # ProtoNets always have CrossEntropy loss due to softmax output\n cross_entropy = nn.CrossEntropyLoss()\n\n print('Loading Tokenizer..')\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n special_tokens_dict = {'additional_special_tokens': [\"[MNT]\", \"[URL]\"]}\n\n num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n print('We have added', num_added_toks, 'tokens')\n model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. 
the length of the tokenizer.\n\n # setup task sampler and task model\n sampler = TaskSampler(tasks, method=method, custom_task_ratio=args.custom_task_ratio, supp_query_split=True)\n task_model = type(model)(args)\n task_model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n\n iterations = 0\n # Iterate over the data\n train_iter = sampler.get_iter('train', tokenizer, batch_size=args.batch_size, shuffle=True)\n model.train()\n\n # setup validation task and episodes for evaluation\n val_task = get_validation_task(args)\n episodes = torch.load(args.episodes)\n\n # dummy data to overwrite old values of task model output layer\n dummy_w = torch.randn((args.mlp_dims[-1], 2))\n dummy_b = torch.randn(2)\n\n average_query_loss = 0\n best_query_loss = 1e+9\n best_test_mean = -1\n best_test_last = -1\n convergence_tolerance_cnt = 0\n # outer loop (meta-iterations)\n for i in range(meta_iters):\n grads = []\n task_losses_inner = {}\n task_accuracies_inner = {}\n task_losses_outer = {}\n task_accuracies_outer = {}\n # inner loop (sample different tasks)\n for task_sample in range(meta_batch_size):\n # clone original model\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n task_model.to(device)\n task_model.train()\n\n # new optimizer for every new task model\n task_optimizer_bert = optim.SGD(params=task_model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n task_optimizer = optim.SGD(params=chain(task_model.proto_net.encoder.mlp.parameters(),\n task_model.output_layer.parameters()),\n lr=args.inner_lr)\n\n # prepare support and query set\n batch = next(train_iter)\n support = batch[:3]\n query = batch[3:]\n\n # setup output layer (via meta-model's prototype network)\n proto_embeddings = model.proto_net(support[0].to(device), attention_mask=support[2].to(device))\n prototypes = model.proto_net.calculate_centroids((proto_embeddings, support[1]), sampler.get_num_classes())\n W, b = task_model.calculate_output_params(prototypes.detach())\n task_model.initialize_classifier(W, b)\n\n # train some iterations on support set\n for update in range(num_updates):\n task_optimizer_bert.zero_grad()\n task_optimizer.zero_grad()\n predictions = task_model(support[0].to(device), attention_mask=support[2].to(device))\n task_loss = cross_entropy(predictions, support[1].long().squeeze().to(device))\n task_loss.backward()\n task_optimizer.step()\n task_optimizer_bert.step()\n\n # record task losses and accuracies for logging\n task_losses_inner[sampler.get_name()] = task_loss.item()\n task_accuracies_inner[sampler.get_name()] = sampler.calculate_accuracy(predictions, support[1].to(device))\n\n # trick to add prototypes back to computation graph\n W = 2 * prototypes + (W - 2 * prototypes).detach()\n b = -prototypes.norm(dim=1)**2 + (b + prototypes.norm(dim=1)**2).detach()\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # calculate gradients for meta update on the query set\n predictions = task_model(query[0].to(device), attention_mask=query[2].to(device))\n query_loss = cross_entropy(predictions, query[1].long().squeeze().to(device))\n query_loss.backward()\n\n # record task losses and accuracies for logging\n task_losses_outer[sampler.get_name()] = query_loss.item()\n task_accuracies_outer[sampler.get_name()] = sampler.calculate_accuracy(predictions, query[1].to(device))\n average_query_loss += query_loss.item()\n\n # register W and b parameters again to avoid error 
in weight update\n W = nn.Parameter(W)\n b = nn.Parameter(b)\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # save gradients of first task model\n if task_sample == 0:\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads.append(param.grad.clone())\n # add the gradients of all task samples\n else:\n p = 0\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads[p] += param.grad.clone()\n p += 1\n\n # perform meta update\n # first load/add the calculated gradients in the meta-model\n # (already contains gradients from prototype calculation)\n p = 0\n for param in model.parameters():\n if param.requires_grad and param.grad is not None:\n param.grad += grads[p]\n p += 1\n # update model parameters according to the gradients from inner loop (clear gradients afterwards)\n optimizer.step()\n optimizer_bert.step()\n scheduler.step()\n scheduler_bert.step()\n optimizer.zero_grad()\n optimizer_bert.zero_grad()\n\n iterations += 1\n if iterations % args.log_every == 0:\n average_query_loss /= (args.log_every*meta_batch_size)\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n iter_acc = sum(task_accuracies_outer.values()) / len(task_accuracies_outer.values())\n writer.add_scalar('Meta_Average/Loss/outer'.format(sampler.get_name()), iter_loss, iterations)\n writer.add_scalar('Meta_Average/Accuracy/outer'.format(sampler.get_name()), iter_acc, iterations)\n for t in tasks:\n task_name = t.get_name()\n if task_name in task_losses_inner.keys():\n writer.add_scalar('{}/Loss/inner'.format(task_name), task_losses_inner[task_name], iterations)\n writer.add_scalar('{}/Accuracy/inner'.format(task_name), task_accuracies_inner[task_name], iterations)\n writer.add_scalar('{}/Loss/outer'.format(task_name), task_losses_outer[task_name], iterations)\n writer.add_scalar('{}/Accuracy/outer'.format(task_name), task_accuracies_outer[task_name], iterations)\n print(log_template.format(\n str(timedelta(seconds=int(time.time() - start))),\n sampler.get_name(),\n iterations,\n iter_loss,\n iter_acc))\n\n # save best snapshot\n if average_query_loss < best_query_loss:\n best_query_loss = average_query_loss\n average_query_loss = 0\n snapshot_prefix = os.path.join(args.save_path, 'best_query')\n snapshot_path = (\n snapshot_prefix +\n '_loss_{:.5f}_iter_{}_model.pt'\n ).format(best_query_loss, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate in k shot fashion\n if iterations % args.eval_every == 0:\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n test_mean, test_std = k_shot_testing(task_model, episodes, val_task, device, num_updates=args.inner_updates,\n num_test_batches=args.num_test_batches)\n writer.add_scalar('{}/Acc'.format(val_task.get_name()), test_mean, iterations)\n writer.add_scalar('{}/STD'.format(val_task.get_name()), test_std, iterations)\n print(test_template.format(test_mean, test_std), flush=True)\n if test_mean > best_test_mean:\n best_test_mean = test_mean\n snapshot_prefix = os.path.join(args.save_path, 'best_test_{}'.format(val_task.get_name()))\n snapshot_path = (\n snapshot_prefix +\n '_acc_{:.5f}_iter_{}_model.pt'\n ).format(best_test_mean, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n 
for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n \n if test_mean > best_test_last:\n best_test_last = best_test_mean\n convergence_tolerance_cnt = 0\n else:\n convergence_tolerance_cnt += 1\n\n if convergence_tolerance_cnt == args.convergence_tolerance:\n break\n\n\n # saving redundant parameters\n # Save model checkpoints.\n if iterations % args.save_every == 0:\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = (\n snapshot_prefix +\n '_iter_{}_loss_{}_model.pt'\n ).format(iterations, iter_loss)\n logging.debug('Saving model...')\n model.save_model(snapshot_path)\n # Keep only the last snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n writer.close()", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def _setup_summaries(self, sess):\n # Output directory for models and summaries\n\n\n print(\"Writing to {}\\n\".format(os.path.abspath(self._log_dir)))\n\n train_summary_dir = os.path.join(self._log_dir, \"summaries\", \"train\")\n self._train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n val_summary_dir = os.path.join(self._log_dir, \"summaries\", \"validation\")\n self._val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)\n\n # Model checkpoints\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n self.checkpoint_dir = os.path.abspath(os.path.join(self._save_dir, \"checkpoints/\"))\n\n if not os.path.exists(self.checkpoint_dir):\n os.makedirs(self.checkpoint_dir)\n\n self._saver = tf.train.Saver(max_to_keep=10) # Save model after each epoch\n\n self.train_summary_op = tf.summary.merge(self._train_summaries)\n self.val_summary_op = tf.summary.merge(self._val_summaries)\n\n print(\"--------------------------------------------------\")\n print(\"\\ntensorboard --logdir {}\".format(os.path.abspath(self._log_dir)))\n print(\"\\ntensorboard --logdir {} --port 6007\".format(os.path.abspath(self.checkpoint_dir)))\n print(\"--------------------------------------------------\")", "def run_model(self):\n hparams = self.hparams\n\n # Build the child graph\n with tf.Graph().as_default(), tf.device(\n '/cpu:0' if FLAGS.use_cpu else '/gpu:0'):\n m, meval = self._build_models()\n\n # Figure out what epoch we are on\n starting_epoch = self._calc_starting_epoch(m)\n\n # Run the validation error right at the beginning\n valid_accuracy = self.eval_child_model(\n meval, self.data_loader, 'val')\n tf.logging.info('Before Training Epoch: {} Val Acc: {}'.format(\n starting_epoch, valid_accuracy))\n training_accuracy = None\n\n for curr_epoch in xrange(starting_epoch, hparams.num_epochs):\n\n # Run one training epoch\n training_accuracy = self._run_training_loop(m, curr_epoch)\n\n valid_accuracy = self.eval_child_model(\n meval, self.data_loader, 'val')\n tf.logging.info('Epoch: {} Valid Acc: {}'.format(\n curr_epoch, valid_accuracy))\n\n valid_accuracy, test_accuracy = self._compute_final_accuracies(\n meval)\n\n tf.logging.info(\n 'Train Acc: {} Valid Acc: {} Test Acc: {}'.format(\n training_accuracy, valid_accuracy, test_accuracy))", "def _main():\n\n # setup paths\n json_model_path = osp.join(FLAGS.input_dir, FLAGS.json_model_fname)\n weights_path = osp.join(FLAGS.input_dir, FLAGS.weights_fname)\n save_path = osp.splitext(json_model_path)[0][:-6] + \"graph_w\" + str(weights_path.split(\"_\")[-1][:-3]) + \".pb\"\n print(\"Loading Model: \" + json_model_path)\n print(\"Loading Weights: \" + weights_path)\n\n # Set keras to test phase\n k.set_learning_phase(0)\n\n # Load json and weights, then compile model\n with open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n model = model_from_json(loaded_model_json)\n model.load_weights(weights_path)\n model.compile(loss='mse', optimizer='sgd')\n\n # Freeze graph\n frozen_graph = freeze_session(k.get_session(), output_names=[out.op.name for out in model.outputs])\n\n # Write graph to protobuf file\n tf.train.write_graph(frozen_graph, \"model\", save_path, as_text=False)\n print(\"Written Graph to: \" + save_path)", "def learn(model: KW_Model,\n trainloader: DataLoader,\n testloader: DataLoader,\n optimizer: optim.Optimizer,\n nb_epoch: int,\n device: torch.device,\n eval_fn: Callable[[List[bool], List[Qid]], Dict[Qid, float]],\n mean_window: int = 50,\n entropy_lambda: float = 0.025,\n smt_lambda: float = 1.0,\n reinforce_lambda: float = 1.0,\n ) -> Tuple[nn.Module, Dict[str, List[torch.tensor]], Dict[str, List[torch.tensor]]]:\n print(\"Memory usage: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n past_rewards = {str(q_id.long().item()): deque(maxlen=mean_window)\n for _, _, q_ids, _ in chain(trainloader, testloader)\n for q_id in q_ids}\n \n logs = [\"reward\",\n \"scaled_entropy\",\n \"scaled_reinforce\",\n \"scaled_smt\",\n 
\"total_loss\",\n \"accuracy\"]\n train_logs = {log: list() for log in logs}\n test_logs = {log: list() for log in logs}\n del logs\n \n for epoch in range(nb_epoch):\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n print(f\"\\nEpoch {epoch}\")\n \n print(\"Begin epoch: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n model.train()\n for x, y, q_id, masks in trainloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n\n # entropy_lambda = min(1.01*entropy_lambda, 0.025)\n # reinforce_lambda = min(1.01*reinforce_lambda, 1.0)\n # smt_lambda = max(0.99*smt_lambda, 0.05)\n loss, reinforce_loss, entropy, smt_loss = losses\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().cpu().sum().tolist()\n nb_total += masks.cpu().sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {mean(running_loss): .3f} Rewa {mean(running_reward): .5f}\", end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n\n # Logs\n train_logs[\"reward\"].append(mean(running_reward))\n train_logs[\"scaled_entropy\"].append(mean(entropies))\n train_logs[\"scaled_reinforce\"].append(mean(reinforces))\n train_logs[\"scaled_smt\"].append(mean(smts))\n train_logs[\"total_loss\"].append(mean(running_loss))\n train_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n \n train_loss, train_reward = mean(running_loss), mean(running_reward)\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n model.eval()\n for x, y, q_id, masks in testloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n loss, reinforce_loss, entropy, smt_loss = losses\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().sum().tolist()\n nb_total += masks.sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {train_loss: .3f} Rewa {train_reward: .3f}\",\n f\"Te Loss{mean(running_loss): .3f} Rewa {mean(running_reward): .3f}\",\n end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n \n \n # Logs\n test_logs[\"reward\"].append(mean(running_reward))\n test_logs[\"scaled_entropy\"].append(mean(entropies))\n test_logs[\"scaled_reinforce\"].append(mean(reinforces))\n test_logs[\"scaled_smt\"].append(mean(smts))\n test_logs[\"total_loss\"].append(mean(running_loss))\n test_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n\n return model, train_logs, test_logs", "def main(u_net_settings):\n model = build_u_net(*u_net_settings)\n print(model.summary())", "def __init__(self, conf):\n self.model_conf = 
conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1", "def prepare_training_components(self):\n # Instantiate model objects.\n self.instantiate_model_objects()\n\n # Create checkpoint machinery to save/restore checkpoints.\n self.create_checkpoint_machinery()\n\n # Create summary file writer.\n self.summary_file_writer = tf.summary.create_file_writer(\n logdir=os.path.join(self.params[\"output_dir\"], \"summaries\"),\n name=\"summary_file_writer\"\n )", "def sample_model(model, x, y, params_init, model_loss='multi_class_linear_output' ,num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, inv_mass=None, jitter=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, debug=False, tau_out=1.,tau_list=None, store_on_GPU = True, desired_accept_rate=0.8, verbose = False):\n\n device = params_init.device\n params_shape_list = []\n params_flattened_list = []\n build_tau = False\n if tau_list is None:\n tau_list = []\n build_tau = True\n for weights in model.parameters():\n 
params_shape_list.append(weights.shape)\n params_flattened_list.append(weights.nelement())\n if build_tau:\n tau_list.append(torch.tensor(1.))\n\n log_prob_func = define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=normalizing_const, device = device)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return sample(log_prob_func, params_init, num_samples=num_samples, num_steps_per_sample=num_steps_per_sample, step_size=step_size, burn=burn, jitter=jitter, inv_mass=inv_mass, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, jitter_max_tries=jitter_max_tries, sampler=sampler, integrator=integrator, metric=metric, debug=debug, desired_accept_rate=desired_accept_rate, store_on_GPU = store_on_GPU, verbose = verbose)", "def compute_stats(self, epoch, loader, setname):\n args = self.args\n self.model.eval()\n ndevbatches = len(self.dev_loader)\n logging.info(f\"Evaluating {ndevbatches} batches ...\")\n\n recons, gts = defaultdict(list), defaultdict(list)\n acquisition_machine_by_fname = dict()\n with torch.no_grad():\n for batch_idx, batch in enumerate(self.dev_loader):\n progress = epoch + batch_idx/ndevbatches\n logging_epoch = batch_idx % args.log_interval == 0\n logging_epoch_info = batch_idx % (2 * args.log_interval) == 0\n log = logging.info if logging_epoch_info else logging.debug\n\n self.start_of_test_batch_hook(progress, logging_epoch)\n\n batch = self.preprocess_data(batch)\n output, target = self.predict(batch)\n output = self.unnorm(output, batch)\n target = self.unnorm(target, batch)\n fname, slice = batch.fname, batch.slice\n\n for i in range(output.shape[0]):\n slice_cpu = slice[i].item()\n recons[fname[i]].append((slice_cpu, output[i].float().cpu().numpy()))\n gts[fname[i]].append((slice_cpu, target[i].float().cpu().numpy()))\n\n acquisition_type = batch.attrs_dict['acquisition'][i]\n machine_type = batch.attrs_dict['system'][i]\n acquisition_machine_by_fname[fname[i]] = machine_type + '_' + acquisition_type\n\n if logging_epoch or batch_idx == ndevbatches-1:\n gpu_memory_gb = torch.cuda.memory_allocated()/1000000000\n host_memory_gb = utils.host_memory_usage_in_gb()\n log(f\"Evaluated {batch_idx+1} of {ndevbatches} (GPU Mem: {gpu_memory_gb:2.3f}gb Host Mem: {gpu_memory_gb:2.3f}gb)\")\n sys.stdout.flush()\n\n if self.args.debug_epoch_stats:\n break\n del output, target, batch\n\n logging.debug(f\"Finished evaluating\")\n self.end_of_test_epoch_hook()\n\n recons = {\n fname: np.stack([pred for _, pred in sorted(slice_preds)])\n for fname, slice_preds in recons.items()\n }\n gts = {\n fname: np.stack([pred for _, pred in sorted(slice_preds)])\n for fname, slice_preds in gts.items()\n }\n\n nmse, psnr, ssims = [], [], []\n ssim_for_acquisition_machine = defaultdict(list)\n recon_keys = list(recons.keys()).copy()\n for fname in recon_keys:\n pred_or, gt_or = recons[fname].squeeze(1), gts[fname].squeeze(1)\n pred, gt = transforms.center_crop_to_smallest(pred_or, gt_or)\n del pred_or, gt_or\n\n ssim = evaluate.ssim(gt, pred)\n acquisition_machine = acquisition_machine_by_fname[fname]\n ssim_for_acquisition_machine[acquisition_machine].append(ssim)\n ssims.append(ssim)\n nmse.append(evaluate.nmse(gt, pred))\n psnr.append(evaluate.psnr(gt, pred))\n del gt, pred\n del recons[fname], gts[fname]\n\n if len(nmse) == 0:\n nmse.append(0)\n 
ssims.append(0)\n psnr.append(0)\n\n min_vol_ssim = np.argmin(ssims)\n min_vol = str(recon_keys[min_vol_ssim])\n logging.info(f\"Min vol ssims: {min_vol}\")\n sys.stdout.flush()\n\n del recons, gts\n\n acquisition_machine_losses = dict.fromkeys(self.dev_data.system_acquisitions, 0)\n for key, value in ssim_for_acquisition_machine.items():\n acquisition_machine_losses[key] = np.mean(value)\n\n losses = {'NMSE': np.mean(nmse),\n 'PSNR': np.mean(psnr),\n 'SSIM': np.mean(ssims),\n 'SSIM_var': np.var(ssims),\n 'SSIM_min': np.min(ssims),\n **acquisition_machine_losses}\n\n return losses", "def adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n # Adjusting twice causes an error.\n if self.is_model_adjusted:\n logging.debug(\"model_and_info.is_model_adjusted is already True\")\n\n if self._optimizer:\n raise ValueError(\"Create an optimizer only after creating and adjusting the model.\")\n\n self._model = ModelAndInfo._adjust_for_gpus(model=self._model,\n config=self.config,\n model_execution_mode=self.model_execution_mode)\n\n self.is_model_adjusted = True\n logging.debug(\"model_and_info.is_model_adjusted set to True\")", "def create_models(self):\r\n self.all_ratings = AllRatingsWithCommon(\r\n experts=self.users,\r\n objects=self.videos,\r\n output_features=self.features,\r\n name=\"prod\",\r\n )\r\n\r\n print_memory(stage=\"DPLF:ratings_nodata_created\")\r\n\r\n # creating models\r\n self.user_to_model = {\r\n user: FeaturelessPreferenceLearningModel(\r\n expert=user, all_ratings=self.all_ratings\r\n )\r\n for user in self.users\r\n }\r\n\r\n print_memory(stage=\"DPLF:models_created\")\r\n\r\n # before creating the aggregator, filling models with data\r\n self.user_to_size = {\r\n user: self.fill_model_data(self.user_to_model[user], user)\r\n for user in tqdmem(self.users, desc=\"fill_data\")\r\n }\r\n\r\n # virtual 'common' data\r\n fplm_common = FeaturelessPreferenceLearningModel(\r\n expert=AllRatingsWithCommon.COMMON_EXPERT, all_ratings=self.all_ratings\r\n )\r\n fplm_common.on_dataset_end()\r\n\r\n print_memory(stage=\"DPLF:data_filled\")\r\n\r\n # resetting the model given the data\r\n self.all_ratings.reset_model()\r\n\r\n print_memory(stage=\"DPLF:model_reset_ok\")\r\n\r\n # aggregating models\r\n self.aggregator = FeaturelessMedianPreferenceAverageRegularizationAggregator(\r\n models=[self.user_to_model[u] for u in self.users]\r\n )\r\n self.aggregator.certification_status = self.user_certified\r\n\r\n print_memory(stage=\"DPLF:aggregator_created\")", "def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = next(self.rollout_provider)\n batch = process_rollout(rollout, gamma=self.config.discount)\n\n should_compute_summary = (self.task == 0 \n and self.local_steps % self.config.summary_every == 0)\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.r: batch.r,\n self.w: batch.w,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(\n tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1", "def run(self):\n\n self._logger.debug(\"Starting Dummy Model: modelID=%s;\" % 
(self._modelID))\n\n # =========================================================================\n # Initialize periodic activities (e.g., for model result updates)\n # =========================================================================\n periodic = self._initPeriodicActivities()\n\n self._optimizedMetricLabel = self._optimizeKeyPattern\n self._reportMetricLabels = [self._optimizeKeyPattern]\n\n # =========================================================================\n # Create our top-level loop-control iterator\n # =========================================================================\n if self._iterations >= 0:\n iterTracker = iter(xrange(self._iterations))\n else:\n iterTracker = iter(itertools.count())\n\n # =========================================================================\n # This gets set in the unit tests. It tells the worker to sys exit\n # the first N models. This is how we generate orphaned models\n doSysExit = False\n if self._sysExitModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._sysExitModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n doSysExit = True\n\n if self._delayModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._delayModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n time.sleep(10)\n \n # DEBUG!!!! infinite wait if we have 50 models\n #if len(modelIDs) >= 50:\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n # while not jobCancel:\n # time.sleep(1)\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n\n if self._errModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._errModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n raise RuntimeError(\"Exiting with error due to errModelRange parameter\")\n\n # =========================================================================\n # Delay, if necessary\n if self._delay is not None:\n time.sleep(self._delay)\n\n # =========================================================================\n # Run it!\n # =========================================================================\n self._currentRecordIndex = 0\n while True:\n\n # =========================================================================\n # Check if the model should be stopped\n # =========================================================================\n\n # If killed by a terminator, stop running\n if self._isKilled:\n break\n\n # If job stops or hypersearch ends, stop running\n if self._isCanceled:\n break\n\n # If model is mature, stop running ONLY IF we are not the best model\n # for the job. Otherwise, keep running so we can keep returning\n # predictions to the user\n if self._isMature:\n if not self._isBestModel:\n self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED\n break\n else:\n self._cmpReason = self._jobsDAO.CMPL_REASON_EOF\n\n # =========================================================================\n # Get the the next record, and \"write it\"\n # =========================================================================\n try:\n self._currentRecordIndex = next(iterTracker)\n except StopIteration:\n break\n\n # \"Write\" a dummy output value. 
This is used to test that the batched\n # writing works properly\n\n self._writePrediction(ModelResult(None, None, None, None))\n\n periodic.tick()\n\n # =========================================================================\n # Compute wait times. See if model should exit\n # =========================================================================\n\n if self.__shouldSysExit(self._currentRecordIndex):\n sys.exit(1)\n\n # Simulate computation time\n if self._busyWaitTime is not None:\n time.sleep(self._busyWaitTime)\n self.__computeWaitTime()\n\n # Asked to abort after so many iterations?\n if doSysExit:\n sys.exit(1)\n\n # Asked to raise a jobFailException?\n if self._jobFailErr:\n raise utils.JobFailException(\"E10000\",\n \"dummyModel's jobFailErr was True.\")\n\n # =========================================================================\n # Handle final operations\n # =========================================================================\n if self._doFinalize:\n if not self._makeCheckpoint:\n self._model = None\n\n # Delay finalization operation\n if self._finalDelay is not None:\n time.sleep(self._finalDelay)\n\n self._finalize()\n\n self._logger.info(\"Finished: modelID=%r \"% (self._modelID))\n\n return (self._cmpReason, None)", "def represent():\n\tmodel.eval()\n\twith torch.no_grad():\n\n\t\tall_data = []\n\t\tall_targets = []\n\n\t\tfor batch_idx, (data, labels) in enumerate(nat_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float()+50) # +50 for nat data, for distinction between nat and syn\n\t\tfor batch_idx, (data, labels) in enumerate(syn_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float())\n\n\t\tall_data = torch.cat(all_data, 0) # Merges the list of tensors\n\t\tall_data = all_data.cuda()\n\t\tall_targets = torch.cat(all_targets, 0)\n\n\t\trepresentation = model.representation(all_data)\n\t\t\n\t\ttorch.save(representation, directory + \"/representations/repr\" + str(epoch) + \".pt\")\n\t\twith open(directory + \"/representations/tar\" + str(epoch) + \".log\", \"w\") as f:\n\t\t\tfor t in all_targets:\n\t\t\t\tf.write(str(t.item()) + \"\\n\")\n\n\t\t# Optional: Plotting of the UMAP in each represent()\n\t\t#sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t#reducer = umap.UMAP()\n\t\t#embedding = reducer.fit_transform(representation.cpu())\n\t\t# flatui = [\"#ff0000\", \"#000000\", \"#001800\", \"#003000\", \"#004800\", \"#006000\", \"#007800\", \"#009000\", \"#00a800\", \"#00c000\", \"#00d800\"]\n\t\t# plt.scatter(embedding[:, 0], embedding[:, 1], c=[sns.color_palette(flatui)[x] for x in all_targets.int()])\n\t\t#plt.scatter(embedding[:, 0], embedding[:, 1], c=all_targets.cpu())\n\t\t#plt.gca().set_aspect('equal', 'datalim')\n\t\t#plt.title('UMAP projection of cell data', fontsize=24);\n\t\t#plt.savefig(directory + \"/umap_\" + str(epoch) + \".png\")\n\t\t#plt.clf()", "def train(model, train_sampler, train_loader, test_sampler, test_loader,\n use_cuda, epochs, loss_func, optimizer_name, lr,\n batch_log_interval, hvd, smlb_out):\n console = smlb_out.log.console\n device = smlb_out.log.device\n\n # training history (validation only)\n with console.subproc('Creating training history'):\n loss_val_hist = torch.zeros(epochs, dtype=torch.float32)\n acc_val_hist = torch.zeros(epochs, dtype=torch.float32)\n\n # send to device\n if use_cuda:\n with console.subproc('Sending model and history to device'):\n model.cuda()\n loss_val_hist = loss_val_hist.cuda()\n acc_val_hist = 
acc_val_hist.cuda()\n\n # loss\n with console.subproc('Creating loss function'):\n loss_func = eval(f'nn.{loss_func}()')\n console.message(f'Loss function: {loss_func}')\n\n # optimizer\n with console.subproc('Creating optimizer'):\n console.message(f'Learning rate specified: {lr}')\n console.message(f'Reduction operation: {\"hvd.Average\"}')\n console.message(f'Learning rate will be scaled by a factor of '\n f'{hvd.size()} (hvd.size())')\n optimizer = eval(f'torch.optim.{optimizer_name}(model.parameters(), '\n f'lr={lr * hvd.size()})')\n console.message(f'Optimizer: {optimizer}')\n # Horovod: wrap optimizer with DistributedOptimizer\n optimizer = hvd.DistributedOptimizer(optimizer, op=hvd.Average)\n\n # Horovod: broadcast model and optimizer\n with console.subproc('Broadcasting model and optimizer'):\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n # ----------------------\n # Epoch loop starts here\n # ----------------------\n console.begin('**** EPOCH LOOP ****')\n device.begin('**** EPOCH LOOP ****')\n for epoch in range(epochs):\n # only log on device within epoch loop\n device.begin(f'Epoch: {epoch}')\n\n # -------------------\n # Training batch loop\n # -------------------\n device.begin('Training batch loop')\n # stamp train epoch in system monitor\n smlb_out.system.stamp_event(f'epoch {epoch}: train')\n # enter train mode\n model.train()\n # Horovod: set epoch to sampler for shuffling\n train_sampler.set_epoch(epoch)\n # batch loop\n for batch_idx, (batch_x, batch_y) in enumerate(train_loader):\n if use_cuda:\n batch_x, batch_y = batch_x.cuda(), batch_y.cuda()\n # forward, loss, acc\n pred_y = model(batch_x)\n loss = loss_func(pred_y, batch_y)\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if batch_idx % batch_log_interval == 0:\n # accuracy on batch\n with torch.no_grad():\n acc = compute_acc(pred_y, batch_y)\n # Horovod: use train_sampler to determine the number of\n # samples in this worker's partition\n device.message(\n '[{:5d}/{:5d} ({:3.0f}%)] loss={:f}, acc={:f}, '\n 'elapsed={:f} sec'.format(\n batch_idx * len(batch_x), len(train_sampler),\n 100 * batch_idx / len(train_loader), loss, acc,\n device.elapsed_shallowest))\n device.ended('Training batch loop')\n\n # ----------------------\n # Validation on test set\n # ----------------------\n device.begin('Validating on test set')\n # stamp validate epoch in system monitor\n smlb_out.system.stamp_event(f'epoch {epoch}: validate')\n # enter eval mode\n model.eval()\n # accumulate loss and acc\n loss_val = torch.zeros((1,), dtype=torch.float32)\n acc_val = torch.zeros((1,), dtype=torch.float32)\n if use_cuda:\n loss_val, acc_val = loss_val.cuda(), acc_val.cuda()\n for batch_x, batch_y in test_loader:\n if use_cuda:\n batch_x, batch_y = batch_x.cuda(), batch_y.cuda()\n # forward, loss, acc\n with torch.no_grad():\n pred_y = model(batch_x)\n loss_val += loss_func(pred_y, batch_y)\n acc_val += compute_acc(pred_y, batch_y) * len(pred_y)\n if use_cuda:\n loss_val, acc_val = loss_val.cpu(), acc_val.cpu()\n loss_val /= len(test_sampler)\n acc_val /= len(test_sampler)\n # average metrics across ranks and save to history\n with device.subproc('Averaging metrics across ranks (allreduce)'):\n loss_val_hist[epoch] = metric_average(loss_val, 'avg_loss', hvd)\n acc_val_hist[epoch] = metric_average(acc_val, 'avg_accuracy', hvd)\n # log device-wise and average metrics\n device.message('Metrics on rank: loss_val={:f}, acc_val={:f}'\n 
.format(loss_val.item(), acc_val.item()))\n device.message('Average metrics: loss_val={:f}, acc_val={:f}'\n .format(loss_val_hist[epoch], acc_val_hist[epoch]))\n device.ended('Validating on test set')\n\n # only show average on console\n console.message(f'Epoch {epoch:2d}: '\n f'loss_val={loss_val_hist[epoch]:f}, '\n f'acc_val={acc_val_hist[epoch]:f}, '\n f'elapsed={device.elapsed_shallowest:f} sec')\n device.ended(f'Epoch: {epoch}')\n device.ended('**** EPOCH LOOP ****')\n console.ended('**** EPOCH LOOP ****')\n\n # send model and data back to CPU\n if use_cuda:\n with console.subproc('Sending model and history back to cpu'):\n model.cpu()\n loss_val_hist = loss_val_hist.cpu()\n acc_val_hist = acc_val_hist.cpu()\n\n # return history\n return {'loss_val': loss_val_hist.numpy().tolist(),\n 'acc_val_hist': acc_val_hist.numpy().tolist()}", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def single_gpu_online_evaluation(\n model: Module,\n data_loader: DataLoader,\n metric: Union[str, Sequence[str]] = 'EPE') -> Dict[str, np.ndarray]:\n\n model.eval()\n metrics = metric if isinstance(metric, (type, list)) else [metric]\n result_metrics = defaultdict(list)\n\n prog_bar = mmcv.ProgressBar(len(data_loader))\n for data in data_loader:\n with torch.no_grad():\n batch_results = model(test_mode=True, **data)\n img_metas = data['img_metas'].data[0]\n batch_flow = []\n batch_flow_gt = []\n batch_valid = []\n # a batch of result and a batch of img_metas\n for i in range(len(batch_results)):\n result = batch_results[i]\n img_meta = img_metas[i]\n\n # result.keys() is 'flow' or ['flow_fw','flow_bw']\n # img_meta.keys() is 'flow_gt' or ['flow_fw_gt','flow_bw_gt']\n for k in result.keys():\n\n if img_meta.get(k + '_gt', None) is None:\n # img_meta does not have flow_bw_gt, so just check\n # the forward predicted.\n if k == 'flow_bw':\n continue\n elif k == 'flow_fw':\n batch_flow_gt.append(img_meta['flow_gt'])\n else:\n batch_flow_gt.append(img_meta[k + '_gt'])\n\n batch_flow.append(result[k])\n batch_valid.append(\n img_meta.get('valid', np.ones_like(result[k][..., 0])))\n\n batch_results_metrics = eval_metrics(batch_flow, batch_flow_gt,\n batch_valid, metrics)\n for i_metric in metrics:\n result_metrics[i_metric].append(\n batch_results_metrics[i_metric])\n\n prog_bar.update()\n\n for i_metric in metrics:\n if result_metrics.get(i_metric) is None:\n raise KeyError(f'Model cannot compute {i_metric}')\n result_metrics[i_metric] = np.array(result_metrics[i_metric]).mean()\n\n return result_metrics", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = 
\"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def run():\n # get arguments\n args = parse_args()\n assert args.batch_size % args.gpu_num == 0\n assert args.gru_hidden_size % 2 == 0\n\n # create a logger\n logger = logging.getLogger(\"GACM\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n check_path(args.save_dir)\n check_path(args.load_dir)\n check_path(args.result_dir)\n check_path(args.summary_dir)\n if args.log_dir:\n check_path(args.log_dir)\n file_handler = logging.FileHandler(args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time())) + '.txt')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n logger.info('Running with args : {}'.format(args))\n\n logger.info('Checking the directories...')\n for dir_path in [args.save_dir, args.result_dir, args.summary_dir]:\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n global Dataset\n global Agent\n logger.info('Agent version: {}.0'.format(args.agent_version))\n logger.info('Dataset version: {}.0'.format(args.dataset_version))\n logger.info('Checking the directories...')\n Dataset = importlib.import_module('dataset{}'.format(args.dataset_version)).Dataset\n Agent = importlib.import_module('Agent{}'.format(args.agent_version)).Agent\n \n if args.pretrain:\n pretrain(args)\n if args.train:\n train(args)\n if args.test:\n test(args)\n if args.rank:\n rank(args)\n if args.generate_synthetic_dataset:\n generate_synthetic_dataset(args)\n logger.info('run done.')", "def continueTraining(self,model): \n self.setOldModel(model)\n self.model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n 
self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n\n # fit model to data\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)", "def validate(model,dataloader,criterions,epoch,plots):\n # switch to evaluate mode\n model.eval()\n\n running_loss = 0.0\n running_oa = 0.0\n running_moa = 0.0\n\n avg_losses={}\n avg_accuracies={}\n avg_moas={}\n for i in range(6):\n avg_losses[i] = AverageMeter()\n avg_accuracies[i] = AverageMeter()\n avg_moas[i] = AverageMeter()\n\n tq_bar = tqdm(enumerate(dataloader),total=len(dataloader),ncols=80,desc='Testing')\n for batch_id, (images, labels_group) in tq_bar:\n # if i>25:\n # break\n if torch.cuda.is_available():\n images = [Variable(image.cuda()) for image in images]\n labels_group = [labels for labels in labels_group]\n else:\n print('Cuda not available')\n images = [Variable(image) for image in images]\n labels_group = [labels for labels in labels_group]\n\n\n batch_losses = []\n batch_accuracies = []\n batch_moas = []\n\n for img, labels in zip(images, labels_group):\n outputs = model(img)\n net_batch_size = outputs[0].size(0)\n if torch.cuda.is_available():\n labels = [Variable(label.cuda()) for label in labels]\n else:\n labels = [Variable(label) for label in labels]\n for i,pair in enumerate(zip(outputs, labels)):\n accuracy = accuracy_dense(pair[0].data, pair[1].data)\n moa,_ = mAP_dense(pair[0].data, pair[1].data)\n batch_losses.append(criterions[i](pair[0], pair[1]))\n batch_accuracies.append(accuracy)\n batch_moas.append(moa)\n\n for i in range(6):\n avg_losses[i].update(batch_losses[i].data[0], net_batch_size)\n avg_accuracies[i].update(batch_accuracies[i], net_batch_size)\n avg_moas[i].update(batch_moas[i], net_batch_size)\n\n ## LOSS COMPUTATION\n # loss_weight = [auto_loss_weight(0,epoch), auto_loss_weight(1,epoch), auto_loss_weight(2,epoch), auto_loss_weight(3,epoch), auto_loss_weight(4,epoch)]\n if epoch < 40:\n loss_weight = [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]\n else:\n loss_weight = [0.5, 0.1, 0.1, 0.1, 0.1, 0.1]\n\n # loss_weight = [1., 0.01, 0.01, 0.01, 0.01, 0.01] # fait converger en OA la HD layer\n loss_weight = [1.,0.7, 0.6, 0.5, 0.1, 0.05, 0.01]\n\n total_batch_loss = 0\n for w, l in zip(loss_weight, batch_losses):\n total_batch_loss += w*l\n\n\n running_loss += total_batch_loss.data[0]\n # running_oa += oa\n # running_hd_moa += hd_moa\n for i in range(6):\n plots.plot(\"Total loss (running)\", \"val \"+str(i), epoch*len(dataloader)+batch_id+1, avg_losses[i].val)\n plots.plot(\"OA (running)\", \"val \"+str(i), epoch*len(dataloader)+batch_id+1, avg_accuracies[i].val)\n plots.plot(\"mOA (running)\", \"val \"+str(i), epoch*len(dataloader)+batch_id+1, avg_moas[i].val)\n for i in range(6):\n plots.plot(\"Total loss (final mean of epoch)\", \"val \"+str(i), epoch+1, avg_losses[i].val)\n plots.plot(\"OA (final mean of epoch)\", \"val \"+str(i), epoch+1, avg_accuracies[i].val)\n plots.plot(\"mOA (final mean of epoch)\", \"val \"+str(i), epoch+1, avg_moas[i].val)" ]
[ "0.7002343", "0.62532896", "0.62355524", "0.6192523", "0.61289847", "0.6088246", "0.59602416", "0.5950651", "0.59498066", "0.5881187", "0.588016", "0.5873111", "0.58373106", "0.5834648", "0.58186084", "0.58035755", "0.57857174", "0.57534796", "0.5750009", "0.5738947", "0.57329243", "0.57293063", "0.5724346", "0.572426", "0.5712572", "0.5712538", "0.5695092", "0.5662597", "0.56513256", "0.56502306", "0.56392473", "0.5633596", "0.56204194", "0.5605654", "0.560299", "0.5602616", "0.55908704", "0.5583066", "0.5583066", "0.55685157", "0.55606824", "0.5541374", "0.55315965", "0.5520299", "0.5502626", "0.5501811", "0.54928994", "0.5489176", "0.54840636", "0.54821354", "0.5480017", "0.54777974", "0.54713184", "0.54696155", "0.5467606", "0.54670405", "0.54670405", "0.544921", "0.5446324", "0.54421264", "0.5441747", "0.5438025", "0.5434241", "0.5434241", "0.5433711", "0.54318", "0.543083", "0.54269797", "0.54237646", "0.5415333", "0.5406319", "0.540625", "0.54046565", "0.5404422", "0.54024684", "0.54013276", "0.5398329", "0.5398325", "0.53956664", "0.53949535", "0.5391708", "0.5382466", "0.5377092", "0.53674394", "0.53629076", "0.5362611", "0.5357792", "0.5351773", "0.53506273", "0.5340325", "0.5335969", "0.53341746", "0.5333548", "0.533172", "0.53313357", "0.53299886", "0.5327225", "0.53262573", "0.5325484", "0.5324858" ]
0.7819205
0
Creates a model as per the config, and loads the parameters from the given checkpoint path. Also updates the checkpoint_epoch.
def try_create_model_and_load_from_checkpoint(self) -> bool:
        self.create_model()
        if self.checkpoint_path:
            # Load the stored model. If there is no checkpoint present, return immediately.
            return self.try_load_checkpoint_for_model()
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_checkpoint(model_config, path):\n model = models.VisionTransformer(num_classes=1, **model_config)\n variables = model.init(\n jax.random.PRNGKey(0),\n jnp.ones([1, 16, 16, 3], jnp.float32),\n train=False,\n )\n _save(variables['params'], path)", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def 
init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = 
self._counter\n\n model_dict['metrics'] = self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if 
os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. 
Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # 
Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def load_checkpoint(path: str, save_dir: str, cuda: bool = False, attention_viz: bool = False) -> nn.Module:\r\n # Load model and args\r\n state = torch.load(path, map_location=lambda storage, loc: storage)\r\n args, loaded_state_dict = state['args'], state['state_dict']\r\n\r\n # Update args with current args\r\n args.cuda = cuda\r\n args.attention_viz = attention_viz\r\n args.save_dir = save_dir\r\n\r\n model = build_model(args)\r\n model.load_state_dict(loaded_state_dict)\r\n\r\n if cuda:\r\n print('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n return model", "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name = None\n for name in 
dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model", "def create_model( session, batch_size ):\n model = linear_model.LinearModel(\n FLAGS.linear_size,\n FLAGS.num_layers,\n FLAGS.residual,\n FLAGS.batch_norm,\n FLAGS.max_norm,\n batch_size,\n FLAGS.learning_rate,\n FLAGS.origin_bc,\n summaries_dir,\n dtype=tf.float16 if FLAGS.use_fp16 else tf.float32)\n\n if FLAGS.load <= 0:\n # Create a new model from scratch\n print(\"Creating model with fresh parameters.\")\n session.run( tf.global_variables_initializer() )\n return model\n\n # Load a previously saved model\n ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename=\"checkpoint\")\n print( \"train_dir\", train_dir )\n\n if ckpt and ckpt.model_checkpoint_path:\n # Check if the specific cpixels = pixels / pixels[2,:]heckpoint exists\n if FLAGS.load > 0:\n if os.path.isfile(os.path.join(train_dir,\"checkpoint-{0}.index\".format(FLAGS.load))):\n ckpt_name = os.path.join( os.path.join(train_dir,\"checkpoint-{0}\".format(FLAGS.load)) )\n else:\n raise ValueError(\"Asked to load checkpoint {0}, but it does not seem to exist\".format(FLAGS.load))\n else:\n ckpt_name = os.path.basename( ckpt.model_checkpoint_path )\n\n print(\"Loading model {0}\".format( ckpt_name ))\n model.saver.restore( session, ckpt.model_checkpoint_path )\n return model\n else:\n print(\"Could not find checkpoint. Aborting.\")\n raise( ValueError, \"Checkpoint {0} does not seem to exist\".format( ckpt.model_checkpoint_path ) )\n\n return model", "def try_load_checkpoint_for_model(self) -> bool:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):\n config = copy.deepcopy(config) # ensure the reference model is not passed to tests\n\n save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))\n config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})\n\n trainer = Trainer(**config)\n trainer.fit()\n\n self.reference_model = trainer.state.model\n self.reference_folder = save_folder", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def init_model(session, model):\n # If there is a checkpoint, load it\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n\n # Else initialize the variables\n else:\n if FLAGS.decode:\n input(\"You sure you want to talk to an untrained chatbot? 
Press Ctrl-C to stop, Return to continue \")\n print(\"Fine.\")\n\n print(\"Creating model with fresh parameters.\")\n session.run(tf.global_variables_initializer())", "def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = 
key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" % num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % 
ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n 
self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def create_checkpoint(model, save_dir, train_data):\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {\n 'model': model.name,\n 'classifier': model.classifier,\n 'class_to_idx': model.class_to_idx,\n 'state_dict': model.state_dict()\n }\n\n if save_dir and isdir(save_dir):\n torch.save(checkpoint, save_dir + 'checkpoint.pth')\n print('checkpoint created')\n else: \n print(\"Directory not found. Saving at current directory in checkpoint.pth\")\n torch.save(checkpoint, 'checkpoint.pth')", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model, **kwargs)\n self.path = model\n self.model = get_zennet()\n\n model_pth_path = osp.join(self.path, ModelFile.TORCH_MODEL_FILE)\n\n checkpoint = torch.load(model_pth_path, map_location='cpu')\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n self.model.load_state_dict(state_dict, strict=True)\n logger.info('load model done')", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" 
\")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n 
self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = 
os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def train(self, config: ConfigurationNode = None):\n if config is None:\n config = self.config\n # Create writable timestamp for easier record keeping\n timestamp = datetime.now().isoformat(sep=\"T\", timespec=\"auto\")\n name_timestamp = timestamp.replace(\":\", \"_\")\n\n # Start the mlflow run:\n mlflow.start_run(run_name=name_timestamp)\n\n # Check valid output path, set path from the path_cfg_override modules respectively\n assert config.OUTPUT_PATH != ''\n path_output = config.OUTPUT_PATH # output folder\n path_train = config.DATASET.TRAIN_DATA_PATH # training data folder\n path_val = config.DATASET.VAL_DATA_PATH # validation data folder\n\n # Make output dir and its parents if not exist.\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Make result folders if they do not exist.\n self.results_dir = (Path(path_output) / name_timestamp)\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n # Make backup folders if they do not exist.\n self.backup_dir = os.path.join(self.results_dir, 'model_backups')\n if not os.path.exists(self.backup_dir):\n os.makedirs(self.backup_dir)\n\n writer_tensorboard = SummaryWriter(log_dir=Path(self.results_dir / \"logs_tensorflow\"))\n\n # Now that CFG has been properly merged with new data along the way, time to dump a version of it into a string for trackability purposes.\n config.dump(stream=open(os.path.join(self.results_dir, f'config{name_timestamp}.yaml'), 'w'))\n\n # file path to store the state of the model.\n state_fpath = os.path.join(self.results_dir, f'model{name_timestamp}.pt')\n\n # ????\n perf_path = os.path.join(self.results_dir, f'trace{name_timestamp}.p')\n perf_trace = []\n\n # Load data, create the data loader objects from them.\n data_train = pickle.load(open(path_train, 'rb'))\n data_val = pickle.load(open(path_val, 'rb'))\n self.loader_train = build_data_loader(data_train, config.DATASET, True)\n self.loader_val = build_data_loader(data_val, config.DATASET, False)\n\n # Build the model using configue dict node\n self.model = build_model(config.MODEL)\n\n # Enable parallel multi GPU mode if the config specify it.\n if config.MODEL.PARALLEL:\n print(\"Utilized parallel processing\")\n self.model = torch.nn.DataParallel(self.model)\n\n current_epoch = 0\n\n # For resuming training (i.e. 
load checkpoint)\n if config.RESUME_PATH != \"\":\n checkpoint = torch.load(config.RESUME_PATH, map_location='cpu')\n current_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint[\"model_state\"])\n _ = self.model.cuda()\n\n # SOLVER EVALUATOR\n cfg_solver = config.MODEL.SOLVER\n\n # Build optimizer (between train/validation, using the solver portion of the configuration.\n optimizer = build_optimizer(self.model, cfg_solver)\n\n # Build evaluator (between train/validation, using the solver portion of the configuration.\n evaluator = build_evaluator(cfg_solver)\n\n evaluator.float().cuda()\n total_epochs = cfg_solver.TOTAL_EPOCHS\n\n\n # Main training epoch loop starts here.\n for epoch in range(current_epoch, total_epochs):\n\n # Train a single epoch\n self.train_epoch(epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard)\n\n mlflow.end_run()", "def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n 
features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d 
epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def build_graph_from_config(self, model_config, track_config, checkpoint_path):\n self.build_model()\n \n ema = tf.train.ExponentialMovingAverage(0)\n variables_to_restore = ema.variables_to_restore(moving_avg_variables=[])\n\n # Filter out State variables\n variables_to_restore_filterd = {}\n for key, value in variables_to_restore.items():\n if key.split('/')[1] != 'State':\n if \"alex_branch\" not in key:\n if \"vggf_branch\" not in key:\n variables_to_restore_filterd[key] = value\n \n saver = tf.train.Saver(variables_to_restore_filterd)\n \n\n if osp.isdir(checkpoint_path):\n #checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_path:\n raise ValueError(\"No checkpoint file found in: {}\".format(checkpoint_path))\n\n def _restore_fn(sess):\n logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n saver.restore(sess, checkpoint_path)\n logging.info(\"Successfully loaded checkpoint: %s\", os.path.basename(checkpoint_path))\n logging.info(\"Restore CANet...\")\n\n return _restore_fn", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def __init__(self, saved_model=None, serialize_input=True):\n assert saved_model\n self.saved_model_path = saved_model\n self.serialize_input = serialize_input\n logging.info(\"Reading checkpoint {}.\".format(saved_model))\n imported_model = tf.saved_model.load(saved_model)\n self.bleurt_model_ops = imported_model.signatures[\"serving_default\"]\n logging.info(\"BLEURT initialized.\")", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n 
model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_model(path='./model_checkpoint', name='tf_model'):\n with open(os.path.join(path, name + '.json')) as json_file:\n json_config = json_file.read()\n model = TensorModel(tf.keras.models.model_from_json(json_config))\n model._model.load_weights(os.path.join(path, name + '_weights.h5'))\n\n return model", "def try_create_model_load_from_checkpoint_and_adjust(self) -> bool:\n success = self.try_create_model_and_load_from_checkpoint()\n self.create_summary_and_adjust_model_for_gpus()\n return success", "def load_model_from_checkpoint(file, device):\r\n\r\n if device == 'cuda':\r\n # Load all tensors onto GPU\r\n map_location = lambda storage, loc: storage.cuda()\r\n else:\r\n # Load all tensors onto CPU\r\n map_location = lambda storage, loc: storage\r\n\r\n # Assuming model was trained and checkpoint saved on Linux, but predict.py inference is executed using Windows.\r\n # Then, it is required to implement the following quick fix, because otherwise the exception is raised:\r\n # \"NotImplementedError: cannot instantiate 'PosixPath' on your system\"\r\n # Credits to https://stackoverflow.com/questions/57286486/i-cant-load-my-model-because-i-cant-put-a-posixpath\r\n if type(file) == pathlib.WindowsPath:\r\n tmp_PosixPath = pathlib.PosixPath\r\n pathlib.PosixPath = pathlib.WindowsPath\r\n\r\n parameters = torch.load(file, map_location=map_location)\r\n\r\n # Restore default\r\n if type(file) == pathlib.WindowsPath:\r\n pathlib.WindowsPath = pathlib.PosixPath\r\n pathlib.PosixPath = tmp_PosixPath\r\n\r\n model = train.create_model(parameters)\r\n\r\n model.class_to_idx = parameters.get('train_datasets_class_to_idx')\r\n model.load_state_dict(parameters.get('state_dict'), strict=False)\r\n\r\n return model, parameters", "def create_checkpoint(self, name, path=''):\n\n\t\tnb_path = self._get_os_path(name, path)\n\t\tself.log.debug('creating checkpoint \"%s\" \"%s\" \"%s\"' % (path, name, nb_path))\n\t\t# only the one checkpoint ID:\n\t\tcheckpoint_id = u\"checkpoint\"\n\t\tcp_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\t\tself.log.debug(\"creating checkpoint for notebook %s\", name)\n\t\tif not key_exists(self.bucket, self.checkpoint_dir):\n\t\t\tnew_key_from_string(self.bucket, self.checkpoint_dir, '')\n\t\tself._copy(nb_path, cp_path)\n\n\t\t# return the checkpoint info\n\t\treturn self.get_checkpoint_model(checkpoint_id, name, path)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n self.model = PPO2(CnnPolicy, self.env, verbose=1)", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are 
inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def load_model(config, batchmanager):\n \n # this function returns a dictionary mapping\n # name of the task (string) --> number of classes in the task (int)\n tasks = batchmanager.getTasksWithNClasses()\n # this \"tasks\" object is used to initialize the model (with the right output layers)\n model = MultiTaskBERT(device = config.device, tasks = tasks)\n\n if not config.untrained_baseline:\n\n # if we evaluate only, model MUST be loaded.\n if config.k_shot_only:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--k_shot_only` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n raise Exception()\n \n # if we saved the state dictionary, load it.\n elif config.resume:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = 
pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n 
sys.exit(\"Unable to load previous model\")", "def train_and_eval(config, babas_data):\n\n if config.resume_from_checkpoint is not None:\n try:\n if config.augment_background == 'background':\n bg = config.augment_background\n else:\n bg = None\n rfc = config.resume_from_checkpoint\n ic = config.include_validation\n print 'Loading saved config: %s' % config.saved_config\n config = np.load(config.saved_config).item()\n config.resume_from_checkpoint = rfc\n config.include_validation = ic\n if not hasattr(config, 'augment_background'):\n config.augment_background = 'constant'\n if not hasattr(config, 'background_folder'):\n config.background_folder = 'backgrounds'\n if bg is not None:\n print 'Overriding saved config to add kinect backgrounds to training.'\n config.augment_background = bg\n results_dir = rfc\n except:\n print 'Relying on default config file.'\n\n if babas_data: # Shitty naive training method\n config.tfrecord_dir = '/media/data_cifs/monkey_tracking/data_for_babas/tfrecords_from_babas'\n config.babas_tfrecord_dir = config.tfrecord_dir\n config.steps_before_validation = 20\n config.epochs = 2000\n config.convert_labels_to_pixel_space = False\n config.augment_background = 'constant'\n\n # Import your model\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n model_file = import_cnn(config.model_type)\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = '%s_%s' % (config.model_type, dt_stamp)\n if config.selected_joints is not None:\n dt_dataset = '_%s' % (config.selected_joints) + dt_dataset\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, dt_dataset)\n results_dir = os.path.join(config.npy_dir, dt_dataset)\n print 'Saving Dmurphy\\'s online updates to: %s' % results_dir\n dir_list = [config.train_checkpoint, config.summary_dir, results_dir]\n [tf_fun.make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, config.train_tfrecords)\n if config.babas_tfrecord_dir is not None:\n train_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.train_tfrecords)\n if config.include_validation or config.include_validation is None:\n val_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.val_tfrecords)\n else:\n train_babas_tfrecord_dir = None\n val_babas_tfrecord_dir = None\n\n if isinstance(config.include_validation, basestring):\n validation_data = config.include_validation\n elif config.include_validation == True:\n validation_data = os.path.join(\n config.tfrecord_dir,\n config.val_tfrecords)\n else:\n validation_data = None\n\n print 'Using training set: %s' % train_data\n print 'Using validation set: %s' % validation_data\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_data_dict = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n 
joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background=config.augment_background,\n background_folder=config.background_folder,\n randomize_background=config.randomize_background,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=train_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n train_data_dict['deconv_label_size'] = len(config.labels)\n\n val_data_dict = inputs(\n tfrecord_file=validation_data,\n batch_size=config.validation_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background='none',\n background_folder=config.background_folder,\n randomize_background=None,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=val_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n val_data_dict['deconv_label_size'] = len(config.labels)\n\n # Check output_shape\n if config.selected_joints is not None:\n print 'Targeting joint: %s' % config.selected_joints\n joint_shape = len(config.selected_joints) * config.keep_dims\n if (config.num_classes // config.keep_dims) > (joint_shape):\n print 'New target size: %s' % joint_shape\n config.num_classes = joint_shape\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n print 'Creating training graph:'\n model = model_file.model_struct(\n weight_npy_path=config.weight_npy_path)\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n rgb=train_data_dict['image'],\n target_variables=train_data_dict,\n train_mode=train_mode,\n batchnorm=config.batch_norm)\n train_mu, train_var = tf.nn.moments(train_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"train image mean\", train_mu)\n tf.summary.histogram(\"train image std\", tf.sqrt(train_var))\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv train', model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(model.deconv, axis=3), tf.float32), 3))\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n print 'Creating validation graph:'\n val_model = model_file.model_struct()\n val_model.build(\n rgb=val_data_dict['image'],\n target_variables=val_data_dict)\n\n # Calculate validation accuracy\n val_mu, val_var = tf.nn.moments(val_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"validation image mean\", val_mu)\n tf.summary.histogram(\"validation image std\", tf.sqrt(val_var))\n if 'label' in val_data_dict.keys():\n # val_score = tf.reduce_mean(\n # tf_fun.l2_loss(\n # val_model.output, 
val_data_dict['label']))\n if config.keep_dims == 3:\n z_mask = tf.expand_dims(tf.tile([1, 1, 0], [int(val_data_dict['label'].get_shape()[-1]) // 3]), axis=0)\n z_mask = tf.cast(z_mask, tf.float32)\n val_model.output = val_model.output * z_mask\n val_data_dict['label'] = val_data_dict['label'] * z_mask \n val_score = tf.reduce_mean(tf.nn.l2_loss(val_model.output - val_data_dict['label']))\n tf.summary.scalar(\"validation mse\", val_score)\n if 'fc' in config.aux_losses:\n tf.summary.image('FC val activations', val_model.final_fc)\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv val', val_model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(val_model.deconv, axis=3),\n tf.float32), 3))\n tf.summary.image(\n 'validation images',\n tf.cast(val_data_dict['image'], tf.float32))\n\n # Prepare the loss functions:::\n loss_list, loss_label = [], []\n if 'label' in train_data_dict.keys():\n # 1. Joint localization loss\n if config.calculate_per_joint_loss == 'thomas':\n label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton and joint':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n loss_label += ['skeleton loss']\n delta = model['output'] - train_data_dict['label']\n proc_weights = np.asarray(\n config.dim_weight)[None,:].repeat(\n len(config.joint_names), axis=0).reshape(1, -1)\n delta *= proc_weights\n # label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n # model=model,\n # train_data_dict=train_data_dict,\n # config=config,\n # y_key='label',\n # yhat_key='output')\n # loss_list += [label_loss]\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n else:\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n loss_label += ['combined head']\n for al in loss_helper.potential_aux_losses():\n loss_list, loss_label = loss_helper.get_aux_losses(\n loss_list=loss_list,\n loss_label=loss_label,\n train_data_dict=train_data_dict,\n model=model,\n aux_loss_dict=al,\n domain_adaptation=train_babas_tfrecord_dir)\n loss = tf.add_n(loss_list)\n\n # Add wd if necessary\n if config.wd_penalty is not None:\n _, l2_wd_layers = tf_fun.fine_tune_prepare_layers(\n tf.trainable_variables(), config.wd_layers)\n l2_wd_layers = [\n x for x in l2_wd_layers if 'biases' not in x.name]\n if config.wd_type == 'l1':\n loss += (config.wd_penalty * tf.add_n(\n [tf.reduce_sum(tf.abs(x)) for x in l2_wd_layers]))\n elif config.wd_type == 'l2':\n loss += (config.wd_penalty * tf.add_n(\n [tf.nn.l2_loss(x) for x in l2_wd_layers]))\n\n optimizer = loss_helper.return_optimizer(config.optimizer)\n optimizer = optimizer(config.lr)\n\n if hasattr(config, 'fine_tune_layers') and config.fine_tune_layers is not None:\n print 'Finetuning learning for: %s' % config.fine_tune_layers\n train_op, grads = tf_fun.finetune_learning(\n loss,\n trainables=tf.trainable_variables(),\n fine_tune_layers=config.fine_tune_layers,\n config=config\n )\n 
else:\n # Op to calculate every variable gradient\n grads = optimizer.compute_gradients(\n loss, tf.trainable_variables())\n # Op to update all variables according to their gradient\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads)\n\n # Summarize all gradients and weights\n [tf.summary.histogram(\n var.name + '/gradient', grad)\n for grad, var in grads if grad is not None]\n # train_op = optimizer.minimize(loss)\n\n # Summarize losses\n [tf.summary.scalar(lab, il) for lab, il in zip(\n loss_label, loss_list)]\n\n # Summarize images and l1 weights\n tf.summary.image(\n 'train images',\n tf.cast(train_data_dict['image'], tf.float32))\n tf_fun.add_filter_summary(\n trainables=tf.trainable_variables(),\n target_layer='conv1_1_filters')\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n tf.add_to_collection('output', model.output)\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Create list of variables to run through training model\n train_session_vars = {\n 'train_op': train_op,\n 'loss_value': loss,\n 'im': train_data_dict['image'],\n 'yhat': model.output,\n 'ytrue': train_data_dict['label']\n }\n if hasattr(model, 'deconv'):\n train_session_vars['deconv'] = model.deconv\n if hasattr(model, 'final_fc'):\n train_session_vars['fc'] = model.final_fc\n\n # Create list of variables to run through validation model\n val_session_vars = {\n 'val_acc': val_score,\n 'val_pred': val_model.output,\n 'val_ims': val_data_dict['image'],\n 'val_true': val_data_dict['label'],\n }\n\n # Create list of variables to save to numpys\n save_training_vars = [\n 'im',\n 'yhat',\n 'ytrue',\n 'yhat'\n ]\n\n for al in loss_helper.potential_aux_losses():\n if al.keys()[0] in train_data_dict.keys():\n y_key = '%s' % al.keys()[0]\n train_session_vars[y_key] = train_data_dict[al.values()[0]['y_name']]\n save_training_vars += [y_key]\n\n yhat_key = '%s_hat' % al.keys()[0]\n train_session_vars[yhat_key] = model[al.values()[0]['model_name']]\n save_training_vars += [yhat_key]\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, losses = 0, []\n num_joints = int(\n train_data_dict['label'].get_shape()[-1]) // config.keep_dims\n normalize_vec = tf_fun.get_normalization_vec(config, num_joints)\n if config.resume_from_checkpoint is not None:\n if '.ckpt' in config.resume_from_checkpoint:\n ckpt = config.resume_from_checkpoint\n 'Restoring specified checkpoint: %s' % config.resume_from_checkpoint\n else:\n ckpt = tf.train.latest_checkpoint(config.resume_from_checkpoint)\n print 'Evaluating checkpoint: %s' % ckpt\n saver.restore(sess, ckpt)\n try:\n while not coord.should_stop():\n start_time = time.time()\n train_out_dict = sess.run(train_session_vars.values())\n train_out_dict = {k: v for k, v in zip(\n train_session_vars.keys(), train_out_dict)}\n losses.append(train_out_dict['loss_value'])\n duration = time.time() - start_time\n assert not np.isnan(\n train_out_dict['loss_value']), 'Model diverged with loss = NaN'\n if step % config.steps_before_validation == 0:\n if 
validation_data is not False:\n val_out_dict = sess.run(\n val_session_vars.values())\n val_out_dict = {k: v for k, v in zip(\n val_session_vars.keys(), val_out_dict)}\n # if config.normalize_labels:\n # val_out_dict['val_pred'] *= normalize_vec\n # val_out_dict['val_true'] *= normalize_vec\n np.savez(\n os.path.join(\n results_dir, '%s_val_coors' % step),\n val_pred=val_out_dict['val_pred'],\n val_ims=val_out_dict['val_ims'],\n val_true=val_out_dict['val_true'],\n normalize_vec=normalize_vec)\n with open(\n os.path.join(\n results_dir, '%s_config.p' % step), 'wb') as fp:\n pickle.dump(config, fp)\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy attach 9177\n format_str = (\n '%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch) | '\n 'Validation l2 loss = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, train_out_dict['loss_value'],\n config.train_batch / duration, float(duration),\n val_out_dict['val_acc'],\n config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if config.normalize_labels:\n train_out_dict['yhat'] *= normalize_vec\n train_out_dict['ytrue'] *= normalize_vec\n [save_training_data(\n output_dir=results_dir,\n data=train_out_dict[k],\n name='%s_%s' % (k, step)) for k in save_training_vars]\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch)')\n print (format_str % (\n datetime.now(),\n step,\n train_out_dict['loss_value'],\n config.train_batch / duration,\n float(duration)))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%s_training_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_epoch_checkpoint(self, directory, epoch):\n chkpnt = torch.load(directory / f\"chkpnt_epoch{epoch:04d}.pth\")\n self.load_state_dict(chkpnt['model_state_dict'])", "def __init__(self, model_dir: str, *args, **kwargs):\n super().__init__(model_dir, *args, **kwargs)\n self.model = FRCRN(*args, **kwargs)\n model_bin_file = os.path.join(model_dir,\n ModelFile.TORCH_MODEL_BIN_FILE)\n if os.path.exists(model_bin_file):\n checkpoint = torch.load(\n model_bin_file, map_location=torch.device('cpu'))\n if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:\n # the new trained model by user is based on FRCRNDecorator\n self.load_state_dict(checkpoint['state_dict'])\n else:\n # The released model on Modelscope is based on FRCRN\n self.model.load_state_dict(checkpoint, strict=False)", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. 
Start training......')", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def setup_training(model, train_loader, valid_loader, hps):\r\n\r\n train_dir = os.path.join(hps.save_root, \"train\")\r\n if not os.path.exists(train_dir): os.makedirs(train_dir)\r\n\r\n if hps.restore_model != 'None':\r\n logger.info(\"[INFO] Restoring %s for training...\", hps.restore_model)\r\n bestmodel_file = os.path.join(train_dir, hps.restore_model)\r\n loader = ModelLoader()\r\n loader.load_pytorch(model, bestmodel_file)\r\n else:\r\n logger.info(\"[INFO] Create new model for training...\")\r\n\r\n run_training(model, train_loader, valid_loader, hps) # this is an infinite loop until interrupted\r", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def _save_model(self, checkpoint_dir):\n # Check whether the specified path exists or not\n isExist = os.path.exists(checkpoint_dir)\n\n if not isExist:\n # Create a new directory because it does not exist\n os.makedirs(checkpoint_dir)\n\n filename = self._get_checkpoint_name()\n path = checkpoint_dir + filename\n\n # Serialize the model checkpoint in to a Python Pickle file\n with open(path, 'wb') as f:\n pickle.dump(self._model, f)\n return path" ]
[ "0.771995", "0.75882643", "0.7439317", "0.7204472", "0.71614414", "0.71529615", "0.71529615", "0.70953584", "0.70633674", "0.7042233", "0.7033582", "0.69744486", "0.6970394", "0.6955097", "0.6915084", "0.6887449", "0.6820261", "0.6804254", "0.679512", "0.677149", "0.67633355", "0.67530006", "0.6739434", "0.6685956", "0.6680621", "0.6678945", "0.66758764", "0.6670462", "0.6657075", "0.66519237", "0.6651457", "0.6651417", "0.66477597", "0.66439426", "0.6635655", "0.66354185", "0.66316897", "0.6627271", "0.66222334", "0.6622172", "0.6618883", "0.6615855", "0.6610012", "0.660483", "0.6596398", "0.65937036", "0.65872246", "0.6585868", "0.65733397", "0.65611005", "0.6556627", "0.6547424", "0.6544578", "0.65370953", "0.65342206", "0.6519594", "0.65148723", "0.6513011", "0.65102166", "0.65102166", "0.6508924", "0.6493699", "0.6491976", "0.64901525", "0.6488889", "0.64869255", "0.6486607", "0.6479606", "0.6476691", "0.6466829", "0.6464499", "0.64524263", "0.6447498", "0.6445471", "0.64302015", "0.6419981", "0.641769", "0.64161503", "0.6408957", "0.639073", "0.63795334", "0.6373648", "0.6365831", "0.63550323", "0.6333406", "0.63330054", "0.6323338", "0.63200337", "0.63135576", "0.63044935", "0.630002", "0.6297692", "0.62963176", "0.6270087", "0.6257685", "0.6245581", "0.6243117", "0.62414736", "0.6239511", "0.6237328" ]
0.6709257
23
Creates a model as per the config, and loads the parameters from the given checkpoint path. The model is then adjusted for data parallelism and mixed precision. Also updates the checkpoint_epoch.
def try_create_model_load_from_checkpoint_and_adjust(self) -> bool:
        success = self.try_create_model_and_load_from_checkpoint()
        self.create_summary_and_adjust_model_for_gpus()
        return success
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, 
f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = self._counter\n\n model_dict['metrics'] = self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def create_checkpoint(model_config, path):\n model = models.VisionTransformer(num_classes=1, **model_config)\n variables = model.init(\n jax.random.PRNGKey(0),\n jnp.ones([1, 16, 16, 3], jnp.float32),\n train=False,\n )\n _save(variables['params'], path)", "def init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = 
checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. 
Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n 
if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = 
models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def train(self, config: ConfigurationNode = None):\n if config is None:\n config = self.config\n # Create writable timestamp for easier record keeping\n timestamp = datetime.now().isoformat(sep=\"T\", timespec=\"auto\")\n name_timestamp = timestamp.replace(\":\", \"_\")\n\n # Start the mlflow run:\n mlflow.start_run(run_name=name_timestamp)\n\n # Check valid output path, set path from the path_cfg_override modules respectively\n assert config.OUTPUT_PATH != ''\n path_output = config.OUTPUT_PATH # output folder\n path_train = config.DATASET.TRAIN_DATA_PATH # training data folder\n path_val = config.DATASET.VAL_DATA_PATH # validation data folder\n\n # Make output dir and its parents if not exist.\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Make result folders if they do not exist.\n self.results_dir = (Path(path_output) / name_timestamp)\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n # Make backup folders if they do not exist.\n self.backup_dir = os.path.join(self.results_dir, 'model_backups')\n if not os.path.exists(self.backup_dir):\n os.makedirs(self.backup_dir)\n\n writer_tensorboard = SummaryWriter(log_dir=Path(self.results_dir / \"logs_tensorflow\"))\n\n # Now that CFG has been properly merged with new data along the way, time to dump a version of it into a string for trackability purposes.\n config.dump(stream=open(os.path.join(self.results_dir, f'config{name_timestamp}.yaml'), 'w'))\n\n # file path to store the state of the model.\n state_fpath = os.path.join(self.results_dir, f'model{name_timestamp}.pt')\n\n # ????\n perf_path = os.path.join(self.results_dir, f'trace{name_timestamp}.p')\n perf_trace = []\n\n # Load data, create the data loader objects from them.\n data_train = pickle.load(open(path_train, 'rb'))\n data_val = pickle.load(open(path_val, 'rb'))\n self.loader_train = build_data_loader(data_train, config.DATASET, True)\n self.loader_val = build_data_loader(data_val, config.DATASET, False)\n\n # Build the model using configue dict node\n self.model = build_model(config.MODEL)\n\n # Enable parallel multi GPU mode if the config specify it.\n if config.MODEL.PARALLEL:\n print(\"Utilized parallel processing\")\n self.model = torch.nn.DataParallel(self.model)\n\n current_epoch = 0\n\n # For resuming training (i.e. 
load checkpoint)\n if config.RESUME_PATH != \"\":\n checkpoint = torch.load(config.RESUME_PATH, map_location='cpu')\n current_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint[\"model_state\"])\n _ = self.model.cuda()\n\n # SOLVER EVALUATOR\n cfg_solver = config.MODEL.SOLVER\n\n # Build optimizer (between train/validation, using the solver portion of the configuration.\n optimizer = build_optimizer(self.model, cfg_solver)\n\n # Build evaluator (between train/validation, using the solver portion of the configuration.\n evaluator = build_evaluator(cfg_solver)\n\n evaluator.float().cuda()\n total_epochs = cfg_solver.TOTAL_EPOCHS\n\n\n # Main training epoch loop starts here.\n for epoch in range(current_epoch, total_epochs):\n\n # Train a single epoch\n self.train_epoch(epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard)\n\n mlflow.end_run()", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? 
Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name = None\n for name in dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n 
tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, 
step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def load_model(self, checkpoint_path):\n 
model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n 
steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. Start loading pre-trained model......')", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = 
key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" % num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow 
downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def train_and_eval(config, babas_data):\n\n if config.resume_from_checkpoint is not None:\n try:\n if config.augment_background == 'background':\n bg = config.augment_background\n else:\n bg = None\n rfc = config.resume_from_checkpoint\n ic = config.include_validation\n print 'Loading saved config: %s' % config.saved_config\n config = np.load(config.saved_config).item()\n config.resume_from_checkpoint = rfc\n config.include_validation = ic\n if not hasattr(config, 'augment_background'):\n config.augment_background = 'constant'\n if not hasattr(config, 'background_folder'):\n config.background_folder = 'backgrounds'\n if bg is not None:\n print 'Overriding saved config to add kinect backgrounds to training.'\n config.augment_background = bg\n results_dir = rfc\n except:\n print 'Relying on default config file.'\n\n if babas_data: # Shitty naive training method\n 
config.tfrecord_dir = '/media/data_cifs/monkey_tracking/data_for_babas/tfrecords_from_babas'\n config.babas_tfrecord_dir = config.tfrecord_dir\n config.steps_before_validation = 20\n config.epochs = 2000\n config.convert_labels_to_pixel_space = False\n config.augment_background = 'constant'\n\n # Import your model\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n model_file = import_cnn(config.model_type)\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = '%s_%s' % (config.model_type, dt_stamp)\n if config.selected_joints is not None:\n dt_dataset = '_%s' % (config.selected_joints) + dt_dataset\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, dt_dataset)\n results_dir = os.path.join(config.npy_dir, dt_dataset)\n print 'Saving Dmurphy\\'s online updates to: %s' % results_dir\n dir_list = [config.train_checkpoint, config.summary_dir, results_dir]\n [tf_fun.make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, config.train_tfrecords)\n if config.babas_tfrecord_dir is not None:\n train_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.train_tfrecords)\n if config.include_validation or config.include_validation is None:\n val_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.val_tfrecords)\n else:\n train_babas_tfrecord_dir = None\n val_babas_tfrecord_dir = None\n\n if isinstance(config.include_validation, basestring):\n validation_data = config.include_validation\n elif config.include_validation == True:\n validation_data = os.path.join(\n config.tfrecord_dir,\n config.val_tfrecords)\n else:\n validation_data = None\n\n print 'Using training set: %s' % train_data\n print 'Using validation set: %s' % validation_data\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_data_dict = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background=config.augment_background,\n background_folder=config.background_folder,\n randomize_background=config.randomize_background,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=train_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n train_data_dict['deconv_label_size'] = len(config.labels)\n\n val_data_dict = inputs(\n tfrecord_file=validation_data,\n batch_size=config.validation_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n 
image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background='none',\n background_folder=config.background_folder,\n randomize_background=None,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=val_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n val_data_dict['deconv_label_size'] = len(config.labels)\n\n # Check output_shape\n if config.selected_joints is not None:\n print 'Targeting joint: %s' % config.selected_joints\n joint_shape = len(config.selected_joints) * config.keep_dims\n if (config.num_classes // config.keep_dims) > (joint_shape):\n print 'New target size: %s' % joint_shape\n config.num_classes = joint_shape\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n print 'Creating training graph:'\n model = model_file.model_struct(\n weight_npy_path=config.weight_npy_path)\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n rgb=train_data_dict['image'],\n target_variables=train_data_dict,\n train_mode=train_mode,\n batchnorm=config.batch_norm)\n train_mu, train_var = tf.nn.moments(train_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"train image mean\", train_mu)\n tf.summary.histogram(\"train image std\", tf.sqrt(train_var))\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv train', model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(model.deconv, axis=3), tf.float32), 3))\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n print 'Creating validation graph:'\n val_model = model_file.model_struct()\n val_model.build(\n rgb=val_data_dict['image'],\n target_variables=val_data_dict)\n\n # Calculate validation accuracy\n val_mu, val_var = tf.nn.moments(val_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"validation image mean\", val_mu)\n tf.summary.histogram(\"validation image std\", tf.sqrt(val_var))\n if 'label' in val_data_dict.keys():\n # val_score = tf.reduce_mean(\n # tf_fun.l2_loss(\n # val_model.output, val_data_dict['label']))\n if config.keep_dims == 3:\n z_mask = tf.expand_dims(tf.tile([1, 1, 0], [int(val_data_dict['label'].get_shape()[-1]) // 3]), axis=0)\n z_mask = tf.cast(z_mask, tf.float32)\n val_model.output = val_model.output * z_mask\n val_data_dict['label'] = val_data_dict['label'] * z_mask \n val_score = tf.reduce_mean(tf.nn.l2_loss(val_model.output - val_data_dict['label']))\n tf.summary.scalar(\"validation mse\", val_score)\n if 'fc' in config.aux_losses:\n tf.summary.image('FC val activations', val_model.final_fc)\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv val', val_model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(val_model.deconv, axis=3),\n tf.float32), 3))\n tf.summary.image(\n 'validation images',\n tf.cast(val_data_dict['image'], tf.float32))\n\n # Prepare the loss functions:::\n 
loss_list, loss_label = [], []\n if 'label' in train_data_dict.keys():\n # 1. Joint localization loss\n if config.calculate_per_joint_loss == 'thomas':\n label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton and joint':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n loss_label += ['skeleton loss']\n delta = model['output'] - train_data_dict['label']\n proc_weights = np.asarray(\n config.dim_weight)[None,:].repeat(\n len(config.joint_names), axis=0).reshape(1, -1)\n delta *= proc_weights\n # label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n # model=model,\n # train_data_dict=train_data_dict,\n # config=config,\n # y_key='label',\n # yhat_key='output')\n # loss_list += [label_loss]\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n else:\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n loss_label += ['combined head']\n for al in loss_helper.potential_aux_losses():\n loss_list, loss_label = loss_helper.get_aux_losses(\n loss_list=loss_list,\n loss_label=loss_label,\n train_data_dict=train_data_dict,\n model=model,\n aux_loss_dict=al,\n domain_adaptation=train_babas_tfrecord_dir)\n loss = tf.add_n(loss_list)\n\n # Add wd if necessary\n if config.wd_penalty is not None:\n _, l2_wd_layers = tf_fun.fine_tune_prepare_layers(\n tf.trainable_variables(), config.wd_layers)\n l2_wd_layers = [\n x for x in l2_wd_layers if 'biases' not in x.name]\n if config.wd_type == 'l1':\n loss += (config.wd_penalty * tf.add_n(\n [tf.reduce_sum(tf.abs(x)) for x in l2_wd_layers]))\n elif config.wd_type == 'l2':\n loss += (config.wd_penalty * tf.add_n(\n [tf.nn.l2_loss(x) for x in l2_wd_layers]))\n\n optimizer = loss_helper.return_optimizer(config.optimizer)\n optimizer = optimizer(config.lr)\n\n if hasattr(config, 'fine_tune_layers') and config.fine_tune_layers is not None:\n print 'Finetuning learning for: %s' % config.fine_tune_layers\n train_op, grads = tf_fun.finetune_learning(\n loss,\n trainables=tf.trainable_variables(),\n fine_tune_layers=config.fine_tune_layers,\n config=config\n )\n else:\n # Op to calculate every variable gradient\n grads = optimizer.compute_gradients(\n loss, tf.trainable_variables())\n # Op to update all variables according to their gradient\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads)\n\n # Summarize all gradients and weights\n [tf.summary.histogram(\n var.name + '/gradient', grad)\n for grad, var in grads if grad is not None]\n # train_op = optimizer.minimize(loss)\n\n # Summarize losses\n [tf.summary.scalar(lab, il) for lab, il in zip(\n loss_label, loss_list)]\n\n # Summarize images and l1 weights\n tf.summary.image(\n 'train images',\n tf.cast(train_data_dict['image'], tf.float32))\n tf_fun.add_filter_summary(\n trainables=tf.trainable_variables(),\n target_layer='conv1_1_filters')\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n 
tf.add_to_collection('output', model.output)\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Create list of variables to run through training model\n train_session_vars = {\n 'train_op': train_op,\n 'loss_value': loss,\n 'im': train_data_dict['image'],\n 'yhat': model.output,\n 'ytrue': train_data_dict['label']\n }\n if hasattr(model, 'deconv'):\n train_session_vars['deconv'] = model.deconv\n if hasattr(model, 'final_fc'):\n train_session_vars['fc'] = model.final_fc\n\n # Create list of variables to run through validation model\n val_session_vars = {\n 'val_acc': val_score,\n 'val_pred': val_model.output,\n 'val_ims': val_data_dict['image'],\n 'val_true': val_data_dict['label'],\n }\n\n # Create list of variables to save to numpys\n save_training_vars = [\n 'im',\n 'yhat',\n 'ytrue',\n 'yhat'\n ]\n\n for al in loss_helper.potential_aux_losses():\n if al.keys()[0] in train_data_dict.keys():\n y_key = '%s' % al.keys()[0]\n train_session_vars[y_key] = train_data_dict[al.values()[0]['y_name']]\n save_training_vars += [y_key]\n\n yhat_key = '%s_hat' % al.keys()[0]\n train_session_vars[yhat_key] = model[al.values()[0]['model_name']]\n save_training_vars += [yhat_key]\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, losses = 0, []\n num_joints = int(\n train_data_dict['label'].get_shape()[-1]) // config.keep_dims\n normalize_vec = tf_fun.get_normalization_vec(config, num_joints)\n if config.resume_from_checkpoint is not None:\n if '.ckpt' in config.resume_from_checkpoint:\n ckpt = config.resume_from_checkpoint\n 'Restoring specified checkpoint: %s' % config.resume_from_checkpoint\n else:\n ckpt = tf.train.latest_checkpoint(config.resume_from_checkpoint)\n print 'Evaluating checkpoint: %s' % ckpt\n saver.restore(sess, ckpt)\n try:\n while not coord.should_stop():\n start_time = time.time()\n train_out_dict = sess.run(train_session_vars.values())\n train_out_dict = {k: v for k, v in zip(\n train_session_vars.keys(), train_out_dict)}\n losses.append(train_out_dict['loss_value'])\n duration = time.time() - start_time\n assert not np.isnan(\n train_out_dict['loss_value']), 'Model diverged with loss = NaN'\n if step % config.steps_before_validation == 0:\n if validation_data is not False:\n val_out_dict = sess.run(\n val_session_vars.values())\n val_out_dict = {k: v for k, v in zip(\n val_session_vars.keys(), val_out_dict)}\n # if config.normalize_labels:\n # val_out_dict['val_pred'] *= normalize_vec\n # val_out_dict['val_true'] *= normalize_vec\n np.savez(\n os.path.join(\n results_dir, '%s_val_coors' % step),\n val_pred=val_out_dict['val_pred'],\n val_ims=val_out_dict['val_ims'],\n val_true=val_out_dict['val_true'],\n normalize_vec=normalize_vec)\n with open(\n os.path.join(\n results_dir, '%s_config.p' % step), 'wb') as fp:\n pickle.dump(config, fp)\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy attach 9177\n format_str = (\n '%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch) | '\n 'Validation l2 loss = %s | logdir = %s')\n print (format_str % 
(\n datetime.now(), step, train_out_dict['loss_value'],\n config.train_batch / duration, float(duration),\n val_out_dict['val_acc'],\n config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if config.normalize_labels:\n train_out_dict['yhat'] *= normalize_vec\n train_out_dict['ytrue'] *= normalize_vec\n [save_training_data(\n output_dir=results_dir,\n data=train_out_dict[k],\n name='%s_%s' % (k, step)) for k in save_training_vars]\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch)')\n print (format_str % (\n datetime.now(),\n step,\n train_out_dict['loss_value'],\n config.train_batch / duration,\n float(duration)))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%s_training_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def train_and_evaluate(\n model_name: str,\n job_log_dir: Optional[str],\n multi_host_checkpointing: Optional[bool],\n maybe_use_persistence_checkpointing: bool,\n restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n eval_on_test: Optional[bool],\n checkpoint_todelete_subdir: Optional[str] = None) -> None:\n model_config = model_utils.get_model(model_name)()\n _write_params_file(model_config, job_log_dir)\n task_p = model_config.task()\n\n input_p = model_config.datasets()\n # Note that we modify input params below with runtime information, therefore\n # model_config.dataset() should not be called again as it won't have the\n # correct runtime information populated.\n for inp in input_p:\n if not isinstance(inp, base_input.BaseInputParams):\n raise ValueError('Expecting BaseInputParams from datasets(), got: '\n f'{inp.ToText()}')\n inp.num_infeed_hosts = jax.process_count()\n inp.infeed_host_index = jax.process_index()\n train_input_p = [v for v in input_p if v.is_training]\n if len(train_input_p) != 1:\n raise ValueError(\n f'Expecting exactly one training split. 
Got `{len(train_input_p)}`.')\n train_input_p = train_input_p[0]\n logging.info('train_input_p=%s', train_input_p.ToText())\n eval_input_p = None\n if eval_on_test:\n eval_input_p = [v for v in input_p if not v.is_training]\n\n checkpoint_type = checkpoints.retrieve_checkpoint_type(\n multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)\n\n checkpoint_manager = _create_checkpoint_manager(model_name, task_p,\n job_log_dir, checkpoint_type,\n checkpoint_todelete_subdir)\n\n if task_p.model.device_mesh is not None:\n train_and_evaluate_spmd_model(task_p, train_input_p, job_log_dir,\n checkpoint_manager, checkpoint_type,\n restore_checkpoint_dir,\n restore_checkpoint_step, eval_input_p)\n else:\n train_and_evaluate_pmap(task_p, train_input_p, job_log_dir,\n checkpoint_manager, restore_checkpoint_dir,\n restore_checkpoint_step, eval_input_p)", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def 
load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_checkpoint(path: str, save_dir: str, cuda: bool = False, attention_viz: bool = False) -> nn.Module:\r\n # Load model and args\r\n state = torch.load(path, map_location=lambda storage, loc: storage)\r\n args, loaded_state_dict = state['args'], state['state_dict']\r\n\r\n # Update args with current args\r\n args.cuda = cuda\r\n args.attention_viz = attention_viz\r\n args.save_dir = save_dir\r\n\r\n model = build_model(args)\r\n model.load_state_dict(loaded_state_dict)\r\n\r\n if cuda:\r\n print('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n return model", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def try_load_checkpoint_for_model(self) -> bool:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n 
print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def train(model, infer_train, infer_val, load_checkpoint=None):\n\n global checkpoint_name\n print('Initialising {}'.format(cfg['experiment_name']))\n checkpoint_folder = 'checkpoints/{}/'.format(cfg['experiment_name'])\n\n if not os.path.exists(checkpoint_folder):\n os.makedirs(checkpoint_folder)\n\n tb_folder = 'tb/{}/'.format(cfg['experiment_name'])\n if not os.path.exists(tb_folder):\n os.makedirs(tb_folder)\n\n writer = SummaryWriter(logdir=tb_folder, flush_secs=30)\n optimiser = Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=cfg['weight_decay'])\n\n train_dataset = TweetDataset(dataset_type='train')\n train_loader = DataLoader(train_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=True, pin_memory=True)\n\n val_dataset = TweetDataset(dataset_type='val')\n val_loader = DataLoader(val_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=False, pin_memory=True)\n\n if load_checkpoint:\n checkpoint = torch.load(load_checkpoint)\n assert model.config == checkpoint['net_config'], \\\n \"The provided checkpoint has a different configuration, loading is impossible\"\n start_epoch = checkpoint['epoch'] + 1\n epochs = cfg['epochs'] + start_epoch\n step = checkpoint['step']\n model.load_state_dict(checkpoint['model'])\n optimiser.load_state_dict(checkpoint['optimiser'])\n print(\"Loaded the checkpoint at {}\".format(load_checkpoint))\n else:\n start_epoch, step = 0, 0\n epochs = cfg['epochs']\n\n init_loss = 0.\n avg_loss = AverageMeter()\n best_mae = 1e10\n\n print('Sanity val')\n val(model, val_loader, writer, 0, infer_val)\n model.train()\n\n print('Starting training')\n for epoch in range(start_epoch, epochs):\n loader_length = len(train_loader)\n epoch_start = time.time()\n\n for batch_idx, batch in enumerate(train_loader):\n optimiser.zero_grad()\n\n loss = infer_train(model, batch)\n loss.backward()\n\n if epoch == 0 and batch_idx == 0:\n init_loss = loss\n\n # logging\n elapsed = time.time() - epoch_start\n progress = batch_idx / loader_length\n est = datetime.timedelta(seconds=int(elapsed / progress)) if progress > 0.001 else '-'\n avg_loss.update(loss)\n suffix = '\\tloss {:.4f}/{:.4f}\\tETA [{}/{}]'.format(avg_loss.avg, init_loss,\n datetime.timedelta(seconds=int(elapsed)), est)\n printProgressBar(batch_idx, loader_length, suffix=suffix,\n prefix='Epoch [{}/{}]\\tStep [{}/{}]'.format(epoch, epochs - 1, batch_idx, loader_length))\n\n writer.add_scalar('Steps/train_loss', loss, step)\n\n # saving the model\n if step % cfg['checkpoint_every'] == 0:\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': 
model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n step += 1\n optimiser.step()\n\n # validating\n if step % cfg['val_every'] == 0:\n mae = val(model, val_loader, writer, step, infer_val)\n if mae < best_mae:\n best_mae = mae\n print('Best model with V{:.2f}'.format(best_mae))\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n '{}/best.pth'.format(checkpoint_folder))\n model.train()\n\n # end of epoch\n print('')\n writer.add_scalar('Epochs/train_loss', avg_loss.avg, epoch)\n avg_loss.reset()\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': loader_length, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n\n # finished training\n writer.close()\n print('Training finished :)')", "def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load_model(config, batchmanager):\n \n # this function returns a dictionary mapping\n # name of the task (string) --> number of classes in the task (int)\n tasks = batchmanager.getTasksWithNClasses()\n # this \"tasks\" object is used to initialize the model (with the right output layers)\n model = MultiTaskBERT(device = config.device, tasks = tasks)\n\n if not config.untrained_baseline:\n\n # if we evaluate only, model MUST be loaded.\n if config.k_shot_only:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except 
Exception:\n print(f\"WARNING: the `--k_shot_only` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n raise Exception()\n \n # if we saved the state dictionary, load it.\n elif config.resume:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def load_model_from_checkpoint(file, device):\r\n\r\n if device == 'cuda':\r\n # Load all tensors onto GPU\r\n map_location = lambda storage, loc: storage.cuda()\r\n else:\r\n # Load all tensors onto CPU\r\n map_location = lambda storage, loc: storage\r\n\r\n # Assuming model was trained and checkpoint saved on Linux, but predict.py inference is executed using Windows.\r\n # Then, it is required to implement the following quick fix, because otherwise the exception is raised:\r\n # \"NotImplementedError: cannot instantiate 'PosixPath' on your system\"\r\n # Credits to https://stackoverflow.com/questions/57286486/i-cant-load-my-model-because-i-cant-put-a-posixpath\r\n if type(file) == pathlib.WindowsPath:\r\n tmp_PosixPath = pathlib.PosixPath\r\n pathlib.PosixPath = pathlib.WindowsPath\r\n\r\n parameters = torch.load(file, map_location=map_location)\r\n\r\n # Restore default\r\n if type(file) == pathlib.WindowsPath:\r\n pathlib.WindowsPath = pathlib.PosixPath\r\n pathlib.PosixPath = tmp_PosixPath\r\n\r\n model = train.create_model(parameters)\r\n\r\n model.class_to_idx = parameters.get('train_datasets_class_to_idx')\r\n model.load_state_dict(parameters.get('state_dict'), strict=False)\r\n\r\n return model, parameters", "def train_model_by_config(\n checkpoint: int,\n config: lmp.config.BaseConfig,\n dataset: lmp.dataset.BaseDataset,\n model: lmp.model.BaseRNNModel,\n optimizer: Union[\n torch.optim.SGD,\n torch.optim.Adam,\n ],\n tokenizer: lmp.tokenizer.BaseTokenizer,\n):\n # Create collate_fn for sampling.\n collate_fn = lmp.dataset.BaseDataset.create_collate_fn(\n tokenizer=tokenizer,\n max_seq_len=config.max_seq_len\n )\n\n # `torch` utility for sampling.\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=config.batch_size,\n shuffle=True,\n collate_fn=collate_fn\n )\n\n train_model(\n checkpoint=checkpoint,\n checkpoint_step=config.checkpoint_step,\n data_loader=data_loader,\n device=config.device,\n epoch=config.epoch,\n experiment=config.experiment,\n max_norm=config.max_norm,\n model=model,\n optimizer=optimizer,\n vocab_size=tokenizer.vocab_size\n )", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n 
if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):\n config = copy.deepcopy(config) # ensure the reference model is not passed to tests\n\n save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))\n config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})\n\n trainer = Trainer(**config)\n trainer.fit()\n\n self.reference_model = trainer.state.model\n self.reference_folder = save_folder", "def create_model( session, batch_size ):\n model = linear_model.LinearModel(\n FLAGS.linear_size,\n FLAGS.num_layers,\n FLAGS.residual,\n FLAGS.batch_norm,\n FLAGS.max_norm,\n batch_size,\n FLAGS.learning_rate,\n FLAGS.origin_bc,\n summaries_dir,\n dtype=tf.float16 if FLAGS.use_fp16 else tf.float32)\n\n if FLAGS.load <= 0:\n # Create a new model from scratch\n print(\"Creating model with fresh parameters.\")\n session.run( tf.global_variables_initializer() )\n return model\n\n # Load a previously saved model\n ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename=\"checkpoint\")\n print( \"train_dir\", train_dir )\n\n if ckpt and 
ckpt.model_checkpoint_path:\n # Check if the specific cpixels = pixels / pixels[2,:]heckpoint exists\n if FLAGS.load > 0:\n if os.path.isfile(os.path.join(train_dir,\"checkpoint-{0}.index\".format(FLAGS.load))):\n ckpt_name = os.path.join( os.path.join(train_dir,\"checkpoint-{0}\".format(FLAGS.load)) )\n else:\n raise ValueError(\"Asked to load checkpoint {0}, but it does not seem to exist\".format(FLAGS.load))\n else:\n ckpt_name = os.path.basename( ckpt.model_checkpoint_path )\n\n print(\"Loading model {0}\".format( ckpt_name ))\n model.saver.restore( session, ckpt.model_checkpoint_path )\n return model\n else:\n print(\"Could not find checkpoint. Aborting.\")\n raise( ValueError, \"Checkpoint {0} does not seem to exist\".format( ckpt.model_checkpoint_path ) )\n\n return model", "def main(cfg, logger):\n\n # Initialize parameters\n model_selection_metric = cfg['train']['model_selection_metric']\n\n if cfg['train']['model_selection_mode'] == 'maximize':\n model_selection_sign = 1\n elif cfg['train']['model_selection_mode'] == 'minimize':\n model_selection_sign = -1\n else:\n raise ValueError(\n 'model_selection_mode must be either maximize or minimize.')\n\n # Get data loader\n train_loader = make_data_loader(cfg, phase='train')\n val_loader = make_data_loader(cfg, phase='val')\n\n # Set up tensorboard logger\n tboard_logger = SummaryWriter(os.path.join(cfg['misc']['log_dir'], 'logs'))\n\n # Get model\n model = config.get_model(cfg)\n\n # Get optimizer and trainer\n optimizer = getattr(optim, cfg['optimizer']['alg'])(model.parameters(), lr=cfg['optimizer']['learning_rate'],\n weight_decay=cfg['optimizer']['weight_decay'])\n\n trainer = config.get_trainer(cfg, model, optimizer, tboard_logger)\n\n # Load pre-trained model if existing\n kwargs = {\n 'model': model,\n 'optimizer': optimizer,\n }\n\n checkpoint_io = CheckpointIO(cfg['misc']['log_dir'], initialize_from=cfg['model']['init_from'],\n initialization_file_name=cfg['model']['init_file_name'], **kwargs)\n\n try:\n load_dict = checkpoint_io.load('model.pt')\n except FileExistsError:\n load_dict = dict()\n\n epoch_it = load_dict.get('epoch_it', -1)\n it = load_dict.get('it', -1)\n\n metric_val_best = load_dict.get(\n 'loss_val_best', -model_selection_sign * np.inf)\n\n if metric_val_best == np.inf or metric_val_best == -np.inf:\n metric_val_best = -model_selection_sign * np.inf\n\n logger.info('Current best validation metric ({}): {:.5f}'.format(\n model_selection_metric, metric_val_best))\n\n # Training parameters\n stat_interval = cfg['train']['stat_interval']\n stat_interval = stat_interval if stat_interval > 0 else abs(\n stat_interval * len(train_loader))\n\n chkpt_interval = cfg['train']['chkpt_interval']\n chkpt_interval = chkpt_interval if chkpt_interval > 0 else abs(\n chkpt_interval * len(train_loader))\n\n val_interval = cfg['train']['val_interval']\n val_interval = val_interval if val_interval > 0 else abs(\n val_interval * len(train_loader))\n\n # Print model parameters and model graph\n nparameters = sum(p.numel() for p in model.parameters())\n # print(model)\n logger.info('Total number of parameters: {}'.format(nparameters))\n\n # Training loop\n while epoch_it < cfg['train']['max_epoch']:\n epoch_it += 1\n\n for batch in train_loader:\n it += 1\n loss = trainer.train_step(batch, it)\n tboard_logger.add_scalar('train/loss', loss, it)\n\n # Print output\n if stat_interval != 0 and (it % stat_interval) == 0 and it != 0:\n logger.info('[Epoch {}] it={}, loss={:.4f}'.format(\n epoch_it, it, loss))\n\n # Save checkpoint\n if 
(chkpt_interval != 0 and (it % chkpt_interval) == 0) and it != 0:\n logger.info('Saving checkpoint')\n checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,\n loss_val_best=metric_val_best)\n\n # Run validation\n if val_interval != 0 and (it % val_interval) == 0 and it != 0:\n eval_dict = trainer.evaluate(val_loader, it)\n\n metric_val = eval_dict[model_selection_metric]\n logger.info('Validation metric ({}): {:.4f}'.format(\n model_selection_metric, metric_val))\n\n for k, v in eval_dict.items():\n tboard_logger.add_scalar('val/{}'.format(k), v, it)\n\n if model_selection_sign * (metric_val - metric_val_best) > 0:\n metric_val_best = metric_val\n logger.info(\n 'New best model (loss {:.4f})'.format(metric_val_best))\n checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,\n loss_val_best=metric_val_best)\n\n # Quit after the maximum number of epochs is reached\n logger.info('Training completed after {} Epochs ({} it) with best val metric ({})={}'.format(\n epoch_it, it, model_selection_metric, metric_val_best))", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in 
all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def load_pretrained_model(self, model_path):\n # My eyes and my heart both hurt when writing this method\n\n # Only care about layer_types that have trainable parameters\n ltypes = ['BNData', 'ConvolutionData', 'HoleConvolutionData']\n\n def _get_layer_params(layer, ltype):\n\n if ltype == 'BNData': \n n_channels = layer.blobs[0].shape.dim[1]\n gamma = np.array(layer.blobs[0].data).reshape(n_channels)\n beta = np.array(layer.blobs[1].data).reshape(n_channels)\n mean = np.array(layer.blobs[2].data).reshape(n_channels)\n var = np.array(layer.blobs[3].data).reshape(n_channels)\n return [mean, var, gamma, beta]\n\n elif ltype in ['ConvolutionData', 'HoleConvolutionData']:\n is_bias = layer.convolution_param.bias_term\n shape = [int(d) for d in layer.blobs[0].shape.dim]\n weights = np.array(layer.blobs[0].data).reshape(shape)\n bias = []\n if is_bias:\n bias = np.array(layer.blobs[1].data).reshape(shape[0])\n return [weights, bias]\n \n elif ltype == 'InnerProduct':\n raise Exception(\"Fully connected layers {}, not supported\".format(ltype))\n\n else:\n raise Exception(\"Unkown layer type {}\".format(ltype))\n\n\n net = caffe_pb2.NetParameter()\n with open(model_path, 'rb') as model_file:\n net.MergeFromString(model_file.read())\n\n # dict formatted as -> key:<layer_name> :: value:<layer_type>\n layer_types = {}\n # dict formatted as -> key:<layer_name> :: value:[<list_of_params>]\n layer_params = {}\n\n for l in net.layer:\n lname = l.name\n ltype = l.type\n if ltype in ltypes:\n print(\"Processing layer {}\".format(lname))\n layer_types[lname] = ltype\n layer_params[lname] = _get_layer_params(l, ltype)\n\n # Set affine=False for all batchnorm modules\n def 
_no_affine_bn(module=None):\n if isinstance(module, nn.BatchNorm2d):\n module.affine = False\n\n if len([m for m in module.children()]) > 0:\n for child in module.children():\n _no_affine_bn(child)\n\n #_no_affine_bn(self)\n\n\n def _transfer_conv(layer_name, module):\n weights, bias = layer_params[layer_name]\n w_shape = np.array(module.weight.size())\n \n np.testing.assert_array_equal(weights.shape, w_shape)\n print(\"CONV: Original {} and trans weights {}\".format(w_shape,\n weights.shape))\n module.weight.data = torch.from_numpy(weights)\n\n if len(bias) != 0:\n b_shape = np.array(module.bias.size())\n np.testing.assert_array_equal(bias.shape, b_shape)\n print(\"CONV: Original {} and trans bias {}\".format(b_shape,\n bias.shape))\n module.bias.data = torch.from_numpy(bias)\n\n\n def _transfer_conv_bn(conv_layer_name, mother_module):\n conv_module = mother_module[0]\n bn_module = mother_module[1]\n \n _transfer_conv(conv_layer_name, conv_module)\n \n mean, var, gamma, beta = layer_params[conv_layer_name+'/bn']\n print(\"BN: Original {} and trans weights {}\".format(bn_module.running_mean.size(),\n mean.shape))\n bn_module.running_mean = torch.from_numpy(mean)\n bn_module.running_var = torch.from_numpy(var)\n bn_module.weight.data = torch.from_numpy(gamma)\n bn_module.bias.data = torch.from_numpy(beta)\n\n\n def _transfer_residual(prefix, block):\n block_module, n_layers = block[0], block[1]\n\n bottleneck = block_module.layers[0]\n bottleneck_conv_bn_dic = {prefix + '_1_1x1_reduce': bottleneck.cbr1.cbr_unit,\n prefix + '_1_3x3': bottleneck.cbr2.cbr_unit,\n prefix + '_1_1x1_proj': bottleneck.cb4.cb_unit,\n prefix + '_1_1x1_increase': bottleneck.cb3.cb_unit,}\n\n for k, v in bottleneck_conv_bn_dic.items():\n _transfer_conv_bn(k, v)\n\n for layer_idx in range(2, n_layers+1):\n residual_layer = block_module.layers[layer_idx-1]\n residual_conv_bn_dic = {'_'.join(map(str, [prefix, layer_idx, '1x1_reduce'])): residual_layer.cbr1.cbr_unit,\n '_'.join(map(str, [prefix, layer_idx, '3x3'])): residual_layer.cbr2.cbr_unit,\n '_'.join(map(str, [prefix, layer_idx, '1x1_increase'])): residual_layer.cb3.cb_unit,} \n \n for k, v in residual_conv_bn_dic.items():\n _transfer_conv_bn(k, v)\n\n\n convbn_layer_mapping = {'conv1_1_3x3_s2': self.convbnrelu1_1.cbr_unit,\n 'conv1_2_3x3': self.convbnrelu1_2.cbr_unit,\n 'conv1_3_3x3': self.convbnrelu1_3.cbr_unit,\n 'conv5_3_pool6_conv': self.pyramid_pooling.paths[0].cbr_unit, \n 'conv5_3_pool3_conv': self.pyramid_pooling.paths[1].cbr_unit,\n 'conv5_3_pool2_conv': self.pyramid_pooling.paths[2].cbr_unit,\n 'conv5_3_pool1_conv': self.pyramid_pooling.paths[3].cbr_unit,\n 'conv5_4': self.cbr_final.cbr_unit,}\n\n residual_layers = {'conv2': [self.res_block2, self.block_config[0]],\n 'conv3': [self.res_block3, self.block_config[1]],\n 'conv4': [self.res_block4, self.block_config[2]],\n 'conv5': [self.res_block5, self.block_config[3]],}\n\n # Transfer weights for all non-residual conv+bn layers\n for k, v in convbn_layer_mapping.items():\n _transfer_conv_bn(k, v)\n\n # Transfer weights for final non-bn conv layer\n _transfer_conv('conv6', self.classification)\n\n # Transfer weights for all residual layers\n for k, v in residual_layers.items():\n _transfer_residual(k, v)", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = 
checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_params(self, params):\n params.cp_latest_filename = \"latest_checkpoint_v\"+params.version\n params.cp_load_latest_filename = \"latest_checkpoint_v\"+params.cp_load_ver\n params.cp_load_dir = params.out_dir + params.cp_load_name+ \"/checkpoints/\"\n if not hasattr(params, \"model_out_dir\"):\n params.model_out_dir = params.out_dir + params.model_name\n params.cp_save_dir = params.model_out_dir + \"/checkpoints/\"\n params.log_dir = params.model_out_dir + \"/logfiles/\"\n params.save_dir = params.model_out_dir + \"/savefiles/\"\n params.disp_dir = params.model_out_dir + \"/vis/\"\n params.num_pixels = int(np.prod(params.data_shape))\n self.params = params\n self.params_loaded = True", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. 
If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def build_graph_from_config(self, model_config, track_config, checkpoint_path):\n self.build_model()\n \n ema = tf.train.ExponentialMovingAverage(0)\n variables_to_restore = ema.variables_to_restore(moving_avg_variables=[])\n\n # Filter out State variables\n variables_to_restore_filterd = {}\n for key, value in variables_to_restore.items():\n if key.split('/')[1] != 'State':\n if \"alex_branch\" not in key:\n if \"vggf_branch\" not in key:\n variables_to_restore_filterd[key] = value\n \n saver = tf.train.Saver(variables_to_restore_filterd)\n \n\n if osp.isdir(checkpoint_path):\n #checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_path:\n raise ValueError(\"No checkpoint file found in: {}\".format(checkpoint_path))\n\n def _restore_fn(sess):\n 
logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n saver.restore(sess, checkpoint_path)\n logging.info(\"Successfully loaded checkpoint: %s\", os.path.basename(checkpoint_path))\n logging.info(\"Restore CANet...\")\n\n return _restore_fn", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def train(self, resume_from_checkpoint: Optional[Union[str, bool]] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, **kwargs):\n\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n args = self.args\n\n self.is_in_train = True\n\n # do_train is not a reliable argument, as it might not be set and .train() still called, so\n # the following is a workaround:\n if args.fp16_full_eval and not args.do_train:\n self.model = self.model.to(args.device)\n\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n if len(kwargs) > 0:\n raise TypeError(f\"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:\n resume_from_checkpoint = get_last_checkpoint(args.output_dir)\n if resume_from_checkpoint is None:\n raise ValueError(f\"No valid checkpoint found in output directory ({args.output_dir})\")\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if self.place_model_on_device:\n self.model = 
self.model.to(args.device)\n self.model_wrapped = self.model\n\n # Keeping track whether we can len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n else:\n # see __init__. max_steps is set when the dataset has no __len__\n max_steps = args.max_steps\n num_train_epochs = int(args.num_train_epochs)\n num_update_steps_per_epoch = max_steps\n\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n model = self._wrap_model(self.model_wrapped)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # Train!\n world_size = 1 # number of processes in parallel\n\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * world_size\n num_examples = (self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, \"trainer_state.json\")):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (len(epoch_iterator) if train_dataset_is_sized else args.max_steps * 
args.gradient_accumulation_steps)\n self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args, self.state, self.control)\n\n tr_loss += self.custom_training_step(model, inputs)\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n # Optimizer step\n optimizer_was_run = True\n self.optimizer.step()\n\n if optimizer_was_run:\n self.lr_scheduler.step()\n\n model.zero_grad()\n\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\")\n\n # We load the model state dict on the CPU to avoid an OOM error.\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location=\"cpu\")\n # If the model is on the GPU, it still works!\n self.model.load_state_dict(state_dict)\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)", "def run(self):\n # Get the checkpoint file\n print('loading checkpoint file ...')\n cp = torch.load(self.cfg.work_dir + '/latest.pth')\n print('done')\n\n print('loading state dictionary ...')\n # Initialize network first as separate modules so we can access WFCOS\n backbone = build_backbone(self.cfg.model.backbone).cuda()\n neck = build_neck(self.cfg.model.neck).cuda()\n head = build_head(self.cfg.model.bbox_head).cuda()\n\n # Load the state dicts\n backbone_state = OrderedDict()\n neck_state = OrderedDict()\n head_state = OrderedDict()\n\n for key in cp['state_dict'].keys():\n if 'backbone' in key:\n backbone_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n elif 'neck' in key:\n neck_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n elif 'bbox_head' in key:\n head_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n\n backbone.load_state_dict(backbone_state)\n neck.load_state_dict(neck_state)\n head.load_state_dict(head_state)\n\n # Set to eval mode\n backbone.eval()\n neck.eval()\n head.eval()\n\n print('done')\n\n print('starting inference validation run ...')\n for i, (img, cls) in enumerate(self.loader):\n out = backbone(img)\n out = neck(out)\n out = head(out)\n\n img_metas = [{'img_shape': (640, 800),\n 'scale_factor': 1}]\n bboxes = head.get_bboxes(out[0], out[1], out[2], img_metas,\n self.cfg.test_cfg)\n pass\n print('done')", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if 
\"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def initialize_model_from_cfg(args, gpu_id=0):\n model = model_builder.Generalized_RCNN()\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_caffe2_detectron_weights(model, args.load_detectron)\n\n model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n\n return model", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def train_model(\n checkpoint: int,\n checkpoint_step: int,\n data_loader: torch.utils.data.DataLoader,\n device: torch.device,\n epoch: int,\n experiment: str,\n max_norm: float,\n model: lmp.model.BaseRNNModel,\n optimizer: Union[\n torch.optim.SGD,\n torch.optim.Adam,\n ],\n vocab_size: int\n):\n # Set experiment output folder.\n file_dir = f'{lmp.path.DATA_PATH}/{experiment}'\n\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n\n # Set experiment log folder.\n writer = torch.utils.tensorboard.SummaryWriter(\n f'{lmp.path.DATA_PATH}/log/{experiment}'\n )\n\n # Define objective function.\n criterion = torch.nn.CrossEntropyLoss()\n\n # Step = number of updates.\n # Every update must increment `step`.\n step = 0\n\n # Set model to train mode.\n model.train()\n\n # Clean up gradient in model parameters.\n model.zero_grad()\n\n # Initialize total loss.\n total_loss = 0.0\n\n for cur_epoch in range(epoch):\n\n epoch_iterator = tqdm(\n data_loader,\n desc=f'epoch: {cur_epoch}, loss: {0:.6f}'\n )\n\n for x, y in epoch_iterator:\n # Increment step for each update.\n step += 1\n\n # Continue training from previous checkpoint step.\n if step < checkpoint:\n continue\n\n # Put tensors on to specified device (CPU or GPU). 
Reshape `y` into\n # shape (B x S) for cross-entropy.\n # x.size = (B, S)\n # y.size = (B x S)\n x = x.to(device)\n y = y.reshape(-1).to(device)\n\n # Forward pass.\n # pred_y_logits.size = (B, S, V)\n pred_y_logits = model(x)\n\n # Reshape `pred_y_logits` into shape (B x S, V) for cross-entropy.\n pred_y_logits = pred_y_logits.reshape(-1, vocab_size)\n\n # Perform cross-entropy.\n loss = criterion(pred_y_logits, y)\n\n # Calculate total loss.\n total_loss += loss.item()\n\n # Log loss.\n epoch_iterator.set_description(\n f'epoch: {cur_epoch}, loss: {loss.item():.6f}'\n )\n\n # Backward pass.\n loss.backward()\n\n # Perform gradient clipping to avoid gradient explosion.\n torch.nn.utils.clip_grad_norm_(\n model.parameters(),\n max_norm\n )\n\n # Gradient descent.\n optimizer.step()\n\n # `torch` required manually clean up gradient.\n optimizer.zero_grad()\n\n # Save checkpoint for each `checkpoint_step`.\n if step % checkpoint_step == 0:\n torch.save(\n model.state_dict(),\n os.path.join(\n file_dir,\n f'model-{step}.pt'\n )\n )\n torch.save(\n optimizer.state_dict(),\n os.path.join(\n file_dir,\n f'optimizer-{step}.pt'\n )\n )\n # Log average loss.\n writer.add_scalar(\n f'{experiment}/loss',\n total_loss / checkpoint_step,\n step\n )\n total_loss = 0.0\n\n # Save last checkpoint.\n torch.save(\n model.state_dict(),\n os.path.join(\n file_dir,\n f'model-{step}.pt'\n )\n )\n torch.save(\n optimizer.state_dict(),\n os.path.join(\n file_dir,\n f'optimizer-{step}.pt'\n )\n )", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def train(self, config, **kwargs):\n\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n outputdir = Path(\n config_parameters['outputpath'], config_parameters['model'],\n \"{}_{}\".format(\n datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%m'),\n uuid.uuid1().hex[:8]))\n # Early init because of creating dir\n checkpoint_handler = ModelCheckpoint(\n outputdir,\n 'run',\n n_saved=1,\n require_empty=False,\n create_dir=True,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n save_as_state_dict=False,\n score_name='loss')\n logger = utils.getfile_outlogger(Path(outputdir, 'train.log'))\n logger.info(\"Storing files in {}\".format(outputdir))\n # utils.pprint_dict\n utils.pprint_dict(config_parameters, logger.info)\n logger.info(\"Running on device {}\".format(DEVICE))\n labels_df = pd.read_csv(config_parameters['trainlabel'], sep=' ')\n labels_df['encoded'], encoder = utils.encode_labels(\n labels=labels_df['bintype'])\n train_df, cv_df = utils.split_train_cv(labels_df)\n\n transform = utils.parse_transforms(config_parameters['transforms'])\n utils.pprint_dict({'Classes': encoder.classes_},\n logger.info,\n formatter='pretty')\n utils.pprint_dict(transform, logger.info, formatter='pretty')\n if 'sampler' in config_parameters and config_parameters[\n 'sampler'] == 'MinimumOccupancySampler':\n # Asserts that each \"batch\" contains at least one instance\n train_sampler = dataset.MinimumOccupancySampler(\n 
np.stack(train_df['encoded'].values))\n\n sampling_kwargs = {\"sampler\": train_sampler, \"shuffle\": False}\n elif 'shuffle' in config_parameters and config_parameters['shuffle']:\n sampling_kwargs = {\"shuffle\": True}\n else:\n sampling_kwargs = {\"shuffle\": False}\n\n logger.info(\"Using Sampler {}\".format(sampling_kwargs))\n\n colname = config_parameters.get('colname', ('filename', 'encoded')) #\n trainloader = dataset.getdataloader(\n train_df,\n config_parameters['traindata'],\n transform=transform,\n batch_size=config_parameters['batch_size'],\n colname=colname, # For other datasets with different key names\n num_workers=config_parameters['num_workers'],\n **sampling_kwargs)\n cvdataloader = dataset.getdataloader(\n cv_df,\n config_parameters['traindata'],\n transform=None,\n shuffle=False,\n colname=colname, # For other datasets with different key names\n batch_size=config_parameters['batch_size'],\n num_workers=config_parameters['num_workers'])\n if 'pretrained' in config_parameters and config_parameters[\n 'pretrained'] is not None:\n model = models.load_pretrained(config_parameters['pretrained'],\n outputdim=len(encoder.classes_))\n else:\n model = getattr(models, config_parameters['model'],\n 'LightCNN')(inputdim=trainloader.dataset.datadim,\n outputdim=len(encoder.classes_),\n **config_parameters['model_args'])\n\n if config_parameters['optimizer'] == 'AdaBound':\n try:\n import adabound\n optimizer = adabound.AdaBound(\n model.parameters(), **config_parameters['optimizer_args'])\n except ImportError:\n logger.info(\n \"Adabound package not found, install via pip install adabound. Using Adam instead\"\n )\n config_parameters['optimizer'] = 'Adam'\n config_parameters['optimizer_args'] = {\n } # Default adam is adabount not found\n else:\n optimizer = getattr(\n torch.optim,\n config_parameters['optimizer'],\n )(model.parameters(), **config_parameters['optimizer_args'])\n\n utils.pprint_dict(optimizer, logger.info, formatter='pretty')\n utils.pprint_dict(model, logger.info, formatter='pretty')\n if DEVICE.type != 'cpu' and torch.cuda.device_count() > 1:\n logger.info(\"Using {} GPUs!\".format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n criterion = torch.nn.CrossEntropyLoss().to(DEVICE)\n model = model.to(DEVICE)\n\n precision = Precision()\n recall = Recall()\n f1_score = (precision * recall * 2 / (precision + recall)).mean()\n metrics = {\n 'Loss': Loss(criterion),\n 'Precision': precision.mean(),\n 'Recall': recall.mean(),\n 'Accuracy': Accuracy(),\n 'F1': f1_score,\n }\n\n # batch contains 3 elements, X,Y and filename. 
Filename is only used\n # during evaluation\n def _prep_batch(batch, device=DEVICE, non_blocking=False):\n x, y, _ = batch\n return (convert_tensor(x, device=device,\n non_blocking=non_blocking),\n convert_tensor(y, device=device,\n non_blocking=non_blocking))\n\n train_engine = create_supervised_trainer(model,\n optimizer=optimizer,\n loss_fn=criterion,\n prepare_batch=_prep_batch,\n device=DEVICE)\n inference_engine = create_supervised_evaluator(\n model, metrics=metrics, prepare_batch=_prep_batch, device=DEVICE)\n\n RunningAverage(output_transform=lambda x: x).attach(\n train_engine, 'run_loss') # Showing progressbar during training\n pbar = ProgressBar(persist=False)\n pbar.attach(train_engine, ['run_loss'])\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n patience=3,\n factor=0.1)\n\n @inference_engine.on(Events.COMPLETED)\n def update_reduce_on_plateau(engine):\n val_loss = engine.state.metrics['Loss']\n if 'ReduceLROnPlateau' == scheduler.__class__.__name__:\n scheduler.step(val_loss)\n else:\n scheduler.step()\n\n early_stop_handler = EarlyStopping(\n patience=5,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n trainer=train_engine)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n early_stop_handler)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n checkpoint_handler, {\n 'model': model,\n 'encoder': encoder,\n 'config': config_parameters,\n })\n\n @train_engine.on(Events.EPOCH_COMPLETED)\n def compute_validation_metrics(engine):\n inference_engine.run(cvdataloader)\n results = inference_engine.state.metrics\n output_str_list = [\n \"Validation Results - Epoch : {:<5}\".format(engine.state.epoch)\n ]\n for metric in metrics:\n output_str_list.append(\"{} {:<5.3f}\".format(\n metric, results[metric]))\n logger.info(\" \".join(output_str_list))\n pbar.n = pbar.last_print_n = 0\n\n train_engine.run(trainloader, max_epochs=config_parameters['epochs'])\n return outputdir", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model, **kwargs)\n self.path = model\n self.model = get_zennet()\n\n model_pth_path = osp.join(self.path, ModelFile.TORCH_MODEL_FILE)\n\n checkpoint = torch.load(model_pth_path, map_location='cpu')\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n self.model.load_state_dict(state_dict, strict=True)\n logger.info('load model done')", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def train_model(config, environ, train_data, test_data, trainval_data=None):\n np.random.seed(0)\n if not hasattr(config, \"seed\"):\n tf.set_random_seed(1234)\n log.info(\"Setting tensorflow random seed={:d}\".format(1234))\n else:\n log.info(\"Setting tensorflow random seed={:d}\".format(config.seed))\n tf.set_random_seed(config.seed)\n if environ.verbose:\n verbose_level = 0\n else:\n verbose_level = 2\n\n if trainval_data is None:\n trainval_data = train_data\n\n log.info(\"Environment: {}\".format(environ.__dict__))\n log.info(\"Config: {}\".format(config.__dict__))\n\n save_folder = os.path.join(environ.save_folder, environ.exp_id)\n logs_folder = os.path.join(environ.logs_folder, environ.exp_id)\n with log.verbose_level(verbose_level):\n exp_logger = ExperimentLogger(logs_folder)\n\n if not hasattr(config, \"seed\"):\n data_seed = 0\n else:\n data_seed = config.seed\n\n # Gets data iterators.\n train_iter = get_iter(\n train_data,\n batch_size=config.batch_size,\n 
shuffle=True,\n cycle=True,\n prefetch=config.prefetch,\n seed=data_seed,\n num_worker=25,\n queue_size=500)\n trainval_iter = get_iter(\n train_data,\n batch_size=config.batch_size,\n shuffle=True,\n cycle=True,\n prefetch=config.prefetch,\n num_worker=10,\n queue_size=200)\n test_iter = get_iter(\n test_data,\n batch_size=config.batch_size,\n shuffle=False,\n cycle=False,\n prefetch=config.prefetch,\n num_worker=10,\n queue_size=200)\n\n # Builds models.\n log.info(\"Building models\")\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=None):\n with tf.device(environ.device):\n if config.model.startswith(\"resnet\"):\n m = ResNetModel(config, is_training=True)\n else:\n m = CNNModel(config, is_training=True)\n\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True):\n with tf.device(environ.device):\n if config.model.startswith(\"resnet\"):\n mvalid = ResNetModel(config, is_training=False)\n else:\n mvalid = CNNModel(config, is_training=False)\n\n # Initializes variables.\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n def train_step():\n \"\"\"Train step.\"\"\"\n batch = train_iter.next()\n feed_data = {m.input: batch[\"img\"], m.label: batch[\"label\"]}\n cost, ce, _ = sess.run([m.cost, m.cross_ent, m.train_op],\n feed_dict=feed_data)\n return ce\n\n def evaluate(data_iter, nbatches):\n \"\"\"Runs evaluation.\"\"\"\n num_correct = 0.0\n count = 0\n if nbatches == -1:\n iter_ = data_iter\n else:\n iter_ = range(nbatches)\n\n for bb in iter_:\n if nbatches == -1:\n batch = bb\n else:\n batch = data_iter.next()\n feed_data = {mvalid.input: batch[\"img\"]}\n y = sess.run(mvalid.output, feed_dict=feed_data)\n pred_label = np.argmax(y, axis=1)\n num_correct += np.sum(\n np.equal(pred_label, batch[\"label\"]).astype(float))\n count += pred_label.size\n acc = (num_correct / count)\n return acc\n\n def save():\n \"\"\"Snapshots a model.\"\"\"\n if not os.path.isdir(save_folder):\n os.makedirs(save_folder)\n config_file = os.path.join(save_folder, \"conf.json\")\n environ_file = os.path.join(save_folder, \"env.json\")\n with open(config_file, \"w\") as f:\n f.write(config.to_json())\n with open(environ_file, \"w\") as f:\n f.write(environ.to_json())\n log.info(\"Saving to {}\".format(save_folder))\n saver.save(\n sess,\n os.path.join(save_folder, \"model.ckpt\"),\n global_step=m.global_step)\n\n def train():\n \"\"\"Train loop.\"\"\"\n lr = config.base_learn_rate\n lr_decay_steps = config.lr_decay_steps\n max_train_iter = config.max_train_iter\n m.assign_lr(sess, lr)\n\n if environ.verbose:\n loop = range(max_train_iter)\n else:\n loop = pb.get(max_train_iter)\n\n for niter in loop:\n # decrease learning rate\n if len(lr_decay_steps) > 0:\n if (niter + 1) == lr_decay_steps[0]:\n lr *= 0.1\n m.assign_lr(sess, lr)\n lr_decay_steps.pop(0)\n ce = train_step()\n if (niter + 1) % config.disp_iter == 0 or niter == 0:\n exp_logger.log_train_ce(niter, ce)\n if (niter + 1) % config.valid_iter == 0 or niter == 0:\n acc = evaluate(trainval_iter, 10)\n exp_logger.log_train_acc(niter, acc)\n test_iter.reset()\n acc = evaluate(test_iter, -1)\n log.info(\"Experment ID {}\".format(environ.exp_id))\n exp_logger.log_valid_acc(niter, acc)\n if (niter + 1) % config.save_iter == 0:\n save()\n test_iter.reset()\n acc = evaluate(test_iter, -1)\n return acc\n\n acc = train()\n return acc", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not 
os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state" ]
[ "0.77299833", "0.74241644", "0.7334485", "0.7309532", "0.71466875", "0.7120709", "0.7097819", "0.70120436", "0.6995636", "0.69388175", "0.69167244", "0.69100595", "0.69032747", "0.6854969", "0.68546087", "0.68304265", "0.6785825", "0.67773795", "0.6773737", "0.67481923", "0.6747677", "0.6735257", "0.67336524", "0.67256683", "0.6709224", "0.6672442", "0.6655351", "0.66481644", "0.6647635", "0.6636869", "0.6611053", "0.660248", "0.660248", "0.65953124", "0.6594743", "0.65873396", "0.65851843", "0.658264", "0.65498793", "0.65485233", "0.6547423", "0.65407646", "0.6534953", "0.65117866", "0.65112835", "0.6509691", "0.649417", "0.64734626", "0.6472088", "0.6444999", "0.64448243", "0.64432454", "0.6438691", "0.6433641", "0.6433389", "0.6428864", "0.6425225", "0.6394112", "0.63930404", "0.63927853", "0.63926965", "0.6388553", "0.63844615", "0.6376105", "0.63742954", "0.63712686", "0.63615674", "0.63600224", "0.63556355", "0.6354203", "0.63504475", "0.6346633", "0.6346629", "0.634579", "0.6337445", "0.6335578", "0.6320063", "0.6309723", "0.6307621", "0.6303415", "0.63033444", "0.62997997", "0.62969965", "0.62923795", "0.62897587", "0.62868553", "0.62844193", "0.6283398", "0.6279034", "0.62679136", "0.6261727", "0.6259494", "0.62514925", "0.6251156", "0.6251156", "0.624443", "0.62283057", "0.6218762", "0.62180525", "0.6215615" ]
0.6487272
47
Creates a model (with temperature scaling) according to the config given.
def create_mean_teacher_model(self) -> None:\n self._mean_teacher_model = create_model_with_temperature_scaling(self.config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any:\n # wrap the model around a temperature scaling model if required\n model = config.create_model()\n if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:\n model = ModelWithTemperature(model, config.temperature_scaling_config)\n return model", "def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)", "def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def model_creator(config):\n return nn.Linear(1, 1)", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def model(self, **config_kwargs):\n measurement = self.get_measurement(**config_kwargs)\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.get('patches', [])\n\n modelspec = {\n 'channels': self.spec['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poiname=measurement['config']['poi'], **config_kwargs)", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', 
task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def run_model(config_file):\n config_file = os.path.join(os.getcwd(), config_file)\n result = Tethys(config_file=config_file)\n result.run_model()\n return result", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))", "def do_create_model(**kwargs):\n model_params = {\n 'name': kwargs['dag_run'].conf.get('model_name'),\n 
'description': 'A custom DNN regressor model',\n 'regions': [REGION]\n }\n\n ti = kwargs['ti']\n\n is_model = ti.xcom_pull(key='is_project', task_ids='check_model')\n if not is_model:\n mle = MLEngineHook()\n mle.create_model(PROJECT, model_params)", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def config_task(self) -> None:\n if self.hparams[\"model\"] == \"resnet18\":\n self.model = models.resnet18(pretrained=True)\n in_features = self.model.fc.in_features\n self.model.fc = nn.Linear( # type: ignore[attr-defined]\n in_features, out_features=1\n )\n else:\n raise ValueError(f\"Model type '{self.hparams['model']}' is not valid.\")", "def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)", "def create_model(model_class, model_params=None, model_name='model'):\n\n model_params = {} if model_params is None else model_params\n\n model = model_class(**model_params)\n\n if special_parameters.load_model: # recover from checkpoint\n _load_model(model, model_name)\n\n # configure usage on GPU\n if use_gpu():\n model.to(first_device())\n model = torch.nn.DataParallel(model, device_ids=all_devices())\n\n # print info about devices\n print_info('Device(s)): ' + str(device_description()))\n\n return model", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def _random_model(self, input_size, output_size, task, config: dict) -> AbstractModel:\n return create_random_model(input_size, output_size, config, task)", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = 
os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model", "def template_model():\n model_type = 'continuous' # either 'discrete' or 'continuous'\n model = do_mpc.model.Model(model_type)\n\n # Model variables:\n var1 = model.set_variable(var_type='_x', var_name='var1')\n var2 = model.set_variable(var_type='_x', var_name='var2')\n\n state = vertcat(var1,var2)\n state_dot = model.set_variable(var_type='_x', var_name='state_dot', shape=(2.1))\n\n input1 = model.set_variable(var_type='_u', var_name='input1')\n\n\n # Parameters:\n # define Parameters\n\n model.set_rhs('var1',state_dot[0])\n model.set_rhs('var2',state_dot[1])\n\n state_dot_rhs 
= vertcat(\n # rhs1,\n # rhs2)\n model.set_rhs('state_dot',state_dot_rhs)\n\n model.setup()\n\n return model", "def from_config(cls,config):\n ## find labels in list\n label_list = load_label_list(config.label_list)\n use_cuda = True if torch.cuda.is_available() else False\n\n global_args = {\n \"fp16\" : False,\n \"classification_report\" : True,\n \"tensorboard_dir\" : config.tensorboard_dir,\n \"wandb_project\" : config.wandb_project,\n \"wandb_kwargs\" : {\n \"name\" : config.wandb_name,\n \"entity\" : config.wandb_entity,\n }\n }\n\n model = NERModel(\n config.model_name,\n config.model_type,\n use_cuda=use_cuda,\n labels=label_list,\n args=global_args,\n )\n return cls(model,config)", "def create(self, req, body):\n context = req.environ['meteos.context']\n\n if not self.is_valid_body(body, 'model'):\n raise exc.HTTPUnprocessableEntity()\n\n model = body['model']\n\n LOG.debug(\"Create model with request: %s\", model)\n\n try:\n experiment = self.engine_api.get_experiment(\n context, model['experiment_id'])\n utils.is_valid_status(experiment.__class__.__name__,\n experiment.status,\n constants.STATUS_AVAILABLE)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n except exception.InvalidStatus:\n raise\n\n display_name = model.get('display_name')\n display_description = model.get('display_description')\n experiment_id = model.get('experiment_id')\n source_dataset_url = model.get('source_dataset_url')\n dataset_format = model.get('dataset_format', 'csv')\n model_type = model.get('model_type')\n model_params = model.get('model_params')\n swift_tenant = model.get('swift_tenant')\n swift_username = model.get('swift_username')\n swift_password = model.get('swift_password')\n\n new_model = self.engine_api.create_model(context,\n display_name,\n display_description,\n source_dataset_url,\n dataset_format,\n model_type,\n model_params,\n template.id,\n template.job_template_id,\n experiment_id,\n experiment.cluster_id,\n swift_tenant,\n swift_username,\n swift_password)\n\n return self._view_builder.detail(req, new_model)", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def from_config(cls, *args, **kwargs):\n _config = args\n\n if isinstance(args, tuple): # multiple non-keyword arguments were provided\n if len(args) > 0:\n _config = args[0]\n\n else:\n _config = kwargs['config_path']\n kwargs.pop('config_path')\n\n local = False\n if 'make_new_path' in kwargs:\n local = True\n elif isinstance(_config, str) and os.path.isfile(_config):\n local = True\n elif isinstance(_config, dict) and \"category\" in _config:\n local = True\n\n if local:\n config = None\n config_path = None\n\n # we need to build ai4water's Model class\n if isinstance(_config, dict):\n config = _config\n else:\n config_path = _config\n return BaseModel._get_config_and_path(\n cls,\n config=config,\n config_path=config_path,\n **kwargs\n )\n\n # tf1.15 has from_config so call it\n return super().from_config(*args, **kwargs)", "def create_model(self):\n pass", "def create_model(self):\n pass", "def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n 
conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)", "def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == 
\"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def init_model(model):\n model(tf.random.uniform((1, 512, 512, 3)))", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def create_model(sess, FLAGS, mode):\n if FLAGS.model == \"vallina\":\n model = LinearModel(FLAGS, mode)\n model.build()\n else:\n pass\n # other model \n\n # create task file\n model_path = os.path.join(FLAGS.logdir, FLAGS.task_name)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n print (\"Save model to {}\".format(model_path))\n elif (FLAGS.reset):\n shutil.rmtree(model_path)\n os.makedirs(model_path)\n print (\"Remove existing model at {} and restart.\".format(model_path))\n else:\n raise ValueError(\"Fail to create the new model.\")\n\n # Save the current configurations\n config = dict(FLAGS.__flags.items())\n with open(\"/\".join([model_path, \"config.json\"]), \"w\") as file:\n json.dump(config, file)\n\n # initialize variables\n sess.run(tf.global_variables_initializer())\n\n return model", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n 
shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) 
# training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def get_model(model_name: str = \"\", cfg={}) -> torch.nn.Module:\n if model_name == \"default\":\n model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)\n\n elif model_name == \"resnetish34\":\n model = resnetish34()\n\n elif model_name == \"clstm\":\n model = CLSTM()\n\n elif model_name == \"cvt\":\n s1_depth, s2_depth, s3_depth = cfg.depths\n s1_emb_dim, s2_emb_dim, s3_emb_dim = cfg.embed_dims\n s1_mlp_mult, s2_mlp_mult, s3_mlp_mult = cfg.mlp_mults\n\n model = CvT(\n s1_emb_dim=s1_emb_dim,\n s1_depth=s1_depth,\n s1_mlp_mult=s1_mlp_mult,\n s2_emb_dim=s2_emb_dim,\n s2_depth=s2_depth,\n s2_mlp_mult=s2_mlp_mult,\n s3_emb_dim=s3_emb_dim,\n s3_depth=s3_depth,\n s3_mlp_mult=s3_mlp_mult,\n pool=cfg.cvt_pool,\n )\n else:\n raise ValueError(\"Model not found.\")\n return model", "def __init__(self, config):\n self.model = None\n self.config = config\n self.batch_size = config.get('batch_size')\n self.epochs = config.get('epochs')\n self.steps_per_epoch = config.get('steps_per_epoch')\n self.validation_steps = config.get('validation_steps')\n self.distributed = config.get('distributed', False)\n \n # init model\n self.init()", "def config_task(self) -> None:\n weights = self.hyperparams[\"weights\"]\n\n if self.hyperparams[\"model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['model']}' is not valid. \"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n\n class_weights = None\n if isinstance(self.class_weights, torch.Tensor):\n class_weights = self.class_weights.to(dtype=torch.float32)\n elif hasattr(self.class_weights, \"__array__\") or self.class_weights:\n class_weights = torch.tensor(self.class_weights, dtype=torch.float32)\n\n self.loss = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=class_weights\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. 
\"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n if self.hyperparams[\"model\"] != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hyperparams.get(\"freeze_backbone\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hyperparams.get(\"freeze_decoder\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False", "def getModel(config: configuration.Configuration) -> torch.nn.Module:\n if config.modelName == ModelName.DENSE:\n return DenseGenerator(1, 1, n_blocks=config.blockCount)\n elif config.modelName == ModelName.SHALLOW:\n return Shallow(1, 1, )\n elif config.modelName == ModelName.TIRAMISU:\n model = Tiramisu(1, 1, structure=(\n config.down, # Down blocks\n config.bottleneck, # bottleneck layers\n config.up, # Up blocks\n ), checkpoint=False)\n\n model.initialize_kernels(torch.nn.init.kaiming_uniform_, conv=True)\n return model\n else:\n return SimpleCNN()", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n\n baseEncoder = self.createEncoder(inputs)\n baseEncoder = Dropout(self.drop)(baseEncoder)\n\n # Instantiate encoder layers\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(baseEncoder)\n z_log_var = Q_z_log_var(baseEncoder)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n \n G_0 = Dense(8*self.kernel_init)(encoding)\n G_0 = Dropout(self.drop)(G_0)\n baseDecoder = self.createDecoder(G_0)\n\n self.model =Model(inputs, baseDecoder)\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n self.model.summary()\n print(\"Completed model setup.\")", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ###### Temperature in Kelvin\n model.T_kelvin = model.zero_celsius + model.T_celsius*kelvin\n \n ##### Potentials\n # Resting potential (calculated with Goldman equation)\n model.V_res = (model.R*model.T_kelvin)/model.F * np.log((model.P_K*model.n_init**2*model.K_e + model.P_Na*model.h_init*model.m_init**3*model.Na_e)/\\\n (model.P_K*model.n_init**2*model.K_i + model.P_Na*model.h_init*model.m_init**3*model.Na_i))\n \n # Nerst potential for leakage current; leakage chanels were excluded but could be added by using: g_L*(E_L-(v-V_res)) \n model.E_L = (-1/model.g_L)*(model.P_Na*model.m_init**3*model.h_init*(model.V_res*model.F**2)/(model.R*model.T_kelvin) * \\\n 
(model.Na_e-model.Na_i*exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))) + \\\n model.P_K*model.n_init**2*(model.V_res*model.F**2)/(model.R*model.T_kelvin) *\\\n (model.K_e-model.K_i*np.exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))))\n \n \n ##### structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5)\n model.structure = np.array(list(np.tile([2,1],model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### Compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(model.structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # dendrite\n model.fiber_inner_diameter = 0.7* model.fiber_outer_diameter\n model.compartment_diameters[:] = model.fiber_inner_diameter\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacitivites\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # internodes\n model.c_m[np.where(model.structure == 1)] = 0*uF/cm**2\n # nodes\n model.c_m[np.where(model.structure == 2)] = model.c_m_layer\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2 \n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Noise term\n model.P_Na_vector = np.zeros(model.nof_comps)*um/second\n model.P_Na_vector[model.structure == 2] = model.P_Na\n model.noise_term = np.sqrt(model.A_surface*model.P_Na_vector)\n \n ##### Compartments to plot\n model.comps_to_plot = range(1,model.nof_comps)\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.n = model.n_init\n neuron.h = model.h_init\n \n ##### Set parameter values of differential equations\n # conductances active compartments\n neuron.g_Na = model.g_Na\n neuron.g_K = model.g_K\n \n # conductances internodes\n neuron.g_Na[np.asarray(np.where(model.structure == 1))] = 
0*msiemens/cm**2\n neuron.g_K[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.E_L = model.E_L\n neuron.g_L = model.g_L \n \n return neuron, model", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def create_scaling_model(params, experiments, reflections):\n autos = [None, Auto, \"auto\", \"Auto\"]\n use_auto_model = params.model in autos\n # Determine non-auto model to use outside the loop over datasets.\n if not use_auto_model:\n model_class = None\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == params.model:\n model_class = entry_point.load()\n break\n if not model_class:\n raise ValueError(f\"Unable to create scaling model of type {params.model}\")\n\n for expt, refl in zip(experiments, reflections):\n if not expt.scaling_model or params.overwrite_existing_models:\n # need to make a new model\n if use_auto_model:\n if not expt.scan:\n model = KBScalingModel\n else: # set model as physical unless scan < 1.0 degree\n osc_range = expt.scan.get_oscillation_range()\n abs_osc_range = abs(osc_range[1] - osc_range[0])\n if abs_osc_range < 1.0:\n model = KBScalingModel\n else:\n model = PhysicalScalingModel\n else:\n model = model_class\n expt.scaling_model = model.from_data(params, expt, refl)\n else:\n # allow for updating of an existing model.\n expt.scaling_model.update(params)\n return experiments", "def build(model_name):\n return pretrain.factory.create(model_name)", "def create_model(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window):\n model = smooth_tfactor(X, tmode, nmode1, nmode2, nmode, window, rank, tlength, seasonp, horizon, f_window)\n opt = torch.optim.SGD(model.parameters(),lr=0.001)\n return model, opt", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def _initialize_model(self):\n max_value = self.data.max()\n\n if self.model_type == self._GAUSSIAN2D:\n model = models.Gaussian2D(\n x_mean=self.x, y_mean=self.y, x_stddev=1, y_stddev=1\n )\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.x_stddev.bounds = (0, self._box / 4)\n model.y_stddev.bounds = (0, self._box / 4)\n model.x_mean.bounds = (self.x - 5, self.x + 5)\n model.y_mean.bounds = (self.y - 5, self.y + 5)\n\n elif self.model_type == self._MOFFAT2D:\n model = models.Moffat2D()\n model.x_0 = self.x\n model.y_0 = self.y\n model.gamma = 2\n model.alpha = 2\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.alpha.bounds = (1, 6)\n 
model.gamma.bounds = (0, self._box / 4)\n model.x_0.bounds = (self.x - 5, self.x + 5)\n model.y_0.bounds = (self.y - 5, self.y + 5)\n\n model += models.Const2D(self.fit_sky())\n model.amplitude_1.fixed = True\n return model", "def create(model: TModel) -> ModelTransformer:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.graph.model_transformer import ONNXModelTransformer\n\n return ONNXModelTransformer(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.graph.model_transformer import OVModelTransformer\n\n return OVModelTransformer(model)\n if model_backend == BackendType.TORCH:\n from nncf.torch.model_transformer import PTModelTransformer\n\n return PTModelTransformer(model)\n raise RuntimeError(\n \"Cannot create backend-specific model transformer because {} is not supported!\".format(model_backend)\n )", "def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):\n config = copy.deepcopy(config) # ensure the reference model is not passed to tests\n\n save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))\n config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})\n\n trainer = Trainer(**config)\n trainer.fit()\n\n self.reference_model = trainer.state.model\n self.reference_folder = save_folder", "def create_model(config_obj: Union[ModelConfig, dict], random_seed: int = default_random_seed) -> BaseModel:\n if isinstance(config_obj, dict):\n config_obj = ModelConfig.from_dict(config_obj)\n model_type = get_from_registry(config_obj.model_type, model_type_registry)\n return model_type(config_obj, random_seed=random_seed)", "def __init__(self, model, settings):\n super().__init__(model, settings)\n self.model_part = self.model.CreateModelPart(self.settings[\"model_part_name\"].GetString())\n self.model_part.ProcessInfo.SetValue(KM.DOMAIN_SIZE, self.settings[\"domain_size\"].GetInt())\n self.model_part.ProcessInfo.SetValue(KM.GRAVITY_Z, self.settings[\"gravity\"].GetDouble())\n self.EstimateDeltaTimeUtility = SW.EstimateTimeStepUtility(self.GetComputingModelPart(), self.settings[\"time_stepping\"])", "def make_environment(self):\n\t\tbase_layer = 0\n\t\tself.Gravity = 9.81\n\n\t\t#Private data for to define model\n\t\t__model_max_altitude = 87000\n\t\t__atmosphere_layers = {0:0, 11000:1, 20000:2, 32000:3, 47000:4, 51000:5, 71000:6}\n\t\t__layer_base_data = {\n\t\t\t0:{'temp':288.15, 'lapse':-0.0065, 'press':101325},\n\t\t\t1:{'temp':216.65, 'lapse':0, 'press':22632.1},\n\t\t\t2:{'temp':216.65, 'lapse':0.001, 'press':5474.89},\n\t\t\t3:{'temp':228.65, 'lapse':0.0028, 'press':868.019},\n\t\t\t4:{'temp':270.65, 'lapse':0, 'press':110.906},\n\t\t\t5:{'temp':270.65, 'lapse':-0.0028, 'press':66.9389},\n\t\t\t6:{'temp':214.65, 'lapse':-0.002, 'press':3.95642},\n\t\t\t}\n\t\t__gas_constant = 8.31432#e3\n\t\t__air_molar_mass = 0.0289644\n\t\t__specific_heat_ratio = 1.4\n\t\t__visc_lambda = 1.51204129e-6\n\t\t__visc_sutherland_const = 120.0\n\n\t\tif self.Altitude > __model_max_altitude:\n\t\t\traise helpers.extra_exceptions.ModelExtrapolationException(\n\t\t\t'Exceeded model maximum altitude')\n\n\t\tlayerKeys = __atmosphere_layers.keys()\n\t\tlayerKeys = list(layerKeys)\n\t\tlayerKeys.sort()\n\t\tfor layer in layerKeys:\n\t\t\tif self.Altitude >= layer:\n\t\t\t\tbase_layer = __atmosphere_layers[layer]\n\t\t\t\tbase_alt = layer\n\t\tbase_temp = __layer_base_data[base_layer]['temp']\n\t\tbase_lapse = 
__layer_base_data[base_layer]['lapse']\n\t\tbase_press = __layer_base_data[base_layer]['press']\n\n\t\tself.Temperature = base_temp + base_lapse * (self.Altitude - base_alt)\n\t\t+ self.Temperature_offset\n\n\t\tif base_lapse == 0:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\tnp.exp( (-self.Gravity*__air_molar_mass*(self.Altitude-base_alt)) \\\n\t\t\t\t/(__gas_constant*base_temp))\n\t\telse:\n\t\t\tself.Pressure = base_press * \\\n\t\t\t\t(base_temp/self.Temperature) ** \\\n\t\t\t\t(self.Gravity*__air_molar_mass/__gas_constant/base_lapse)\n\n\t\tself.Density = __air_molar_mass*self.Pressure / \\\n\t\t\t__gas_constant/self.Temperature\n\t\tself.Speed_of_sound = np.sqrt(__specific_heat_ratio*__gas_constant* \\\n\t\t\tself.Temperature/__air_molar_mass)\n\t\tself.Dynamic_viscosity = __visc_lambda*self.Temperature**(3.0/2.0)/ \\\n\t\t\t(self.Temperature+__visc_sutherland_const)", "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name = None\n for name in dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model", "def get_model_with_properties():\n \n m = ConcreteModel()\n\n # ------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------\n\n m.np = 25 # Number of possible tays\n m.c = 4 # Number of components\n m.lc = 1 # Light component\n m.hc = 4 # Heavy component\n\n #### Constant parameters\n m.Rgas = 8.314 # Ideal gas constant in J/mol K\n m.Tref = 298.15 # Reference temperature in K\n\n #### Product specifications\n m.xspec_lc = 0.99 # Final liquid composition for methanol (1)\n m.xspec_hc = 0.99 # Fnal liquid composition for butanol (4)\n m.xspec_inter2 = 0.99 # Final liquid composition for ethanol (2)\n m.xspec_inter3 = 
0.99 # Final liquid composition for propanol (3)\n m.Ddes = 50 # Final flowrate in distillate in mol/s\n m.Bdes = 50 # Final flowrate in bottoms in mol/s\n m.Sdes = 50 # Final flowrate in side product streams in mol/s\n\n # #### Known initial values\n m.Fi = m.Ddes + m.Bdes + 2 * m.Sdes # Side feed flowrate in mol/s\n m.Vi = 400 # Initial value for vapor flowrate in mol/s\n m.Li = 400 # Initial value for liquid flowrate in mol/s\n\n m.Tf = 358 # Side feed temperature in K\n\n m.Preb = 1.2 # Reboiler pressure in bar\n m.Pbot = 1.12 # Bottom-most tray pressure in bar\n m.Ptop = 1.08 # Top-most tray pressure in bar\n m.Pcon = 1.05 # Condenser pressure in bar\n m.Pf = 1.02\n\n m.rr0 = 0.893 # Internal reflux ratio initial value\n m.bu0 = 0.871 # Internal reflux ratio initial value\n\n\n #### Scaling factors\n m.Hscale = 1e3 \n m.Qscale = 1e-3 \n\n \n #### Constants for the calculation of liquid heat capacity\n m.cpc = {} # Constant 1 for liquid heat capacity \n m.cpc2 = {} # Constant 2 for liquid heat capacity \n m.cpc[1] = m.Rgas \n m.cpc[2] = 1\n m.cpc2['A', 1] = 1 / 100\n m.cpc2['B', 1] = 1 / 1e4\n m.cpc2['A', 2] = 1\n m.cpc2['B', 2] = 1\n\n\n # ------------------------------------------------------------------\n # Physical Properties\n #\n # Notation:\n # MW ........................ molecular weight in g/gmol\n # TB ........................ boiling point temperature in K\n # TC ........................ critical temperature in K\n # PC ........................ critical pressure in bar\n # w ........................ acentric factor\n # lden ...................... liquid density g/m3,\n # dHvap ..................... heat of vaporization in J/mol.\n # vpA, vpB, vpC, and vpD .... vapor pressure constants\n # cpA, cpB, cpC, and cpD .... heat capacity constants J/mol:\n # 1 for liq and 2 for vapor phase\n #\n # Reference A: R.C. Reid, J.M. Prausnitz and B.E. 
Poling,\n # \"The Properties of gases and liquids\", 1987 and 2004 Eds.\n #\n # ------------------------------------------------------------------\n\n m.prop = {} # Properties of components:\n cpL = {} # Ruczika-D method for liquid heat capacity calculation\n # (Reference A, page 6.20)\n sumA = {}\n sumB = {}\n sumC = {}\n cpL['a', 'C(H3)(C)'] = 4.19845\n cpL['b', 'C(H3)(C)'] = -0.312709\n cpL['c', 'C(H3)(C)'] = 0.178609\n cpL['a', 'C(H2)(C2)'] = 2.7345\n cpL['b', 'C(H2)(C2)'] = 0.122732\n cpL['c', 'C(H2)(C2)'] = -0.123482\n cpL['a', 'C(H2)(C)(O)'] = 0.517007\n cpL['b', 'C(H2)(C)(O)'] = 1.26631\n cpL['c', 'C(H2)(C)(O)'] = -0.0939713\n cpL['a', 'O(H)(C)'] = 16.1555\n cpL['b', 'O(H)(C)'] = -11.938\n cpL['c', 'O(H)(C)'] = 2.85117\n cpL['a', 'C(H3)(O)'] = 3.70344\n cpL['b', 'C(H3)(O)'] = -1.12884\n cpL['c', 'C(H3)(O)'] = 0.51239\n sumA[1] = (cpL['a', 'C(H3)(O)']\n + cpL['a', 'O(H)(C)']) \n sumB[1] = (cpL['b', 'C(H3)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[1] = (cpL['c', 'C(H3)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[2] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[2] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[2] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[3] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[3] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[3] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[4] = (cpL['a', 'C(H3)(C)']\n + 2 * cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[4] = (cpL['b', 'C(H3)(C)']\n + 2 * cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[4] = (cpL['c', 'C(H3)(C)']\n + 2 * cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n\n ## Methanol: component 1\n m.prop[1, 'MW'] = 32.042\n m.prop[1, 'TB'] = 337.7\n m.prop[1, 'TC'] = 512.6\n m.prop[1, 'PC'] = 80.9\n m.prop[1, 'w'] = 0.556\n m.prop[1, 'lden'] = 792e3\n m.prop[1, 'dHvap'] = 38.376e3\n m.prop[1, 'vpA'] = -8.54796\n m.prop[1, 'vpB'] = 0.76982\n m.prop[1, 'vpC'] = -3.10850\n m.prop[1, 'vpD'] = 1.54481\n m.prop[1, 'cpA', 1] = sumA[1]\n m.prop[1, 'cpB', 1] = sumB[1]\n m.prop[1, 'cpC', 1] = sumC[1]\n m.prop[1, 'cpD', 1] = 0\n m.prop[1, 'cpA', 2] = 2.115e1\n m.prop[1, 'cpB', 2] = 7.092e-2\n m.prop[1, 'cpC', 2] = 2.587e-5\n m.prop[1, 'cpD', 2] = -2.852e-8\n\n\n ## Ethanol: component 2\n m.prop[2, 'MW'] = 46.069\n m.prop[2, 'TB'] = 351.4\n m.prop[2, 'TC'] = 513.9\n m.prop[2, 'PC'] = 61.4\n m.prop[2, 'w'] = 0.644\n m.prop[2, 'lden'] = 789.3e3\n m.prop[2, 'dHvap'] = 42.698e3\n m.prop[2, 'vpA'] = -8.51838\n m.prop[2, 'vpB'] = 0.34163\n m.prop[2, 'vpC'] = -5.73683\n m.prop[2, 'vpD'] = 8.32581\n m.prop[2, 'cpA', 1] = sumA[2]\n m.prop[2, 'cpB', 1] = sumB[2]\n m.prop[2, 'cpC', 1] = sumC[2]\n m.prop[2, 'cpD', 1] = 0\n m.prop[2, 'cpA', 2] = 9.014\n m.prop[2, 'cpB', 2] = 2.141e-1\n m.prop[2, 'cpC', 2] = -8.390e-5\n m.prop[2, 'cpD', 2] = 1.373e-9\n\n\n ## Propanol: component 3\n m.prop[3, 'MW'] = 60.096\n m.prop[3, 'TB'] = 370.3\n m.prop[3, 'TC'] = 536.8\n m.prop[3, 'PC'] = 51.7\n m.prop[3, 'w'] = 0.623\n m.prop[3, 'lden'] = 804e3\n m.prop[3, 'dHvap'] = 47.763e3\n m.prop[3, 'vpA'] = -8.05594\n m.prop[3, 'vpB'] = 4.25183e-2\n m.prop[3, 'vpC'] = -7.51296\n m.prop[3, 'vpD'] = 6.89004\n m.prop[3, 'cpA', 1] = sumA[3]\n m.prop[3, 'cpB', 1] = sumB[3]\n 
m.prop[3, 'cpC', 1] = sumC[3]\n m.prop[3, 'cpD', 1] = 0\n m.prop[3, 'cpA', 2] = 2.47\n m.prop[3, 'cpB', 2] = 3.325e-1\n m.prop[3, 'cpC', 2] = -1.855e-4\n m.prop[3, 'cpD', 2] = 4.296e-8\n\n\n ## Butanol: component 4\n m.prop[4, 'MW'] = 74.123\n m.prop[4, 'TB'] = 390.9\n m.prop[4, 'TC'] = 563.1\n m.prop[4, 'PC'] = 44.2\n m.prop[4, 'w'] = 0.593\n m.prop[4, 'lden'] = 810e3\n m.prop[4, 'dHvap'] = 52.607e3\n m.prop[4, 'vpA'] = -8.00756\n m.prop[4, 'vpB'] = 0.53783\n m.prop[4, 'vpC'] = -9.34240\n m.prop[4, 'vpD'] = 6.68692\n m.prop[4, 'cpA', 1] = sumA[4]\n m.prop[4, 'cpB', 1] = sumB[4]\n m.prop[4, 'cpC', 1] = sumC[4]\n m.prop[4, 'cpD', 1] = 0\n m.prop[4, 'cpA', 2] = 3.266\n m.prop[4, 'cpB', 2] = 4.18e-1\n m.prop[4, 'cpC', 2] = -2.242e-4\n m.prop[4, 'cpD', 2] = 4.685e-8\n\n\n return m", "def __init__(self,\n modeltype='TLusty'):\n if modeltype == 'TLusty':\n self.modtype = 'TLusty_v10'\n self.filebase = 'T*v10_z*.dat'\n self.path = '/home/kgordon/Dust/Ext/Model_Standards_Data/'\n self.read_tlusty_models(self.filebase, self.path)\n else:\n print('model type not supported')\n exit()", "def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def create_model(self, C : float =1):\n self.classifier = RandomForestClassifier(max_depth=5)", "def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)", "def convert_from_config(config):\n\n if isinstance(config, str):\n yamlConfig = parse_yaml_config(config)\n else:\n yamlConfig = config\n\n model = None\n if 'OnnxModel' in yamlConfig:\n if __onnx_enabled__:\n model = onnx_to_hls(yamlConfig)\n else:\n raise Exception(\"ONNX not found. Please install ONNX.\")\n elif 'PytorchModel' in yamlConfig:\n if __pytorch_enabled__:\n model = pytorch_to_hls(yamlConfig)\n else:\n raise Exception(\"PyTorch not found. 
Please install PyTorch.\")\n else:\n model = keras_to_hls(yamlConfig)\n\n return model", "def from_config(cls, config):\n config['posterior'] = tf.keras.layers.deserialize(config['posterior'])\n config['prior'] = tf.keras.layers.deserialize(config['prior'])\n return cls(**config)", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def __create_model(self, classes):\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained_base=True)\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained=True)\r\n # self._model.reset_class(classes, reuse_weights=[cname for cname in classes if cname in self._model.classes])\r\n if self._model is None or classes != self.classes:\r\n model_name = 'ssd_{}_{}_custom'.format(self.img_size, self.backbone)\r\n self._model = model_zoo.get_model(model_name, classes=classes, pretrained=False, pretrained_base=True,\r\n root=self.temp_path)\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(self.ctx)\r\n _, _, _ = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), self.ctx))\r\n\r\n self._model.reset_class(classes)\r\n self.classes = classes", "def __init__(self, config=None, class_min=0):\n self.config = self._resolve_config(config)\n self.class_min = self._resolve_class_min(class_min)\n self.model = LogReg(**self.config)\n self.scaler = StandardScaler()", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def make_objects():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n pmodel.set_temperature(120.)\n\n return pmodel", "def build_model(config):\n # Load the pretrained model\n detr = get_detr_model(config, include_top=True, weights=\"detr\")\n detr.summary()\n return detr", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n 
#modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def _make_model(self):\n self._model = tf.estimator.Estimator(model_fn=self.model_fn,\n model_dir=self.model_dir,\n config=self._config,\n params=self._params,\n )", "def __init__(self, model_name='vgg16'):\n trainer = Trainer(model_name=model_name)\n self.model = trainer.model\n self.model_save_dir = trainer.model_save_dir\n self.model_name = model_name", "def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model", "def get_model(model_name, model_config, to_cuda,\n uniform_initialize_bn_weight=False, forward_is_infer=False):\n model = None\n if model_name == 'Tacotron2':\n if forward_is_infer:\n class Tacotron2__forward_is_infer(Tacotron2):\n def forward(self, inputs, input_lengths):\n return self.infer(inputs, input_lengths)\n model = Tacotron2__forward_is_infer(**model_config)\n else:\n model = Tacotron2(**model_config)\n elif model_name == 'WaveGlow':\n if forward_is_infer:\n class WaveGlow__forward_is_infer(WaveGlow):\n def forward(self, spect, sigma=1.0):\n return self.infer(spect, sigma)\n model = WaveGlow__forward_is_infer(**model_config)\n else:\n model = WaveGlow(**model_config)\n else:\n raise NotImplementedError(model_name)\n\n if uniform_initialize_bn_weight:\n init_bn(model)\n\n if to_cuda:\n model = model.cuda()\n return model", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def __init__(self, config, xtdim, batch_size):\n self.float_type = 'float32' # This should be the default\n self.config = config\n self.dt = self.config['dt']\n\n self.n_input = self.config['n_input']\n self.n_output = self.config['n_output']\n\n 
self.batch_size = batch_size\n self.xtdim = xtdim\n\n # time major\n self.x = np.zeros((xtdim, batch_size, self.n_input), dtype=self.float_type)\n self.y = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n self.cost_mask = np.zeros((xtdim, batch_size, self.n_output), dtype=self.float_type)\n # strength of input noise\n self._sigma_x = config['sigma_x'] * math.sqrt(2./self.config['alpha'])\n\n if config['rule_name'] == 'timed_spatial_reproduction_broad_tuning' \\\n or config['rule_name'] == 'spatial_reproduction_broad_tuning':\n self.n_guassianline = 32 + 12\n self.sd_gaussianline = 4.\n else:\n self.n_guassianline = 32\n self.sd_gaussianline = 2.\n\n self.pref_line_gaussian = np.arange(0, self.n_guassianline)", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def __init__(self, config_file_name: str):\n configs_trainer = io.read_yaml(PATH_CONFIG, config_file_name)\n configs_model = configs_trainer[configs_trainer['model']]\n\n # Add trainer configs attributes\n horizons = configs_trainer['forecasting_horizons_trainer']\n self.forecasting_horizons_trainer = range(horizons['smallest_horizon'], horizons['largest_horizon'] + 1)\n\n for name, value in configs_trainer.items():\n if name in ['train_date_when_predicing_min', 'train_date_to_predict_max']:\n self.__setattr__(name, value)\n\n # Initiate individual model configs object (replace attributes that were specified in configs_model).\n configs = io.read_yaml(PATH_CONFIG, configs_trainer['file_name_model_configs'])\n configs = configs[configs_trainer['model']]\n Logger.info('Loaded model configs from file',\n os.path.join(PATH_CONFIG, configs_trainer['file_name_model_configs']), self.__class__.__name__)\n configs.update(configs_model)\n\n def update_train_scope(attr, limit, fct):\n if configs.get(attr) is not None and limit in vars(self):\n date = fct(configs.get(attr), self.__getattribute__(limit))\n configs.update({attr: date})\n\n update_train_scope('train_start', 'train_date_when_predicting_min', max)\n update_train_scope('train_end', 'train_date_to_predict_max', min)\n\n self.configs_individual_model = Configs(configs={k: v for k, v in configs.items()\n if k in Configs.__dict__.keys()})\n\n # Update maximum date to predict train to ensure that we don't overlap with the evaluation period\n if self.configs_individual_model.evaluation_start is not None and self.train_date_to_predict_max is not None:\n max_date_to_predict = substract_period(\n self.configs_individual_model.evaluation_start, 1,\n highest_period=52 if self.configs_individual_model.is_weekly_forecast else 12\n )\n self.train_date_to_predict_max = min(self.train_date_to_predict_max, max_date_to_predict)\n\n Logger.info('Loaded trainer configs from file',\n os.path.join(PATH_CONFIG, config_file_name), self.__class__.__name__)", "def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def 
model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def model():\n return TimeSeriesMultiReg()", "def new(cls, args, src_meta, trg_meta, waitk_lagging, name=None):\n # build source and target modality\n src_modality, trg_modality = cls.build_modalities(args, src_meta, trg_meta)\n encoder_params, decoder_params = {}, {}\n for f in cls.class_or_method_args():\n if f.name in args:\n if f.name.startswith(\"encoder.\"):\n encoder_params[f.name[8:]] = args[f.name]\n elif f.name.startswith(\"decoder.\"):\n decoder_params[f.name[8:]] = args[f.name]\n # build encoder and decoder\n encoder = build_encoder({\n \"encoder.class\": \"TransformerEncoder\",\n \"encoder.params\": encoder_params})\n decoder = build_decoder({\n \"decoder.class\": \"TransformerDecoder\",\n \"decoder.params\": decoder_params})\n model = cls(args, src_meta, trg_meta, src_modality, trg_modality,\n encoder, decoder, name=name)\n model.wait_k = waitk_lagging\n _ = model({\"src\": tf.convert_to_tensor([[1, 2, 3]], tf.int64),\n \"src_padding\": tf.convert_to_tensor([[0, 0., 0]], tf.float32),\n \"trg_input\": tf.convert_to_tensor([[1, 2, 3]], tf.int64)})\n return model", "def __init__(self, model_type, model_cfg, training_cfg):\n super().__init__()\n self.save_hyperparameters()\n\n self.model_cfg = model_cfg\n self.training_cfg = training_cfg\n \n if model_type == \"ConvLSTM\":\n self.model = Conv_LSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n 
num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"AutoencLSTM\":\n self.model = AutoencLSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"ConvTransformer\":\n self.model = ENS_Conv_Transformer(num_hidden=self.model_cfg[\"num_hidden\"],\n output_dim=self.model_cfg[\"output_channels\"],\n depth=self.model_cfg[\"depth\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n num_conv_layers=self.model_cfg[\"num_conv_layers\"],\n kernel_size=self.model_cfg[\"kernel_size\"],\n img_width=self.model_cfg[\"img_width\"],\n non_pred_channels=self.model_cfg[\"non_pred_channels\"],\n num_layers_query_feat=self.model_cfg[\"num_layers_query_feat\"],\n in_channels=self.model_cfg[\"in_channels\"],\n baseline=self.training_cfg[\"baseline\"])\n self.baseline = self.training_cfg[\"baseline\"]\n self.future_training = self.training_cfg[\"future_training\"]\n self.learning_rate = self.training_cfg[\"start_learn_rate\"]\n self.training_loss = get_loss_from_name(self.training_cfg[\"training_loss\"])\n self.test_loss = get_loss_from_name(self.training_cfg[\"test_loss\"])", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def create(name, out_channel, pretrain):\n if out_channel == 10 or out_channel == 100 or 
out_channel == 200:\n # use custom models\n if name not in custom_factory:\n raise KeyError(\"Unknown model:\", name)\n return custom_factory[name](out_channel)\n elif out_channel == 1000:\n if name not in torchvision_factory:\n raise KeyError(\"Unknown model:\", name)\n return torchvision_factory[name](pretrain)\n else:\n raise Exception", "def build_model(\n config: Mapping, cardinalities: Mapping[str, int]\n) -> keras.Model:\n\n model_config = config['model']\n if isinstance(model_config, str):\n model = keras.models.load_model(\n model_config, custom_objects={\n 'loss_fn': _create_loss(config['loss'])\n }\n )\n\n return model\n\n features = Features(config['features'])\n inputs_all = []\n\n # Constituents of different types\n constituent_types = [\n key for key in sorted(model_config.keys()) # Ensure order\n if key not in {'head', 'load_weights'}\n ]\n outputs_constituents = []\n for constituent_type in constituent_types:\n inputs_numerical = keras.Input(\n shape=(None, len(features.numerical(constituent_type))),\n ragged=True, name=f'{constituent_type}_numerical'\n )\n inputs_categorical = OrderedDict()\n for feature in features.categorical(constituent_type):\n inputs_categorical[feature] = keras.Input(\n shape=(None,), ragged=True, name=feature\n )\n inputs_all.append(inputs_numerical)\n inputs_all.extend(inputs_categorical.values())\n\n outputs = _apply_deep_set(\n inputs_numerical, inputs_categorical,\n model_config[constituent_type], cardinalities, constituent_type\n )\n outputs_constituents.append(outputs)\n\n # Head\n inputs_global_numerical = keras.Input(\n shape=(len(features.numerical('global')),),\n name='global_numerical'\n )\n inputs_global_categorical = OrderedDict()\n for feature in features.categorical('global'):\n inputs_global_categorical[feature] = keras.Input(\n shape=(None,), name=feature\n )\n embeddings_global = {\n feature: Embedding(\n cardinalities[feature],\n model_config['head']['embeddings'][feature],\n name=feature + '_embeddings'\n )(inputs)\n for feature, inputs in inputs_global_categorical.items()\n }\n inputs_all.append(inputs_global_numerical)\n inputs_all.extend(inputs_global_categorical.values())\n inputs_head = Concatenate(name='head_concatenate')(\n [inputs_global_numerical]\n + [\n embeddings_global[feature]\n for feature in inputs_global_categorical.values()\n ]\n + outputs_constituents\n )\n outputs = _apply_dense_from_config(\n inputs_head, model_config['head'], name_prefix='head_'\n )\n\n outputs = Dense(1, name='head_dense_output')(outputs) # Output unit\n model = keras.Model(inputs=inputs_all, outputs=outputs, name='full')\n\n model.compile(\n optimizer=_create_optimizer(config.get('optimizer', None)),\n loss=_create_loss(config['loss'])\n )\n if 'load_weights' in model_config:\n # Normally, a saved model should be loaded\n # keras.models.load_model at the beginning of thsi function.\n # However, this is currently not supported for models that use\n # ragged tensors [1]. As a workaround, construct the model anew\n # and then load saved weights. The path to weights would\n # usually be \"{model_directory}/variables/variables\", with the\n # \".index\" file extension stripped off. 
This doesn't restore\n # the state of the optimizer.\n # [1] https://github.com/tensorflow/tensorflow/issues/41034\n model.load_weights(model_config['load_weights'])\n return model", "def initialize_thermal_prediction(self, config_file):\n conf_pred = config_file['prediction']['heat']\n conf_powr = config_file['prediction']['power']\n # config_json\n n_day = conf_pred['n_day']\n n_values = conf_pred['n_values_per_day']\n precision_in_h = conf_pred['precision_in_h']\n use_predef_loads = conf_pred['use_predef_loads']\n predef_loads_file_path = conf_pred['path_loads']\n # heating curve\n conf_hk = config_file['components']['heating_curve']\n hk_ta = conf_hk['design_ambient_temperature_oC']\n hk_ti = conf_hk['design_indoor_temperature_oC']\n hk_tv = conf_hk['design_supply_temperature_oC']\n hk_tr = conf_hk['design_return_temperature_oC']\n hk_n = conf_hk['radiator_coefficient_n']\n hk_m = conf_hk['radiator_coefficient_m']\n hk_qn = conf_hk['design_heat_load_in_kW']\n # chp unit\n patm = utils.get_pressure_in_MPa()\n calcopt = utils.get_calc_option()\n eps_el_chp = config_file['components']['chp_unit']['electrical_efficiency']\n eps_th_chp = config_file['components']['chp_unit']['thermal_efficiency']\n qel_n_chp = config_file['components']['chp_unit']['max_electric_power_in_kW']\n chp_tinp = config_file['components']['chp_unit']['design_input_temperature_oC']\n chp_tmax = config_file['components']['chp_unit']['design_output_temperature_oC']\n qth_n_chp = eps_th_chp * qel_n_chp / eps_el_chp # in kW\n mstr_chp = qth_n_chp / (utils.cp_fluid_water(0.5 * (chp_tmax + chp_tinp), patm, calcopt) * (chp_tmax - chp_tinp)) # in kg/s = kW / (kJ/kg/K * K)\n # gas boiler\n qth_n_gb = config_file['components']['gas_boiler']['max_thermal_power_in_kW']\n gb_tinp = config_file['components']['gas_boiler']['design_input_temperature_oC']\n gb_tmax = config_file['components']['gas_boiler']['design_output_temperature_oC']\n mstr_gb = qth_n_gb / (utils.cp_fluid_water(0.5 * (gb_tinp + gb_tmax), patm, calcopt) * (gb_tmax - gb_tinp)) # in kg/s = kW / (kJ/kg/K * K) # in kg/s = kW / (kJ/kg/K * K)\n # storage tank\n effective_height = config_file['components']['storage_tank']['effective_heigth_in_m']\n inner_radius = config_file['components']['storage_tank']['inner_radius_tank_in_m']\n effective_pipe_volume = config_file['components']['storage_tank']['effective_coil_volume_in_m3']\n effective_volume = config_file['components']['storage_tank']['effective_volume_in_m3']\n if (effective_volume <= 0.0):\n effective_volume = math.pi * inner_radius * inner_radius * effective_height - effective_pipe_volume # in m3\n nr_calc = 20\n slice_volume = effective_volume / nr_calc # in m3\n qmax_rod_el = config_file['components']['storage_tank']['power_heating_rod_in_kW']\n open_weather_map_active = config_file['calculation']['platform_mode']['open_weather_map_active']\n # conf_powr\n #print('\\n initialize_thermal_prediction')\n #print('use_predef_loads = {}; {}'.format(use_predef_loads,type(use_predef_loads)))\n #print('predef_loads_file_path = {}; {}'.format(predef_loads_file_path,type(predef_loads_file_path)))\n return predict_thermal.predict_Q(n_day, n_values, precision_in_h, predef_loads_file_path, use_predef_loads, self.output_horizon_in_h, \n self.output_resolution_in_s, conf_powr, hk_tv, hk_tr, hk_ti, hk_ta, hk_qn, hk_n, hk_m, chp_tmax, gb_tmax, slice_volume, \n mstr_chp, mstr_gb, qmax_rod_el, eps_th_chp, eps_el_chp, open_weather_map_active)", "def dynamic_model(self, input_val: float) -> float:\n pass", "def from_config(cls, 
model_config: Union[dict, ModelConfig]) -> Type[AbstractModel]:\n\n if not (model_config and isinstance(model_config, (ModelConfig, dict))):\n msg = f\"Need a valid model config to create a text/tagger model in AutoModel. \" \\\n f\"Found model_config={model_config} of type({type(model_config)})\"\n raise ValueError(msg)\n\n # get model type upon validation\n model_config = cls._resolve_model_config(model_config)\n model_type = cls._get_model_type(model_config)\n\n # load metadata and return\n if model_type == \"text\":\n model_class = AutoTextModel.get_model_class(model_config)\n elif model_type == \"tagger\":\n model_class = AutoTaggerModel.get_model_class(model_config)\n\n return model_class(model_config)", "def create_regressor(config, parameters):\n\n # Mean and Standard Deviation Constants for normalization.\n with file_io.FileIO(parameters.mean_path, mode='r') as f:\n mean = pickle.load(f)\n with file_io.FileIO(parameters.std_path, mode='r') as f:\n std = pickle.load(f)\n\n # Columns to be used as features.\n hour = tf.feature_column.categorical_column_with_identity(\n 'hour', num_buckets=24)\n hour = tf.feature_column.embedding_column(\n hour, dimension=parameters.hour_embedding)\n\n day = tf.feature_column.categorical_column_with_identity(\n 'day', num_buckets=7)\n day = tf.feature_column.embedding_column(\n day, dimension=parameters.day_embedding)\n\n weather = [tf.feature_column.numeric_column(\n 'weather' + str(i),\n normalizer_fn=(lambda x, i = i: (x - mean[i]) / std[i])\n ) for i in range(constants.WEATHER_SIZE)]\n\n distribution = [tf.feature_column.numeric_column(\n 'distribution' + str(i)\n ) for i in range(constants.DISTRIBUTION_SIZE)]\n\n feature_cols = [hour, day] + weather + distribution\n\n # Evaluation metric.\n def mean_absolute_error(labels, predictions):\n \"\"\"Creates mean absolute error metric.\n\n Metric is used to evaluate the model.\n\n Args:\n labels: Evaluation true labels.\n predictions: Evaluation model predictions.\n\n Returns:\n A dictionary with the evaluation metric\n \"\"\"\n pred_values = predictions['predictions']\n return {'mae': tf.metrics.mean_absolute_error(\n labels, pred_values)}\n\n layer = parameters.first_layer_size\n lfrac = parameters.layer_reduction_fraction\n nlayers = parameters.number_layers\n h_units = [layer]\n for _ in range(nlayers - 1):\n h_units.append(math.ceil(layer * lfrac))\n layer = h_units[-1]\n\n estimator = tf.estimator.DNNRegressor(\n feature_columns=feature_cols,\n hidden_units=h_units,\n optimizer=tf.train.AdagradOptimizer(\n learning_rate=parameters.learning_rate),\n dropout=parameters.dropout, config=config)\n estimator = tf.contrib.estimator.add_metrics(\n estimator, mean_absolute_error)\n estimator = tf.contrib.estimator.forward_features(estimator, 'date')\n return estimator", "def build_model():" ]
[ "0.84457386", "0.83382916", "0.65599996", "0.6377564", "0.6291746", "0.62521607", "0.6228496", "0.6228496", "0.62193686", "0.62099886", "0.6151095", "0.60817146", "0.60338646", "0.60337704", "0.60258543", "0.5996558", "0.5979533", "0.5957634", "0.5921604", "0.59115314", "0.591022", "0.5895959", "0.588716", "0.58588123", "0.58475983", "0.58304906", "0.58231", "0.582295", "0.58008116", "0.57916975", "0.5781211", "0.57633346", "0.57633346", "0.5745872", "0.57453936", "0.5725868", "0.5699846", "0.56939065", "0.5678337", "0.5672961", "0.5662663", "0.56521946", "0.5648744", "0.56426036", "0.5637232", "0.5635688", "0.56301475", "0.561795", "0.5617467", "0.5616332", "0.5615282", "0.5609536", "0.56046265", "0.5599231", "0.55891174", "0.55808264", "0.55770683", "0.55720663", "0.55607605", "0.55546147", "0.5552247", "0.55379564", "0.55279887", "0.55107474", "0.5508882", "0.550775", "0.5497394", "0.5490513", "0.5488134", "0.54860187", "0.548374", "0.54803294", "0.5478904", "0.5467796", "0.54614633", "0.5460739", "0.5452559", "0.54514897", "0.544805", "0.5445093", "0.5444655", "0.54442286", "0.5440199", "0.5437378", "0.543734", "0.5435456", "0.5433256", "0.5431069", "0.5427664", "0.54259646", "0.54179657", "0.5417811", "0.54075444", "0.5407465", "0.5405696", "0.5402593", "0.5402343", "0.5400813", "0.53997827", "0.5396738" ]
0.57454526
34
Loads a checkpoint of a model. The provided model checkpoint must match the stored model.
def try_load_checkpoint_for_mean_teacher_model(self) -> bool: if self._mean_teacher_model is None: raise ValueError("Mean teacher model must be created before it can be adjusted.") if not self.checkpoint_path: raise ValueError("No checkpoint provided") if not self.checkpoint_path.is_file(): logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}') return False epoch = ModelAndInfo._load_checkpoint(model=self._mean_teacher_model, checkpoint_path=self.checkpoint_path, key_in_state_dict=ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY, use_gpu=self.config.use_gpu) logging.info(f"Loaded mean teacher model from checkpoint (epoch: {epoch})") self.checkpoint_epoch = epoch return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not 
model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n 
model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def try_load_checkpoint_for_model(self) -> bool:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_model(model, transfer_from, sess):\n param_path = final_param_path(model.name, transfer_from)\n step_to_load = FINAL_PARAM_STEPS[model.name][transfer_from]\n util.load_checkpoint_at_step(\n model_name=model.name,\n global_step=step_to_load,\n 
saver=tf.train.Saver(),\n sess=sess,\n path=param_path)", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_model(model):\n # Check if the model is a model directory (containing a metagraph and a checkpoint file)\n # or if it is a protobuf file with a frozen graph\n model_exp = os.path.expanduser(model)\n if os.path.isfile(model_exp):\n print('Model filename: %s' % model_exp)\n with tf.gfile.FastGFile(model_exp, 'rb') as f_l:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f_l.read())\n tf.import_graph_def(graph_def, name='')\n else:\n print('Model directory: %s' % model_exp)\n meta_file, ckpt_file = get_model_filenames(model_exp)\n\n print('Metagraph file: %s' % meta_file)\n print('Checkpoint file: %s' % ckpt_file)\n\n saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))\n saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n 
print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def load_checkpoint(model, filename, map_location='cpu', strict=False, logger=None):\n checkpoint = _load_checkpoint(filename, map_location)\n if not isinstance(checkpoint, dict):\n raise RuntimeError(f'No state_dict found in checkpoint file {filename}')\n if 'state_dict' in checkpoint:\n state_dict_tmp = checkpoint['state_dict']\n else:\n state_dict_tmp = checkpoint\n state_dict = OrderedDict()\n for k, v in state_dict_tmp.items():\n if k.startswith('module.backbone.'):\n state_dict[k[16:]] = v\n elif k.startswith('module.'):\n state_dict[k[7:]] = v\n elif k.startswith('backbone.'):\n state_dict[k[9:]] = v\n else:\n state_dict[k] = v\n load_state_dict(model, state_dict, strict, logger)\n return checkpoint", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 
'model!')", "def load_model_saved_with_module(model, checkpoint_path, logger):\n checkpoint = torch.load(checkpoint_path)\n new_state_dict = dict()\n for k, v in checkpoint[\"model_state_dict\"].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)\n logger.info(f\"Already restored model from checkpoint: {checkpoint_path}\")\n return model", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = 
models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def _load_checkpoint(filename, map_location=None):\n if filename.startswith('modelzoo://'):\n warnings.warn('The URL scheme of \"modelzoo://\" is deprecated, please '\n 'use \"torchvision://\" instead')\n model_urls = get_torchvision_models()\n model_name = filename[11:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('torchvision://'):\n model_urls = get_torchvision_models()\n model_name = filename[14:]\n checkpoint = load_url_dist(model_urls[model_name])\n elif filename.startswith('open-mmlab://'):\n model_urls = get_external_models()\n model_name = filename[13:]\n deprecated_urls = get_deprecated_model_names()\n if model_name in deprecated_urls:\n warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '\n f'of open-mmlab://{deprecated_urls[model_name]}')\n model_name = deprecated_urls[model_name]\n model_url = model_urls[model_name]\n # check if is url\n if model_url.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(model_url)\n else:\n filename = osp.join(_get_mmcv_home(), model_url)\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n elif 
filename.startswith('mmcls://'):\n model_urls = get_mmcls_models()\n model_name = filename[8:]\n checkpoint = load_url_dist(model_urls[model_name])\n checkpoint = _process_mmcls_checkpoint(checkpoint)\n elif filename.startswith(('http://', 'https://')):\n checkpoint = load_url_dist(filename)\n elif filename.startswith('pavi://'):\n model_path = filename[7:]\n checkpoint = load_pavimodel_dist(model_path, map_location=map_location)\n elif filename.startswith('s3://'):\n checkpoint = load_fileclient_dist(\n filename, backend='ceph', map_location=map_location)\n else:\n if not osp.isfile(filename):\n raise IOError(f'{filename} is not a checkpoint file')\n checkpoint = torch.load(filename, map_location=map_location)\n return checkpoint", "def _resume_from_checkpoint(model: tf.keras.Model,\n model_dir: str,\n train_steps: int) -> int:\n logging.info('Load from checkpoint is enabled.')\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n logging.info('latest_checkpoint: %s', latest_checkpoint)\n if not latest_checkpoint:\n logging.info('No checkpoint detected.')\n return 0\n\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint)\n model.load_weights(latest_checkpoint)\n initial_epoch = model.optimizer.iterations // train_steps\n logging.info('Completed loading from checkpoint.')\n logging.info('Resuming from epoch %d', initial_epoch)\n return int(initial_epoch)", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_model(model_name):\n model = get_model(training = False)\n checkpoint = torch.load('../models/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = 
os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. 
Starting normal.\")\n return checkpoint", "def load_model(model, device, model_path):\n if os.path.exists(model_path):\n print(\"Reading model from \", model_path)\n checkpoint = torch.load(model_path, map_location=torch.device(device))\n model.load_state_dict(checkpoint['state_dict'])\n return model\n else:\n raise RuntimeError('Model does not exist!')", "def load_pretrained_model(self, load_from):\n print(\"loading model from %s\\n\" % (load_from))\n try:\n if self.use_cuda:\n pretrained_dict = torch.load(load_from)\n else:\n pretrained_dict = torch.load(load_from, map_location='cpu')\n\n model_dict = self.online_net.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.online_net.load_state_dict(model_dict)\n print(\"The loaded parameters are:\")\n keys = [key for key in pretrained_dict]\n print(\", \".join(keys))\n print(\"--------------------------\")\n except Exception as e:\n print(\"Failed to load checkpoint...\")\n print(e)", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_model(session: tf.Session, model_dir: Text) -> None:\n saver = tf.train.Saver()\n saver.restore(session, model_dir)", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n 
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def resume(self, checkpoint):\n model_dict = paddle.load(checkpoint)\n self.model.set_state_dict(model_dict)", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def load_checkpoint(model,\n filename,\n map_location='cpu',\n strict=False,\n logger=None):\n checkpoint = _load_checkpoint(filename, map_location)\n # OrderedDict is a subclass of dict\n if not isinstance(checkpoint, dict):\n raise RuntimeError(\n f'No state_dict found in checkpoint file {filename}')\n # get state_dict from checkpoint\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n elif 'model' in checkpoint:\n state_dict = checkpoint['model']\n elif 'module' in checkpoint:\n state_dict = checkpoint['module']\n else:\n state_dict = checkpoint\n # strip prefix of state_dict\n if list(state_dict.keys())[0].startswith('module.'):\n state_dict = {k[7:]: v for k, v in state_dict.items()}\n\n # for MoBY, load model of online branch\n if sorted(list(state_dict.keys()))[0].startswith('encoder'):\n state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}\n\n # reshape absolute position embedding for Swin\n if state_dict.get('absolute_pos_embed') is not None:\n absolute_pos_embed = state_dict['absolute_pos_embed']\n N1, L, C1 = absolute_pos_embed.size()\n N2, C2, H, W = model.absolute_pos_embed.size()\n if N1 != N2 or C1 != C2 or L != H*W:\n logger.warning(\"Error in loading absolute_pos_embed, pass\")\n else:\n state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)\n\n rank, _ = get_dist_info()\n if \"rel_pos_bias.relative_position_bias_table\" in state_dict:\n if rank == 0:\n print(\"Expand the shared relative position embedding to each layers. \")\n num_layers = model.get_num_layers()\n rel_pos_bias = state_dict[\"rel_pos_bias.relative_position_bias_table\"]\n for i in range(num_layers):\n state_dict[\"blocks.%d.attn.relative_position_bias_table\" % i] = rel_pos_bias.clone()\n\n state_dict.pop(\"rel_pos_bias.relative_position_bias_table\")\n\n all_keys = list(state_dict.keys())\n for key in all_keys:\n if \"relative_position_index\" in key:\n state_dict.pop(key)\n\n if \"relative_position_bias_table\" in key:\n rel_pos_bias = state_dict[key]\n src_num_pos, num_attn_heads = rel_pos_bias.size()\n dst_num_pos, _ = model.state_dict()[key].size()\n dst_patch_shape = model.patch_embed.patch_shape\n if dst_patch_shape[0] != dst_patch_shape[1]:\n raise NotImplementedError()\n num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)\n src_size = int((src_num_pos - num_extra_tokens) ** 0.5)\n dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)\n if src_size != dst_size:\n if rank == 0:\n print(\"Position interpolate for %s from %dx%d to %dx%d\" % (\n key, src_size, src_size, dst_size, dst_size))\n extra_tokens = rel_pos_bias[-num_extra_tokens:, :]\n rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]\n\n def geometric_progression(a, r, n):\n return a * (1.0 - r ** n) / (1.0 - r)\n\n left, right = 1.01, 1.5\n while right - left > 1e-6:\n q = (left + right) / 2.0\n gp = geometric_progression(1, q, src_size // 2)\n if gp > dst_size // 2:\n right = q\n else:\n left = q\n\n # if q > 1.13492:\n # q = 1.13492\n\n dis = []\n cur = 1\n for i in range(src_size // 2):\n dis.append(cur)\n cur += q ** (i + 1)\n\n r_ids = [-_ for _ in reversed(dis)]\n\n x = r_ids + [0] + dis\n y = r_ids + [0] + dis\n\n t = dst_size // 2.0\n dx = np.arange(-t, t + 0.1, 1.0)\n dy = 
np.arange(-t, t + 0.1, 1.0)\n if rank == 0:\n print(\"x = {}\".format(x))\n print(\"dx = {}\".format(dx))\n\n all_rel_pos_bias = []\n\n for i in range(num_attn_heads):\n z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()\n f = interpolate.interp2d(x, y, z, kind='cubic')\n all_rel_pos_bias.append(\n torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))\n\n rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)\n new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)\n state_dict[key] = new_rel_pos_bias\n\n if 'pos_embed' in state_dict:\n pos_embed_checkpoint = state_dict['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n if rank == 0:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n state_dict['pos_embed'] = new_pos_embed\n\n # interpolate position bias table if needed\n relative_position_bias_table_keys = [k for k in state_dict.keys() if \"relative_position_bias_table\" in k]\n for table_key in relative_position_bias_table_keys:\n table_pretrained = state_dict[table_key]\n table_current = model.state_dict()[table_key]\n L1, nH1 = table_pretrained.size()\n L2, nH2 = table_current.size()\n if nH1 != nH2:\n logger.warning(f\"Error in loading {table_key}, pass\")\n else:\n if L1 != L2:\n S1 = int(L1 ** 0.5)\n S2 = int(L2 ** 0.5)\n table_pretrained_resized = F.interpolate(\n table_pretrained.permute(1, 0).view(1, nH1, S1, S1),\n size=(S2, S2), mode='bicubic')\n state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)\n\n # load state_dict\n load_state_dict(model, state_dict, strict, logger)\n return checkpoint", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def load_model(fn, model):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n log(\"Loading model from {}\".format(fn))\n model.saver.restore(model.session, 
fn)\n log(\"Done loading!\")", "def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)", "def load_model():\n with open(paths.model('model.pkl'), 'rb') as stream:\n return pickle.load(stream)", "def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))", "def _restore(self, checkpoint):\n checkpoint_path = os.path.join(checkpoint, \"model_weights\")\n self.model.load_weights(checkpoint_path)", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def load_model(model):\n fin = False\n backup1 = False\n backup2 = False\n\n if os.path.exists(\"TrainedModel/finalModel.pth\"):\n fin = True\n elif os.path.exists(\"TrainedModel/modelBackup.pth\"):\n backup1 = True\n elif os.path.exists(\"TrainedModel/modelBackupBackup.pth\"):\n backup2 = True\n\n if fin:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/finalModel.pth\"))\n return model\n except:\n print(\"finalModel seems to be corrupted, trying a backup...\")\n \n if fin or backup1:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackup.pth\"))\n return model\n except:\n print(\"modelBackup seems to be corrupted, trying a backup...\")\n\n if fin or backup1 or backup2:\n try:\n model.load_state_dict(torch.load(\"TrainedModel/modelBackupBackup.pth\"))\n return model\n except:\n print(\"modelBackupBackup seems to be corrupted, you're at the end of the line.\")\n\n print(\"There doesn't seem to be anything to load.\")\n return model", "def load_actor(self, checkpoint):\n \n model = torch.load(checkpoint)\n self.actor_local.load_state_dict(model)", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", 
ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] Failed to find a checkpoint, Exception!\")\n return False, 0", "def load(self, sess, step=None):\n if step==None:\n ckpt_path = tf.train.latest_checkpoint(self.model.ckpt_dir)\n else:\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model-'+str(step))\n self.saver.restore(sess, ckpt_path)\n step = tf.train.global_step(sess, self.gstep)\n print('Load model at step {} from check point {}.'.format(step, ckpt_path))", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = 
checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def maybe_load_model(savedir, container):\n if savedir is None:\n return\n\n state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))\n if container is not None:\n logger.log(\"Attempting to download model from Azure\")\n found_model = container.get(savedir, 'training_state.pkl.zip')\n else:\n found_model = os.path.exists(state_path)\n if found_model:\n state = pickle_load(state_path, compression=True)\n model_dir = \"model-{}\".format(state[\"num_iters\"])\n if container is not None:\n container.get(savedir, model_dir)\n U.load_state(os.path.join(savedir, model_dir, \"saved\"))\n logger.log(\"Loaded models checkpoint at {} iterations\".format(state[\"num_iters\"]))\n return state", "def load_pretrained_model(model, pretrained_model_path, verbose=False):\n\n if isinstance(pretrained_model_path, str):\n if not os.path.exists(pretrained_model_path):\n raise IOError(\n \"Can't find pretrained model: {}\".format(pretrained_model_path)\n )\n\n print(\"Loading checkpoint from '{}'\".format(pretrained_model_path))\n pretrained_state = torch.load(pretrained_model_path)[\"state_dict\"]\n else:\n # incase pretrained model weights are given\n pretrained_state = pretrained_model_path\n\n print(len(pretrained_state), \" keys in pretrained model\")\n\n current_model_state = model.state_dict()\n print(len(current_model_state), \" keys in current model\")\n pretrained_state = {\n key: val\n for key, val in pretrained_state.items()\n if key in current_model_state and val.size() == current_model_state[key].size()\n }\n\n print(\n len(pretrained_state),\n \" keys in pretrained model are available in current model\",\n )\n current_model_state.update(pretrained_state)\n model.load_state_dict(current_model_state)\n\n if verbose:\n non_available_keys_in_pretrained = [\n key\n for key, val in pretrained_state.items()\n if key not in current_model_state\n or val.size() != current_model_state[key].size()\n ]\n non_available_keys_in_current = [\n key\n for key, val in current_model_state.items()\n if key not in pretrained_state or val.size() != pretrained_state[key].size()\n ]\n\n print(\n \"not 
available keys in pretrained model: \", non_available_keys_in_pretrained\n )\n print(\"not available keys in current model: \", non_available_keys_in_current)\n\n return model", "def load_weights_infer(checkpoint_path, model):\n try:\n # catalyst weights\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")[\"model_state_dict\"]\n except:\n # anything else\n state_dict = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(state_dict, strict=True)\n except:\n # for clf + seg for seg only prediction\n print(f\"Non-strict loading of weights from {checkpoint_path}\")\n model.load_state_dict(state_dict, strict=False)\n model.eval()\n return model", "def get_model(self, model: Optional[torch.nn.Module] = None) -> torch.nn.Module:\n with self.as_directory() as tempdir:\n model_path = os.path.join(tempdir, self.MODEL_FILENAME)\n if not os.path.exists(model_path):\n raise RuntimeError(\n \"`model.pt` not found within this checkpoint. Make sure you \"\n \"created this `TorchCheckpoint` from one of its public \"\n \"constructors (`from_state_dict` or `from_model`).\"\n )\n model_or_state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if isinstance(model_or_state_dict, torch.nn.Module):\n if model:\n warnings.warn(\n \"TorchCheckpoint already contains all information needed. \"\n \"Discarding provided `model` argument. This means: \"\n \"If you are using BatchPredictor, you should do \"\n \"`BatchPredictor.from_checkpoint(checkpoint, TorchPredictor)` by\"\n \"removing kwargs `model=`. \"\n \"If you are using TorchPredictor directly, you should do \"\n \"`TorchPredictor.from_checkpoint(checkpoint)` by removing kwargs \"\n \"`model=`.\"\n )\n model = load_torch_model(\n saved_model=model_or_state_dict, model_definition=model\n )\n return model", "def load_model_from_checkpoint(file, device):\r\n\r\n if device == 'cuda':\r\n # Load all tensors onto GPU\r\n map_location = lambda storage, loc: storage.cuda()\r\n else:\r\n # Load all tensors onto CPU\r\n map_location = lambda storage, loc: storage\r\n\r\n # Assuming model was trained and checkpoint saved on Linux, but predict.py inference is executed using Windows.\r\n # Then, it is required to implement the following quick fix, because otherwise the exception is raised:\r\n # \"NotImplementedError: cannot instantiate 'PosixPath' on your system\"\r\n # Credits to https://stackoverflow.com/questions/57286486/i-cant-load-my-model-because-i-cant-put-a-posixpath\r\n if type(file) == pathlib.WindowsPath:\r\n tmp_PosixPath = pathlib.PosixPath\r\n pathlib.PosixPath = pathlib.WindowsPath\r\n\r\n parameters = torch.load(file, map_location=map_location)\r\n\r\n # Restore default\r\n if type(file) == pathlib.WindowsPath:\r\n pathlib.WindowsPath = pathlib.PosixPath\r\n pathlib.PosixPath = tmp_PosixPath\r\n\r\n model = train.create_model(parameters)\r\n\r\n model.class_to_idx = parameters.get('train_datasets_class_to_idx')\r\n model.load_state_dict(parameters.get('state_dict'), strict=False)\r\n\r\n return model, parameters", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir==\"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load 
checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir == \"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict", "def load_model(self):\n if self.save_path is not None:\n if isfile(self.save_path):\n self.model.load_state_dict(load(self.save_path))\n else:\n raise ValueError(\"Cannot find model save file: \" + self.save_path)", "def parse_checkpoint(checkpoint_path):\n with gfile.Open(checkpoint_path, 'rb') as fp:\n raw_contents = fp.read()\n if raw_contents.startswith(b'model_checkpoint_path'):\n raise ValueError(\n 'Attempting to restore a TensorFlow checkpoint as a native T5X '\n f'checkpoint. Path: {checkpoint_path}')\n return serialization.msgpack_restore(raw_contents)", "def load_ckpt(model,\n weight_path,\n **kargs):\n #model.set_state_dict(state_dict)\n\n if not osp.isfile(weight_path):\n raise IOError(f'{weight_path} is not a checkpoint file')\n #state_dicts = load(weight_path)\n\n logger = get_logger(\"paddlevideo\")\n state_dicts = paddle.load(weight_path)\n if \"VisionTransformer\" in str(model): # For TimeSformer case\n tmp = pretrain_vit_param_trans(model, state_dicts, kargs['num_patches'], kargs['seg_num'], kargs['attention_type'])\n else:\n tmp = {}\n total_len = len(model.state_dict())\n with tqdm(total=total_len, position=1, bar_format='{desc}', desc=\"Loading weights\") as desc:\n for item in tqdm(model.state_dict(), total=total_len, position=0):\n name = item\n desc.set_description('Loading %s' % name)\n if name not in state_dicts: # Convert from non-parallel model\n if str('backbone.' + name) in state_dicts:\n tmp[name] = state_dicts['backbone.' + name]\n else: # Convert from parallel model\n tmp[name] = state_dicts[name]\n time.sleep(0.01)\n ret_str = \"loading {:<20d} weights completed.\".format(len(model.state_dict()))\n desc.set_description(ret_str)\n model.set_state_dict(tmp)" ]
[ "0.8315387", "0.81653464", "0.8112465", "0.80683154", "0.8026479", "0.80094665", "0.79719937", "0.7923825", "0.7876365", "0.7869957", "0.78584534", "0.782106", "0.7777016", "0.7624399", "0.7612878", "0.76103896", "0.7592807", "0.7576732", "0.7537933", "0.75310373", "0.751883", "0.748922", "0.74809384", "0.74320143", "0.73944", "0.7393367", "0.7371976", "0.73655057", "0.73474103", "0.73224443", "0.72946405", "0.72946405", "0.7289665", "0.72687835", "0.7248022", "0.72397214", "0.7231976", "0.72269124", "0.7174827", "0.71698344", "0.714276", "0.7098216", "0.7095512", "0.7047667", "0.70442224", "0.7042748", "0.70327705", "0.7030327", "0.702826", "0.7024368", "0.70205617", "0.7000658", "0.6978624", "0.6975462", "0.6971302", "0.69644946", "0.695275", "0.69508135", "0.6944621", "0.6924109", "0.689542", "0.6886169", "0.68843234", "0.68789566", "0.68789566", "0.6850364", "0.6848456", "0.6843984", "0.68171984", "0.68033427", "0.68017215", "0.67914313", "0.67914313", "0.6775005", "0.6768106", "0.67518497", "0.67357266", "0.6732855", "0.6727436", "0.672266", "0.67028123", "0.6700171", "0.66985", "0.6696957", "0.66958326", "0.66944945", "0.66881377", "0.6682155", "0.6653994", "0.66482776", "0.6634968", "0.6624585", "0.6619646", "0.66150975", "0.6580375", "0.6571225", "0.657082", "0.65638703", "0.65617996", "0.6559275", "0.65570945" ]
0.0
-1
Updates the torch model so that input minibatches are parallelized across the batch dimension to utilise multiple gpus. If model parallel is set to True and execution is in test mode, then model is partitioned to perform full volume inference.
def adjust_mean_teacher_model_for_gpus(self) -> None:
    if self._mean_teacher_model is None:
        raise ValueError("Mean teacher model must be created before it can be adjusted.")
    # Adjusting twice causes an error.
    if self.is_mean_teacher_model_adjusted:
        logging.debug("model_and_info.is_mean_teacher_model_adjusted is already True")
    self._mean_teacher_model = ModelAndInfo._adjust_for_gpus(model=self._mean_teacher_model,
                                                             config=self.config,
                                                             model_execution_mode=self.model_execution_mode)
    self.is_mean_teacher_model_adjusted = True
    logging.debug("model_and_info.is_mean_teacher_model_adjusted set to True")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def parallelize(self):\r\n self.parallel = True\r\n self.network = torch.nn.DataParallel(self.network)", "def parallelize(self):\n self.parallel = True\n self.network = torch.nn.DataParallel(self.network)", "def train_parallel(config):\n _setup_parallel_env()\n print(f\" | Starting training on {os.getenv('RANK_SIZE', None)} devices.\")\n\n pre_train_dataset = load_dataset(\n data_files=config.pre_train_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.pre_train_dataset else None\n fine_tune_dataset = load_dataset(\n data_files=config.fine_tune_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.fine_tune_dataset else None\n test_dataset = load_dataset(\n data_files=config.test_dataset,\n batch_size=config.batch_size,\n sink_mode=config.dataset_sink_mode,\n rank_size=MultiDevice.get_group_size(),\n rank_id=MultiDevice.get_rank()\n ) if config.test_dataset else None\n\n _build_training_pipeline(config=config,\n pre_training_dataset=pre_train_dataset,\n fine_tune_dataset=fine_tune_dataset,\n test_dataset=test_dataset)", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def init_model_parallel(self, global_rank: int, world_size: int) -> None:\n app_state = AppState()\n\n # we initialize megatron-lm model parallel and data parallel groups\n # after initializing DDP with PTL.\n if app_state.model_parallel_size is not None:\n # destroy groups in case they have already been created\n # this happens with multiple calls to trainer.test for example\n 
parallel_state.destroy_model_parallel()\n if torch.distributed.is_initialized():\n parallel_state.initialize_model_parallel(\n tensor_model_parallel_size=app_state.tensor_model_parallel_size,\n pipeline_model_parallel_size=app_state.pipeline_model_parallel_size,\n virtual_pipeline_model_parallel_size=app_state.virtual_pipeline_model_parallel_size,\n pipeline_model_parallel_split_rank=app_state.pipeline_model_parallel_split_rank,\n use_fp8=app_state.use_fp8,\n )\n\n # assert that fake tp and pp rank match after model parallel init\n assert app_state.tensor_model_parallel_rank == parallel_state.get_tensor_model_parallel_rank()\n assert app_state.pipeline_model_parallel_rank == parallel_state.get_pipeline_model_parallel_rank()\n\n app_state.tensor_model_parallel_group = parallel_state.get_tensor_model_parallel_group()\n app_state.data_parallel_group = parallel_state.get_data_parallel_group()\n app_state.data_parallel_rank = parallel_state.get_data_parallel_rank()\n app_state.data_parallel_size = parallel_state.get_data_parallel_world_size()\n app_state.pipeline_model_parallel_group = parallel_state.get_pipeline_model_parallel_group()\n\n # create MPI process group for UCX-based communication APIs\n if app_state.init_mpi_proc_group:\n torch.distributed.new_group(backend='mpi')", "def partition_data_parallel(\n graph: GraphModule,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer],\n params_buffers: Dict[str, torch.Tensor],\n named_states: Dict[str, Any],\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n mesh: DeviceMesh,\n parallel_style: DataParallelStyle,\n input_batch_dim: int,\n) -> GraphModule:\n num_params_buffers = len(params_buffers)\n flattened_states = pytree.tree_flatten(named_states)[0]\n num_states = len(flattened_states)\n\n changed = graph.graph.eliminate_dead_code()\n if changed:\n graph.recompile()\n\n # 1. First build up data parallel strategies for the whole graph\n strategy_map = build_data_parallel_strategies(\n graph, num_params_buffers, num_states, mesh=mesh, batch_dim=input_batch_dim\n )\n\n # 2. Next we mark the data parallel strategy for each node base on\n # the parallel_style\n mark_data_parallel_shardings(\n graph,\n num_parameters=num_params_buffers,\n num_states=num_states,\n dp_strategy_map=strategy_map,\n parallel_mode=parallel_style,\n )\n\n # 3. Partition the single machine graph to the distribute graph\n partitioned_graph = partitioner(graph)\n\n # preserve node types for the expanded graph\n for node in partitioned_graph.graph.nodes:\n if node in strategy_map:\n node_strategy = strategy_map[node]\n if isinstance(node_strategy, DataParallelStrategy):\n node.meta[\"node_type\"] = node_strategy.node_type\n elif isinstance(node_strategy, TupleStrategy):\n node.meta[\"node_type\"] = NodeType.NON_TENSOR\n else:\n raise RuntimeError(f\"Unknown node strategy {node_strategy}\")\n else:\n # if the nodes are expanded nodes (collectives), we mark them\n # the same type as the input node.\n input_node = node.all_input_nodes[0]\n node.meta[\"node_type\"] = input_node.meta[\"node_type\"]\n\n # 4. 
Last, inplace partition the weights and optim states to\n # DTensors base on the parallel style\n accessor = NamedMemberAccessor(model)\n for param_key, param in params_buffers.items():\n placement: Placement = Replicate()\n if parallel_style == DataParallelStyle.FULLY_SHARD:\n placement = Shard(0)\n elif parallel_style != DataParallelStyle.REPLICATE:\n raise RuntimeError(f\"parallel style {parallel_style} not supported yet\")\n\n dtensor_param = distribute_tensor(param, mesh, [placement])\n # update re-parameterized module param dict and optim states dict to DTensor\n params_buffers[param_key] = dtensor_param.to_local()\n # update module parameters to DTensor\n accessor.set_tensor(param_key, dtensor_param)\n\n # update the optimizer state key and values to DTensor\n if optimizer is not None and param in optimizer.state:\n param_states = named_states[param_key]\n param_dtensor_states = {}\n for state_key, state_val in param_states.items():\n if isinstance(state_val, torch.Tensor) and state_val.ndim > 0:\n # shard/replicate non-scalar tensors, for scalar tensor, we\n # don't do anything\n dtensor_state = distribute_tensor(state_val, mesh, [placement])\n param_dtensor_states[state_key] = dtensor_state\n param_states[state_key] = dtensor_state.to_local()\n else:\n param_dtensor_states[state_key] = state_val\n\n optimizer.state.pop(param) # type: ignore[call-overload]\n optimizer.state[dtensor_param] = param_dtensor_states # type: ignore[index]\n\n return partitioned_graph", "def train(self, mode=True):\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n # print('the type train model : {}'.format(type(m)))\n if isinstance(m, torch.nn.BatchNorm2d) or \\\n isinstance(m, linklink.nn.syncbn_layer.SyncBatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # print('the freeze module: {} of {}th'.format(type(m), count))\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == 
\"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, 
last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def initialize_multitask_model(\n *,\n model_def: nn.Module,\n input_spec: Dict[Tuple[Tuple[str, str], ...],\n Sequence[Union[Tuple[Tuple[int, ...], jnp.dtype],\n Tuple[int, ...]]]],\n config: ml_collections.ConfigDict,\n rngs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]],\n) -> Tuple[PyTree, PyTree, int, Optional[Dict[str, float]]]:\n\n def init_fn(model_def):\n for kwargs, in_spec in input_spec.items():\n\n if config.get('batch_sizes') is not None:\n batch_size = config.batch_sizes.get(dict(kwargs)['dataset'])\n else:\n batch_size = config.batch_size\n\n batch_size = (batch_size // jax.device_count()) if batch_size else None\n\n input_shapetype = [\n debug_utils.input_spec_to_jax_shape_dtype_struct(\n spec, batch_size=batch_size) for spec in in_spec\n ]\n dummy_input = []\n for in_st in input_shapetype:\n dummy_input.append(jnp.zeros(in_st.shape, in_st.dtype))\n model_def(\n *dummy_input, train=False, debug=False, **dict(kwargs))\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, 
backend='cpu')\n def _initialize_model(rngs):\n \"\"\"Initialization function to be jitted.\"\"\"\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state\n\n if not isinstance(rngs, dict):\n rngs = {'params': rngs}\n init_params, init_model_state = _initialize_model(rngs)\n # Pop out params rng:\n rngs.pop('params')\n\n # Count number of trainable parameters:\n num_trainable_params = debug_utils.log_param_shapes(init_params)\n\n # Count gflops:\n count_flops = config.get('count_flops',\n ml_collections.ConfigDict({'count_flops': True}))\n if count_flops:\n variables = {'params': init_params, **init_model_state}\n gflops_dict = {}\n gflops_all = 0\n for kwargs, in_spec in input_spec.items():\n flops = debug_utils.compute_flops(\n flax_model_apply_fn=functools.partial(\n model_def.apply,\n variables,\n train=False,\n debug=False,\n rngs=rngs,\n **dict(kwargs)),\n input_spec=count_flops.get('input_spec', in_spec),\n fuse_multiply_add=count_flops.get('fuse_multiply_add', True))\n gflops = flops / (10**9)\n gflops_key = 'gflops/' + '/'.join(f'{x}={y}' for x, y in kwargs)\n gflops_dict[gflops_key] = gflops\n gflops_all += gflops\n gflops_dict['gflops'] = gflops_all\n else:\n gflops_dict = None\n\n return init_params, init_model_state, num_trainable_params, gflops_dict", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train 
loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def run_training(self, schema_params, export_model=False, output_model_dir=None):\n # Log distributed execution context, which includes cluster configuration\n logger.info(f\"Commencing {self.effect_name} training\")\n logger.info(f\"Execution context : {self.execution_context}\")\n\n # Create partition_index_list\n partition_index_list = self._get_partition_list()\n logger.info(f\"This worker on work on the following list of partitions : {partition_index_list}\")\n\n # Sequentially train model on partitions\n for partition_index in partition_index_list:\n logger.info(f\"Commencing {self.effect_name} training for partition index : {partition_index}\")\n\n # Resolve partitioned data directory from raw path params from user\n checkpoint_path = self._anchor_directory(\n self.model.checkpoint_path,\n partition_index)\n training_data_dir = self._anchor_directory(self.model.training_data_dir,\n partition_index)\n validation_data_dir = self._anchor_directory(self.model.validation_data_dir,\n partition_index) if self.model.validation_data_dir else None\n\n if is_empty_directory(training_data_dir):\n logger.info(f\"{training_data_dir} is empty, no dataset to train on.\")\n continue\n # Train model\n self.execution_context[constants.PARTITION_INDEX] = partition_index\n self.model.train(training_data_dir=training_data_dir,\n validation_data_dir=validation_data_dir,\n metadata_file=self.model.metadata_file,\n checkpoint_path=checkpoint_path,\n execution_context=self._prepare_training_context(partition_index),\n schema_params=schema_params)\n\n # Chief should export model\n is_chief = self.execution_context[constants.IS_CHIEF]\n if export_model and is_chief:\n logger.info(f\"Exporting model to directory : {output_model_dir}\")\n self.model.export(output_model_dir=output_model_dir)", "def data_parallel(self, batch_size, inputs):\n inputs = list(inputs)\n\n # quick path: only one device, do not slice\n if len(self.work_devices) == 1:\n assert(self.main_device == self.work_devices[0])\n yield self.main_device, False, tuple(inputs)\n\n # slow path: multi-GPUs\n else:\n # the 
GPUs are not in the same group, place variables on CPU\n if self.main_device not in self.work_devices:\n yield self.main_device, True, tuple(inputs)\n\n # build the paralleled computation graph for each device\n with tf.name_scope('data_parallel') as ns:\n pass # generate a name scope to place our data slicing ops\n\n k = len(self.work_devices)\n for i, device in enumerate(self.work_devices):\n dev_inputs = []\n with tf.name_scope(ns + 'tower_gpu_{}'.format(i)):\n for inp in inputs:\n slice_len = (batch_size + k - 1) // k\n low, high = slice_len * i, slice_len * (i + 1)\n dev_inputs.append(inp[low: high])\n yield device, False, tuple(dev_inputs)", "def run(rank, world_size, config):\n setup(rank, world_size, master_addr=config.neural_network.train.DistributedDataParallel.MASTER_ADDR, master_port=config.neural_network.train.DistributedDataParallel.MASTER_PORT)\n\n torch.manual_seed(int(config.neural_network.train.random_seed))\n training_dataloader, validation_dataloader, batch_size = partition_dataset(rank, world_size, config)\n\n total_epochs = int(config.neural_network.train.epochs)\n learning_rate = float(config.neural_network.train.learning_rate)\n\n n_hourglass = int(config.neural_network.PoseNet.n_hourglass)\n in_channels = int(config.neural_network.PoseNet.in_channels)\n out_channels = int(config.neural_network.PoseNet.out_channels)\n channel_increase = int(config.neural_network.PoseNet.channel_increase)\n model = PoseNet(n_hourglass=n_hourglass, in_channels=in_channels, out_channels=out_channels, channel_increase=channel_increase).to(rank)\n model = DistributedDataParallel(model, device_ids=[rank])\n\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate)\n loss_fn = HeatMapLossBatch()\n\n train_loader = iter(training_dataloader)\n valid_loader = iter(validation_dataloader)\n\n for epoch in range(total_epochs):\n training_dataloader.sampler.set_epoch(epoch)\n epoch_loss = 0.0\n\n train_iters = 0\n while train_iters < int(config.neural_network.train.train_iterations):\n train_iters += 1\n\n try:\n images, heatmaps = next(train_loader)\n except StopIteration:\n train_loader = iter(training_dataloader)\n images, heatmaps = next(train_loader)\n\n images = images.cuda(non_blocking=True)\n heatmaps = heatmaps.cuda(non_blocking=True)\n optimizer.zero_grad()\n output = model(images)\n loss = loss_fn(output, heatmaps)\n epoch_loss += utils.make_output(loss)\n loss.backward()\n average_gradients(model, world_size)\n optimizer.step()\n\n # validation\n with torch.no_grad():\n validation_loss = 0\n validation_dataloader.sampler.set_epoch(epoch)\n\n valid_iters = 0\n while valid_iters < int(config.neural_network.train.valid_iterations):\n valid_iters += 1\n\n try:\n images, heatmaps = next(valid_loader)\n except StopIteration:\n train_loader = iter(validation_dataloader)\n images, heatmaps = next(valid_loader)\n\n output = model(images)\n loss = loss_fn(output, heatmaps)\n validation_loss += utils.make_output(loss)\n\n epoch_train_loss = epoch_loss/config.neural_network.train.train_iterations\n epoch_valid_loss = validation_loss/config.neural_network.train.valid_iterations\n print(f\"rank:{dist.get_rank():2d} epoch:{epoch:3d} epoch_train_loss:{epoch_train_loss:0.4f} epoch_valid_loss:{epoch_valid_loss:0.4f}\")\n\n save_checkpoint = (rank == 0 and epoch > 0 and config.neural_network.train.checkpoint.save and epoch % config.neural_network.train.checkpoint.save_every == 0)\n if save_checkpoint:\n torch.save(model.state_dict(), 
config.neural_network.train.checkpoint.path) # saving it in one process is sufficient.\n dist.barrier()\n\n cleanup()", "def may_data_parallel(model):\n if torch.cuda.device_count() > 1:\n model = TransparentDataParallel(model)\n return model", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def sync_model(model):\n size = float(dist.get_world_size())\n\n for param in model.parameters():\n dist.broadcast(param.data, 0)", "def initialize_model(self):\n args = self.args\n\n if self.args.search_space == 'nasbench':\n self.model_fn = NasBenchNetSearchDarts\n self.fixmodel_fn = NasBenchNet\n model = self.model_fn(args)\n utils = darts_nasbench_utils\n else:\n raise NotImplementedError(\"Not supported\")\n # finialize model update\n if args.gpus > 0:\n if self.args.gpus == 1:\n model = model.cuda()\n self.parallel_model = model\n else:\n self.model = model\n self.parallel_model = nn.DataParallel(self.model).cuda()\n # IPython.embed(header='checking replicas and others.')\n else:\n self.parallel_model = model\n\n darts = DartsArchitect(model, args=args)\n model = self.parallel_model\n # logging.info(\"DARTS param size = %fMB\", utils.count_parameters_in_MB(darts))\n self.train_fn = partial(darts_train_model, args=args, architect=darts, sampler=None)\n self.eval_fn = partial(darts_model_validation, args=args, verbose=True)\n self.controller = darts\n\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n )\n\n # scheduler as Cosine.\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.learning_rate_min)\n return model, optimizer, scheduler, darts, None", "def update_model(self, verbose):\n if self.comm.project.meshes == \"multi-mesh\":\n self.comm.lasif.move_gradient_to_cluster()\n\n if not self.task_dict[\"summing_completed\"]:\n grad_summer = GradientSummer(comm=self.comm)\n grad_summer.sum_gradients(\n events=self.comm.project.non_val_events_in_iteration,\n output_location=self.raw_gradient_path,\n batch_average=True,\n sum_vpv_vph=True,\n store_norms=True,\n )\n write_xdmf(self.raw_gradient_path)\n self.task_dict[\"summing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Summing already done\")\n\n if not self.task_dict[\"raw_update_completed\"]:\n self._update_model(raw=True, smooth=False, verbose=verbose)\n self.task_dict[\"raw_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Raw updating already completed\")\n\n if not self.task_dict[\"smoothing_completed\"]:\n self.perform_smoothing()\n self.task_dict[\"smoothing_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smoothing already done\")\n\n if not self.task_dict[\"smooth_update_completed\"]:\n self._update_model(raw=False, smooth=True, verbose=verbose)\n self.task_dict[\"smooth_update_completed\"] = True\n self._update_task_file()\n else:\n self.print(\"Smooth updating already completed\")\n\n if not self.task_dict[\"iteration_finalized\"]:\n self._finalize_iteration(verbose=verbose)\n self.task_dict[\"iteration_finalized\"] = True\n 
self._update_task_file()\n else:\n self.print(\"Iteration already finalized\")\n\n self.finish_task()", "def train(self):\r\n print(\"Starting training now\")\r\n cuda = True if torch.cuda.is_available() else False\r\n if cuda:\r\n self.model.cuda()\r\n\r\n # Construct optimizer after the model moved to GPU\r\n self.optm = self.make_optimizer()\r\n self.lr_scheduler = self.make_lr_scheduler(self.optm)\r\n\r\n dim_x = self.flags.dim_x\r\n dim_y = self.flags.dim_y\r\n dim_z = self.flags.dim_z\r\n dim_tot = self.flags.dim_tot\r\n\r\n # Time keeping\r\n tk = time_keeper(time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))\r\n\r\n for epoch in range(self.flags.train_step):\r\n # Set to Training Mode\r\n train_loss = 0\r\n self.model.train()\r\n # If MMD on x-space is present from the start, the model can get stuck.\r\n # Instead, ramp it up exponetially.\r\n loss_factor = min(1., 2. * 0.002 ** (1. - (float(epoch) / self.flags.train_step)))\r\n\r\n for j, (x, y) in enumerate(self.train_loader):\r\n batch_size = len(x)\r\n\r\n ######################\r\n # Preparing the data #\r\n ######################\r\n # Pad the x, y with zero_noise\r\n y_clean = y.clone() # keep a copy of y for backward\r\n x_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_x)\r\n y_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_y - dim_z)\r\n z = torch.randn(batch_size, dim_z)\r\n if cuda:\r\n x = x.cuda() # Put data onto GPU\r\n y = y.cuda() # Put data onto GPU\r\n x_pad = x_pad.cuda()\r\n y_pad = y_pad.cuda()\r\n y_clean = y_clean.cuda()\r\n z = z.cuda()\r\n\r\n # Concate the x and y with pads and add y with small purtubation\r\n y += self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)\r\n\r\n x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y), dim=1)\r\n\r\n ################\r\n # Forward step #\r\n ################\r\n self.optm.zero_grad() # Zero the gradient first\r\n ypred = self.model(x) # Get the Ypred\r\n\r\n\r\n # Do the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)\r\n MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:], labels=y[:, dim_z:])\r\n\r\n # Use the maximum likelihood method\r\n log_det = self.model.log_jacobian(x=x)\r\n #print(\"The log determinant is\", log_det)\r\n Forward_loss = 0.5 * (MSE_loss_y / self.flags.lambda_mse + torch.mean(torch.pow(z,2))) - torch.mean(log_det)\r\n Forward_loss.backward()\r\n\r\n ######################\r\n # Gradient Clipping #\r\n ######################\r\n for parameter in self.model.parameters():\r\n parameter.grad.data.clamp_(-self.flags.grad_clamp, self.flags.grad_clamp)\r\n\r\n #########################\r\n # Descent your gradient #\r\n #########################\r\n self.optm.step() # Move one step the optimizer\r\n\r\n # MLE training\r\n train_loss += Forward_loss \r\n\r\n # Calculate the avg loss of training\r\n train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)\r\n\r\n if epoch % self.flags.eval_step == 0: # For eval steps, do the evaluations and tensor board\r\n # Record the training loss to the tensorboard\r\n self.log.add_scalar('Loss/total_train', train_avg_loss, epoch)\r\n self.log.add_scalar('Loss/MSE_y_train', MSE_loss_y, epoch)\r\n\r\n # Set to Evaluation Mode\r\n self.model.eval()\r\n print(\"Doing Evaluation on the model now\")\r\n\r\n test_loss = 0\r\n for j, (x, y) in enumerate(self.test_loader): # Loop through the eval set\r\n batch_size = len(x)\r\n\r\n ######################\r\n # Preparing the data #\r\n 
######################\r\n # Pad the x, y with zero_noise\r\n y_clean = y.clone() # keep a copy of y for backward\r\n x_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_x)\r\n y_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_y - dim_z)\r\n z = torch.randn(batch_size, dim_z)\r\n if cuda:\r\n x = x.cuda() # Put data onto GPU\r\n y = y.cuda() # Put data onto GPU\r\n x_pad = x_pad.cuda()\r\n y_pad = y_pad.cuda()\r\n y_clean = y_clean.cuda()\r\n z = z.cuda()\r\n\r\n # Concate the x and y with pads and add y with small purtubation\r\n y += self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)\r\n\r\n x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y), dim=1)\r\n\r\n ################\r\n # Forward step #\r\n ################\r\n self.optm.zero_grad() # Zero the gradient first\r\n ypred = self.model(x) # Get the Ypred\r\n # Do the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)\r\n MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:], labels=y[:, dim_z:])\r\n\r\n log_det = self.model.log_jacobian(x=x)\r\n #print(\"The log determinant is\", log_det)\r\n Forward_loss = 0.5 * (MSE_loss_y / self.flags.lambda_mse + torch.mean(torch.pow(z,2))) - torch.mean(log_det)\r\n test_loss += Forward_loss\r\n # Aggregate the other loss (in np form)\r\n\r\n # Record the testing loss to the tensorboard\r\n test_avg_loss = test_loss.cpu().data.numpy() / (j+1)\r\n\r\n self.log.add_scalar('Loss/total_test', test_avg_loss, epoch)\r\n self.log.add_scalar('Loss/MSE_y_test', MSE_loss_y, epoch)\r\n\r\n print(\"This is Epoch %d, training loss %.5f, validation loss %.5f\" \\\r\n % (epoch, train_avg_loss, test_avg_loss ))\r\n\r\n # Model improving, save the model down\r\n if test_avg_loss < self.best_validation_loss:\r\n self.best_validation_loss = train_avg_loss\r\n self.save()\r\n print(\"Saving the model down...\")\r\n\r\n if self.best_validation_loss < self.flags.stop_threshold:\r\n print(\"Training finished EARLIER at epoch %d, reaching loss of %.5f\" %\\\r\n (epoch, self.best_validation_loss))\r\n break\r\n\r\n # Learning rate decay upon plateau\r\n self.lr_scheduler.step(train_avg_loss)\r\n tk.record(1) # Record the total time of the training peroid\r", "def train(model, data_loader, optimizer, epoch, train_mloss, train_rloss, train_acc, learning_rate, lr_wr, output_tensor):\r\n print('===> Training mode')\r\n\r\n num_batches = len(data_loader) # iteration per epoch. 
e.g: 469\r\n total_step = args.epochs * num_batches\r\n epoch_tot_acc = 0\r\n\r\n # Switch to train mode\r\n model.train()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n start_time = timer()\r\n\r\n for batch_idx, (data, target) in enumerate(tqdm(data_loader, unit='batch')):\r\n batch_size = data.size(0)\r\n global_step = batch_idx + (epoch * num_batches) - num_batches\r\n\r\n labels = target\r\n target_one_hot = utils.one_hot_encode(target, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n data, target = Variable(data), Variable(target_one_hot)\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n labels = labels.to(args.device)\r\n\r\n # Train step - forward, backward and optimize\r\n optimizer.zero_grad()\r\n #utils.exponential_decay_LRR(optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n # learning rate policies\r\n if args.find_lr:\r\n utils.find_lr(optimizer, global_step)\r\n\r\n elif args.exp_decay_lr:\r\n utils.exponential_decay_LRR(\r\n optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n\r\n elif args.one_cycle_policy:\r\n utils.one_cycle_policy(optimizer, args.lr, global_step, total_step)\r\n\r\n elif args.warm_restarts:\r\n # lr_wr.update_lr(optimizer, num_batches)\r\n lr_wr.update_lr(optimizer)\r\n\r\n output, reconstruction = model(data, labels, True)\r\n # utils.write_tensor(output, output_tensor)\r\n loss, margin_loss, recon_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n for param_group in optimizer.param_groups:\r\n lr_temp = param_group['lr']\r\n learning_rate.write('%.10f \\n' % lr_temp)\r\n\r\n # Calculate accuracy for each step and average accuracy for each epoch\r\n acc = utils.accuracy(output, labels, args.cuda)\r\n epoch_tot_acc += acc\r\n epoch_avg_acc = epoch_tot_acc / (batch_idx + 1)\r\n\r\n train_mloss.write('%.6f \\n' % margin_loss)\r\n train_rloss.write('%.6f \\n' % recon_loss)\r\n train_acc.write('%.6f \\n' % acc)\r\n\r\n # Print losses\r\n if batch_idx % args.log_interval == 0:\r\n template = 'Epoch {}/{}, ' \\\r\n 'Step {}/{}: ' \\\r\n '[Total loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f},' \\\r\n '\\tBatch accuracy: {:.6f},' \\\r\n '\\tAccuracy: {:.6f}]'\r\n tqdm.write(template.format(\r\n epoch,\r\n args.epochs,\r\n global_step,\r\n total_step,\r\n loss.data.item(),\r\n margin_loss.data.item(),\r\n recon_loss.data.item() if args.use_reconstruction_loss else 0,\r\n acc,\r\n epoch_avg_acc))\r\n\r\n # Print time elapsed for an epoch\r\n end_time = timer()\r\n\r\n global avg_training_time_per_epoch\r\n\r\n avg_training_time_per_epoch = (avg_training_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n print('Time elapsed for epoch {}: {:.0f}s.'.format(epoch, end_time - start_time))", "def test_auto_scale_batch_size_set_model_attribute(tmpdir, use_hparams):\n tutils.reset_seed()\n\n hparams = EvalModelTemplate.get_default_hparams()\n before_batch_size = hparams.get('batch_size')\n\n class HparamsEvalModelTemplate(EvalModelTemplate):\n\n def dataloader(self, *args, **kwargs):\n # artificially set batch_size so we can get a dataloader\n # remove it immediately after, because we want only self.hparams.batch_size\n setattr(self, \"batch_size\", before_batch_size)\n 
dataloader = super().dataloader(*args, **kwargs)\n del self.batch_size\n return dataloader\n\n datamodule_model = MNISTDataModule(data_dir=tmpdir, batch_size=111) # this datamodule should get ignored!\n datamodule_fit = MNISTDataModule(data_dir=tmpdir, batch_size=before_batch_size)\n\n model_class = HparamsEvalModelTemplate if use_hparams else EvalModelTemplate\n model = model_class(**hparams)\n model.datamodule = datamodule_model # unused when another module gets passed to .tune() / .fit()\n\n trainer = Trainer(default_root_dir=tmpdir,\n max_epochs=1,\n auto_scale_batch_size=True,\n gpus=1)\n trainer.tune(model, datamodule_fit)\n after_batch_size = model.hparams.batch_size if use_hparams else model.batch_size\n assert trainer.datamodule == datamodule_fit\n assert before_batch_size != after_batch_size\n assert after_batch_size <= len(trainer.train_dataloader.dataset)\n assert datamodule_fit.batch_size == after_batch_size\n # should be left unchanged, since it was not passed to .tune()\n assert datamodule_model.batch_size == 111", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n 
self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def configure_ddp(self):\n\n if (hasattr(self.model, 'megatron_amp_o2') and self.model.megatron_amp_o2) or (\n hasattr(self.model, 'with_distributed_adam') and self.model.with_distributed_adam\n ):\n # do not use DDP if using megatron amp O2 or distributed optimizer\n self._model = _LightningModuleWrapperBase(self.model)\n else:\n app_state = AppState()\n\n if app_state.model_parallel_size is not None:\n\n logging.info(f\"Configuring DDP for model parallelism.\")\n\n # With model parallelism, multiple GPUs form a large \"logical 
GPU\"\n # this means that data parallel groups span multiple GPUs\n # and are non-trivial\n # TODO: for megatron-lm self.model is a list\n # Removing self.pre_configure_ddp() as DDP's 'find_unused_parameters' now defaults\n # to False in PTL 2.0 and hence pre_configure_ddp() is removed in ddp.py\n # self.pre_configure_ddp()\n # device_ids = self.determine_ddp_device_ids()\n self._model = DistributedDataParallel(\n _LightningModuleWrapperBase(self.model),\n process_group=parallel_state.get_data_parallel_group(),\n **self._ddp_kwargs,\n )\n\n if self.no_ddp_communication_hook:\n # When using custom gradient accumulation and allreduce, disable\n # DDP communication hook that works on the gradient bucket.\n # Instead, use the custom gradient function and communication hook,\n # which is defined in the master optimizer wrapper.\n self._model.require_backward_grad_sync = False\n self._model.register_comm_hook(None, noop_hook)\n\n else:\n super().configure_ddp()", "def create_embeddings(model_fc, ds, model_name, storage_path, storage_size=1000, parallel=True): \n\n # create folder when doesn't exist yet\n try:\n os.makedirs(storage_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n \n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n feature_extractor = model_fc\n if parallel:\n feature_extractor = nn.DataParallel(model_fc)\n target_dataset = ds\n len_target_dataset = len(target_dataset)\n # save some memory\n\n feature_extractor.eval()\n \n with torch.no_grad():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Moving model to {device}\")\n feature_extractor = feature_extractor.to(device)\n params = {'batch_size': 50,\n 'shuffle': False,\n 'num_workers': 6,\n 'pin_memory': False}\n\n print(f\"Length of dataset is {len_target_dataset}\")\n if (len_target_dataset >= storage_size):\n\n if len_target_dataset % storage_size != 0:\n until_i = (len_target_dataset // storage_size + 1)\n else:\n until_i = (len_target_dataset // storage_size)\n\n for i in range(until_i):\n\n \"\"\"Check if we overshot the entries\"\"\"\n if ((i+1)*storage_size <= len_target_dataset):\n t_dataset = torch.utils.data.Subset(target_dataset, range(i*storage_size, (i+1)*storage_size))\n else:\n remainder = len_target_dataset - i*storage_size\n print(f\"Calculating for remainder: {remainder} because we want to extract {(i+1)*storage_size}\")\n t_dataset = torch.utils.data.Subset(target_dataset, range(i*storage_size, (i*storage_size) + remainder))# use remainder\n\n training_generator = data.DataLoader(t_dataset, **params)\n\n features = torch.Tensor([]).to(device)\n labels = torch.LongTensor([]).to(device)\n\n for local_batch, local_labels in training_generator:\n local_batch = local_batch.to(device)\n local_labels = local_labels.to(device)\n output = feature_extractor(local_batch)\n features = torch.cat([features, output], dim=0)\n labels = torch.cat([labels, local_labels], dim=0)\n\n print(features.size())\n features = features.to(\"cpu\")\n labels = labels.to(\"cpu\")\n\n x = features.detach().numpy()\n y = labels.detach().numpy()\n\n np.savez_compressed(f'{storage_path}/{model_name}_{i}.npz', x=x, y=y)\n\n del features\n del labels\n del local_batch\n del local_labels\n torch.cuda.empty_cache()\n\n if (len_target_dataset < storage_size):\n training_generator = data.DataLoader(target_dataset, **params)\n features = torch.Tensor([]).to(device)\n labels = torch.LongTensor([]).to(device)\n\n for local_batch, local_labels in training_generator:\n local_batch = 
local_batch.to(device)\n local_labels = local_labels.to(device)\n output = feature_extractor(local_batch)\n features = torch.cat([features, output], dim=0)\n labels = torch.cat([labels, local_labels], dim=0)\n\n print(features.size())\n features = features.to(\"cpu\")\n labels = labels.to(\"cpu\")\n\n x = features.detach().numpy()\n y = labels.detach().numpy()\n\n np.savez_compressed(f'{storage_path}/{model_name}_0.npz', x=x, y=y)\n\n del features\n del labels\n del local_batch\n del local_labels\n torch.cuda.empty_cache()", "def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n base_path,\n dataset_param,\n train=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n batch_size,\n drop_last=True)\n train_data_loader = 
torch.utils.data.DataLoader(train_dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n batch_size,\n drop_last=True)\n val_data_loader = torch.utils.data.DataLoader(val_dataset,\n batch_sampler=val_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n # Fastforward the LR decayer\n start_epoch = TRAIN_CFG['start_epoch']\n max_epoch = TRAIN_CFG['max_epoch']\n for _ in range(0, -1):\n scheduler.step()\n\n # Start training\n print(\"======= Training for \" + str(max_epoch) + \"===========\")\n for epoch in range(start_epoch, int(max_epoch) + 1):\n if epoch % TRAIN_CFG['eval_every'] == 0:\n print(\"========= Evaluating Model ==========\")\n result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n if torch.distributed.get_rank() == 0:\n logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n result_dict))\n \n train_one_epoch(model, optimizer, train_data_loader,\n device, epoch, print_freq=1000)\n scheduler.step()\n if torch.distributed.get_rank() == 0:\n print(\"Saving model\")\n torch.save(model.state_dict(), osp.join(save_dir,\n TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))", "def calibrate_model(model, criterion, data_loader, neval_batches):\n model.eval()\n cpu = torch.device(\"cpu\")\n \n cnt = 0\n\n with torch.no_grad():\n for image, target in data_loader:\n image = image.to(cpu)\n target = target.to(cpu)\n output = model(image)\n loss = criterion(output, target)\n cnt += 1\n if cnt >= neval_batches:\n return", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. 
\"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if 
self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def keras_multitask(self, args):\n start_time = time.time()\n\n # if self.args.log_metrics:\n # utils.wandb_init_logs(self.config[\"multitask_trainer\"])\n\n embedding_type = self.config[\"multitask_trainer\"][\"embedding_type\"]\n max_len = int(self.config[\"multitask_trainer\"][\"max_len\"])\n\n reader = 
SciciteReader(self.config[\"preprocessor\"])\n print(\"Loading data...\")\n text, labels, sections, worthiness = reader.load_data(\n _type=\"train\", multitask=True\n )\n text_dev, labels_dev, _, _ = reader.load_data(_type=\"dev\", multitask=False)\n text_test, labels_test, _, _ = reader.load_data(_type=\"test\", multitask=False)\n\n keras_model = MultitaskLearner(self.config)\n\n if embedding_type == \"bert\" or embedding_type == \"albert\":\n input_ids, input_masks, input_segments = keras_model.prepare_input_data(\n text\n )\n (\n dev_input_ids,\n dev_input_masks,\n dev_input_segments,\n ) = keras_model.prepare_input_data(text_dev)\n (\n test_input_ids,\n test_input_masks,\n test_input_segments,\n ) = keras_model.prepare_input_data(text_test)\n\n print(\"Preparing data...\")\n text_tensor, text_tokenizer = keras_model.prepare_data(text, max_len=max_len)\n labels_tensor, labels_tokenizer = keras_model.prepare_data(labels)\n sections_tensor, sections_tokenizer = keras_model.prepare_data(sections)\n worthiness_tensor, worthiness_tokenizer = keras_model.prepare_data(worthiness)\n\n text_tensor_dev = keras_model.prepare_dev_data(\n text_dev, text_tokenizer, max_len=max_len\n )\n labels_tensor_dev = keras_model.prepare_dev_data(labels_dev, labels_tokenizer)\n text_tensor_test = keras_model.prepare_dev_data(\n text_test, text_tokenizer, max_len=max_len\n )\n labels_tensor_test = keras_model.prepare_dev_data(labels_test, labels_tokenizer)\n\n print(\"Creating datasets...\")\n if embedding_type == \"lstm\":\n dataset = keras_model.create_dataset(\n text=text_tensor,\n labels=labels_tensor,\n sections=sections_tensor,\n worthiness=worthiness_tensor,\n ids=None,\n mask=None,\n segments=None,\n )\n dev_dataset = keras_model.create_dev_dataset(\n text=text_tensor_dev,\n ids=None,\n mask=None,\n segments=None,\n labels=labels_tensor_dev,\n )\n test_dataset = keras_model.create_dev_dataset(\n text=text_tensor_test,\n ids=None,\n mask=None,\n segments=None,\n labels=labels_tensor_test,\n )\n elif embedding_type == \"bert\" or embedding_type == \"albert\":\n dataset = keras_model.create_dataset(\n text=None,\n labels=labels_tensor,\n sections=sections_tensor,\n worthiness=worthiness_tensor,\n ids=input_ids,\n mask=input_masks,\n segments=input_segments,\n )\n dev_dataset = keras_model.create_dev_dataset(\n text=None,\n ids=dev_input_ids,\n mask=dev_input_masks,\n segments=dev_input_segments,\n labels=labels_tensor_dev,\n )\n test_dataset = keras_model.create_dev_dataset(\n text=None,\n ids=test_input_ids,\n mask=test_input_masks,\n segments=test_input_segments,\n labels=labels_tensor_test,\n )\n\n vocab_size = len(text_tokenizer.word_index.keys()) + 1\n labels_size = len(labels_tokenizer.word_index.keys())\n section_size = len(sections_tokenizer.word_index.keys())\n worthiness_size = len(worthiness_tokenizer.word_index.keys())\n\n print(\"Creating model...\")\n keras_model.create_model(vocab_size, labels_size, section_size, worthiness_size)\n print(\"Fitting model...\")\n keras_model.fit_model(dataset, dev_dataset)\n\n print(\"Saving model...\")\n keras_model.save_model()\n\n print(\"Evaluating...\")\n keras_model.eval(test_dataset, save_output=True)\n keras_model.eval(test_dataset, save_output=False)\n\n end_time = time.time()\n total_time = end_time - start_time\n print(\"Execution time:\", str(datetime.timedelta(seconds=total_time)))", "def train_model_batch(model, config, test, resume=None):\n\n if config['optimizer']['method'] == 'adagrad':\n optimizer = Adagrad()\n elif 
config['optimizer']['method'] == 'adadelta':\n optimizer = Adadelta()\n elif config['optimizer']['method'] == 'adam':\n optimizer = Adam()\n else: # default SGD\n params = config['optimizer']['params']\n if resume is None: # New experiment\n optimizer = SGD(lr=params['lrate'], momentum=params['momentum'], decay=params['decay'],\n nesterov=params['nesterov'])\n iepoch = 0\n else: # Resume training\n nlrate = params['lrate'] - ((params['lrate'] / config['train']['epochs']) * params['epochs_trained'])\n\n optimizer = SGD(lr=nlrate, momentum=params['momentum'], decay=params['decay'],\n nesterov=params['nesterov'])\n iepoch = config['train']['epochs_trained']\n\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n classweight = detransweights(config['train']['classweight'])\n if 'log' not in config or config['log'] == 'db':\n dblog = DBLog(database=mongoconnection, config=config, model=model, modelj=model.to_json(), resume=resume)\n else:\n dblog = FileLog(config=config, modelj=model.to_json())\n\n recode = None if 'recode' not in config else recoding_dictionary(config['recode'])\n\n train = Dataset(config['datapath'], config['traindata'], config['zfactor'], imgord=config['imgord'],\n nclasses=test.nclasses, recode=recode)\n\n # Train Epochs\n logs = {'loss': 0.0, 'acc': 0.0, 'val_loss': 0.0, 'val_acc': 0.0}\n train.open()\n chunks, _ = train.chunks_list()\n\n for epoch in range(iepoch, config['train']['epochs']):\n\n shuffle(chunks)\n\n # Train Batches\n lloss = []\n lacc = []\n for chunk in chunks:\n train.load_chunk(chunk, config['train']['batchsize'])\n\n for p in train.perm:\n loss, acc = model.train_on_batch(train.X_train[p], train.y_train[p], class_weight=classweight)\n lloss.append(loss)\n lacc.append(acc)\n\n logs['loss'] = float(np.mean(lloss))\n logs['acc'] = float(np.mean(lacc))\n\n logs['val_loss'], logs['val_acc'] = model.evaluate(test.X_train, test.y_train, verbose=0)\n\n force_stop = dblog.force_stop()\n dblog.on_epoch_end(epoch, logs=logs)\n\n if config['savepath']:\n model.save(config['savepath'] + '/' + str(dblog.id) + '.h5')\n\n # If the training is stopped remotely training stops\n if force_stop:\n break\n train.close()\n\n scores = model.evaluate(test.X_train, test.y_train, verbose=0)\n dblog.on_train_end(logs={'acc': logs['acc'], 'val_acc': scores[1]})\n y_pred = model.predict_classes(test.X_train, verbose=0)\n dblog.save_final_results(scores, confusion_matrix(test.y_labels, y_pred),\n classification_report(test.y_labels, y_pred))", "def train_epoch(model, data_loader, data_iter, optimizer, device,\n epoch_size=None, eval_cluster_error=True, core_reset=False,\n eval_rank=False, mc_mode=False, lip_mode=False):\n data_tic = epoch_tic = time.time()\n data_rtime, reset_rtime = 0.0, 0.0\n metrics = None\n conf_mats = ut.AverageMeter() if eval_cluster_error else None\n resets = [] if core_reset else None\n comp_err = ut.AverageMeter() if mc_mode else None\n itr, epochN = 1, 0\n epoch_stop = False\n if data_iter is None:\n data_iter = iter(data_loader)\n model.epoch_init()\n while not epoch_stop:\n try:\n data_tup = next(data_iter)\n except StopIteration:\n if epoch_size is None or epochN >= epoch_size:\n data_iter, epoch_stop = None, True\n break\n else:\n data_iter = iter(data_loader)\n data_tup = next(data_iter)\n if epochN >= epoch_size:\n epoch_stop = True\n\n if len(data_tup) == 3:\n x, groups, x0 = data_tup\n if x0 is not None:\n x0 = x0.to(device)\n else:\n x, groups = data_tup\n x0 = None\n x = x.to(device)\n batch_size = 
x.shape[0]\n epochN += batch_size\n data_rtime += time.time() - data_tic\n\n # opt step\n optimizer.zero_grad()\n (batch_obj_mean, batch_obj, batch_loss,\n batch_reg_in, batch_reg_out) = model.objective(x)\n\n if torch.isnan(batch_obj_mean.data):\n raise RuntimeError('Divergence! NaN objective.')\n\n batch_obj_mean.backward()\n optimizer.step()\n\n batch_metrics = [batch_obj, batch_loss, batch_reg_in, batch_reg_out]\n if metrics is None:\n metrics = [ut.AverageMeter() for _ in range(len(batch_metrics))]\n for kk in range(len(batch_metrics)):\n metrics[kk].update(batch_metrics[kk].cpu(), batch_size)\n\n # eval batch cluster confusion\n if eval_cluster_error:\n batch_conf_mats = torch.stack([\n torch.from_numpy(ut.eval_confusion(model.groups[:, ii], groups,\n model.k, true_classes=data_loader.dataset.classes))\n for ii in range(model.replicates)])\n conf_mats.update(batch_conf_mats, 1)\n\n # eval batch completion if in missing data setting\n if mc_mode and x0 is not None:\n batch_comp_err = model.eval_comp_error(x0)\n comp_err.update(batch_comp_err.cpu(), batch_size)\n\n if core_reset:\n reset_tic = time.time()\n batch_resets = model.core_reset()\n if batch_resets.shape[0] > 0:\n rIdx = np.unique(batch_resets[:, 0].astype(np.int64))\n ut.reset_optimizer_state(model, optimizer, rIdx)\n batch_resets = np.insert(batch_resets, 0, itr, axis=1)\n resets.append(batch_resets)\n reset_rtime += time.time() - reset_tic\n\n itr += 1\n data_tic = time.time()\n\n # evaluate summary metrics\n metrics = torch.stack([met.avg for met in metrics])\n conf_mats, errors, error_stats = _cluster_error_summary(eval_cluster_error,\n conf_mats, model)\n resets, reset_count, rep_reset_counts = _resets_summary(core_reset, resets,\n model)\n svs, rank_stats = _rank_summary(eval_rank, model)\n comp_err, comp_err_stats = _comp_err_summary(mc_mode, comp_err, model)\n lip, lip_stats = _lip_summary(lip_mode, model)\n\n rtime = time.time() - epoch_tic\n sampsec = epochN / rtime\n\n metrics, metrics_summary = _all_metrics_summary(metrics, errors, error_stats,\n reset_count, rep_reset_counts, rank_stats, comp_err, comp_err_stats, lip,\n lip_stats, sampsec, rtime, data_rtime, reset_rtime)\n return metrics_summary, metrics, conf_mats, resets, svs, data_iter", "def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def train_step(args, model: torch.nn.Module, batch: Tuple, meters: AverageMeterSet, epoch: int, batch_idx: int):\n labeled_batch, unlabeled_batch = batch\n labeled, targets = labeled_batch\n unlabeled_k, _ = unlabeled_batch\n\n # One hot labels\n targets = torch.zeros(args.batch_size, args.num_classes).scatter_(\n 1, targets.view(-1, 1), 1\n )\n\n unlabeled_k = [u_k.to(args.device) for u_k in unlabeled_k]\n labeled = labeled.to(args.device)\n targets = targets.to(args.device)\n\n # Disable batch-norm running_mean and running_var updates for pseduo-label forward 
passes\n set_bn_running_updates(model, enable=False)\n with torch.no_grad():\n preds = [\n torch.softmax(model(u_k.to(args.device)), dim=1) for u_k in unlabeled_k\n ]\n avg_preds = torch.stack(preds).mean(dim=0)\n sharpened_preds = torch.pow(avg_preds, 1 / args.temperature)\n unlabeled_targets = sharpened_preds / sharpened_preds.sum(dim=-1, keepdim=True)\n unlabeled_targets = unlabeled_targets.detach()\n\n all_inputs = torch.cat([labeled] + unlabeled_k, dim=0)\n all_targets = torch.cat(\n [targets] + [unlabeled_targets for _ in range(len(unlabeled_k))], dim=0\n )\n\n mixed_input, mixed_targets = mixup(all_inputs, all_targets, args.alpha)\n\n # Interleave labeled and unlabeled samples to avoid biased batch norm calculation\n mixed_input = list(torch.split(mixed_input, args.batch_size))\n mixed_input = interleave(mixed_input, args.batch_size)\n\n # Only update running batch-norm parameters for first batch of mixed batches\n set_bn_running_updates(model, enable=True)\n logits = [model(mixed_input[0])]\n set_bn_running_updates(model, enable=False)\n for input in mixed_input[1:]:\n logits.append(model(input))\n\n # Put interleaved samples back - reverses interleaving applied before\n logits = interleave(logits, args.batch_size)\n logits_x = logits[0]\n logits_u = torch.cat(logits[1:], dim=0)\n\n # Cross entropy loss for labeled samples\n labeled_loss = -torch.sum(\n F.log_softmax(logits_x, dim=1) * mixed_targets[: args.batch_size], dim=1\n )\n # L2-distance loss for unlabeled samples\n unlabeled_loss = torch.mean(\n (torch.softmax(logits_u, dim=1) - mixed_targets[args.batch_size :]) ** 2\n )\n\n # Update unlabeled loss weight based on current step (linear rampup to max. value over first 16 epochs)\n step = epoch * args.iters_per_epoch + (batch_idx + 1)\n wu = (\n args.wu * linear_rampup(step, 16 * args.iters_per_epoch)\n if not args.resume\n else args.wu\n )\n\n # Total loss\n loss = torch.mean(labeled_loss) + wu * unlabeled_loss\n\n meters.update(\"total_loss\", loss.item(), targets.size(0))\n meters.update(\"labeled_loss\", torch.mean(labeled_loss).item(), targets.size(0))\n meters.update(\"unlabeled_loss\", unlabeled_loss.item(), targets.size(0))\n meters.update(\"wu\", wu, 1)\n return loss", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), 
\r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def run(net, loader, edit_set_cmd, model_name):\n answ = []\n accs = []\n ss_vc = []\n image_ids =[]\n ques_ids = []\n softmax = nn.Softmax(dim=1).cuda()\n for v, q, a, idx, img_id, ques_id, q_len in tqdm(loader): # image, ques to vocab mapped , answer, item (sth to help index shuffled data with), len_val\n #ipdb.set_trace()\n var_params = {\n 'volatile': False,\n 'requires_grad': False,\n }\n v = Variable(v.cuda(async=True), **var_params)\n q = Variable(q.cuda(async=True), **var_params)\n a = Variable(a.cuda(async=True), **var_params)\n q_len = Variable(q_len.cuda(async=True), **var_params) ### len of question\n\n with torch.no_grad():\n out = net(v, q, q_len)\n softmax_vc = softmax(out) # torch.size(128,3000)\n #ipdb.set_trace() ## check type of softmax_vc- enforce it to torch16 here itself/ alse see what happens when np.16..\n acc = utils.batch_accuracy(out.data, a.data).cpu() #torch.Size([128, 1]) official vqa acc for every questions\n\n # store information about evaluation of this minibatch\n _, answer = out.data.cpu().max(dim=1) ### torch.Size([128) !!!! 
this is the predicted answer id!!!\n answ.append(answer.view(-1)) # pred_ans_id\n ss_vc.append(softmax_vc) # #torch.Size([128, 3000])\n accs.append(acc.view(-1)) # official vqa accurcay per question\n ques_ids.append(ques_id.view(-1))\n\n if config.vis_attention:\n output_qids_answers = []\n if config.fintuned_model_test:\n model_name = 'finetuned_' + model_name\n if edit_set_cmd:\n saaa_vqa_ans_q_id = '/BS/vedika3/nobackup/pytorch-vqa/cvpr_rebuttal_' + model_name + '_edit_vqa_ans_q_id.pickle'\n print(img_id)\n ipdb.set_trace()\n output_qids_answers += [\n {'ans_id': p, 'ques_id': qid, 'accuracy': acc}\n for p, qid, acc in zip(answ, ques_ids, accs)]\n else:\n saaa_vqa_ans_q_id = '/BS/vedika3/nobackup/pytorch-vqa/cvpr_rebuttal_' + model_name + '_orig_vqa_ans_q_id.pickle'\n print(img_id)\n ipdb.set_trace()\n output_qids_answers += [\n {'ans_id': p, 'ques_id': qid, 'accuracy': acc}\n for p, qid, acc in zip(answ, ques_ids, accs)]\n\n with open(saaa_vqa_ans_q_id, 'wb') as f:\n pickle.dump(output_qids_answers, f, pickle.HIGHEST_PROTOCOL)\n\n exit()\n\n\n\n if edit_set_cmd:\n image_ids.append(img_id)\n else:\n image_ids.append(img_id.view(-1))\n\n\n\n\n ss_vc = torch.cat(ss_vc, dim=0) ## softmax_vectors\n answ = torch.cat(answ, dim=0) ## pred_ans_id\n accs = torch.cat(accs, dim=0) ## official vqa accurcay per question\n\n ques_ids = torch.cat(ques_ids, dim=0)\n if edit_set_cmd:\n image_ids = [item for sublist in image_ids for item in sublist]\n else:\n image_ids = torch.cat(image_ids, dim=0)\n ### might be string in edit config case\n print('the accuracy is:', torch.mean(accs)) ### mean of entire accuracy vector # tensor(0.6015) for val set\n\n\n\n\n\n return answ, image_ids, ques_ids, ss_vc", "def do_train_job(self):\n # get the initial tensor dict\n # initial_tensor_dict = self.wrapped_model.get_tensor_dict()\n\n # get the training data size\n data_size = self.wrapped_model.get_training_data_size()\n\n # train the model\n # FIXME: model header \"version\" needs to be changed to \"rounds_trained\"\n # FIXME: We assume the models allow training on partial batches.\n # FIXME: Currently, num_batches_per_round overrides epochs per round. 
Is this the correct behavior?\n if self.num_batches_per_round is not None:\n num_batches = self.num_batches_per_round\n else:\n batches_per_epoch = int(np.ceil(data_size/self.wrapped_model.data.batch_size))\n num_batches = int(np.floor(batches_per_epoch * self.epochs_per_round))\n loss = self.wrapped_model.train_batches(num_batches=num_batches)\n self.logger.debug(\"{} Completed the training job for {} batches.\".format(self, num_batches))\n\n # get the trained tensor dict and store any designated to be held out from aggregation\n shared_tensors = self._remove_and_save_holdout_tensors(self.wrapped_model.get_tensor_dict(with_opt_vars=self._with_opt_vars()))\n\n # create the model proto\n if self.send_model_deltas:\n deltas = self.create_deltas(tensor_dict=shared_tensors)\n model_proto = construct_proto(tensor_dict=deltas[\"tensor_dict\"],\n model_id=self.model_header.id,\n model_version=self.model_header.version,\n compression_pipeline=self.compression_pipeline,\n is_delta=True,\n delta_from_version=deltas[\"delta_from_version\"])\n else:\n model_proto = construct_proto(tensor_dict=shared_tensors,\n model_id=self.model_header.id,\n model_version=self.model_header.version,\n compression_pipeline=self.compression_pipeline,\n is_delta=False,\n delta_from_version=-1)\n\n self.logger.debug(\"{} - Sending the model to the aggregator.\".format(self))\n\n reply = self.channel.UploadLocalModelUpdate(LocalModelUpdate(header=self.create_message_header(), model=model_proto, data_size=data_size, loss=loss))\n self.validate_header(reply)\n check_type(reply, LocalModelUpdateAck, self.logger)\n self.logger.info(\"{} - Model update succesfully sent to aggregator\".format(self))", "def update_model(engine, batch):\n\t\tengine.model.train()\n\t\tengine.model.rpn.nms_thresh = 0.7\n\t\timg, target = prepare_batch(batch, device=get_device(engine.model))\n\t\tengine.optimizer.zero_grad()\n\t\tloss = engine.model(img, target)\n\t\tlosses = sum(l for l in loss.values())\n\t\tlosses.backward()\n\t\tengine.optimizer.step()\n\t\treturn loss", "def train_shared(self, dag=None):\n model = self.shared\n model.train()\n self.controller.eval()\n raw_total_loss = 0\n total_loss = 0\n for step,batch in enumerate(self.train_data_loader):\n dags = dag if dag else self.controller.sample(\n self.args.shared_num_sample)\n if self.args.use_ref and step<self.args.ref_model_num:\n dags=[self.args.ref_arch]\n\n inputs=torch.from_numpy(batch['data']).cuda()\n targets=torch.from_numpy(batch['seg'].astype(int)).cuda()\n targets=get_multi_class_labels(targets,n_labels=self.args.n_classes)\n \n print('epoch :',self.epoch,'step :', step, 'time:' ,time.time()-self.time)\n print(dags[0])\n #print('momery',torch.cuda.memory_allocated(device=None))\n\n loss = self.get_loss(inputs,targets,dags)\n raw_total_loss += loss.data\n \n #print('after model momery',torch.cuda.memory_allocated(device=None))\n print('loss :', loss.item())\n\n\n # update\n self.shared_optim.zero_grad()\n loss.backward()\n self.shared_optim.step()\n\n total_loss += loss.data\n\n if ((step % self.args.log_step) == 0) and (step > 0):\n self._summarize_shared_train(total_loss, raw_total_loss)\n raw_total_loss = 0\n total_loss = 0\n self._summarize_shared_train(total_loss, raw_total_loss)", "def train(model, config, logger, record): \n # initialize userIDs\n users_to_sample = config.users\n userIDs = np.arange(config.users) \n\n # initialize the optimizer for the server model\n dataset = assign_user_data(config, logger)\n\n # initialize the delta offset buffers and local 
residual buffers\n offset_buffers = []\n residual_buffers = []\n for user in range(users_to_sample):\n offset_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n residual_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n\n global_updater = GlobalUpdater(config, model.state_dict()) \n\n # before optimization, report the result first\n validate_and_log(model, dataset, config, record, logger)\n \n for comm_round in range(config.rounds):\n userIDs_candidates = userIDs[:users_to_sample]\n \n # Wait for all users updating locally\n local_packages = []\n for i, user_id in enumerate(userIDs_candidates):\n user_resource = assign_user_resource(config, user_id, \n dataset[\"train_data\"], dataset[\"user_with_data\"])\n updater = LocalUpdater(user_resource, config)\n updater.local_step(model, offset_buffers[user_id])\n local_package = updater.uplink_transmit()\n local_packages.append(local_package)\n\n # Update the global model\n global_updater.global_step(model, local_packages, residual_buffers)\n\n # Update local offsets\n update_offset_buffers(offset_buffers, \n residual_buffers,\n global_updater.accumulated_delta, \n config.tau) \n\n # log and record\n logger.info(\"Round {:d}\".format(comm_round))\n validate_and_log(model, dataset, config, record, logger)\n\n # if comm_round == config.scheduler[0]:\n # config.lr *= config.lr_scaler\n # config.scheduler.pop(0)", "def _update_model(self, X_all, Y_all):\n if self.model is None:\n self._create_model(X_all, Y_all)\n else:\n self.model.set_XY(X_all, Y_all)\n\n # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...\n if self.max_iters > 0:\n # --- update the model maximizing the marginal likelihood.\n if self.optimize_restarts==1:\n self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)\n else:\n self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)", "def train_step(self, batch: dict, epoch: int):\n\n with torch.cuda.amp.autocast(self.mixed_precision):\n \n # Update momentum {key, pseudo} networks\n with torch.no_grad():\n self._momentum_update_key_net()\n self._momentum_update_pseudo_net()\n\n # Get data (3 views)\n x_q = batch['x1'].to(self.local_rank)\n x_k = batch['x2'].to(self.local_rank)\n x_ps = batch['x3'].to(self.local_rank)\n \n # Compute strong query features; (B, f)\n z_q = F.normalize(self.net_q(x_q), dim=1)\n\n with torch.no_grad():\n \n # Shuffle across nodes (gpus)\n x_k, idx_unshuffle_k = ForMoCo.batch_shuffle_ddp(x_k)\n x_ps, idx_unshuffle_ps = ForMoCo.batch_shuffle_ddp(x_ps)\n \n # Compute {key, pseudo} features; (B, f)\n z_k = F.normalize(self.net_k(x_k), dim=1)\n z_ps = F.normalize(self.net_ps(x_ps), dim=1)\n \n # Restore {key, pseudo} features to their original nodes\n z_k = ForMoCo.batch_unshuffle_ddp(z_k, idx_unshuffle_k)\n z_ps = ForMoCo.batch_unshuffle_ddp(z_ps, idx_unshuffle_ps)\n\n # Compute loss\n loss, logits, labels, loss_pseudo, probs_pseudo_neg = \\\n self.loss_function(z_q, z_ps, z_k, self.queue.buffer, threshold=self.threshold)\n \n # Backpropagate & update\n if loss_pseudo.isnan() or (epoch <= self.ramp_up):\n self.backprop(loss)\n else:\n alpha = 1.0\n self.backprop(loss + alpha * loss_pseudo)\n \n # Compute metrics\n with torch.no_grad():\n \n # Accuracy of true positives against all negatives\n rank_1 = TopKAccuracy(k=1)(logits, labels)\n \n # Accuracy of pseudo positives with ground truth labels\n above_threshold = 
probs_pseudo_neg.ge(self.threshold)\n num_pseudo = above_threshold.sum()\n \n # No pseudo positives may have been selected\n if self.queue.is_reliable and (num_pseudo > 0):\n labels_query = batch['y'].to(self.local_rank) # (B, )\n labels_queue = self.queue.labels # (k, )\n is_correct = labels_query.view(-1, 1).eq(labels_queue.view(1, -1)) # (B, 1) @ (1, k) -> (B, k)\n num_correct = is_correct.masked_select(above_threshold).sum()\n precision = torch.true_divide(num_correct, num_pseudo)\n else:\n num_correct = torch.zeros(1, dtype=torch.long, device=num_pseudo.device)\n precision = torch.zeros(1, dtype=torch.float32, device=num_pseudo.device)\n \n # Update memory queue\n self.queue.update(keys=z_k, labels=batch['y'].to(self.local_rank))\n\n return {\n 'loss': loss.detach(),\n 'loss_pseudo': loss_pseudo.detach(), # (1, ) or tensor(nan)\n 'rank@1': rank_1,\n 'num_correct': num_correct,\n 'num_pseudo': num_pseudo,\n 'precision': precision,\n }", "def train(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Train with config:\")\n print(pprint.pformat(cfg))\n\n # Create train and val loaders.\n train_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.TRAIN_SPLIT, train=True)\n train_loader = DataLoader(\n train_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=(False if cfg.NUM_GPUS > 1 else True),\n sampler=(DistributedSampler(train_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n if cfg.DATA.VAL_SPLIT is not None:\n val_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n val_loader = DataLoader(\n val_dataset,\n batch_size=(1 if cfg.NUM_GPUS > 1 else cfg.TRAIN.BATCH_SIZE),\n shuffle=False,\n sampler=(DistributedSampler(val_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n if cfg.MODEL.BERT_FREEZE:\n if cfg.NUM_GPUS > 1:\n for param in model.module.bert_encoder.model.bert.encoder.layer.parameters():\n param.requires_grad = False\n else:\n for param in model.bert_encoder.model.bert.encoder.layer.parameters():\n param.requires_grad = False\n\n # Construct the optimizer.\n def optimizer_wrapper(Optim, **kwargs):\n def init_func(model):\n return 
Optim(model.parameters(), **kwargs)\n return init_func\n\n optimizers = {\n \"adamax\": (\n optimizer_wrapper(optim.Adamax, lr=cfg.SOLVER.BASE_LR),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n \"adam\": (\n optimizer_wrapper(optim.Adam, lr=cfg.SOLVER.BASE_LR),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n \"sgd\": (\n optimizer_wrapper(optim.SGD, lr=cfg.SOLVER.BASE_LR, momentum=0.9),\n lambda optim: optim.param_groups[0][\"lr\"],\n ),\n }\n\n if cfg.SOLVER.OPTIMIZING_METHOD not in optimizers:\n cfg.SOLVER.OPTIMIZING_METHOD = 'adam'\n if distributed.is_master_proc():\n print(\"{0} not defined in available optimizer list, fallback to Adam\")\n\n optimizer, _ = optimizers[cfg.SOLVER.OPTIMIZING_METHOD]\n optimizer = optimizer(model)\n if distributed.is_master_proc():\n print('optimizer: {}'.format(optimizer))\n\n # Load a checkpoint to resume training if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'checkpoint.pth')\n if osp.exists(checkpoint_path):\n if distributed.is_master_proc():\n print('Resuming training: loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n optimizer.load_state_dict(checkpoint['optimizer_state'])\n start_epoch = checkpoint['epoch'] + 1\n model_final_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if osp.exists(model_final_path):\n model_final = torch.load(model_final_path)\n best_val_score = model_final['accuracy']\n else:\n best_val_score = None\n elif osp.exists(cfg.TRAIN.CHECKPOINT_FILE_PATH):\n if distributed.is_master_proc():\n print('Loading model from: {0}'.format(cfg.TRAIN.CHECKPOINT_FILE_PATH))\n checkpoint = torch.load(cfg.TRAIN.CHECKPOINT_FILE_PATH, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n start_epoch, best_val_score = 0, None\n else: \n start_epoch, best_val_score = 0, None\n\n # Define loss function\n loss_function = nn.BCEWithLogitsLoss()\n\n if distributed.is_master_proc():\n print('Train begins...')\n if cfg.TRAIN.EVAL_FIRST:\n accuracy = evaluate(val_loader, model, -1, cfg)\n if best_val_score is None or accuracy > best_val_score:\n best_val_score = accuracy\n try:\n # Perform the training loop\n for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCH):\n epoch_start_time = time.time()\n # Shuffle the dataset\n if cfg.NUM_GPUS > 1:\n train_loader.sampler.set_epoch(epoch)\n # Train for one epoch\n train_loss = train_epoch(train_loader, model, optimizer, loss_function, epoch, cfg)\n accuracy = evaluate(val_loader, model, epoch, cfg) \n\n if distributed.is_master_proc():\n # Save best model in the validation set\n if best_val_score is None or accuracy > best_val_score:\n best_val_score = accuracy\n model_final_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n model_final = {\n \"epoch\": epoch,\n \"model_state\": model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n \"accuracy\": accuracy\n }\n torch.save(model_final, model_final_path)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s '\n '| epoch loss {:.6f} |'.format(\n epoch, time.time() - epoch_start_time, train_loss))\n print('-' * 89)\n except KeyboardInterrupt:\n if distributed.is_master_proc():\n print('-' * 89)\n print('Exiting from training early')", "def mount(xpu, model):\n # Unwrap the core model if necessary\n 
model = xpu.raw(model)\n model = xpu.move(model)\n if xpu._device_ids and len(xpu._device_ids) > 1:\n model = ContainerDataParallel(\n model, device_ids=xpu._device_ids,\n output_device=xpu._main_device_id)\n else:\n model = DataSerial(model)\n return model", "def mpirun_pipeline(image=\"uber/horovod:0.13.11-tf1.10.0-torch0.4.0-py3.5\",\n\t\t\t\t\t\t batch_size=\"64\",\n\t\t\t\t\t\t optimizer='momentum',\n sync_source='https://github.com/tensorflow/benchmarks.git',\n git_sync_branch='cnn_tf_v1.9_compatible',\n data='user-susan:/training',\n gpus=1,\n workers=1,\n cpu_limit='2',\n metric='images/sec',\n memory_limit='10Gi'):\n\n env = ['NCCL_DEBUG=INFO','GIT_SYNC_BRANCH={0}'.format(git_sync_branch)]\n\n train=arena.mpi_job_op(\n \tname=\"all-reduce\",\n \timage=image,\n \tenv=env,\n data=[data],\n workers=workers,\n sync_source=sync_source,\n gpus=gpus,\n cpu_limit=cpu_limit,\n memory_limit=memory_limit,\n metrics=[metric],\n \tcommand=\"\"\"\n \tmpirun python code/benchmarks/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py --model resnet101 \\\n \t--batch_size {0} --variable_update horovod --optimizer {1}\\\n \t--summary_verbosity=3 --save_summaries_steps=10\n \t\"\"\".format(batch_size, optimizer)\n )", "def train_epoch(self, epoch=None):\n self.model.train()\n loader_iterators = dict([(k, iter(v))\n for k, v in self.train_loaders.items()])\n train_losses_ts = dict(\n [(k, torch.tensor(0.).to(self.device)) for k in self.task_ids])\n train_metrics_ts = dict(\n [(k, torch.tensor(0.).to(self.device)) for k in self.task_ids])\n total_batches = min([len(loader)\n for _, loader in self.train_loaders.items()])\n num_branches = dict()\n for idx, (ctrl, block) in enumerate(self.model.control_blocks()):\n n_branches = max(len(ctrl.serving_tasks), 1.)\n num_branches[idx] = torch.tensor(n_branches, device=self.device)\n\n pbar = tqdm(desc=' train', total=total_batches, ascii=True)\n for batch_idx in range(total_batches):\n self.model.zero_grad()\n\n # for each task, calculate head grads and accumulate body grads\n for task_idx, task_id in enumerate(self.task_ids):\n data, target = loader_iterators[task_id].next()\n data, target = data.to(self.device), target.to(self.device)\n\n # do inference with backward\n output = self.model(data, task_id)\n loss = self.losses[task_id](output, target)\n wloss = self.loss_weights[task_id] * loss\n wloss.backward()\n\n # calculate training metrics\n with torch.no_grad():\n train_losses_ts[task_id] += loss.sum()\n train_metrics_ts[task_id] += \\\n self.metrics[task_id](output, target)\n\n # network slimming\n if self.slimming is not None:\n slim_loss = self.slimming * slimming_loss(self.model)\n if slim_loss > 1e-5:\n slim_loss.backward()\n\n # averaging out body gradients and optimize the body\n for idx, (_, block) in enumerate(self.model.control_blocks()):\n for p in block.parameters():\n p.grad /= num_branches[idx]\n self.optimizers.step()\n pbar.update()\n\n for task_id in self.task_ids:\n train_losses_ts[task_id] /= \\\n len(self.train_loaders[task_id].dataset)\n train_metrics_ts[task_id] /= \\\n len(self.train_loaders[task_id].dataset)\n\n train_losses = dict([(k, v.item())\n for k, v in train_losses_ts.items()])\n train_metrics = dict([(k, v.item())\n for k, v in train_metrics_ts.items()])\n pbar.close()\n return train_losses, train_metrics", "def __init__(\n self,\n model: nn.Module,\n input_path: Union[Path, str],\n out_activations: Dict[str, str],\n out_boundary_weights: Dict[str, bool],\n stride: int,\n patch_size: Tuple[int, int],\n instance_postproc: 
str,\n padding: int = None,\n batch_size: int = 8,\n normalization: str = None,\n device: str = \"cuda\",\n n_devices: int = 1,\n save_intermediate: bool = False,\n save_dir: Union[Path, str] = None,\n save_format: str = \".mat\",\n checkpoint_path: Union[Path, str] = None,\n n_images: int = None,\n type_post_proc: Callable = None,\n sem_post_proc: Callable = None,\n **kwargs,\n ) -> None:\n super().__init__(\n model=model,\n input_path=input_path,\n out_activations=out_activations,\n out_boundary_weights=out_boundary_weights,\n patch_size=patch_size,\n padding=padding,\n batch_size=batch_size,\n normalization=normalization,\n instance_postproc=instance_postproc,\n device=device,\n save_intermediate=save_intermediate,\n save_dir=save_dir,\n save_format=save_format,\n checkpoint_path=checkpoint_path,\n n_images=n_images,\n n_devices=n_devices,\n type_post_proc=type_post_proc,\n sem_post_proc=sem_post_proc,\n **kwargs,\n )\n\n self.stride = stride", "def test_torch_prepare_model(ray_start_4_cpus_2_gpus):\n\n def train_fn():\n model = torch.nn.Linear(1, 1)\n\n # Wrap in DDP.\n model = train.torch.prepare_model(model)\n\n # Make sure model is wrapped in DDP.\n assert isinstance(model, DistributedDataParallel)\n\n # Make sure model is on cuda.\n assert next(model.parameters()).is_cuda\n\n trainer = Trainer(\"torch\", num_workers=2, use_gpu=True)\n trainer.start()\n trainer.run(train_fn)\n trainer.shutdown()", "def compute_parallel(self, inputs, communicator):\n self.compute_sequential([inputs], [communicator])", "def optimize_model(input,\n model_type='bert',\n num_heads=0,\n hidden_size=0,\n optimization_options=None,\n opt_level=0,\n use_gpu=False,\n only_onnxruntime=False):\n (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]\n\n temp_model_path = None\n if opt_level > 1: # Optimization specified for an execution provider.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, opt_level=opt_level)\n elif run_onnxruntime:\n # Use Onnxruntime to do optimizations (like constant folding and cast elimation) that is not specified to exection provider.\n # CPU provider is used here so that there is no extra node for GPU memory copy.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)\n\n model = load_model(temp_model_path or input, format=None, load_external_data=True)\n\n if model.producer_name and producer != model.producer_name:\n logger.warning(\n f\"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. 
Please specify correct --model_type parameter.\"\n )\n\n if optimization_options is None:\n optimization_options = BertOptimizationOptions(model_type)\n\n optimizer = optimizer_class(model, num_heads, hidden_size)\n\n if not only_onnxruntime:\n optimizer.optimize(optimization_options)\n\n # Remove the temporary model.\n if temp_model_path:\n os.remove(temp_model_path)\n logger.debug(\"Remove tempoary model: {}\".format(temp_model_path))\n\n optimizer.model.producer_name = \"onnxruntime.transformers\"\n from onnxruntime import __version__ as onnxruntime_version\n optimizer.model.producer_version = onnxruntime_version\n\n return optimizer", "def worker(self, gpu_id: int):\n if self.seed is not None:\n make_deterministic(self.seed)\n self.current_rank = self.rank\n if self.distributed:\n if self.multiprocessing:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n self.current_rank = self.rank * self.ngpus_per_node + gpu_id\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=self.dist_url,\n world_size=self.world_size,\n rank=self.current_rank\n )\n # set up process logger\n self.logger = logging.getLogger(\"worker_rank_{}\".format(self.current_rank))\n self.logger.propagate = False\n handler = QueueHandler(self.logger_queue)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n\n # only write in master process\n if self.current_rank == 0:\n self.tb_writer = self.tb_writer_constructor()\n\n self.logger.info(\n \"Use GPU: %d for training, current rank: %d\",\n gpu_id,\n self.current_rank\n )\n # get dataset\n train_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"train\"\n )\n val_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"val\"\n )\n # create model\n self.model = get_model(\n model_name=self.global_cfg[\"model\"][\"name\"],\n num_classes=self.global_cfg[\"dataset\"][\"n_classes\"]\n )\n\n self.device = torch.device(\"cuda:{}\".format(gpu_id))\n self.model.to(self.device)\n\n batch_size = self.global_cfg[\"training\"][\"batch_size\"]\n n_workers = self.global_cfg[\"training\"][\"num_workers\"]\n if self.distributed:\n batch_size = int(batch_size / self.ngpus_per_node)\n n_workers = int((n_workers + self.ngpus_per_node - 1) / self.ngpus_per_node)\n if self.global_cfg[\"training\"][\"sync_bn\"]:\n self.model = SyncBatchNorm.convert_sync_batchnorm(self.model)\n self.model = DistributedDataParallel(self.model, device_ids=[gpu_id])\n self.logger.info(\"batch_size: {}, workers: {}\".format(batch_size, n_workers))\n\n # define loss function (criterion) and optimizer\n self.loss_fn = CrossEntropyLoss().to(self.device)\n\n optimizer_cls = get_optimizer(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params = copy.deepcopy(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params.pop(\"name\")\n self.optimizer: Optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n self.logger.info(\"Loaded optimizer:\\n%s\", self.optimizer)\n\n # scheduler\n self.scheduler = get_scheduler(self.optimizer, self.global_cfg[\"training\"][\"lr_schedule\"])\n\n if self.distributed:\n train_sampler = DistributedSampler(\n train_dataset,\n shuffle=True,\n drop_last=True\n )\n val_sampler = DistributedSampler(\n val_dataset,\n shuffle=False\n )\n else:\n train_sampler = RandomSampler(train_dataset)\n val_sampler = SequentialSampler(val_dataset)\n\n train_loader = 
DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=train_sampler\n )\n\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=val_sampler\n )\n self.logger.info(\n \"Load dataset done\\nTraining: %d imgs, %d batchs\\nEval: %d imgs, %d batchs\",\n len(train_dataset),\n len(train_loader),\n len(val_dataset),\n len(self.val_loader)\n )\n iter_generator = make_iter_dataloader(train_loader)\n\n while self.iter < self.global_cfg[\"training\"][\"train_iters\"]:\n img, label = next(iter_generator)\n self.train_iter(img, label)\n\n def is_val():\n p1 = self.iter != 0\n p2 = (self.iter + 1) % self.global_cfg[\"training\"][\"val_interval\"] == 0\n p3 = self.iter == self.global_cfg[\"training\"][\"train_iters\"] - 1\n return (p1 and p2) or p3\n\n # have a validation\n if is_val():\n self.validate()\n # end one iteration\n self.iter += 1", "def _update_model(self, normalization_type='stats'):\n if self.num_acquisitions % self.model_update_interval == 0:\n\n # input that goes into the model (is unziped in case there are categorical variables)\n X_inmodel = self.space.unzip_inputs(self.X)\n\n # Y_inmodel is the output that goes into the model\n if self.normalize_Y:\n Y_inmodel = normalize(self.Y, normalization_type)\n else:\n Y_inmodel = self.Y\n\n self.model.updateModel(X_inmodel, Y_inmodel, None, None)", "def train(self, mode=True):\n super().train(mode)\n if mode and self.freeze_2d and self.backbone is not None:\n self._freeze(self.backbone)\n return self", "def __init__(self,\n names,\n data,\n embedding_fns,\n encoder_fns_1,\n encoder_fns_2,\n logits_fns,\n evaluation_fns,\n # MTL\n mixing_ratios,\n L2_coefficient=None,\n is_distill=False,\n distill_coefficient_loc=None,\n distill_coefficient_scale=None,\n distill_temperature=1.0,\n # optimization\n optimizer=\"Adam\",\n learning_rate=0.001,\n gradient_clipping_norm=2.0,\n # misc\n graph=None,\n logdir=None,\n main_model_index=0,\n debug_mode=False):\n \n super(MultitaskBaseModel, self).__init__(\n logdir=logdir, graph=graph,\n saver_max_to_keep=MAX_CHECKPOINTS_TO_KEEP)\n\n num_models = len(names)\n _check_list_compatability(data, num_models)\n _check_fn_list_compatability(embedding_fns, num_models, True)\n _check_fn_list_compatability(encoder_fns_1, num_models, True)\n _check_fn_list_compatability(encoder_fns_2, num_models, True)\n _check_fn_list_compatability(logits_fns, num_models, False)\n _check_fn_list_compatability(evaluation_fns, num_models, False)\n\n # check mixing ratios and MTL\n if len(names) == 1:\n raise ValueError(\"Not supported\")\n _mr_compatible(mixing_ratios, num_models, print_out=True)\n if main_model_index != 0:\n raise ValueError(\"`main_model_index` must be set to `0`\")\n\n self._names = names\n self._data = data\n self._embedding_fns = embedding_fns\n self._encoder_fns_1 = encoder_fns_1\n self._encoder_fns_2 = encoder_fns_2\n self._logits_fns = logits_fns\n self._evaluation_fns = evaluation_fns\n\n # MTL\n self._mixing_ratios = mixing_ratios\n self._L2_coefficient = L2_coefficient\n self._is_disill = is_distill\n self._distill_temperature = distill_temperature\n self._distill_coefficient_loc = distill_coefficient_loc\n self._distill_coefficient_scale = distill_coefficient_scale\n\n self._optimizer = optimizer\n self._learning_rate = learning_rate\n self._gradient_clipping_norm = gradient_clipping_norm\n\n self._main_model_index = main_model_index\n self._debug = collections.defaultdict(list)\n 
self._debug_mode = debug_mode", "def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n return model", "def update(self, batch):\n if self.opt['cuda']:\n inputs = [Variable(torch.LongTensor(b).cuda()) for b in batch[:3]]\n subj_start_binary = Variable(torch.LongTensor(batch[5]).cuda()).float()\n subj_end_binary = Variable(torch.LongTensor(batch[6]).cuda()).float()\n obj_start_relation = Variable(torch.LongTensor(batch[7]).cuda())\n obj_end_relation = Variable(torch.LongTensor(batch[8]).cuda())\n subj_start_type = Variable(torch.LongTensor(batch[9]).cuda())\n subj_end_type = Variable(torch.LongTensor(batch[10]).cuda())\n obj_start_type = Variable(torch.LongTensor(batch[11]).cuda())\n obj_end_type = Variable(torch.LongTensor(batch[12]).cuda())\n nearest_subj_start_position_for_each_token = Variable(torch.LongTensor(batch[13]).cuda())\n distance_to_nearest_subj_start = Variable(torch.LongTensor(batch[14]).cuda())\n distance_to_subj = Variable(torch.LongTensor(batch[15]).cuda())\n nearest_obj_start_position_for_each_token = Variable(torch.LongTensor(batch[3]).cuda())\n distance_to_nearest_obj_start = Variable(torch.LongTensor(batch[4]).cuda())\n else:\n inputs = [Variable(torch.LongTensor(b)) for b in batch[:4]]\n subj_start_label = Variable(torch.LongTensor(batch[4])).float()\n subj_end_label = Variable(torch.LongTensor(batch[5])).float()\n obj_start_label = Variable(torch.LongTensor(batch[6]))\n obj_end_label = Variable(torch.LongTensor(batch[7]))\n subj_type_start_label = Variable(torch.LongTensor(batch[8]))\n subj_type_end_label = Variable(torch.LongTensor(batch[9]))\n obj_type_start_label = Variable(torch.LongTensor(batch[10]))\n obj_type_end_label = Variable(torch.LongTensor(batch[11]))\n subj_nearest_start_for_each = Variable(torch.LongTensor(batch[12]))\n subj_distance_to_start = Variable(torch.LongTensor(batch[13]))\n \n \n mask = (inputs[0].data>0).float()\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n\n \n subj_start_logits, subj_end_logits, obj_start_logits, obj_end_logits = self.model(inputs, distance_to_subj)\n\n subj_start_loss = self.obj_criterion(subj_start_logits.view(-1, self.opt['num_subj_type']+1), subj_start_type.view(-1).squeeze()).view_as(mask)\n subj_start_loss = torch.sum(subj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n subj_end_loss = self.obj_criterion(subj_end_logits.view(-1, self.opt['num_subj_type']+1), subj_end_type.view(-1).squeeze()).view_as(mask)\n subj_end_loss = torch.sum(subj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_start_loss = self.obj_criterion(obj_start_logits.view(-1, self.opt['num_class']+1), obj_start_relation.view(-1).squeeze()).view_as(mask)\n obj_start_loss = torch.sum(obj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_end_loss = self.obj_criterion(obj_end_logits.view(-1, self.opt['num_class']+1), obj_end_relation.view(-1).squeeze()).view_as(mask)\n obj_end_loss = torch.sum(obj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n loss = self.opt['subj_loss_weight']*(subj_start_loss + subj_end_loss) + (obj_start_loss + obj_end_loss)\n \n # backward\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data.item()\n return loss_val", "def 
dist_setting(current_gpu, model, args):\n print(\"channels_last : {}\".format(args.channels_last))\n if args.channels_last:\n args.memory_format = torch.channels_last\n else:\n args.memory_format = torch.contiguous_format\n\n if args.apex:\n args.lr = args.lr*float(args.batch_size*args.world_size)/256.\n args.current_gpu = current_gpu\n if args.current_gpu is not None:\n print(\"Use GPU: {} for training\".format(args.current_gpu))\n\n if args.multigpus_distributed:\n args.rank = args.num_gpus * args.host_num + args.current_gpu\n dist.init_process_group(backend=args.backend,\n rank=args.rank, world_size=args.world_size)\n logger.info('Initialized the distributed environment: \\'{}\\' backend on {} nodes. '.format(\n args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(\n dist.get_rank(), args.num_gpus))\n else:\n args.rank = 0\n\n if args.sync_bn:\n import apex\n print(\"using apex synced BN\")\n model = apex.parallel.convert_syncbn_model(model)\n\n if args.multigpus_distributed:\n if args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n args.batch_size = int(args.batch_size / args.num_gpus)\n logger.info(\"Batch size for each GPU: {}\".format(args.batch_size))\n if not args.apex:\n model.cuda(args.current_gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.current_gpu])\n else:\n if not args.apex:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n if not args.apex:\n model = model.cuda(args.current_gpu)\n else:\n if not args.apex:\n model = torch.nn.DataParallel(model).cuda()\n\n return model, args", "def update(self, batch_size=None, concurrent=False, max_concurrent_workers=None,\n send_signals=True, _use_super=False, return_queryset=False, **kwargs):\n if _use_super:\n return super().update(**kwargs)\n\n if send_signals:\n pre_update.send(sender=self.model, instances = self)\n\n n_concurrent_writers = self._get_n_concurrent_workers(max_concurrent_workers)\n concurrent = self._get_concurrent(concurrent)\n\n chunks = self.get_chunks(batch_size, n_concurrent_writers)\n\n n = 0\n\n if concurrent:\n # question: how do you pass arguments in this function?\n jobs = [partial(BulkModelQuerySet._update_chunk, self, chunk, **kwargs) for chunk in chunks if chunk]\n executor = ConcurrentExecutor(jobs)\n results = executor.run_async()\n n = sum(results)\n\n else:\n for chunk in chunks:\n if not chunk:\n # skip empty chunks (only happens in the case of an empty queryset)\n continue\n\n n += self._update_chunk(chunk, **kwargs)\n\n if send_signals:\n post_update.send(sender = self.model, instances = self)\n\n if return_queryset:\n _ids = []\n for obj in self:\n _id = getattr(obj, 'id') or getattr(obj, 'pk')\n if _id:\n _ids.append(_id)\n\n return self.filter(id__in = _ids)\n\n return n", "def update(self, x_train_single, updated_h):\n x_row = x_train_single.toarray()\n for i in range(self.num_models):\n self.models[i].partial_fit(x_row, [updated_h[i]])", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def prepare(self, n_cores=1, ipp_client=None):\n if len(self.shape_parameters):\n self.morpher = MORPHERS[self.config['morpher']](self.config.get('morpher_config', {}),\n self.shape_parameters)\n zs_list = self.morpher.get_anchor_points(bounds=self.get_bounds())\n\n # Create the configs for each new model\n configs = []\n for zs in zs_list:\n config = 
deepcopy(self.pdf_base_config)\n for i, (setting_name, (anchors, _, _)) in enumerate(self.shape_parameters.items()):\n # Translate from zs to settings using the anchors dict. Maybe not all settings are numerical.\n config[setting_name] = anchors[zs[i]]\n if ipp_client is None and n_cores != 1:\n # We have to compute in parallel: must have delayed computation on\n config['delay_pdf_computation'] = True\n configs.append(config)\n\n # Create the new models\n if n_cores == 1:\n models = [Model(c) for c in tqdm(configs, desc=\"Computing/loading models on one core\")]\n\n elif ipp_client is not None:\n models = create_models_ipyparallel(configs, ipp_client,\n block=self.config.get('block_during_paralellization', False))\n\n else:\n models = [Model(c) for c in tqdm(configs, desc=\"Preparing model computation tasks\")]\n\n hashes = set()\n for m in models:\n for s in m.sources:\n hashes.add(s.hash)\n\n compute_many(hashes, n_cores)\n\n # Reload models so computation takes effect\n models = [Model(c) for c in tqdm(configs, desc=\"Loading computed models\")]\n\n # Add the new models to the anchor_models dict\n for zs, model in zip(zs_list, models):\n self.anchor_models[tuple(zs)] = model\n\n # Build the interpolator for the rates of each source.\n self.mus_interpolator = self.morpher.make_interpolator(f=lambda m: m.expected_events(),\n extra_dims=[len(self.source_name_list)],\n anchor_models=self.anchor_models)\n\n self.is_data_set = False\n self.is_prepared = True", "def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def update_dependencies():\r\n\r\n if par['task'] == 'mnist':\r\n par['n_tasks'] = 100\r\n par['input_shape'] = [28, 28]\r\n par['n_input'] = np.product(par['input_shape'])\r\n par['n_output'] = 10\r\n elif par['task'] == 'omniglot':\r\n par['input_shape'] = [26, 26]\r\n par['n_input'] = 256 if par['conv_input'] else np.product(par['input_shape'])\r\n par['n_output'] = par['n_ways'] #par['n_meta_tasks'] + par['n_test_tasks']\r\n\r\n par['layer_dims'] = [par['n_input']] + par['hidden_layers'] + [par['n_output']]\r\n\r\n\r\n par['n_layers'] = len(par['layer_dims'])\r\n if par['task'] == 'mnist' or par['task'] == 'imagenet':\r\n par['labels_per_task'] = 10\r\n elif par['task'] == 'cifar':\r\n par['labels_per_task'] = 5", "def train(world_rank=0, world_size=4, train_data=None, train_target=None, do_log=False, comms=None):\n torch.manual_seed(1234)\n model = Net()\n optimizer = optim.SGD(model.parameters(),\n lr=0.01, momentum=0.5)\n\n num_batches = train_data.shape[1]\n\n if (world_rank == 0 and do_log):\n print(\"Started Training\")\n total_data = len(train_data)\n epochs = 1\n total_steps = epochs * total_data\n local_time_communication = 0\n local_total_time_communication = 0\n\n for epoch in range(epochs):\n epoch_loss = 0.0\n count = 0\n for data, target in zip(train_data, train_target):\n data = np.reshape(data, (data.shape[0], 1, data.shape[1], data.shape[2])) / 128.0\n count = count + 1\n result = '{0:.4g}'.format((count / float(total_steps)) * 100.0)\n if (world_rank == 0):\n print(\"Progress {}% \\r\".format(result), end='\\r')\n 
optimizer.zero_grad()\n output = model(data)\n # this comes with data loading mechanism use target or target.long()\n # depending on network specifications.\n target = target.long()\n loss = F.nll_loss(output, target)\n epoch_loss += loss.item()\n # print(epoch_loss)\n loss.backward()\n if (world_rank == 0):\n local_time_communication = time.time()\n average_gradients_mpi(model, comm=comms, world_size=4)\n if (world_rank == 0):\n local_time_communication = time.time() - local_time_communication\n local_total_time_communication = local_total_time_communication + local_time_communication\n optimizer.step()\n if (world_rank == 0):\n print('Rank ', world_rank, ', epoch ',\n epoch, ': ', epoch_loss / num_batches)\n return model, local_total_time_communication", "def train_model(self\n\t\t, epochs=100\n\t\t, minibatch_size=20\n\t\t, yield_every_iteration=False):\n\n\t\tif self.input_batch is None:\n\t\t\traise ValueError(\"Denoising autoencoder must be initialised with \"\n\t\t\t\t\"input data to train model independently.\")\n\t\tif self.output_batch is None:\n\t\t\traise ValueError(\"RMI denoising autoencoder must be initialised \"\n\t\t\t\t\"with output data to train model independently.\")\n\n\t\tbatch_count = self.input_batch.get_value(\n\t\t\tborrow=True).shape[0]//minibatch_size\n\n\t\tfor epoch in xrange(epochs):\n\t\t\tcosts = []\n\t\t\tfor index in xrange(batch_count):\n\t\t\t\tcost = self.train_model_once(index, minibatch_size)\n\t\t\t\tcosts.append(cost)\n\t\t\t\tif yield_every_iteration:\n\t\t\t\t\tyield (index, cost)\n\n\t\t\tif not yield_every_iteration:\n\t\t\t\tyield (epoch, numpy.mean(costs))", "def enable_model_cpu_offload(self, gpu_id=0):\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook", "def train(self, mode=True):\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()", "async def fit_model_on_worker(\n worker,\n built_model: sy.Plan,\n built_loss_fn: sy.Plan,\n encrypters,\n batch_size: int,\n curr_round: int,\n max_nr_batches: int,\n lr: float,\n):\n num_of_parameters = len(built_model.parameters())\n built_model.id = \"GlobalModel\"\n # built_loss_fn.id = \"LossFunc\"\n # model_config = sy.ModelConfig(model=built_model,\n # loss_fn=built_loss_fn,\n # optimizer=\"SGD\",\n # batch_size=batch_size,\n # optimizer_args={\"lr\": lr},\n # epochs=1,\n # max_nr_batches=max_nr_batches)\n # model_config_send_start = time.time()\n built_model.send(worker)\n # model_config_send_end = time.time()\n print(\"[trace] GlobalInformationSend duration\", worker.id, model_config_send_end - model_config_send_start)\n\n return_ids = [0, 1]\n for i in 
range(num_of_parameters):\n return_ids.append(\"p\" + str(i))\n\n fit_sagg_start = time.time()\n result_list = await worker.async_fit_sagg_mc(dataset_key=\"mnist\", encrypters=encrypters, return_ids=return_ids)\n fit_sagg_end = time.time()\n print(\"[trace] FitSagg\", \"duration\", worker.id, fit_sagg_end - fit_sagg_start)\n\n loss = result_list[0]\n num_of_training_data = result_list[1]\n enc_params = result_list[2:]\n\n print(\"Iteration %s: %s loss: %s\" % (curr_round, worker.id, loss))\n\n return worker.id, enc_params, loss, num_of_training_data", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))", "def main(batch_size, saves_dir=TENSORFLOW_SAVES_DIR):\n batches = [1, 8, 16, 32, 64]\n if batch_size:\n batches = [batch_size]\n\n for batch_size in batches:\n print(\"Batch size: {}\".format(batch_size))\n batch = np.random.random((batch_size, 224, 224, 3))\n\n # our default model\n tf.reset_default_graph()\n usual_model = Model()\n measure_model(usual_model, \"Usual model\", batch)\n usual_model.sess.close()\n\n # our binary file\n tf.reset_default_graph()\n frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='constant_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(frozen_model, \"Frozen model\", batch)\n frozen_model.sess.close()\n\n # binary file with some constant operations\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='optimized_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, \"Optimized frozen model\", batch)\n optimized_frozen_model.sess.close()\n\n # model quantized with python\n model_name = \"Quantized with python\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_python.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)\n\n # model quantized with bazel\n model_name = \"Quantized with bazel\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_bazel.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)", "def train(self, mode=True):\n super(CRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def _train_task(self, train_loader, val_loader):\n if self._task == 0:\n epochs = 
90\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.1, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [50, 60], gamma=0.1)\n self._train(train_loader, val_loader, epochs, optimizer, scheduler)\n return\n\n # Training on all new + examplars\n print(\"Training\")\n self._finetuning = False\n epochs = 60\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.1, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [40, 50], gamma=0.1)\n self._train(train_loader, val_loader, epochs, optimizer, scheduler)\n\n # Fine-tuning on sub-set new + examplars\n print(\"Fine-tuning\")\n self._old_model = self._network.copy().freeze()\n\n self._finetuning = True\n self._build_examplars(train_loader,\n n_examplars=self._k // (self._n_classes - self._task_size))\n train_loader.dataset.set_idxes(self.examplars) # Fine-tuning only on balanced dataset\n\n optimizer = factory.get_optimizer(self._network.parameters(), self._opt_name, 0.01, 0.001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20], gamma=0.1)\n self._train(train_loader, val_loader, 40, optimizer, scheduler)", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n 
trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model", "def train_next_model(self, wait=True, input_data_s3_prefix=None, input_model_id=None):\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n\n # use 'last_trained_model_id' by default as input model for next training\n if input_model_id is None and self.experiment_record._last_trained_model_id is not None:\n logger.info(\n f\"Use last trained model {self.experiment_record._last_trained_model_id} \"\n \"as pre-trained model for training\"\n )\n\n input_model_id = self.experiment_record._last_trained_model_id\n\n if input_model_id != self.experiment_record._last_trained_model_id:\n # No deployment if the given model is not ready\n if not self._check_if_model_ready(input_model_id):\n return\n\n # experiment only allows one training job at a time,\n # validate no other training request is in progress\n if (\n self.experiment_record._training_state is not None\n and self.experiment_record._training_state.endswith(\"ING\")\n ):\n logger.error(\n f\"A training request with model id '{self.experiment_record._next_model_to_train_id}' \"\n f\"was in the state of '{self.experiment_record._training_state}'. 
\"\n \"Please wait until the training job is finished.\"\n )\n raise InvalidUsageException(\n \"Please wait for old Training Job to Complete before requesting a new one!\"\n )\n else:\n # update next_model_to_train_id and training state\n next_model_to_train_id = ModelManager.name_next_model(experiment_id=self.experiment_id)\n\n logger.info(f\"Starting training job for ModelId '{next_model_to_train_id}''\")\n\n self.exp_db_client.update_experiment_next_model_to_train_id(\n self.experiment_id, next_model_to_train_id\n )\n self.exp_db_client.update_experiment_training_state(\n self.experiment_id, TrainingState.PENDING\n )\n\n manifest_file_path = None\n if isinstance(input_data_s3_prefix, list):\n # generate manifest file and upload to s3 when having multiple inputs\n manifest_file_path = self._generate_manifest(input_data_s3_prefix)\n\n try:\n self.next_model_to_train = ModelManager(\n model_db_client=self.model_db_client,\n experiment_id=self.experiment_id,\n model_id=next_model_to_train_id,\n image=self.image,\n role=self.resource_manager.iam_role_arn,\n instance_config=self.resource_manager.training_fleet_config,\n boto_session=self.boto_session,\n algor_config=self.algor_config,\n )\n self.next_model_to_train.fit(\n wait=wait,\n input_model_id=input_model_id,\n input_data_s3_prefix=input_data_s3_prefix,\n manifest_file_path=manifest_file_path,\n logs=wait,\n )\n except Exception as e:\n logger.error(e)\n pass\n\n # wait until exp ddb table updated\n if self.local_mode or wait:\n trained_state = (\n self.experiment_record._training_state == TrainingState.TRAINED\n and self.experiment_record._last_trained_model_id == next_model_to_train_id\n and self.experiment_record._next_model_to_train_id is None\n )\n num_retries = 0\n\n while not trained_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table training status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n trained_state = (\n self.experiment_record._training_state == TrainingState.TRAINED\n and self.experiment_record._last_trained_model_id == next_model_to_train_id\n and self.experiment_record._next_model_to_train_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Training job '{self.experiment_record._next_model_to_train_id}' \"\n f\"was in state of '{self.experiment_record._training_state}'. Expected it to be TRAINED.\"\n )\n if (\n self.experiment_record._training_state == TrainingState.FAILED\n or self.experiment_record._training_state == TrainingState.STOPPED\n ):\n raise SageMakerTrainingJobException(\n f\"Training job '{self.experiment_record._next_model_to_train_id}' \"\n f\"ended in state of '{self.experiment_record._training_state}'. 
Please check Sagemaker logs for \"\n \"more information.\"\n )", "def train(self, mode=True, freeze_bn=False):\n super(NetFeat, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train(config, model, train_iterator, criterion, optimizer, scheduler=None):\n if isinstance(model, collections.Iterable) or isinstance(\n optimizer, collections.Iterable) or isinstance(\n scheduler, collections.Iterable):\n raise ValueError(\n \"Need to provide custom training function if using multi-model \"\n \"or multi-scheduler or multi-optimizer training.\")\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n timers = {k: TimerStat() for k in [\"h2d\", \"fwd\", \"grad\", \"apply\"]}\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n for batch_idx, (features, target) in enumerate(train_iterator):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Create non_blocking tensors for distributed training\n with timers[\"h2d\"]:\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n with timers[\"fwd\"]:\n output = model(features)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n with timers[\"grad\"]:\n # compute gradients in a backward pass\n optimizer.zero_grad()\n\n if config.get(USE_FP16):\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n with timers[\"apply\"]:\n # Call step of optimizer to update model params\n optimizer.step()\n\n if scheduler and config.get(SCHEDULER_STEP) == SCHEDULER_STEP_BATCH:\n scheduler.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if config.get(TEST_MODE) and batch_idx == 0:\n break\n\n if scheduler and config.get(SCHEDULER_STEP) == SCHEDULER_STEP_EPOCH:\n scheduler.step()\n\n stats = {\n \"batch_time\": batch_time.avg,\n BATCH_COUNT: batch_idx + 1,\n \"train_loss\": losses.avg,\n \"data_time\": data_time.avg,\n }\n stats.update({k: t.mean for k, t in timers.items()})\n return stats", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, 
prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def grid_search(train_loader, val_loader, criterion, alpha, beta):\n\n # Initializing training variables\n best_acc = 0\n all_losses = []\n\n # Initializing log file\n logfile = open('./model_compound_scaling/logfiles/logfile.txt', 'a+')\n logfile.write('depth multiplier: {}, width multiplier: {}\\n'.format(alpha, beta))\n\n # Building the model\n if args.dataset == 'CIFAR100' or args.dataset == 'CIFAR10':\n model = micronet(d_multiplier=alpha, w_multiplier=beta)\n\n elif args.dataset == 'ImageNet':\n model = image_micronet(d_multiplier=alpha, w_multiplier=beta)\n\n # If multipile GPUs are used\n if use_cuda and torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n model = nn.DataParallel(model)\n\n # Transfers model to device (GPU/CPU). Device is globally initialized.\n model.to(device)\n\n # Defining the optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=True)\n\n # KERAS like summary of the model architecture\n # summary(your_model, input_size=(channels, H, W), batch_size=-1, device=\"cuda\")\n if use_cuda:\n if args.dataset == 'CIFAR100' or args.dataset == 'CIFAR10':\n summary(model, (3, 32, 32), batch_size=args.batch_size)\n print(model)\n\n elif args.dataset == 'ImageNet':\n summary(model, (3, args.image_size, args.image_size), batch_size=args.batch_size)\n print(model)\n\n # Optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc = checkpoint['acc']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n load_last_epoch = checkpoint['epoch']-1\n print(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n load_last_epoch = -1\n\n # Learning rate schedulers for cifar_micronet and imagenet_micronet\n if args.dataset == 'CIFAR100' or args.data == 'CIFAR10':\n lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,\n T_max = args.epochs,\n eta_min = 0,\n last_epoch = load_last_epoch)\n\n elif args.dataset == 'ImageNet':\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=[30, 60, 90],\n gamma=0.1,\n last_epoch = load_last_epoch)\n\n # START TRAINING\n start_time = time.time()\n model.train()\n\n for epoch in range(args.start_epoch, args.epochs):\n\n print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))\n\n # Executing training process\n running_loss, running_accuracy = train(train_loader, model, criterion, optimizer, epoch)\n\n # Evaluation\n model.eval()\n val_loss, val_accuracy = evaluate(model, criterion, val_loader)\n\n # Logging the accuracies\n all_losses += [(epoch, running_loss, 
val_loss, running_accuracy, val_accuracy)]\n print('Epoch {0} running loss {1:.3f} val loss {2:.3f} running acc {3:.3f} '\n 'val acc{4:.3f} time {5:.3f}'.format(*all_losses[-1], time.time() - start_time))\n logfile.write('Epoch {0} running loss {1:.3f} val loss {2:.3f} running acc {3:.3f} '\n 'val acc{4:.3f} time {5:.3f}\\n'.format(*all_losses[-1], time.time() - start_time))\n\n # Saving checkpoint\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'acc': val_accuracy,\n 'lr': optimizer.param_groups[0]['lr']\n }, args.resume)\n\n # Make a lr scheduler step\n lr_scheduler.step()\n\n # Checking if current epoch yielded best validation accuracy\n is_best = val_accuracy > best_acc\n best_acc = max(val_accuracy, best_acc)\n\n # If so, saving best model state_dict\n if is_best and epoch > 0:\n torch.save(model.state_dict(), './model_compound_scaling/saved_models/best_model.pt')\n\n # Switch back to train mode\n model.train()\n start_time = time.time()", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: 
\", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = 
{\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def run_mlp_experiment(args, device):\n validation_ratio, record_train_acc, record_val_acc, record_test_acc = utils.configure_training_mode(args)\n\n train_loader, validation_loader, test_loader = datasets.build_loaders_by_dataset(\n args.dataset, args.batch_size, validation_ratio=validation_ratio, train_validation_split_seed=0)\n local_loss_list = utils.get_loss(args)\n nonlinearity = utils.get_nonlinearity(args)\n\n optimizer_local, local_opt_arguments_dict, local_scheduler_arguments_dict, \\\n optimizer_final, final_opt_arguments_dict, final_scheduler_arguments_dict = \\\n utils.choose_optimizers_and_parameters(args)\n\n conv_sizes = []\n do_pooling = []\n kernel_sizes = []\n\n fc_layers = [args.mlp_layer_size, args.mlp_layer_size, args.mlp_layer_size]\n\n if 
args.divisive_norm_fc:\n divisive_norm_list = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim,\n args.grouped_var_delta)\n for i in range(len(fc_layers))]\n else:\n divisive_norm_list = None\n\n alt_feedback_type = None\n if args.feedback_alignment:\n alt_feedback_type = 'feedback_alignment'\n elif args.sign_symmetry:\n alt_feedback_type = 'sign_symmetry'\n\n net = networks.Network(nonlinearity, local_loss_list, optimizer_local,\n torch.optim.lr_scheduler.MultiStepLR, conv_sizes, kernel_sizes,\n do_pooling, fc_layers, 'max', args.dataset, bias=False,\n local_opt_arguments_dict=local_opt_arguments_dict,\n local_scheduler_arguments_dict=local_scheduler_arguments_dict,\n dropout_p=args.dropout_p, batch_norm=args.batch_norm,\n divisive_norm_list_conv=None, divisive_norm_list_fc=divisive_norm_list,\n spatial_dropout=args.spatial_dropout, alt_feedback_type=alt_feedback_type)\n\n net = net.to(device)\n print(net)\n\n final_loss = nn.CrossEntropyLoss()\n\n if args.backprop:\n final_opt = optimizer_final(net.parameters(), **final_opt_arguments_dict)\n compute_local_loss = False\n update_local_loss = False\n else:\n final_opt = optimizer_final(net.softmax_layer.parameters(), **final_opt_arguments_dict)\n compute_local_loss = True\n update_local_loss = True\n\n final_scheduler = torch.optim.lr_scheduler.MultiStepLR(final_opt, **final_scheduler_arguments_dict)\n\n train_acc, val_acc, test_acc = utils.train_network(\n net, device, final_loss, final_opt, final_scheduler, args.n_epochs, train_loader, validation_loader,\n test_loader, compute_local_loss=compute_local_loss, update_local_loss=update_local_loss,\n record_train_acc=record_train_acc, record_val_acc=record_val_acc, record_test_acc=record_test_acc,\n print_results=True, backprop_batch_manhattan=args.backprop_batch_manhattan)\n\n return train_acc, val_acc, test_acc", "def update_model(train_dir, image_size = 224, batch_size = 8, epochs = 2):\n \n # Create a data generator and specify\n # the parameters for augmentation\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n \n # create an iterator for data generator\n # and autment the images\n \n train_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=(image_size, image_size),\n batch_size= batch_size,\n class_mode='categorical')\n \n #load pretrained model\n model = models.load_model('vgg16_finetuned.h5')\n \n # Compile the pretrained model in order to update its weight\n model.compile(loss='categorical_crossentropy',\n optimizer = optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n \n # use keras checkpoint to update the model weight\n file_path = 'vgg16_finetuned.h5'\n checkpoint = ModelCheckpoint(file_path)\n callbacks_list = [checkpoint]\n \n # Train the model to update model weight\n history = model.fit_generator(\n train_generator,\n steps_per_epoch = train_generator.samples/train_generator.batch_size,\n epochs = epochs,\n callbacks = callbacks_list)", "def pretrained():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def test_no_model_parallel(self):\n for m in ['transformer/generator', 'transformer/ranker']:\n try:\n _ = self._distributed_train_model(model=m, model_parallel=True)\n except RuntimeError:\n pass\n else:\n self.fail('Did not raise RuntimeError')", "def just_clml(model):\n logging.info(f\"just_clml | {model['name']}\")\n 
logging.info(\"-------------- BEGIN ORIGINAL --------------\")\n logging.info(model[\"mod\"])\n logging.info(\"-------------- END ORIGINAL ----------------\")\n tmp_dir = tempfile.mkdtemp()\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n logging.info(\"Partitioning for CLML...\")\n mod = tvm.relay.op.contrib.clml.partition_for_clml(model[\"mod\"], model[\"params\"])\n partitioned_model = model.copy()\n partitioned_model[\"mod\"] = mod\n logging.info(\"-------------- BEGIN PARTITIONED --------------\")\n logging.info(partitioned_model[\"mod\"])\n logging.info(\"-------------- END PARTITIONED ----------------\")\n targets = []\n targets.append(OPENCL)\n targets.append(tvm.target.Target(\"clml\", HOST))\n compile_and_benchmark(\"just_clml\", partitioned_model, targets, tmp_dir)", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def fit(\n self,\n lr: float,\n epochs: int,\n model_dir: str = \"checkpoints\",\n model_name: str = None,\n momentum: float = 0.95,\n weight_decay: float = 0.0001,\n mixed_prec: bool = False,\n use_one_cycle_policy: bool = False,\n warmup_pct: float = 
0.3,\n lr_gamma: float = 0.1,\n lr_step_size: float = None,\n grad_steps: int = 2,\n save_model: bool = False,\n ) -> None:\n # set epochs\n self.epochs = epochs\n\n # set lr_step_size based on epochs\n if lr_step_size is None:\n lr_step_size = np.ceil(2 / 3 * self.epochs)\n\n # set model name\n if model_name is None:\n model_name = self.model_name\n\n os.makedirs(model_dir, exist_ok=True)\n\n data_loaders = {}\n data_loaders[\"train\"] = self.dataset.train_dl\n data_loaders[\"valid\"] = self.dataset.test_dl\n\n # Move model to gpu before constructing optimizers and amp.initialize\n device = torch_device()\n self.model.to(device)\n count_devices = num_devices()\n torch.backends.cudnn.benchmark = True\n\n named_params_to_update = {}\n total_params = 0\n for name, param in self.model.named_parameters():\n total_params += 1\n if param.requires_grad:\n named_params_to_update[name] = param\n\n print(\"Params to learn:\")\n if len(named_params_to_update) == total_params:\n print(\"\\tfull network\")\n else:\n for name in named_params_to_update:\n print(f\"\\t{name}\")\n\n # create optimizer\n optimizer = optim.SGD(\n list(named_params_to_update.values()),\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n )\n\n # Use mixed-precision if available\n # Currently, only O1 works with DataParallel: See issues https://github.com/NVIDIA/apex/issues/227\n if mixed_prec:\n # break if not AMP_AVAILABLE\n assert AMP_AVAILABLE\n # 'O0': Full FP32, 'O1': Conservative, 'O2': Standard, 'O3': Full FP16\n self.model, optimizer = amp.initialize(\n self.model,\n optimizer,\n opt_level=\"O1\",\n loss_scale=\"dynamic\",\n # keep_batchnorm_fp32=True doesn't work on 'O1'\n )\n\n # Learning rate scheduler\n if use_one_cycle_policy:\n # Use warmup with the one-cycle policy\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer,\n max_lr=lr,\n total_steps=self.epochs,\n pct_start=warmup_pct,\n base_momentum=0.9 * momentum,\n max_momentum=momentum,\n )\n else:\n # Simple step-decay\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, step_size=lr_step_size, gamma=lr_gamma,\n )\n\n # DataParallel after amp.initialize\n model = (\n nn.DataParallel(self.model) if count_devices > 1 else self.model\n )\n\n criterion = nn.CrossEntropyLoss().to(device)\n\n # set num classes\n topk = 5\n if topk >= self.num_classes:\n topk = self.num_classes\n\n for e in range(1, self.epochs + 1):\n print(\n f\"Epoch {e} =========================================================\"\n )\n print(f\"lr={scheduler.get_lr()}\")\n\n self.results.append(\n self.train_an_epoch(\n model,\n data_loaders,\n device,\n criterion,\n optimizer,\n grad_steps=grad_steps,\n mixed_prec=mixed_prec,\n topk=topk,\n )\n )\n\n scheduler.step()\n\n if save_model:\n self.save(\n os.path.join(\n model_dir,\n \"{model_name}_{epoch}.pt\".format(\n model_name=model_name, epoch=str(e).zfill(3),\n ),\n )\n )\n self.plot_precision_loss_curves()", "def train_epoch(loader, model, criterion, optimizer, cuda=True, verbose=False, subset=None,\n ia_model=None, ia_batch_c=64, ):\n loss_sum = 0.0\n stats_sum = defaultdict(float)\n correct_1 = 0.0\n correct_5 = 0.0\n verb_stage = 0\n\n num_objects_current = 0\n num_batches = len(loader)\n\n model.train()\n\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for i, (input, target) in enumerate(loader):\n if cuda:\n input = input.cuda(non_blocking=True)\n target = 
target.cuda(non_blocking=True)\n\n loss, output, stats = criterion(model, input, target)\n\n optimizer.zero_grad()\n loss.backward()\n\n optimizer.step()\n loss_sum += loss.data.item() * input.size(0)\n for key, value in stats.items():\n stats_sum[key] += value * input.size(0)\n\n #pred = output.data.argmax(1, keepdim=True)\n #correct += pred.eq(target.data.view_as(pred)).sum().item()\n _, pred = output.topk(5, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n correct_1 += correct[0].view(-1).float().sum(0)\n correct_5 += correct[:5].view(-1).float().sum(0)\n\n num_objects_current += input.size(0)\n\n if verbose and 10 * (i + 1) / num_batches >= verb_stage + 1:\n print('Stage %d/10. Loss: %12.4f. Acc: %6.2f. Top 5 Acc: %6.2f' % (\n verb_stage + 1, loss_sum / num_objects_current,\n correct_1 / num_objects_current * 100.0,\n correct_5 / num_objects_current * 100.0\n ))\n verb_stage += 1\n # print(loss_sum / num_objects_current)\n if ia_model is not None and i % ia_batch_c == 0:\n ia_model.collect_model(model)\n\n correct_5 = correct_5.cpu()\n correct_1 = correct_1.cpu()\n return {\n 'loss': loss_sum / num_objects_current,\n 'accuracy': correct_1 / num_objects_current * 100.0,\n 'top5_accuracy': correct_5 / num_objects_current * 100.0,\n 'stats': {key: value / num_objects_current for key, value in stats_sum.items()}\n }", "def train(molecule: Sequence[system.Atom],\n spins: Tuple[int, int],\n batch_size: int,\n network_config: Optional[NetworkConfig] = None,\n pretrain_config: Optional[PretrainConfig] = None,\n optim_config: Optional[OptimConfig] = None,\n kfac_config: Optional[KfacConfig] = None,\n mcmc_config: Optional[MCMCConfig] = None,\n logging_config: Optional[LoggingConfig] = None,\n multi_gpu: bool = False,\n double_precision: bool = False,\n graph_path: Optional[str] = None):\n\n if not mcmc_config:\n mcmc_config = MCMCConfig()\n if not logging_config:\n logging_config = LoggingConfig()\n if not pretrain_config:\n pretrain_config = PretrainConfig()\n if not optim_config:\n optim_config = OptimConfig()\n if not kfac_config:\n kfac_config = KfacConfig()\n if not network_config:\n network_config = NetworkConfig()\n\n nelectrons = sum(spins)\n precision = tf.float64 if double_precision else tf.float32\n\n if multi_gpu:\n strategy = tf.distribute.MirroredStrategy()\n else:\n # Get the default (single-device) strategy.\n strategy = tf.distribute.get_strategy()\n if multi_gpu:\n batch_size = batch_size // strategy.num_replicas_in_sync\n logging.info('Setting per-GPU batch size to %s.', batch_size)\n logging_config.replicas = strategy.num_replicas_in_sync\n logging.info('Running on %s replicas.', strategy.num_replicas_in_sync)\n\n # Create a re-entrant variable scope for network.\n with tf.variable_scope('model') as model:\n pass\n\n with strategy.scope():\n with tf.variable_scope(model, auxiliary_name_scope=False) as model1:\n with tf.name_scope(model1.original_name_scope):\n fermi_net = networks.FermiNet(\n atoms=molecule,\n nelectrons=spins,\n slater_dets=network_config.determinants,\n hidden_units=network_config.hidden_units,\n after_det=network_config.after_det,\n architecture=network_config.architecture,\n r12_ee_features=network_config.r12_ee_features,\n r12_en_features=network_config.r12_en_features,\n pos_ee_features=network_config.pos_ee_features,\n build_backflow=network_config.build_backflow,\n use_backflow=network_config.backflow,\n jastrow_en=network_config.jastrow_en,\n jastrow_ee=network_config.jastrow_ee,\n 
jastrow_een=network_config.jastrow_een,\n logdet=True,\n envelope=network_config.use_envelope,\n residual=network_config.residual,\n pretrain_iterations=pretrain_config.iterations)\n\n scf_approx = scf.Scf(\n molecule,\n nelectrons=spins,\n restricted=False,\n basis=pretrain_config.basis)\n if pretrain_config.iterations > 0:\n scf_approx.run()\n\n hamiltonian_ops = hamiltonian.operators(molecule, nelectrons)\n if mcmc_config.init_means:\n if len(mcmc_config.init_means) != 3 * nelectrons:\n raise RuntimeError('Initial electron positions of incorrect shape. '\n '({} not {})'.format(\n len(mcmc_config.init_means), 3 * nelectrons))\n init_means = [float(x) for x in mcmc_config.init_means]\n else:\n init_means = assign_electrons(molecule, spins)\n\n # Build the MCMC state inside the same variable scope as the network.\n with tf.variable_scope(model, auxiliary_name_scope=False) as model1:\n with tf.name_scope(model1.original_name_scope):\n data_gen = mcmc.MCMC(\n fermi_net,\n batch_size,\n init_mu=init_means,\n init_sigma=mcmc_config.init_width,\n move_sigma=mcmc_config.move_width,\n dtype=precision)\n with tf.variable_scope('HF_data_gen'):\n hf_data_gen = mcmc.MCMC(\n scf_approx.tf_eval_slog_hartree_product,\n batch_size,\n init_mu=init_means,\n init_sigma=mcmc_config.init_width,\n move_sigma=mcmc_config.move_width,\n dtype=precision)\n\n with tf.name_scope('learning_rate_schedule'):\n global_step = tf.train.get_or_create_global_step()\n lr = optim_config.learning_rate * tf.pow(\n (1.0 / (1.0 + (tf.cast(global_step, tf.float32) /\n optim_config.learning_rate_delay))),\n optim_config.learning_rate_decay)\n\n if optim_config.learning_rate < 1.e-10:\n logging.warning('Learning rate less than 10^-10. Not using an optimiser.')\n optim_fn = lambda _: None\n update_cached_data = None\n elif optim_config.use_kfac:\n cached_data = tf.get_variable(\n 'MCMC_cache',\n initializer=tf.zeros(shape=data_gen.walkers.shape, dtype=precision),\n use_resource=True,\n trainable=False,\n dtype=precision,\n )\n if kfac_config.adapt_damping:\n update_cached_data = tf.assign(cached_data, data_gen.walkers)\n else:\n update_cached_data = None\n optim_fn = lambda layer_collection: mean_corrected_kfac_opt.MeanCorrectedKfacOpt( # pylint: disable=g-long-lambda\n invert_every=kfac_config.invert_every,\n cov_update_every=kfac_config.cov_update_every,\n learning_rate=lr,\n norm_constraint=kfac_config.norm_constraint,\n damping=kfac_config.damping,\n cov_ema_decay=kfac_config.cov_ema_decay,\n momentum=kfac_config.momentum,\n momentum_type=kfac_config.momentum_type,\n loss_fn=lambda x: tf.nn.l2_loss(fermi_net(x)[0]),\n train_batch=data_gen.walkers,\n prev_train_batch=cached_data,\n layer_collection=layer_collection,\n batch_size=batch_size,\n adapt_damping=kfac_config.adapt_damping,\n is_chief=True,\n damping_adaptation_decay=kfac_config.damping_adaptation_decay,\n damping_adaptation_interval=kfac_config.damping_adaptation_interval,\n min_damping=kfac_config.min_damping,\n use_passed_loss=False,\n estimation_mode='exact',\n )\n else:\n adam = tf.train.AdamOptimizer(lr)\n optim_fn = lambda _: adam\n update_cached_data = None\n\n qmc_net = qmc.QMC(\n hamiltonian_ops,\n fermi_net,\n data_gen,\n hf_data_gen,\n clip_el=optim_config.clip_el,\n check_loss=optim_config.check_loss,\n )\n\n qmc_net.train(\n optim_fn,\n optim_config.iterations,\n logging_config,\n using_kfac=optim_config.use_kfac,\n strategy=strategy,\n scf_approx=scf_approx,\n global_step=global_step,\n determinism_mode=optim_config.deterministic,\n 
cached_data_op=update_cached_data,\n write_graph=os.path.abspath(graph_path) if graph_path else None,\n burn_in=mcmc_config.burn_in,\n mcmc_steps=mcmc_config.steps,\n )", "def _local_train(self, dataloader_with_memory, num_updates):\n # Local train\n _size = len(dataloader_with_memory)\n self.model = self.model.train()\n for _batch in range(num_updates):\n X, y = dataloader_with_memory.get_samples()\n X, y = X.to(self._device), y.to(self._device)\n if _batch == 0:\n # Initialize the batch-size using the first batch to avoid\n # edge cases with drop_last=False\n _batch_size = X.shape[0]\n _num_batches_per_epoch = (_size // _batch_size) + int(\n (_size % _batch_size) != 0\n )\n # Compute prediction and loss\n _pred = self.model(X)\n _loss = self._loss(_pred, y)\n\n # Backpropagation\n _loss.backward()\n self._optimizer.step()\n self._optimizer.zero_grad()\n self.num_batches_seen += 1\n _loss, _current_epoch = (\n _loss.item(),\n self.num_batches_seen // _num_batches_per_epoch,\n )\n\n if self.log:\n if _batch % self.log_period == 0:\n print(\n f\"loss: {_loss:>7f} after {self.num_batches_seen:>5d}\"\n f\" batches of data amounting to {_current_epoch:>5d}\"\n \" epochs.\"\n )\n self.writer.add_scalar(\n f\"client{self.client_id}/train/Loss\",\n _loss,\n self.num_batches_seen,\n )\n\n if _current_epoch > self.current_epoch:\n # At each epoch we look at the histograms of all the\n # network's parameters\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(\n f\"client{self.client_id}/{name}\", p, _current_epoch\n )\n\n self.current_epoch = _current_epoch", "def __init__(\n self,\n config: ModelParallelConfig,\n encoder_type: enum,\n total_virtual_tokens: int,\n token_dim: int,\n hidden_size,\n lstm_dropout: float,\n num_layers: int,\n init_std: float,\n taskname: str = \"taskname\",\n ):\n super().__init__()\n self.token_dim = token_dim\n self.input_size = token_dim\n self.output_size = token_dim\n self.hidden_size = hidden_size\n self.total_virtual_tokens = total_virtual_tokens\n self.encoder_type = encoder_type\n self.activation = \"gelu\"\n self.init_std = init_std\n self.taskname = taskname\n\n # Set fixed indicies for forward pass\n self.register_buffer(\"indices\", torch.LongTensor(list(range(self.total_virtual_tokens))))\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n self.inference_table = InferenceTable(taskname, self.token_dim, self.total_virtual_tokens)\n\n if self.encoder_type == PromptEncoderType.EMBEDDING:\n init.xavier_normal_(self.embedding.weight)\n elif self.encoder_type == PromptEncoderType.LSTM:\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = nn.Sequential(\n nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n nn.ReLU(),\n nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderType.MLP:\n if num_layers <= 1:\n raise ValueError(\n \"The MLP prompt encoder must have at least 2 layers, and exactly 2 layers is recommended.\"\n )\n\n layers = [nn.Linear(self.input_size, self.hidden_size), nn.ReLU()]\n for _ in range(num_layers - 2):\n layers.extend([nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU()])\n\n layers.append(nn.Linear(self.hidden_size, self.output_size))\n self.mlp_head = nn.Sequential(*layers)\n\n elif self.encoder_type == PromptEncoderType.TPMLP:\n self.tpmlp = 
TPMLP(config, self.total_virtual_tokens, self.hidden_size, self.output_size, self.init_std,)\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")", "def eval_model(\n self,\n model: nn.Module,\n batch_size: int = 32,\n data: Union[str, th.utils.data.Dataset] = \"test\",\n collate_fn: Optional[Callable] = None,\n by_example: bool = False,\n label_map: Optional[Callable] = None,\n nll: bool = False,\n ):\n # Set model to test mode\n mode = model.training\n model.train(mode=False)\n # Select dataset for evaluation\n dataset = data\n if isinstance(data, str):\n dataset = self.get_split(data)\n elif not isinstance(dataset, th.utils.data.Dataset):\n raise ValueError(\n \"`data` must be a pytorch dataset or one of 'dev'/'valid'\"\n f\"/'test/'train', got {dataset.__class__.__name__} instead\"\n )\n # Dataloader\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n collate_fn=self.collate_fn if collate_fn is None else collate_fn,\n )\n y, y_hat, all_nlls = [], [], []\n for batch in data_loader:\n # Get model predictions\n with th.no_grad():\n nlls, _, predicted = self.nll(\n model,\n batch,\n reduction=\"none\",\n predict=True,\n )\n # Track predictions and reference\n y.append(batch[-1])\n y_hat.append(predicted)\n all_nlls.append(nlls)\n # Concatenate\n y = th.cat(y, dim=0).cpu()\n y_hat = th.cat(y_hat, dim=0).cpu()\n all_nlls = th.cat(all_nlls, dim=0).cpu()\n # Map predictions to labels (this is useful for single\n # head model evaluated on multiple tasks)\n if label_map:\n y_hat = th.tensor([label_map(y_hat_i.item()) for y_hat_i in y_hat])\n # Task specific score\n if by_example:\n score = (y == y_hat).float()\n else:\n score = self.score(y_hat, y)\n nlls = nlls.mean()\n # Reset model to the original mode\n model.train(mode=mode)\n\n result = score\n if nll:\n result = (score, all_nlls)\n return result" ]
[ "0.63509315", "0.6149969", "0.61396825", "0.6108458", "0.61034065", "0.60946226", "0.59626156", "0.57862693", "0.57671374", "0.5759996", "0.57448643", "0.5693965", "0.56239045", "0.5599338", "0.55595565", "0.55579144", "0.55540186", "0.5442606", "0.54349464", "0.5431222", "0.5386346", "0.5386165", "0.5382863", "0.53612775", "0.535642", "0.53510004", "0.5349532", "0.53321517", "0.53299785", "0.53141254", "0.53120035", "0.5302654", "0.5298746", "0.52948606", "0.5273332", "0.52696776", "0.5256843", "0.5256843", "0.5253346", "0.5240591", "0.52376413", "0.5232878", "0.52316076", "0.52256465", "0.5223348", "0.52146846", "0.51987374", "0.51858467", "0.518402", "0.51781124", "0.517077", "0.51703286", "0.5164608", "0.5158021", "0.5157439", "0.5155707", "0.51411027", "0.51398665", "0.51366055", "0.5130489", "0.5123576", "0.51188713", "0.51177007", "0.51044434", "0.509997", "0.5097477", "0.5095674", "0.50933796", "0.5085796", "0.50786906", "0.5077824", "0.50778115", "0.5074932", "0.5070305", "0.50701505", "0.5062838", "0.50608045", "0.50606984", "0.50562835", "0.5053644", "0.5050302", "0.5049271", "0.50476396", "0.5046463", "0.50441426", "0.50415415", "0.50412184", "0.50401163", "0.50334066", "0.50332135", "0.5033165", "0.50331324", "0.5029536", "0.50282246", "0.5022945", "0.5016402", "0.5014772", "0.50007474", "0.49959967", "0.49935916", "0.49933645" ]
0.0
-1
Generates the model summary, which is required for model partitioning across GPUs, and then moves the model to GPU with data parallel/model parallel by calling adjust_model_for_gpus.
def create_summary_and_adjust_mean_teacher_model_for_gpus(self) -> None:
    if self._mean_teacher_model is None:
        raise ValueError("Mean teacher model must be created before it can be adjusted.")

    if self.config.is_segmentation_model:
        summary_for_segmentation_models(self.config, self._mean_teacher_model)
    # Prepare for mixed precision training and data parallelization (no-op if already done).
    # This relies on the information generated in the model summary.
    self.adjust_mean_teacher_model_for_gpus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_summary_and_adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_model_for_gpus()", "def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n random_state = RandomStateSnapshot.snapshot_random_state()\n # There appears to be a bug in apex, where previous use (in training for example) causes problems\n # when another model is later built on the CPU (for example, before loading from a checkpoint)\n # https://github.com/NVIDIA/apex/issues/694\n # Hence, move the model to the GPU before doing model summary.\n if config.use_gpu:\n model = model.cuda()\n if isinstance(config, ScalarModelBase):\n # To generate the model summary, read the first item of the dataset. Then use the model's own\n # get_model_input function to convert the dataset item to input tensors, and feed them through the model.\n train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)\n train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))\n model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs\n # The model inputs may already be converted to float16, assuming that we would do mixed precision.\n # However, the model is not yet converted to float16 when this function is called, hence convert back to float32\n summary = ModelSummary(model)\n summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)\n elif config.is_segmentation_model:\n summary_for_segmentation_models(config, model)\n assert model.summarizer\n summary = model.summarizer # type: ignore\n else:\n raise ValueError(\"Don't know how to generate a summary for this type of model?\")\n RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)\n random_state.restore_random_state()", "def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")", "def _build_model(self):\n tf.set_random_seed(self.params.tf_random_seed)\n np.random.seed(4321)\n phase_train = not (self.params.eval or self.params.forward_only)\n\n log_fn('Generating model')\n losses = []\n device_grads = []\n all_logits = []\n all_top_1_ops = []\n all_top_5_ops = []\n enqueue_ops = []\n gpu_compute_stage_ops = []\n gpu_grad_stage_ops = []\n\n with tf.device(self.global_step_device):\n global_step = tf.train.get_or_create_global_step()\n \n # Build the processing and model for the worker.\n (image_producer_ops,\n image_producer_stages) = self._build_image_processing(shift_ratio=0)\n image_producer_ops = tf.group(*image_producer_ops)\n update_ops = None\n staging_delta_ops = []\n\n for device_num in range(len(self.devices)):\n with self.variable_mgr.create_outer_variable_scope(\n device_num), tf.name_scope('tower_%i' % device_num) as name_scope:\n results = self.add_forward_pass_and_gradients(\n phase_train, device_num, device_num,\n image_producer_stages[device_num], gpu_compute_stage_ops,\n gpu_grad_stage_ops)\n if phase_train:\n 
losses.append(results['loss'])\n device_grads.append(results['gradvars'])\n \n\n if device_num == 0:\n # Retain the Batch Normalization updates operations only from the\n # first tower. These operations update the moving mean and moving\n # variance variables, which are updated (but not used) during\n # training, and used during evaluation. The moving mean and variance\n # approximate the true mean and variance across all images in the\n # dataset. Therefore, in replicated mode, these moving averages would\n # be almost identical for each tower, and so we only update and save\n # the moving averages for one tower. In parameter server mode, all\n # towers share a copy of the variables so we also only need to update\n # and save the moving averages once.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)\n staging_delta_ops = list(self.variable_mgr.staging_delta_ops)\n \n enqueue_ops.append(tf.group(*gpu_compute_stage_ops))\n\n fetches = self._build_fetches(global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops,\n all_top_5_ops, phase_train)\n return (image_producer_ops, enqueue_ops, fetches)", "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def _regular_build(self):\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in 
enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')", "def _build_all_models(self):\r\n self.output_tensors = {}\r\n self.loss_terms = {}\r\n self.metrics = {}\r\n\r\n def _build_datasource_summaries(data_sources, mode):\r\n \"\"\"Register summary operations for input data from given data sources.\"\"\"\r\n with tf.variable_scope('%s_data' % mode):\r\n for data_source_name, data_source in data_sources.items():\r\n tensors = data_source.output_tensors\r\n for key, tensor in tensors.items():\r\n summary_name = '%s/%s' % (data_source_name, key)\r\n shape = tensor.shape.as_list()\r\n num_dims = len(shape)\r\n if num_dims == 4: # Image data\r\n if shape[1] == 1 or shape[1] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_first')\r\n elif shape[3] == 1 or shape[3] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_last')\r\n # TODO: fix issue with no summary otherwise\r\n elif num_dims == 2:\r\n self.summary.histogram(summary_name, tensor)\r\n else:\r\n logger.debug('I do not know how to create a summary for %s (%s)' %\r\n (summary_name, tensor.shape.as_list()))\r\n\r\n def _build_train_or_test(mode):\r\n data_sources = self._train_data if mode == 'train' else self._test_data\r\n\r\n # Build model\r\n output_tensors, loss_terms, metrics = self.build_model(data_sources, mode=mode)\r\n\r\n # Record important tensors\r\n self.output_tensors[mode] = output_tensors\r\n self.loss_terms[mode] = loss_terms\r\n self.metrics[mode] = metrics\r\n\r\n # Create summaries for scalars\r\n if mode == 'train':\r\n for name, loss_term in loss_terms.items():\r\n self.summary.scalar('loss/%s/%s' % (mode, name), loss_term)\r\n for name, metric in metrics.items():\r\n self.summary.scalar('metric/%s/%s' % (mode, name), metric)\r\n\r\n # Build the main model\r\n if len(self._train_data) > 0:\r\n _build_datasource_summaries(self._train_data, mode='train')\r\n _build_train_or_test(mode='train')\r\n logger.info('Built model.')\r\n\r\n # Print no. 
of parameters and lops\r\n flops = tf.profiler.profile(\r\n options=tf.profiler.ProfileOptionBuilder(\r\n tf.profiler.ProfileOptionBuilder.float_operation()\r\n ).with_empty_output().build())\r\n logger.info('------------------------------')\r\n logger.info(' Approximate Model Statistics ')\r\n logger.info('------------------------------')\r\n logger.info('FLOPS per input: {:,}'.format(flops.total_float_ops / self._batch_size))\r\n logger.info(\r\n 'Trainable Parameters: {:,}'.format(\r\n np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])\r\n )\r\n )\r\n logger.info('------------------------------')\r\n\r\n # If there are any test data streams, build same model with different scope\r\n # Trainable parameters will be copied at test time\r\n if len(self._test_data) > 0:\r\n _build_datasource_summaries(self._test_data, mode='test')\r\n with tf.variable_scope('test'):\r\n _build_train_or_test(mode='test')\r\n logger.info('Built model for live testing.')\r\n\r\n if self._enable_live_testing:\r\n self._tester._post_model_build() # Create copy ops to be run before every test run\r", "def main(model_arch: str, images: List, batch_size: int,\n batches_per_step: int, loop: bool, num_iterations: int, num_ipus: int, mode: str, data: str,\n available_memory_proportion: float, gen_report: bool, save_graph_pb: bool, use_ipu_model: bool) -> None:\n\n if (available_memory_proportion <= 0.05) or (available_memory_proportion > 1):\n raise ValueError('Invalid \"availableMemoryProportion\" value: must be a float >=0.05'\n ' and <=1 (default value is 0.6)')\n\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --log_cycle_count=0\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--log_cycle_count=0\"\n\n if data == \"synthetic\":\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_synthetic_data --synthetic_data_initializer=random\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"\"\n\n if use_ipu_model:\n if \"TF_POPLAR_FLAGS\" in os.environ:\n os.environ[\"TF_POPLAR_FLAGS\"] += \" --use_ipu_model\"\n else:\n os.environ[\"TF_POPLAR_FLAGS\"] = \"--use_ipu_model\"\n\n # Select model architecture\n model_cls = model_dict[model_arch]\n if model_arch == 'googlenet':\n model_arch = 'inceptionv1'\n config = Path(f'configs/{model_arch}.yml')\n\n # Create graph and data iterator\n loop_op, infeed_initializer, outfeed_op = construct_graph(model_cls, config,\n f\"./checkpoints/{model_arch}/\",\n batch_size, batches_per_step,\n images, loop,\n model_cls.preprocess_method(), num_ipus,\n mode, save_graph_pb)\n # Run on model or device\n if gen_report:\n get_report(loop_op, infeed_initializer, outfeed_op, f\"{config.stem}_report.txt\",\n available_memory_proportion=available_memory_proportion)\n else:\n ground_truth = tuple([Path(filename).stem for filename in images])\n run_inference(loop_op, infeed_initializer, outfeed_op, batch_size, batches_per_step, config.stem,\n model_cls.decode_method(), ground_truth, num_iterations, num_ipus, mode, data,\n available_memory_proportion=available_memory_proportion)", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = 
\"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\n data_dir, num_targets, dataset, validate, seed, shuffled, shift,\n pad, batch_size=128):\n summary_dir += '/train/'\n with tf.Graph().as_default():\n # Build model\n features = get_features('train', batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=False,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n # Print stats\n param_stats = tf.profiler.profile(\n tf.get_default_graph(),\n options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n writer = tf.summary.FileWriter(summary_dir)\n run_experiment(load_training, summary_dir, writer, train_experiment,\n model, result, max_steps, save_step)\n writer.close()", "def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. 
Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model", "def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n assert isinstance(model, BaseModel)\n crop_size = config.crop_size\n if isinstance(crop_size, int):\n crop_size = (crop_size, crop_size, crop_size)\n try:\n model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files)\n except AttributeError as e:\n logging.warning(f\"summary_for_segmentation_models failed with exception {e}\")", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. 
\"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if 
self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import 
model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n 
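# The local initializer is required in addition to the global one because\n    # TF1 input producers created with `num_epochs` store their epoch counter\n    # as a local variable.\n    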
sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def summarize_model(\n model: keras.Model, fig_dir: Union[str, None] = None\n) -> None:\n\n submodels = []\n for layer in model.layers:\n if isinstance(layer, TimeDistributed):\n submodels.append(layer.layer)\n\n for submodel in submodels:\n submodel.summary()\n model.summary()\n\n if fig_dir is not None:\n for submodel in submodels:\n keras.utils.plot_model(\n submodel, os.path.join(fig_dir, f'model_{submodel.name}.png'),\n dpi=300\n )\n keras.utils.plot_model(\n model, os.path.join(fig_dir, 'model_full.png'), dpi=300\n )", "def main():\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not 
os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = 
colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save", "def create_snapshot_model(model_args):\n # similar to create_separate_model but with experts pretrained\n # 1. get model directory path with models at each epoch for a global model\n # 2. choose the model at epochs that gives best validation performance for each cohort\n # as starting point\n # 3. finetune the resulting model\n tasks = model_args['tasks']\n X_val, y_val, cohorts_val = model_args['X_val'], model_args['y_val'], model_args['cohorts_val']\n val_loader = create_loader(X_val, y_val, batch_size=100, shuffle=False) \n # convert y_val and cohorts_val to numpy\n y_val, cohorts_val = dataset2numpy(y_val).astype(int), dataset2numpy(cohorts_val).astype(int)\n\n experts_auc = [(None, 0) for _ in range(len(tasks))] # init to (n model, 0 auc)\n for fn in glob.glob(model_args['global_model_dir'] + \"/epoch*.m\"):\n net = torch.load(fn)\n y_pred = get_output(net, val_loader).ravel()\n for i, task in enumerate(tasks):\n y_val_in_task = y_val[cohorts_val == task]\n y_pred_in_task = y_pred[cohorts_val == task]\n try:\n auc = roc_auc_score(y_val_in_task, y_pred_in_task)\n except:\n auc = 0.1 # slightly larger than 0 but shouldn't be selected\n if auc > experts_auc[i][1]:\n experts_auc[i] = (net, auc)\n\n experts = nn.ModuleList([expert for expert, auc in experts_auc])\n # currently is inefficient by running all models for all tasks\n # I should be able to just run the required expert\n model = Separate_MIMIC_Model(experts)\n return model", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n 
job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def main(batch_size, saves_dir=TENSORFLOW_SAVES_DIR):\n batches = [1, 8, 16, 32, 64]\n if batch_size:\n batches = [batch_size]\n\n for batch_size in batches:\n print(\"Batch size: {}\".format(batch_size))\n batch = np.random.random((batch_size, 224, 224, 3))\n\n # our default model\n tf.reset_default_graph()\n usual_model = Model()\n measure_model(usual_model, \"Usual model\", batch)\n usual_model.sess.close()\n\n # our binary file\n tf.reset_default_graph()\n frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='constant_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(frozen_model, \"Frozen model\", batch)\n frozen_model.sess.close()\n\n # binary file with some constant operations\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='optimized_graph.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, \"Optimized frozen model\", batch)\n optimized_frozen_model.sess.close()\n\n # model quantized with python\n model_name = \"Quantized with python\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_python.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)\n\n # model quantized with bazel\n model_name = \"Quantized with bazel\"\n try:\n tf.reset_default_graph()\n optimized_frozen_model = BinaryModel(\n saves_dir=saves_dir,\n model_file='quantized_graph_bazel.pb',\n input_node_name=Model.input_node_name,\n output_node_name=Model.output_node_name\n )\n measure_model(optimized_frozen_model, model_name, batch)\n optimized_frozen_model.sess.close()\n except FileNotFoundError:\n print(\"skipped // %s\" % model_name)", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = 
utils.multi_gpu_model(model, gpus=2)\n\n return model", "def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, 
\"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()", "def _benchmark_cnn(self):\n self.single_session = False\n (image_producer_ops, enqueue_ops, fetches) = self._build_model()\n fetches_list = nest.flatten(list(fetches.values()))\n main_fetch_group = tf.group(*fetches_list)\n execution_barrier = None\n \n\n global_step = tf.train.get_global_step()\n with tf.device(self.global_step_device):\n with tf.control_dependencies([main_fetch_group]):\n fetches['inc_global_step'] = global_step.assign_add(1)\n\n\n local_var_init_op = tf.local_variables_initializer()\n variable_mgr_init_ops = [local_var_init_op]\n with tf.control_dependencies([local_var_init_op]):\n variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())\n local_var_init_op_group = tf.group(*variable_mgr_init_ops)\n\n summary_op = tf.summary.merge_all()\n is_chief = (not self.job_name or self.task_index == 0)\n summary_writer = None\n \n # We run the summaries in the same thread as the training operations by\n # passing in None for summary_op to avoid a summary_thread being started.\n # Running summaries and training 
operations in parallel could run out of\n # GPU memory.\n saver = tf.train.Saver(\n self.variable_mgr.savable_variables(), save_relative_paths=True)\n ready_for_local_init_op = None\n \n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=self.params.train_dir,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_var_init_op_group,\n saver=saver,\n global_step=global_step,\n summary_op=None,\n save_model_secs=self.params.save_model_secs,\n summary_writer=summary_writer)\n\n step_train_times = []\n start_standard_services = (\n self.params.summary_verbosity >= 1 or\n self.dataset.queue_runner_required())\n target = self.cluster_manager.get_target() if self.cluster_manager else ''\n with sv.managed_session(\n master=target,\n config=create_config_proto(self.params),\n start_standard_services=start_standard_services) as sess:\n image_producer = cnn_util.ImageProducer(sess, image_producer_ops,\n self.batch_group_size)\n image_producer.start()\n for i in xrange(len(enqueue_ops)):\n sess.run(enqueue_ops[:(i + 1)])\n image_producer.notify_image_consumption()\n self.init_global_step, = sess.run([global_step])\n if not self.single_session:\n global_step_watcher = GlobalStepWatcher(\n sess, global_step,\n self.num_workers * self.num_warmup_batches +\n self.init_global_step,\n self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)\n global_step_watcher.start()\n \n\n log_fn('Running warm up')\n local_step = -1 * self.num_warmup_batches\n done_fn = global_step_watcher.done\n loop_start_time = time.time()\n while not done_fn():\n if local_step == 0:\n log_fn('Done warm up')\n \n header_str = 'Step\\tImg/sec\\tloss'\n \n log_fn(header_str)\n \n # reset times to ignore warm up batch\n step_train_times = []\n loop_start_time = time.time()\n \n fetch_summary = None\n summary_str = benchmark_one_step(\n sess, fetches, local_step,\n self.batch_size * (self.num_workers if self.single_session else 1),\n step_train_times, self.trace_filename, image_producer, self.params,\n fetch_summary)\n \n local_step += 1\n loop_end_time = time.time()\n # Waits for the global step to be done, regardless of done_fn.\n \n num_steps = global_step_watcher.num_steps()\n elapsed_time = global_step_watcher.elapsed_time()\n\n average_wall_time = elapsed_time / num_steps if num_steps > 0 else 0\n images_per_sec = ((self.num_workers * self.batch_size) / average_wall_time\n if average_wall_time > 0 else 0)\n\n log_fn('-' * 64)\n log_fn('total images/sec: %.2f' % images_per_sec)\n log_fn('-' * 64)\n image_producer.done()\n #if is_chief:\n # store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)\n # Save the model checkpoint.\n \n sv.stop()\n return {\n 'num_workers': self.num_workers,\n 'num_steps': num_steps,\n 'average_wall_time': average_wall_time,\n 'images_per_sec': images_per_sec\n }", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)", "def main():\n args = arg_parser()\n if(args.gpu):\n gpu = args.gpu\n else:\n print(\"GPU mode not specified, will use the default value - Use GPU\")\n gpu = \"Y\"\n # Device setting:\n device = device_setting(gpu)\n \n # Prepare the datasets and dataloaders:\n print(\"\\nPreparing dataset for train/valid/test ...\")\n train_loader, valid_loader, test_loader, train_data, valid_data, test_data = load_dataset()\n \n # Model architects, criterion and optimizer:\n 
print(\"\\nNetwork archetecture building ...\")\n model, criterion, optimizer = network(device=device,\n architecture=args.architecture,\n learning_rate=args.learning_rate,\n hidden_size=args.hidden_size,\n dropout=args.dropout,\n output_size=args.output_size)\n \n # Train the model:\n print(\"\\n\")\n model = train(model=model,\n epochs=5,\n learning_rate=args.learning_rate,\n criterion=criterion,\n optimizer=optimizer,\n train_loader=train_loader,\n valid_loader=valid_loader,\n device=device)\n \n # Validate the model performance on the test set:\n print(\"\\nValidate model performance on test set ...\")\n test(model=model, test_loader=test_loader, device=device)\n \n # Save model checkpoint:\n print(\"\\nSave model checkpoint ...\")\n save(model=model, train_data=train_data, epochs=args.epochs, architecture=args.architecture)", "def auto_model_profiling(model_info, server_name, device_util_thd=0.01, device_memory_thd=0.01, period=10):\n\n different_kind_devices = collections.OrderedDict()\n for gpu in GPUtil.getGPUs():\n if gpu.name not in different_kind_devices:\n different_kind_devices[gpu.name] = gpu\n\n for device in list(different_kind_devices.values()):\n profiler = Profiler(model_info=model_info, server_name=server_name)\n monitor = UtilMonitor(device, profiler, period, device_util_thd, device_memory_thd)\n monitor.start()", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' 
+ args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n \n print('#'*78,'\\nprinting model variables:')\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape().as_list()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim\n print('{:}: shape={:}, variable_parameters={:}'.format(\n variable.name, shape, variable_parameters))\n total_parameters += variable_parameters\n print('total model parameters: {:}'.format(total_parameters))\n print('#'*78)", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def build_model(cfg, gpu_id=None):\n # Construct the model\n if MODEL_REGISTRY.get(cfg.MODEL.NAME) == None:\n # attempt to find standard models\n model = BaseVideoModel(cfg)\n else:\n # if the model is explicitly defined,\n # it is directly constructed from the model pool\n model = MODEL_REGISTRY.get(cfg.MODEL.NAME)(cfg)\n\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. 
Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n model = model.cuda(device=cur_device)\n \n model_ema = None\n if cfg.MODEL.EMA.ENABLE:\n model_ema = ModelEmaV2(model, decay=cfg.MODEL.EMA.DECAY)\n\n try:\n # convert batchnorm to be synchronized across \n # different GPUs if needed\n sync_bn = cfg.BN.SYNC_BN\n if sync_bn == True and cfg.NUM_GPUS * cfg.NUM_SHARDS > 1:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n except:\n sync_bn = None\n\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS*cfg.NUM_SHARDS > 1:\n # Make model replica operate on the current device\n if cfg.PAI:\n # Support distributed training on the cluster\n model = torch.nn.parallel.DistributedDataParallel(\n module=model\n )\n else:\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n\n return model, model_ema", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def run_model(model, data, inst, cfg, *, var='gender'):\n\n seed = stan_seed(inst, var)\n\n users = data.profiles.assign(unum=np.arange(len(data.profiles), dtype='i4') + 1)\n\n lists = data.lists.reset_index()\n lists['Algorithm'] = lists['Algorithm'].astype('category')\n algos = lists['Algorithm'].cat.categories\n\n lists = lists.join(users[['unum']], on='user')\n\n _log.info('running full model on %d profiles and %d lists (%d algorithms) for %s',\n len(data.profiles), len(data.lists), len(algos), inst)\n timer = Stopwatch()\n\n stan_data = {\n 'A': len(algos),\n 'J': len(users),\n 'NL': len(lists),\n 'ru': lists['unum'],\n 'ra': lists['Algorithm'].cat.codes + 1,\n }\n if var == 'gender':\n stan_data['n'] = users['Known']\n stan_data['y'] = users['female']\n stan_data['rn'] = lists['Known']\n stan_data['ry'] = lists['female']\n out_pfx = 'full'\n elif var == 'dcode':\n stan_data['n'] = users['dcknown']\n stan_data['y'] = users['dcyes']\n stan_data['rn'] = lists['dcknown']\n stan_data['ry'] = lists['dcyes']\n out_pfx = 'full-dcode'\n else:\n raise ValueError(f'unknown variant {var}')\n\n fit = model.sampling(stan_data, seed=seed, check_hmc_diagnostics=True, **cfg)\n _log.info('full-model sampling for %s finished in %s', inst, timer)\n summary = fit.stansummary(pars=[\"mu\", \"sigma\", \"nMean\", \"nDisp\", \"recB\", \"recS\", \"recV\"])\n print(summary)\n (data_dir / inst / f'{out_pfx}-model.txt').write_text(summary)\n\n 
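# extract(permuted=True) merges the chains and returns a dict mapping each\n    # parameter name to an array of posterior draws.\n    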
_log.info('extracting samples')\n samples = fit.extract(permuted=True)\n write_samples(data_dir / inst / f'{out_pfx}-samples.h5', samples, algo_names=list(algos))", "def compute(self):\n first_row = True\n assert (self.num_samples_batch <= 64), \"Batch sizes must be small, <= 64\"\n image_names_list, labels = read_img_names_labels_csv(self.image_vs_labels_csv)\n if len(image_names_list) < 64:\n self.num_samples_batch = len(image_names_list)\n samples_path_list = [os.path.join(self.image_samples_folder, img_name)\n for img_name in image_names_list[0:self.num_samples_batch]]\n batch_ndarray = read_images(samples_path_list, self.image_height, self.image_width)\n model_load_time = self.metric_load_model()\n pre_process_time = self.metric_pre_process_time(batch_ndarray)\n inf_time = self.metric_inference(batch_ndarray)\n col1 = \"Time taken to load the model(in sec)\"\n col2 = \"Time taken to pre_process a batch of \" + str(self.num_samples_batch) + \" images(in sec)\"\n col3 = \"Time taken to predict a batch of \" + str(self.num_samples_batch) + \" images(in sec)\"\n\n if not os.path.isdir(self.root_folder_to_save_results):\n os.makedirs(self.root_folder_to_save_results, exist_ok=True)\n with open(os.path.join(self.root_folder_to_save_results, \"model_inference.csv\"), \"w\", newline='') as \\\n model_inference_file:\n headers = [col1, col2, col3]\n model_inference_writer = DictWriter(model_inference_file, delimiter=',', fieldnames=headers)\n if first_row:\n model_inference_writer.writeheader()\n first_row = False\n model_inference_writer.writerow({col1: model_load_time, col2: pre_process_time, col3: inf_time})\n print(\"model_inference.csv file generated successfully..\")\n\n self.metric_layers(batch_ndarray, self.input_placeholder_tensor_name)\n # Check if the metrics result files have been generated successfully and give a success/failure message\n return result_message_performance_metrics(self.root_folder_to_save_results)", "def multi_gpu_online_evaluation(\n model: Module,\n data_loader: DataLoader,\n metric: Union[str, Sequence[str]] = 'EPE',\n tmpdir: Optional[str] = None,\n gpu_collect: bool = False) -> Dict[str, np.ndarray]:\n\n model.eval()\n metrics = metric if isinstance(metric, (type, list)) else [metric]\n result_metrics = []\n\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n\n for data in data_loader:\n with torch.no_grad():\n batch_results = model(test_mode=True, **data)\n # data['img_metas'] is Datacontainer\n img_metas = data['img_metas'].data[0]\n batch_flow = []\n batch_flow_gt = []\n batch_valid = []\n\n # a batch of result and a batch of img_metas\n for i in range(len(batch_results)):\n result = batch_results[i]\n img_meta = img_metas[i]\n # result.keys() is 'flow' or ['flow_fw','flow_bw']\n # img_meta.keys() is 'flow_gt' or ['flow_fw_gt','flow_bw_gt']\n for k in result.keys():\n\n if img_meta.get(k + '_gt', None) is None:\n # img_meta does not have flow_bw_gt, so just check\n # the forward predicted.\n if k == 'flow_bw':\n continue\n elif k == 'flow_fw':\n batch_flow_gt.append(img_meta['flow_gt'])\n else:\n batch_flow_gt.append(img_meta[k + '_gt'])\n\n batch_flow.append(result[k])\n batch_valid.append(\n img_meta.get('valid', np.ones_like(result[k][..., 0])))\n\n batch_results_metrics = eval_metrics(batch_flow, batch_flow_gt,\n batch_valid, metrics)\n # result_metrics is different from result_metrics in\n # `single_gpu_online_evaluation`\n # result_metrics is Sequence[Dict[str,ndarray]]\n 
result_metrics.append(batch_results_metrics)\n\n if rank == 0:\n batch_size = len(batch_results)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n from mmflow.apis.test import collect_results_cpu, collect_results_gpu\n if gpu_collect:\n result_metrics = collect_results_gpu(result_metrics, len(dataset))\n else:\n result_metrics = collect_results_cpu(result_metrics, len(dataset),\n tmpdir)\n rank, world_size = get_dist_info()\n if rank == 0:\n sys.stdout.write('\\n')\n # result_metrics_ is final result of evaluation with type\n # dict(metric_name=metric)\n result_metrics_ = dict()\n\n for sample_result_metrics in result_metrics:\n for k in sample_result_metrics.keys():\n if result_metrics_.get(k, None) is None:\n result_metrics_[k] = sample_result_metrics[k] / len(\n result_metrics)\n else:\n result_metrics_[k] += sample_result_metrics[k] / len(\n result_metrics)\n\n return result_metrics_", "def main(gpu_device='/gpu:0', cpu_device='/cpu:0'):\n config = Config()\n params = experiment_params()\n model_tools.model_builder(\n params=params,\n config=config,\n model_spec=build_model,\n gpu_device=gpu_device,\n cpu_device=cpu_device)", "def main(gpu_device='/gpu:0', cpu_device='/cpu:0'):\n config = Config()\n params = experiment_params()\n model_tools.model_builder(\n params=params,\n config=config,\n model_spec=build_model,\n gpu_device=gpu_device,\n cpu_device=cpu_device)", "def optimize_models_asr(args, models):\n for model in models:\n model.make_generation_fast_(\n beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,\n need_attn=args.print_alignment,\n )\n\n model.to(dev)", "def _export_model_representations(self, config):\n\n self.logger.msg1(\"Preparing model representations\")\n modelsets = get_modelsets(self.dbpath, self.obo, config.partition_size)\n prefix = self.rootpath + \"-models-\"\n for i, refset in enumerate(modelsets):\n progress = str(i+1) + \"/\" + str(len(modelsets))\n self.logger.msg1(\"Saving model representations: \"+progress)\n refset.save(prefix + str(i+1), \"phenotype\", what=(\"data\",))", "def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector", "def cli(ctx: click.Context,\n experiment: str,\n devices: List[int],\n ) -> None:\n f = EXPERIMENTS[experiment]\n try:\n model, B, C, _devices = f(devices)\n except ValueError as exc:\n # Examples:\n # ValueError: too few devices to hold given partitions (devices: 1, paritions: 2)\n ctx.fail(str(exc))\n\n optimizer = SGD(model.parameters(), lr=0.1)\n\n in_device = _devices[0]\n out_device = _devices[-1]\n torch.cuda.set_device(in_device)\n\n input = torch.rand(32, 3, 192, 192, device=in_device)\n target = torch.rand(32, 1, 192, 192, device=out_device)\n\n # HEADER ======================================================================================\n\n title = f'{experiment}, U-Net ({B}, {C})'\n click.echo(title)\n\n if isinstance(model, GPipe):\n click.echo(f'balance: {model.balance}')\n\n click.echo('torchgpipe: %s, python: %s, torch: %s, cudnn: %s, cuda: %s, gpu: %s' % (\n torchgpipe.__version__,\n platform.python_version(),\n torch.__version__,\n torch.backends.cudnn.version(),\n torch.version.cuda,\n torch.cuda.get_device_name(in_device)))\n\n hr()\n\n # PARAMETERS 
==================================================================================\n\n param_count = sum(p.storage().size() for p in model.parameters())\n param_size = sum(p.storage().size() * p.storage().element_size() for p in model.parameters())\n param_scale = 2 # param + grad\n\n click.echo(f'# of Model Parameters: {param_count:,}')\n click.echo(f'Total Model Parameter Memory: {param_size*param_scale:,} Bytes')\n\n # ACTIVATIONS =================================================================================\n\n try:\n torch.cuda.empty_cache()\n for d in _devices:\n torch.cuda.reset_max_memory_cached(d)\n\n for _ in range(2):\n output = model(input)\n output = cast(Tensor, output)\n loss = F.binary_cross_entropy_with_logits(output, target)\n loss.backward()\n optimizer.step()\n\n max_memory = 0\n for d in _devices:\n torch.cuda.synchronize(d)\n max_memory += torch.cuda.max_memory_cached(d)\n\n latent_size = max_memory - param_size*param_scale\n click.echo(f'Peak Activation Memory: {latent_size:,} Bytes')\n click.echo(f'Total Memory: {max_memory:,} Bytes')\n\n # MAX MEMORY PER DEVICE =======================================================================\n\n finally:\n hr()\n\n for d in _devices:\n memory_usage = torch.cuda.memory_cached(d)\n click.echo(f'{d!s}: {memory_usage:,} Bytes')", "def model_fn(features, labels, mode, params):\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec", "def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):\n\n # Build embeddings.\n if model_opt.model_type == \"text\":\n src_field = fields[\"src\"]\n src_emb = build_embeddings(model_opt, src_field)\n else:\n src_emb = None\n\n # Build encoder.\n encoder = build_encoder(model_opt, src_emb)\n\n # Build decoder.\n tgt_field = fields[\"tgt\"]\n tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src/tgt vocab should be the same if `-share_vocab` 
is specified.\n assert src_field.base_field.vocab == tgt_field.base_field.vocab, \\\n \"preprocess with -share_vocab if you use share_embeddings\"\n\n tgt_emb.word_lut.weight = src_emb.word_lut.weight\n\n if model_opt.share_position_embeddings:\n tgt_emb.make_embedding.pe.pe.weight = src_emb.make_embedding.pe.pe.weight\n\n decoder = build_decoder(model_opt, tgt_emb)\n\n # Build NMTModel(= encoder + decoder).\n if gpu and gpu_id is not None:\n device = torch.device(\"cuda\", gpu_id)\n elif gpu and not gpu_id:\n device = torch.device(\"cuda\")\n elif not gpu:\n device = torch.device(\"cpu\")\n\n # Build separate LM if doing simple fusion\n if model_opt.simple_fusion:\n layers = 12\n size = 768\n heads = 12\n\n lm_decoder_opt = copy.deepcopy(model_opt)\n lm_decoder_opt.dec_layers = layers\n lm_decoder_opt.use_GPT_version_ctxattn = False\n lm_decoder_opt.use_GPT_version_psa = False\n lm_decoder_opt.use_GPT_version_unconditional = True\n lm_decoder_opt.tgt_word_vec_size = size\n lm_decoder_opt.rnn_size = size\n lm_decoder_opt.dec_rnn_size = size\n lm_decoder_opt.transformer_ff = size*4\n lm_decoder_opt.dec_heads = heads\n lm_decoder_opt.position_encoding_learned_dec = True\n lm_decoder_opt.share_decoder_embeddings = True\n lm_decoder_opt.dropout = 0\n\n lm_decoder_emb = build_embeddings(lm_decoder_opt, tgt_field, for_encoder=False)\n logger.info(lm_decoder_emb)\n\n lm_decoder = build_decoder(lm_decoder_opt, lm_decoder_emb)\n load_decoder = lm_decoder\n\n model = onmt.models.SimpleFusionModel(encoder, decoder, lm_decoder)\n\n generator = SimpleFusionGenerator(model_opt.dec_rnn_size,\n lm_decoder_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab))\n generator.lm_linear.weight = lm_decoder.embeddings.word_lut.weight\n\n if model_opt.share_decoder_embeddings:\n generator.decoder_linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.lm_linear\n else:\n load_decoder = decoder\n if model_opt.unconditional:\n model = onmt.models.UncondModel(decoder)\n else:\n model = onmt.models.NMTModel(encoder, decoder)\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n\n if model_opt.padded_vocab_fix_me_later:\n gen_func = nn.Sequential(PadGen(), gen_func)\n\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size,\n len(fields[\"tgt\"].base_field.vocab)),\n Cast(torch.float32),\n gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n gen_linear = generator[0]\n else:\n tgt_base_field = fields[\"tgt\"].base_field\n vocab_size = len(tgt_base_field.vocab)\n pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]\n generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)\n if model_opt.share_decoder_embeddings:\n generator.linear.weight = decoder.embeddings.word_lut.weight\n gen_linear = generator.linear\n\n if model_opt.encdec_share_params:\n for name, p in decoder.named_parameters():\n if 'ctx' in name or 'context' in name:\n continue\n pointer = encoder\n attrs = name.split('.')\n for attr_name in attrs[:-1]:\n pointer = getattr(pointer, attr_name)\n\n # pointer now has the encoder version of the parameter parent\n setattr(pointer, attrs[-1], p)\n\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n # Normally, just load the model parameters from checkpoint\n if 'gpt2_params' not in checkpoint and 
'enc_model' not in checkpoint:\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.b_2',\n r'\\1.layer_norm\\2.bias', s)\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.a_2',\n r'\\1.layer_norm\\2.weight', s)\n return s\n \n checkpoint['model'] = {fix_key(k): v\n for k, v in checkpoint['model'].items()}\n # end of patch for backward compatibility\n\n # Initialize rest of parameters normally\n if hasattr(model_opt, 'load_uncond_from') and model_opt.load_uncond_from:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n\n\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'], strict=False)\n else:\n # load the gpt parameters\n if 'gpt2_params' in checkpoint:\n init_something = model_opt.gpt2_init_embanddec or model_opt.simple_fusion or model_opt.gpt2_init_embandenc or model_opt.GPT_representation_mode != 'none'\n \n if init_something:\n # Initialize all the weights first\n if model_opt.gpt2_init_zero:\n for p in decoder.parameters():\n p.data.zero_()\n if model_opt.simple_fusion:\n generator.decoder_linear.weight.data.zero_()\n generator.decoder_linear.bias.data.zero_()\n else:\n for p in decoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n \n # Always initialize encoder parameters normally\n if encoder is not None:\n for p in encoder.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n if model_opt.zero_bias_init:\n gen_linear.bias.data.zero_()\n\n if model_opt.ctx_weight_param:\n for name, p in decoder.named_parameters():\n if 'ctx_weight' in name:\n p.data.zero_()\n if 'ctx_bias' in name:\n p.data.fill_(-10)\n gen_linear.bias.data.zero_()\n\n load_models = []\n if model_opt.GPT_representation_mode != 'none':\n load_embs = []\n if model_opt.GPT_representation_loc in ['both', 'src']:\n load_models.append(src_emb.gpt_model)\n load_embs.append(src_emb)\n if model_opt.GPT_representation_loc in ['both', 'tgt']:\n load_models.append(tgt_emb.gpt_model)\n load_embs.append(tgt_emb)\n \n else:\n if model_opt.gpt2_init_embanddec or model_opt.simple_fusion:\n load_models = [load_decoder]\n elif model_opt.gpt2_init_embandenc:\n load_models = [encoder]\n \n it_list = list(checkpoint['gpt2_params'])\n for lm_idx, load_model in enumerate(load_models):\n #print(lm_idx, load_model)\n for name, array in it_list:\n name = name[12:] # skip \"transformer.\"\n name = name.split('.')\n\n assigned = False\n if name[0] == 'wpe':\n if model_opt.GPT_representation_mode != 'none':\n pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n else:\n pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n elif name[0] == 'wte':\n if model_opt.GPT_representation_mode != 'none':\n pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n else:\n pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n if not model_opt.nopretrain_decemb:\n pointer.append(gen_linear.weight)\n if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n elif name[0] == 'ln_f':\n if name[1] == 
'weight':\n pointer = load_model.layer_norm.weight\n elif name[1] == 'bias':\n pointer = load_model.layer_norm.bias\n else:\n raise ValueError('I am missing something here!')\n\n elif name[0] == 'h':\n layer_num = name[1]\n pointer = getattr(load_model.transformer_layers, layer_num)\n if name[2] == 'attn':\n assigned = True\n pointer = pointer.self_attn\n full_data = torch.from_numpy(array)\n if name[3] == 'c_attn':\n end_size = full_data.shape[-1]//3\n assert full_data.shape[-1] % 3 == 0\n if name[4] == 'bias':\n if init_something:\n pointer.linear_query.bias.data = full_data[:end_size]\n pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n pointer.linear_values.bias.data = full_data[end_size*2:]\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.bias.orig = full_data[:end_size].clone()\n pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n else:\n raise ValueError('I am missing something here!')\n elif name[3] == 'c_proj':\n if name[4] == 'bias':\n if init_something:\n pointer.final_linear.bias.data = full_data\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.bias.orig = full_data.clone()\n elif name[4] == 'weight':\n if init_something:\n pointer.final_linear.weight.data = full_data.t().contiguous()\n if model_opt.gpt2_params_std > 0:\n pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n else:\n raise ValueError('I am missing something here!')\n\n elif name[2] == 'ln_1' or name[2] == 'ln_2':\n num = name[2][3]\n pointer = getattr(pointer, 'layer_norm_'+num)\n if name[2] == 'bias':\n pointer = pointer.bias\n elif name[2] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n elif name[2] == 'mlp':\n pointer = pointer.feed_forward\n pointer = getattr(pointer, name[2])\n if name[3] == 'bias':\n pointer = pointer.bias\n elif name[3] == 'weight':\n pointer = pointer.weight\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n else:\n raise ValueError('I am missing something here!')\n \n if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n if name[-1] == 'weight':\n array = array.T\n\n if not isinstance(pointer, list):\n pointer = [pointer]\n for pointer_i in pointer:\n target_size = int(math.ceil(array.shape[0]/8))*8\n padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n try:\n assert pointer_i.shape == array.shape or padded_vocab\n except AssertionError as e:\n \n e.args += (pointer_i.shape, array.shape)\n raise\n if init_something:\n print(\"Initialize PyTorch weight {}\".format(name))\n if padded_vocab:\n pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n else:\n pointer_i.data = torch.from_numpy(array)\n if 
model_opt.gpt2_params_std > 0:\n if padded_vocab:\n raise NotImplementedError\n else:\n pointer_i.orig = torch.from_numpy(array).clone()\n # name = name[6:] # skip \"model/\"\n # name = name.split('/')\n\n # assigned = False\n # if name[0] == 'wpe':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = load_embs[lm_idx].make_embedding.pe.pe.weight\n # else:\n # pointer = load_model.embeddings.make_embedding.pe.pe.weight\n\n # elif name[0] == 'wte':\n # if model_opt.GPT_representation_mode != 'none':\n # pointer = [load_embs[lm_idx].make_embedding.emb_luts[0].weight, gen_linear.weight]\n # else:\n # pointer = [load_model.embeddings.make_embedding.emb_luts[0].weight]\n # if not model_opt.nopretrain_decemb:\n # pointer.append(gen_linear.weight)\n # if model_opt.simple_fusion and model_opt.sf_pretrain_dec_emb:\n # pointer.append(decoder.embeddings.make_embedding.emb_luts[0].weight)\n\n # elif name[0] == 'ln_f':\n # if name[1] == 'g':\n # pointer = load_model.layer_norm.weight\n # elif name[1] == 'b':\n # pointer = load_model.layer_norm.bias\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[0][0] == 'h':\n # layer_num = name[0][1:]\n # pointer = getattr(load_model.transformer_layers, layer_num)\n # if name[1] == 'attn':\n # assigned = True\n # pointer = pointer.self_attn\n # full_data = torch.from_numpy(array)\n # if name[2] == 'c_attn':\n # end_size = full_data.shape[-1]//3\n # assert full_data.shape[-1] % 3 == 0\n # if name[3] == 'b':\n # if init_something:\n # pointer.linear_query.bias.data = full_data[:end_size]\n # pointer.linear_keys.bias.data = full_data[end_size:end_size*2]\n # pointer.linear_values.bias.data = full_data[end_size*2:]\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.bias.orig = full_data[:end_size].clone()\n # pointer.linear_keys.bias.orig = full_data[end_size:end_size*2].clone()\n # pointer.linear_values.bias.orig = full_data[end_size*2:].clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.linear_query.weight.data = full_data[:, :end_size].t().contiguous()\n # pointer.linear_keys.weight.data = full_data[:, end_size:end_size*2].t().contiguous()\n # pointer.linear_values.weight.data = full_data[:, end_size*2:].t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.linear_query.weight.orig = full_data[:, :end_size].t().contiguous().clone()\n # pointer.linear_keys.weight.orig = full_data[:, end_size:end_size*2].t().contiguous().clone()\n # pointer.linear_values.weight.orig = full_data[:, end_size*2:].t().contiguous().clone()\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[2] == 'c_proj':\n # if name[3] == 'b':\n # if init_something:\n # pointer.final_linear.bias.data = full_data\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.bias.orig = full_data.clone()\n # elif name[3] == 'w':\n # if init_something:\n # pointer.final_linear.weight.data = full_data.t().contiguous()\n # if model_opt.gpt2_params_std > 0:\n # pointer.final_linear.weight.orig = full_data.t().contiguous().clone()\n\n # else:\n # raise ValueError('I am missing something here!')\n\n # elif name[1] == 'ln_1' or name[1] == 'ln_2':\n # num = name[1][3]\n # pointer = getattr(pointer, 'layer_norm_'+num)\n # if name[2] == 'b':\n # pointer = pointer.bias\n # elif name[2] == 'g':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # elif name[1] == 'mlp':\n # pointer = pointer.feed_forward\n # pointer = getattr(pointer, name[2])\n # if name[3] == 'b':\n 
# pointer = pointer.bias\n # elif name[3] == 'w':\n # pointer = pointer.weight\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n # else:\n # raise ValueError('I am missing something here!')\n \n # if not assigned:\n # if name[0] == 'wte':\n # print(array.shape)\n # continue\n # if name[-1] == 'w' or name[-1] == 'g':\n # array = array.T\n\n # if not isinstance(pointer, list):\n # pointer = [pointer]\n # for pointer_i in pointer:\n # target_size = int(math.ceil(array.shape[0]/8))*8\n # padded_vocab = name[0] == 'wte' and pointer_i.shape[0] == target_size\n # padded_vocab = padded_vocab and pointer_i.shape[1:] == array.shape[1:]\n # try:\n # assert pointer_i.shape == array.shape or padded_vocab\n # except AssertionError as e:\n \n # e.args += (pointer_i.shape, array.shape)\n # raise\n # if init_something:\n # print(\"Initialize PyTorch weight {}\".format(name))\n # if padded_vocab:\n # pointer_i.data[:array.shape[0]] = torch.from_numpy(array)\n # else:\n # pointer_i.data = torch.from_numpy(array)\n # if model_opt.gpt2_params_std > 0:\n # if padded_vocab:\n # raise NotImplementedError\n # else:\n # pointer_i.orig = torch.from_numpy(array).clone()\n if 'enc_model' in checkpoint:\n load_dict = {k[8:]: v for k, v in checkpoint['enc_model'] if 'encoder' in k}\n encoder.load_state_dict(load_dict, strict=True)\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if not model_opt.unconditional and hasattr(model.encoder, 'embeddings') \\\n and model.encoder.embeddings is not None:\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec)\n\n # remove requires_grad from params that are not trained:\n if model_opt.notrain_emb or model_opt.notrain_embanddec:\n if model_opt.position_encoding_learned_enc and model_opt.share_position_embeddings:\n model.encoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n if model_opt.share_embeddings:\n model.encoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n model.decoder.embeddings.make_embedding.pe.pe.weight.requires_grad = False\n model.decoder.embeddings.make_embedding.emb_luts[0].weight.requires_grad = False\n generator[0].weight.requires_grad = False\n\n if model_opt.notrain_genbias:\n generator[0].bias.requires_grad = False\n\n if model_opt.notrain_embanddec:\n for name, p in load_decoder.layer_norm.named_parameters():\n p.requires_grad = False\n for name, p in load_decoder.transformer_layers.named_parameters():\n if 'context' not in name and 'ctx' not in name: # Takes care of normal and psa versions\n p.requires_grad = False\n \n if model_opt.onlytrainln:\n for name, p in model.decoder.named_parameters():\n if 'layer_norm' not in name:\n p.requires_grad = False\n for p in generator.parameters():\n p.requires_grad = False\n\n if model_opt.onlytrainoutp:\n if model_opt.share_decoder_embeddings:\n raise ValueError\n\n for p in model.decoder.parameters():\n p.requires_grad = False\n\n if model_opt.simple_fusion:\n for p in 
lm_decoder.parameters():\n p.requires_grad = False\n for p in generator.lm_linear.parameters():\n p.requires_grad = False\n\n model.generator = generator\n model.to(device)\n if model_opt.model_dtype == 'fp16':\n model.half()\n\n for p in model.parameters():\n if hasattr(p, 'orig'):\n p.orig = p.orig.to(device)\n if model_opt.model_dtype == 'fp16':\n p.orig = p.orig.half()\n\n return model", "def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()", "def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))", "def train_model(model, data_train, criterion, optimizer, csv_folder, gpu_id=0):\n model.train()\n model.cuda(gpu_id)\n for batch, (_, images, masks) in enumerate(data_train):\n w_prev = get_model_weights(model)\n # if batch%10 == 0:\n #print('Batch:', batch, 'of', len(data_train))\n images = Variable(images.cuda(gpu_id))\n masks = Variable(masks.cuda(gpu_id))\n outputs = model(images)\n #print(masks.shape, outputs.shape)\n loss = criterion(outputs, masks)\n optimizer.zero_grad()\n loss.backward()\n # Update weights\n optimizer.step()\n w_after = get_model_weights(model)\n diff = find_weight_diff(w_after, w_prev)\n export_history(diff, csv_folder, \"weight_difference.csv\")", "def main(opt):\n\n outputDir = \"processedOutput\"\n os.makedirs(outputDir, exist_ok=True)\n\n print(\"-------------------\")\n print(\"Processing results:\")\n print(\"-------------------\")\n \n cuda = torch.cuda.is_available()\n\n hr_shape = (opt.hr_height, opt.hr_width)\n\n # Count the number of unique residual layers mentioned in the generator state dict:\n generatorStateDict = torch.load(GetModelDataPath(\"generator\")) # Load the max trained weights from the /saved_models directory\n resBlocks = {}\n for key in generatorStateDict:\n processedKey = re.split(r'^(res_blocks\\.[0-9].)', key)\n if len(processedKey) > 1:\n resBlocks[processedKey[1]] = processedKey[1] # Insert an arbitrary entry: We just care about counting the unique keys\n\n num_residual_blocks = len(resBlocks)\n print(\"Counted \" + str(num_residual_blocks) + \" residual blocks in loaded generator state dict\")\n\n # Initialize generator and discriminator\n generator = GeneratorResNet(n_residual_blocks=num_residual_blocks)\n \n if cuda:\n print(\"Cuda is supported!!!\")\n torch.cuda.empty_cache()\n\n generator = generator.cuda()\n\n # Load pretrained models\n generator.load_state_dict(generatorStateDict)\n\n Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n\n\n #----------------\n # Process images:\n #----------------\n print(\"Processing images using the trained model:\")\n\n torch.cuda.empty_cache()\n\n testStartTime = time.time()\n totalTestTime = 0\n numTests = 0\n\n with torch.no_grad(): # Prevent OOM errors\n\n # Set models to eval mode, so batchnorm is disabled\n generator.eval()\n\n dataPath = GetDataPath(opt.valid_dataset_name)\n\n dataloader = DataLoader(\n ImageLoader(dataPath),\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_cpu,\n )\n\n # Process:\n for i, imgs in enumerate(dataloader):\n testStartTime = time.time()\n\n # Configure model input\n imgs_lr = Variable(imgs[\"img\"].type(Tensor))\n\n # Generate a high resolution image from low resolution input\n gen_hr = generator(imgs_lr)\n\n # --------------\n # Log Progress\n # --------------\n testTime = 
time.time() - testStartTime\n sys.stdout.write(\n \"[Processed image %d/%d] [Test time: %fs]\\n\"\n % (i, len(dataloader), testTime)\n )\n \n gen_hr = make_grid(gen_hr, nrow=1, normalize=True)\n\n save_image(gen_hr, GetArbitraryPath(outputDir) + (\"0\" if i < 10 else \"\") + \"%d.png\" % (i + 1), normalize=False)\n\n # Record the iteration time:\n totalTestTime = totalTestTime + testTime\n numTests = numTests + 1\n\n\n # ------------\n # Print stats:\n # ------------\n testTime = time.time() - testStartTime\n averageTestTime = totalTestTime / numTests\n\n print(\"\\Processing results:\\n-------------\")\n print(\"Total processing time = \" + str(testTime) + \" (secs) for \" + str(len(dataloader.dataset)) + \" test images\")\n print(\"Average processing time = \" + str(averageTestTime) + \" (secs)\")", "def build_sys_rec_model():\n print(\"building model...\")\n model = Merchant2VecModel()\n model.train(final_training=True)\n model.save_model()", "def save_model(self, epoch):\n # Set the name for the model\n gen_lungs_filename = 'gen_lungs_model_epoch_{}.h5'.format(epoch + 1)\n disc_lungs_filename = 'disc_lungs_model_epoch_{}.h5'.format(epoch + 1)\n train_summary_lungs_filename = 'train_summary_lungs_epoch_{}.csv'.format(epoch + 1)\n\n gen_organs_filename = 'gen_organs_model_epoch_{}.h5'.format(epoch + 1)\n disc_organs_filename = 'disc_organs_model_epoch_{}.h5'.format(epoch + 1)\n train_summary_organs_filename = 'train_summary_organs_epoch_{}.csv'.format(epoch + 1)\n\n # Save the model and train summary\n self.generator_lungs.save(op.join(self.model_dir, gen_lungs_filename), include_optimizer=True)\n self.disc_lungs.save(op.join(self.model_dir, disc_lungs_filename), include_optimizer=True)\n self.summary_writer_lungs.to_csv(op.join(self.train_summary_dir, train_summary_lungs_filename))\n\n self.generator_organs.save(op.join(self.model_dir, gen_organs_filename), include_optimizer=True)\n self.disc_organs.save(op.join(self.model_dir, disc_organs_filename), include_optimizer=True)\n self.summary_writer_organs.to_csv(op.join(self.train_summary_dir, train_summary_organs_filename))\n return self", "def eval_model(device, model, sampler, loss_compute, logit_modifier_fxn, token_sampler,\n print_every, max_len, user_items_df, max_name_len=15, ingr_map=None, \n base_save_dir='', pad_ingr=None, ppx_only=False, **tensor_kwargs):\n start = datetime.now()\n results_dicts = []\n\n # Extract into tuples and list\n tensor_names, base_tensors = zip(*tensor_kwargs.items())\n\n # Iterate through batches in the epoch\n model.eval()\n with torch.no_grad():\n total_tokens = 0\n total_name_tokens = 0\n total_loss = 0.0\n total_name_loss = 0.0\n print_tokens = 0\n\n for i, batch in enumerate(tqdm(sampler.epoch_batches(), total=sampler.n_batches), 1):\n batch_users, items = [t.to(device) for t in batch]\n\n # Fill out batch information\n batch_map = dict(zip(\n tensor_names,\n get_batch_information_general(items, *base_tensors)\n ))\n use_ingr_embedding = batch_map['ingr_tensor'].size(-1) != MAX_INGR * MAX_INGR_TOK\n\n user_prior_technique_masks = torch.stack([get_user_prior_techniques_mask(\n user_ix=uix.item(), item_ix=iix.item(),\n user_items_df=user_items_df, tech_mask_tensor=tensor_kwargs['tech_mask_tensor'],\n device=device, normalize=True\n ) for uix, iix in zip(batch_users, items)], dim=0)\n\n # Logistics\n this_batch_size = batch_map['steps_tensor'].size(0)\n this_batch_num_tokens = (batch_map['steps_tensor'] != PAD_INDEX).data.sum().item()\n this_batch_num_name_tokens = (batch_map['name_tensor'] != 
PAD_INDEX).data.sum().item()\n name_targets = batch_map['name_tensor'][:, :-1]\n\n '''\n Teacher forcing - evaluate\n '''\n # Comparing out(token[t-1]) to token[t]\n (log_probs, _), (name_log_probs, _) = model.forward(\n device=device, inputs=(\n batch_map['calorie_level_tensor'],\n batch_map['name_tensor'],\n batch_map['ingr_tensor']\n ),\n ingr_masks=batch_map['ingr_mask_tensor'],\n user_prior_technique_masks=user_prior_technique_masks,\n targets=batch_map['steps_tensor'][:, :-1], max_len=max_len-1,\n start_token=START_INDEX, teacher_forcing=True,\n name_targets=name_targets,\n max_name_len=max_name_len-1,\n visualize=False\n )\n loss, name_loss = loss_compute(\n log_probs, batch_map['steps_tensor'][:, 1:],\n name_outputs=name_log_probs,\n name_targets=name_targets,\n norm=this_batch_size,\n model=model,\n clip=None\n )\n\n total_loss += loss\n total_name_loss += name_loss\n\n # Logging\n total_tokens += this_batch_num_tokens\n total_name_tokens += this_batch_num_name_tokens\n print_tokens += this_batch_num_tokens\n\n del log_probs, name_log_probs\n\n # Short-circuit if we only want to calculate test perplexity\n if ppx_only:\n if i % print_every == 0:\n elapsed = datetime.now() - start\n print(\"Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}\".format(\n i, loss / this_batch_size, name_loss / this_batch_size,\n print_tokens / elapsed.seconds\n ))\n start = datetime.now()\n print_tokens = 0\n continue\n\n '''\n Non-teacher-forcing - Generate!\n '''\n # Generates probabilities\n (log_probs, output_tokens, ingr_attns, prior_tech_attns), \\\n (name_log_probs, name_output_tokens) = model.forward(\n device=device, inputs=(\n batch_map['calorie_level_tensor'],\n batch_map['name_tensor'],\n batch_map['ingr_tensor']\n ),\n ingr_masks=batch_map['ingr_mask_tensor'],\n user_prior_technique_masks=user_prior_technique_masks,\n targets=batch_map['steps_tensor'][:, :-1], max_len=max_len-1,\n start_token=START_INDEX, teacher_forcing=False,\n logit_modifier_fxn=logit_modifier_fxn, token_sampler=token_sampler,\n visualize=True, max_name_len=max_name_len-1, name_targets=name_targets,\n )\n\n del log_probs, name_log_probs\n\n # Generated recipe\n calorie_levels, technique_strs, ingredient_strs, gold_strs, generated_strs, \\\n prior_items, recipe_reprs = get_batch_generated_recipes(\n batch_users=batch_users, batch_generated=output_tokens,\n max_ingr=MAX_INGR, max_ingr_tok=MAX_INGR_TOK,\n names_generated=name_output_tokens, ingr_map=ingr_map,\n user_items_df=user_items_df, **batch_map\n )\n\n for ix in range(len(generated_strs)):\n # Create save location: test_i<item>_u<user>\n ii = items[ix].data.item()\n uu = batch_users[ix].data.item()\n sample_id = 'test_i{}_u{}'.format(ii, uu)\n trial_save_dir = os.path.join(base_save_dir, sample_id)\n if not os.path.exists(trial_save_dir):\n os.mkdir(trial_save_dir)\n\n # Output tokens for heatmap axes\n out_indices = output_tokens[ix].detach().cpu().numpy().tolist()\n out_tokens = decode_ids(out_indices)\n trunc_indices = out_indices[:out_indices.index(END_INDEX)] \\\n if END_INDEX in out_indices else out_indices\n output_len = len(trunc_indices)\n output_techniques = [t for t in TECHNIQUES_LIST if t in generated_strs[ix]]\n results_dicts.append({\n 'u': uu,\n 'i': ii,\n 'generated': generated_strs[ix],\n 'n_tokens': output_len,\n 'generated_techniques': output_techniques,\n 'n_techniques': len(output_techniques)\n })\n\n # Save output\n with open(os.path.join(trial_save_dir, 'output.txt'), 'w+', encoding='utf-8') as wf:\n 
wf.write(recipe_reprs[ix])\n\n # Ingredient Attention\n ingr_attentions = np.matrix([\n a.squeeze().detach().cpu().numpy().tolist() for a in ingr_attns[ix]\n ]).T\n ingr_attn_df = pd.DataFrame(\n ingr_attentions[:len(ingredient_strs[ix])],\n index=ingredient_strs[ix], columns=out_tokens\n )\n ingr_attn_df = ingr_attn_df[ingr_attn_df.index != '']\n ingr_attn_df.to_pickle(\n os.path.join(trial_save_dir, 'ingredient_attention.pkl')\n )\n\n # Prior Technique Attention\n prior_tech_attention = np.matrix([\n a.squeeze().detach().cpu().numpy().tolist() for a in prior_tech_attns[ix]\n ]).T\n prior_tech_attn_df = pd.DataFrame(\n prior_tech_attention, index=TECHNIQUES_LIST + ['PAD'], columns=out_tokens\n )\n prior_tech_attn_df = prior_tech_attn_df[(prior_tech_attn_df.T != 0.0).any()]\n prior_tech_attn_df.to_pickle(\n os.path.join(trial_save_dir, 'prior_tech_attention.pkl')\n )\n\n if i % print_every == 0:\n elapsed = datetime.now() - start\n print(\"Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}\".format(\n i, loss / this_batch_size, name_loss / this_batch_size,\n print_tokens / elapsed.seconds\n ))\n print('SAMPLE DECODED RECIPE:\\n\\n{}\\n\\n'.format(recipe_reprs[0]))\n start = datetime.now()\n print_tokens = 0\n\n # Reshuffle the sampler\n sampler.renew_indices()\n\n if total_name_tokens > 0:\n print('\\nName Perplexity: {}'.format(\n np.exp(total_name_loss / float(total_name_tokens))\n ))\n\n # Store perplexity\n ppx = np.exp(total_loss / float(total_tokens))\n with open(os.path.join(base_save_dir, 'ppx.pkl'), 'wb') as wf:\n pickle.dump(ppx, wf)\n print('PERPLEXITY: {:.5f}'.format(\n ppx\n ))\n\n if not ppx_only:\n # Store recipe information -- generated string, # tokens (length), tech, # tech\n gen_df = pd.DataFrame(results_dicts)[[\n 'u', 'i', 'generated', 'n_tokens', 'generated_techniques', 'n_techniques'\n ]]\n df_loc = os.path.join(base_save_dir, 'generated_df.pkl')\n gen_df.to_pickle(df_loc)\n print('Saved generation DF to {}'.format(\n df_loc\n ))\n print(gen_df.head(3))", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def run(opt):\n # logging\n trn_log, val_log = set_logger(opt)\n\n # model related stuff\n device = torch.device(\"cuda\")\n trn_set, val_set, wmp_set = get_dsets(opt)\n model = get_model(opt, device)\n optimizer = getattr(optim, opt.optim.name)(\n model.parameters(), **vars(opt.optim.args)\n )\n # batch_size\n batch_size = opt.trn_loader.batch_size\n\n rlog.info(U.config_to_string(opt))\n rlog.info(\"Model: %s\", str(model))\n 
rlog.info(\"Optimizer: %s \\n\", str(optimizer))\n\n # Warm-up the mode on a partition of the training dataset\n if wmp_set is not None:\n rlog.info(\"Warming-up on dset of size %d\", len(wmp_set))\n for epoch in range(opt.warmup.epochs):\n # train for one epoch\n trn_loss, trn_acc = train(\n DataLoader(wmp_set, **vars(opt.trn_loader)),\n model,\n optimizer,\n get_criterion(opt, model, len(wmp_set) // batch_size),\n mc_samples=opt.trn_mcs,\n )\n\n val_stats = valid_stats(opt, model, val_set)\n trn_stats = train_stats(opt, model, wmp_set)\n trn_stats[\"loss\"], trn_stats[\"acc\"] = trn_loss, trn_acc\n\n # to pickle and tensorboard\n val_log.trace(step=epoch, **val_stats)\n trn_log.trace(step=epoch, **trn_stats)\n\n # to console\n for log, stats in zip([trn_log, val_log], [trn_stats, val_stats]):\n log.info(log.fmt.format(epoch, stats[\"acc\"], stats[\"loss\"]))\n\n # extra logging\n model_stats(opt, epoch, model)\n\n # maybe reset optimizer after warmup\n if opt.warmup.reset_optim:\n rlog.info(\"\\nWarmup ended. Resetting optimizer.\")\n optimizer = getattr(optim, opt.optim.name)(\n model.parameters(), **vars(opt.optim.args)\n )\n\n # Train on the full training dataset\n if wmp_set is not None:\n epochs = range(opt.warmup.epochs, opt.warmup.epochs + opt.epochs)\n else:\n epochs = range(opt.epochs)\n\n rlog.info(\"\\nTraining on dset: %s\", str(trn_set))\n for epoch in epochs:\n trn_loss, trn_acc = train(\n DataLoader(trn_set, **vars(opt.trn_loader)),\n model,\n optimizer,\n get_criterion(opt, model, len(trn_set) // batch_size),\n mc_samples=opt.trn_mcs,\n )\n\n val_stats = valid_stats(opt, model, val_set)\n trn_stats = train_stats(opt, model, trn_set)\n trn_stats[\"loss\"], trn_stats[\"acc\"] = trn_loss, trn_acc\n\n # to pickle and tensorboard\n val_log.trace(step=epoch, **val_stats)\n trn_log.trace(step=epoch, **trn_stats)\n\n # to console\n for log, stats in zip([trn_log, val_log], [trn_stats, val_stats]):\n log.info(log.fmt.format(epoch, stats[\"acc\"], stats[\"loss\"]))\n\n # extra logging\n model_stats(opt, epoch, model)", "def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):\n with open(filename, \"w\") as file:\n self._write_model(file, specs, experiment = experiment)", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def dist_setting(current_gpu, model, args):\n print(\"channels_last : {}\".format(args.channels_last))\n if args.channels_last:\n args.memory_format = torch.channels_last\n else:\n args.memory_format = torch.contiguous_format\n\n if args.apex:\n args.lr = args.lr*float(args.batch_size*args.world_size)/256.\n args.current_gpu = current_gpu\n if args.current_gpu is not None:\n print(\"Use GPU: {} for training\".format(args.current_gpu))\n\n if args.multigpus_distributed:\n args.rank = args.num_gpus * args.host_num + args.current_gpu\n dist.init_process_group(backend=args.backend,\n rank=args.rank, world_size=args.world_size)\n logger.info('Initialized the distributed 
environment: \\'{}\\' backend on {} nodes. '.format(\n args.backend, dist.get_world_size()) + 'Current host rank is {}. Number of gpus: {}'.format(\n dist.get_rank(), args.num_gpus))\n else:\n args.rank = 0\n\n if args.sync_bn:\n import apex\n print(\"using apex synced BN\")\n model = apex.parallel.convert_syncbn_model(model)\n\n if args.multigpus_distributed:\n if args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n args.batch_size = int(args.batch_size / args.num_gpus)\n logger.info(\"Batch size for each GPU: {}\".format(args.batch_size))\n if not args.apex:\n model.cuda(args.current_gpu)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.current_gpu])\n else:\n if not args.apex:\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.current_gpu is not None:\n torch.cuda.set_device(args.current_gpu)\n if not args.apex:\n model = model.cuda(args.current_gpu)\n else:\n if not args.apex:\n model = torch.nn.DataParallel(model).cuda()\n\n return model, args", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = 
np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def train_model(train_generator, validation_generator):\n # we build a test generator to benchmark the model on unseen data\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n test_generator = test_datagen.flow_from_directory(\n test_path,\n target_size=(200, 200),\n color_mode=\"rgb\",\n shuffle=True,\n class_mode='sparse',\n batch_size=batch_size)\n model = build_model()\n filepath = join(save_path, weights_path)\n checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, mode='max')\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=epochs // 5, verbose=1, restore_best_weights=True)\n log_dir = join(home, save_path, 'logs', 'fit_smart', datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)\n callbacks_list = [early_stopping, checkpoint, tensorboard_callback]\n # origin [sessions] models each [epochs] times\n max_acc = 0.0\n for i in range(sessions):\n # model training and evaluation\n history = model.fit(\n train_generator,\n steps_per_epoch=train_generator.samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_generator.samples // batch_size\n , verbose=2, callbacks=callbacks_list, workers=multiprocessing.cpu_count(),\n use_multiprocessing=False)\n model.load_weights(join(save_path, weights_path))\n test_loss, test_acc = model.evaluate(test_generator, steps=len(test_generator))\n # save model if it performed better\n if test_acc > max_acc:\n max_acc = test_acc\n model.save(join(home, save_path, model_name))\n print(\"accuracy: \", test_acc, \"\\n Loss:\", test_loss)", "def trainer(current_gpu, args):\n\n model_history = train_utils.init_model_history()\n batch_size = args.batch_size\n num_epochs = args.num_epochs\n feature_extract = False\n\n model = train_utils.initialize_ft_model(args.model_name, num_classes=args.num_classes, feature_extract=feature_extract)\n model, args = dist_setting(current_gpu, model, args)\n logger.info(f\"==> Training on rank {args.rank}.\")\n logger.info(args)\n \n dataloaders, transforms, train_sampler = train_utils.create_dataloaders(\n args.train_dir, args.valid_dir, rank=args.rank, \n world_size=args.world_size, batch_size=batch_size,\n num_workers=args.num_workers\n )\n \n optimizer = train_utils.initialize_optimizer(model, feature_extract, lr=args.lr*args.world_size, momentum=0.9) \n criterion = nn.CrossEntropyLoss()\n\n since = time.time()\n val_acc_history = []\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc1 = 0.0\n num_samples = {k: len(dataloaders[k].dataset) for k, v in dataloaders.items()}\n num_steps = {k: int(np.ceil(len(dataloaders[k].dataset) / (batch_size*args.world_size))) for k, v in dataloaders.items()}\n device = torch.device(f'cuda:{current_gpu}') \n\n for epoch in range(1, num_epochs+1):\n \n batch_time = train_utils.AverageMeter('Time', ':6.3f')\n data_time = train_utils.AverageMeter('Data', ':6.3f')\n losses = train_utils.AverageMeter('Loss', ':.4e')\n top1 = train_utils.AverageMeter('Acc@1', ':6.2f')\n \n logger.info('-' * 40)\n logger.info('[Rank {}, Epoch {}/{}] Processing...'.format(args.rank, epoch, num_epochs))\n logger.info('-' * 40)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'valid']: \n\n if phase == 'train':\n model.train() # Set model to training mode\n if args.multigpus_distributed:\n dataloaders[phase].sampler.set_epoch(epoch) # Set epoch count for DistributedSampler \n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n epoch_tic = time.time() \n tic = time.time()\n \n for i, (inputs, labels) in 
enumerate(dataloaders[phase]):\n # measure data loading time\n data_time.update(time.time() - tic) \n \n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase=='train'):\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n probs, preds = torch.max(outputs, 1)\n \n # Compute gradient and do stochastic gradient descent\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n acc1 = train_utils.accuracy(outputs, labels, topk=(1,))\n \n # Average loss and accuracy across processes for logging\n if args.multigpus_distributed:\n reduced_loss = train_utils.reduce_tensor(loss.data, args)\n reduced_acc1 = train_utils.reduce_tensor(acc1[0], args)\n else:\n reduced_loss = loss.data\n reduced_acc1 = acc1[0]\n\n losses.update(train_utils.to_python_float(reduced_loss), inputs.size(0))\n top1.update(train_utils.to_python_float(reduced_acc1), inputs.size(0))\n \n # measure elapsed time\n batch_time.update(time.time() - tic)\n tic = time.time()\n\n if phase == 'train' and i % args.log_interval == 0:\n step_loss = running_loss / ((i+1)*batch_size)\n step_acc = running_corrects.double() / ((i+1)*batch_size)\n logger.info(f'[Rank {args.rank}, Epoch {epoch}/{num_epochs}, Step {i+1}/{num_steps[phase]}] {phase}-acc: {step_acc:.4f}, '\n f'{phase}-loss: {step_loss:.4f}, data-time: {data_time.val:.4f}, batch-time: {batch_time.val:.4f}') \n \n\n ## Waiting until finishing operations on GPU (Pytorch default: async)\n torch.cuda.synchronize()\n \n if current_gpu == 0: \n logger.info(f'[Epoch {epoch}/{num_epochs}] {phase}-acc: {top1.avg:.4f}, '\n f'{phase}-loss: {losses.val:.4f}, time: {time.time()-epoch_tic:.4f}') \n \n model_history['epoch'].append(epoch)\n model_history['batch_idx'].append(i)\n model_history['data_time'].append(data_time.val) \n model_history['batch_time'].append(batch_time.val)\n model_history['losses'].append(losses.val)\n model_history['top1'].append(top1.val)\n\n if phase == 'valid':\n is_best = top1.avg > best_acc1\n best_acc1 = max(top1.avg, best_acc1)\n \n if (args.multigpus_distributed and args.rank % args.num_gpus == 0):\n #train_utils.save_history(os.path.join(args.output_data_dir, 'model_history.p'), model_history) \n train_utils.save_model({\n 'epoch': epoch + 1,\n 'model_name': args.model_name,\n 'state_dict': model.module.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_acc1': best_acc1,\n 'loss': losses\n }, is_best, args.model_chkpt_dir, args.model_dir)\n elif not args.multigpus_distributed:\n #train_utils.save_history(os.path.join(args.output_data_dir, 'model_history.p'), model_history) \n train_utils.save_model({\n 'epoch': epoch + 1,\n 'model_name': args.model_name,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_acc1': best_acc1,\n 'loss': losses\n }, is_best, args.model_chkpt_dir, args.model_dir) \n \n \n time_elapsed = time.time() - since\n if current_gpu == 0:\n logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n logger.info('Best val acc: {:.4f}'.format(best_acc1))\n \n if args.multigpus_distributed:\n dist_cleanup()", "def train_model1(model, criterion, optimizer, scheduler, device, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n model.to(device)\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n 
print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs).squeeze(-1)\n # if outputs.shape != labels.shape:\n # print(outputs.shape)\n # print(outputs.shape)\n loss = criterion(outputs, labels)\n # preds = outputs\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n # print(outputs.device, labels.to('cpu').device)\n running_corrects += acc_calculate(outputs, labels)\n # print(labels.data)\n if phase == 'train':\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model,best_acc", "def main(cfg):\n # logging.info(f'Found base dir {cfg.BASE_DIR}')\n\n rng = cfg.experiment.random_seed\n if rng == -1:\n rng = np.random.randint(0, 1000)\n\n if rng is not False:\n np.random.seed(rng)\n torch.torch.manual_seed(rng)\n\n dcc = cfg.dataset.get('creation_args', dict())\n if dcc.get('dim_normalise_mean', False):\n dim = dcc.dim\n dcc.f_mean = float(dcc.f_mean / np.sqrt(dim))\n dcc.p_mean = float(dcc.p_mean / np.sqrt(dim))\n logging.info(\n f'Updating means in dataset cfg: {cfg.dataset.creation_args}')\n\n stats = dict(\n dir=os.getcwd(),\n host=socket.gethostname(),\n job_id=os.getenv(\"SLURM_JOB_ID\", None),\n random_state=rng)\n STATS_STATUS = False\n\n logging.info(\n f'Logging to {stats[\"dir\"]} on {stats[\"host\"]} '\n f'for id={cfg.get(\"id\", -1)}')\n\n logging.info(f'Slurm job: {stats[\"job_id\"]}.')\n logging.info(f'Setting random seed to {rng}.')\n logging.info(f'Uniform clip val is {cfg.acquisition.uniform_clip}.')\n\n hoover = Hoover(cfg.hoover)\n\n model = None\n\n # Right now this averages over both train and testing!\n for run in range(cfg.experiment.n_runs):\n if run % cfg.experiment.log_every == 0 or cfg.experiment.debug:\n logging.info(f'Run {run} in {os.getcwd()} ****NEW RUN****')\n if cuda := torch.cuda.is_available():\n logging.info(f'Still using cuda: {cuda}.')\n else:\n logging.info('No cuda found!')\n os.system('touch cuda_failure.txt')\n\n dataset = maps.dataset[cfg.dataset.name](\n cfg.dataset, model_cfg=cfg.model)\n\n # Train model on training data.\n if (not cfg.model.get('keep_constant', False)) or (model is None):\n # default case\n model = maps.model[cfg.model.name](cfg.model)\n\n # test_data = model.make_loader(dataset.test_data, train=False)\n # loss = model.evaluate(model.model, test_data)\n # logging.info(f'Model test loss is {loss}.')\n\n # train_data = 
model.make_loader(dataset.train_data, train=False)\n # loss = model.evaluate(model.model, train_data)\n # logging.info(f'Model train loss is {loss}.')\n\n model.fit(*dataset.train_data)\n\n loss = model.performance(\n *dataset.test_data, dataset.cfg['task_type'])\n # logging.info(\n # f'Weights vs 1 : {np.sqrt(np.sum((model.model.coef_ - 1)**2))}')\n\n if cfg.experiment.get('constant_val_set', False):\n add_val_idxs_to_cfg(cfg, model.val_idxs)\n\n if not STATS_STATUS:\n STATS_STATUS = True\n stats['loss'] = loss\n to_json(stats, 'stats.json')\n # test_data = model.make_loader(dataset.test_data, train=False)\n # loss = model.evaluate(model.model, test_data)\n # logging.info(f'Model test loss is {loss}.')\n\n # Always predict on test data again\n # TODO: need to fix this for efficient prediction\n if cfg.model.get('efficient', False):\n logging.debug('Eficient prediction on test set.')\n model = make_efficient(model, dataset)\n\n # if cfg.experiment.debug:\n # Report train error\n # logging.info('Model train error:')\n # model.performance(\n # *dataset.train_data, dataset.cfg.task_type)\n\n # if not check_valid(model, dataset):\n # continue\n\n if run < cfg.experiment.save_data_until:\n hoover.add_data(run, dataset.export())\n\n for acq_dict in cfg.acquisition_functions:\n # Slightly unclean, but could not figure out how to make\n # this work with Hydra otherwise\n acquisition = list(acq_dict.keys())[0]\n acq_cfg_name = list(acq_dict.values())[0]\n\n if cfg.experiment.debug:\n logging.info(f'\\t Acquisition: {acquisition}')\n\n # Reset selected test_indices.\n dataset.restart(acquisition)\n\n if (n := acq_cfg_name) is not None:\n acq_config = cfg['acquisition_configs'][n]\n else:\n acq_config = None\n\n experiment = Experiment(\n run, cfg, dataset, model, acquisition, acq_config)\n\n i = 0\n while not experiment.finished:\n i += 1\n # print('debug', i)\n if cfg.experiment.debug:\n logging.info(\n f'\\t Acquisition: {acquisition} – \\t Step {i}.')\n\n experiment.step(i)\n\n # Add config to name for logging.\n if (n := acq_cfg_name) is not None:\n acquisition = f'{acquisition}_{n}'\n\n # Extract results from acquisition experiment\n hoover.add_results(run, acquisition, experiment.export_data())\n\n if run % cfg.experiment.get('save_every', 1e19) == 0:\n logging.info('Intermediate save.')\n hoover.save()\n\n logging.info('Completed all runs.')\n hoover.save()", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def train():\n\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Get images and labels for blood_model.\n blood_datasets = blood_model.inputs(eval_data=False)\n\n # randomize the inputs look\n x, y_, data, keep_prob = blood_model.prepare_input()\n\n # build the convolution network\n conv_output, _, _, _, _ = blood_model.inference(data, keep_prob)\n # Calculate loss.\n loss = blood_model.loss(conv_output, y_)\n accuracy = blood_model.accuracy(conv_output, y_)\n\n train_op = blood_model.train(loss, global_step)\n\n sess = tf.InteractiveSession()\n\n sess.run(tf.initialize_all_variables())\n\n # Build the summary operation based on the TF collection of Summaries.\n summary_op = tf.merge_all_summaries()\n\n saver = tf.train.Saver()\n\n check_filesystem()\n\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)\n validation_writer = 
tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation', sess.graph)\n test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test', sess.graph)\n\n _ = reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer)\n for step in range(tf.train.global_step(sess, global_step)+1, FLAGS.max_steps):\n batch = blood_datasets.train.next_batch()\n _, loss_output = sess.run([train_op, loss], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n assert not np.isnan(loss_output)\n if step % 100 == 0:\n summary, train_accuracy = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n train_writer.add_summary(summary, step)\n print(\"step %d, training accuracy %g, loss %g\" % (step, train_accuracy, loss_output))\n\n if (step % 1000 == 0 or (step + 1) == FLAGS.max_steps) and not step == 0:\n batch = blood_datasets.validation.next_batch()\n summary_validation, accuracy_validation = sess.run([summary_op, accuracy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: 1.0})\n validation_writer.add_summary(summary_validation, step)\n print(\"validation accuracy %g\" % accuracy_validation)\n\n # save checkpoint\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n print(\"saving checkpoint\")", "def set_model(self, model):\r\n self.model = model.model\r\n with context.eager_mode():\r\n self._close_writers()\r\n if self.write_graph:\r\n with self._get_writer(self._train_run_name).as_default():\r\n with summary_ops_v2.always_record_summaries():\r\n if not self.model.run_eagerly:\r\n summary_ops_v2.graph(K.get_graph(), step=0)\r\n\r\n summary_writable = (\r\n self.model._is_graph_network or # pylint: disable=protected-access\r\n self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access\r\n if summary_writable:\r\n summary_ops_v2.keras_model('keras', self.model, step=0)\r\n\r\n if self.embeddings_freq:\r\n self._configure_embeddings()", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 
minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def model_train(fold_name, model_dir, model_dict, dataset_path, development_subj, mu, sigma):\n valid_frames_before=200\n valid_frames_after=0\n valid_batch_size=8\n generators = TXT_Train_Validation_Generators(dataset_path=dataset_path, subject_list=development_subj, train_size=model_dict[\"train_set_ratio\"], frames_before=model_dict[\n \"frames\"]-model_dict[\"frame_shift\"], frames_after=model_dict[\"frame_shift\"], view_IDs=model_dict[\"view_IDs\"], batch_size=model_dict[\"batch_size\"], mu=mu, sigma=sigma, label_name=model_dict[\"label_name\"], shuffle=True,\n valid_frames_before=valid_frames_before, valid_frames_after=valid_frames_after, valid_batch_size=valid_batch_size)\n train_gen, valid_gen = generators.get_train(), generators.get_valid()\n losses = Losses_Keras(\n frames=model_dict['frames'], frame_shift=model_dict['frame_shift'])\n loss_fnc = losses.get_by_name(model_dict[\"loss_function\"])\n ap_metrics = [AUC_AP(), Accuracy_AP(), Precision_AP(),\n Recall_AP(), PrecisionAtRecall_AP(0.95)]\n fp_hdf5 = os.path.join(model_dir, fold_name+\".hdf5\")\n fp_hdf5 = os.path.join(model_dir, fold_name+\".hdf5\")\n mcp = ModelCheckpoint(fp_hdf5, monitor='val_loss', verbose=True,\n save_best_only=True, save_weights_only=True)\n tbl = tensorflow.keras.callbacks.TensorBoard(os.path.join(model_dir, 'logs{}'.format(fold_name)))\n metrics = ap_metrics\n callbacks = [mcp, tbl]\n optimizer = tensorflow.keras.optimizers.Adam(learning_rate=model_dict['learning_rate'])\n epochs = model_dict[\"epochs\"]\n #### 1\n compile_kwargs = {\"loss\": loss_fnc,\n \"optimizer\": optimizer, \"metrics\": metrics}\n fit_kwargs = {\"x\": train_gen, \"epochs\": epochs,\n \"validation_data\": valid_gen, \"callbacks\": callbacks}\n Setup = SETUP_DIC[model_dict[\"architecture\"]]\n setup = Setup(name=model_dict[\"name\"], compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs,\n 
TPA_view_IDs=model_dict['view_IDs'])\n # setup.delete_existing_model_data_and_output()\n print(setup.model.summary())\n\n setup.train()\n setup.write_architecture()\n # setup.plot_metrics(plot_val_metrics=valid_gen)\n #### /1\n #### 2\n # Get optimal threshold.\n print(\"Getting optimal threshold...\")\n # RELOAD\n data_models_model_path = setup.data_models_model_path\n setup = Model_Evaluation(data_models_model_path, fold_name=fold_name,\n stateful=False, weights_ext=\"hdf5\", load_scaling=False)\n\n # https://support.sas.com/en/books/reference-books/analyzing-receiver-operating-characteristic-curves-with-sas/review.html\n # Gonen, Mithat. 2007. Analyzing Receiver Operating Characteristic Curves with SAS. Cary, NC: SAS Institute Inc.\n preds_list, trues_list = [], []\n # generators = [train_gen, valid_gen] if valid_gen else [train_gen]\n generators = [valid_gen] if valid_gen else [train_gen]\n for generator in generators:\n for i in range(len(generator)):\n x, y = generator[i]\n preds_list.append(setup.model.predict(x))\n trues_list.append(y)\n preds = np.vstack(preds_list)\n trues = np.vstack(trues_list)\n labels_dict, predictions_dict = {}, {}\n for idx, l in enumerate(zip(preds, trues)):\n pred, true = l\n predictions_dict[idx] = pred[:, 1]\n sample_class = true[-1][-1]\n labels_dict[idx] = model_dict[\"frames\"] - \\\n model_dict[\"frame_shift\"] if sample_class else -1\n if valid_gen:\n labels_dict[idx] = valid_frames_before if sample_class else -1\n prc_pre_fpr, prc_pre_tpr, prc_pre_thresholds = plots.prediction_pr_curve(\n labels_dict, predictions_dict)\n # get optimal threshold\n fpr, tpr, thresh = prc_pre_fpr[:-1], prc_pre_tpr[:-1], prc_pre_thresholds\n xy = np.stack([fpr, tpr]).T\n ideal = np.array([1, 1])\n d = ideal-xy\n D = (d*d).sum(axis=-1)\n optimal_threshold = thresh[D.argmin()]\n with open(os.path.join(data_models_model_path, project.THRESHOLD_FILE_PATTERN.format(fold_name)), \"wb\") as f:\n pickle.dump(optimal_threshold, f)\n #### /2\n print(\"Trained {}\".format(model_dict[\"name\"]))\n clear_session()\n return True", "def model(self, hyperparams, test_mode=False):\n run_doc = OrderedDict() # Document important hyperparameters\n run_start_time = time.time()\n run_id = str(uuid4())\n # TODO: Not ideal: Loads from memory every time. 
Use generator?\n train_data, train_targets, test_data, test_targets = \\\n self.data_loader(dataset=hyperparams['dataset'], size=hyperparams['dataset_size'])\n run_doc['dataset'] = hyperparams['dataset']\n run_doc['data_size'] = len(train_targets)\n # Visualization tools\n if config.INPUT_DEBUG:\n image_analysis(image=train_data[0, :, :, :], label=train_targets[0, :])\n # Input shape comes from image shape\n img_width = train_data[0].shape[0]\n img_height = train_data[0].shape[1]\n num_channels = train_data[0].shape[2]\n input_shape = (img_width, img_height, num_channels)\n run_doc['input_shape'] = '(%d, %d, %d)' % input_shape\n input_tensor = Input(shape=input_shape, dtype='float32', name='input_image')\n try: # Model creation is in separate file\n x, run_doc = custom_model(input_tensor, params=hyperparams, run_doc=run_doc)\n except ValueError as e:\n if not test_mode: # If not testing, ignore error causing models\n return {'loss': 100, 'status': STATUS_OK}\n else:\n raise e\n # Final layer classifies into 4 possible actions\n output = layers.Dense(4, activation='softmax')(x)\n # File names for the model and logs\n log_file = os.path.join(self._logs_dir, run_id)\n model_file = os.path.join(self._models_dir, run_id + '.h5')\n # Add some callbacks so we can track progress using Tensorboard\n callbacks = [keras.callbacks.EarlyStopping('val_loss', patience=config.TRAIN_PATIENCE, mode=\"min\")]\n if not test_mode: # Don't save models/logs if in testing mode\n callbacks += [keras.callbacks.TensorBoard(log_dir=log_file),\n keras.callbacks.ModelCheckpoint(model_file, save_best_only=True)]\n # Choice of optimizer and optimization parameters\n if hyperparams['optimizer'] == 'sgd':\n optimizer = optimizers.SGD(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'rmsprop':\n optimizer = optimizers.RMSprop(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'nadam':\n optimizer = optimizers.Nadam(lr=hyperparams[\"learning_rate\"],\n schedule_decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'adam':\n optimizer = optimizers.Adam(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n # Save optimizer parameters to run doc\n run_doc['optimizer'] = hyperparams['optimizer']\n run_doc['opt_learning_rate'] = hyperparams[\"learning_rate\"]\n run_doc['opt_decay'] = hyperparams[\"decay\"]\n run_doc['opt_clipnorm'] = hyperparams[\"clipnorm\"]\n # Create and compile the model\n model = Model(input_tensor, output)\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n # Print out model summary and store inside run documentation as list of strings\n model.summary()\n run_doc['model_summary'] = []\n model.summary(print_fn=(lambda a: run_doc['model_summary'].append(a)))\n # Fit the model to the datasets\n self.log.info(\"Fitting model (eval %d of %d) ...\" % (self._eval_idx + 1, self._max_eval))\n self._eval_idx += 1\n model.fit(x=train_data, y=train_targets,\n batch_size=hyperparams['batch_size'],\n epochs=hyperparams['epochs'],\n validation_data=(test_data, test_targets),\n callbacks=callbacks,\n verbose=1)\n val_loss, val_acc = model.evaluate(x=test_data, y=test_targets, verbose=2)\n self.log.info(\" .... 
Completed!\")\n self.log.info(\" -- Evaluation time %ds\" % (time.time() - run_start_time))\n self.log.info(\" -- Total time %ds\" % (time.time() - self._start_time))\n # Save training parameters to run doc\n run_doc['batch_size'] = hyperparams['batch_size']\n run_doc['epochs'] = hyperparams['epochs']\n run_doc['val_loss'] = val_loss\n run_doc['val_acc'] = val_acc\n # Results are used to pick best pirate\n self._results[run_id] = val_loss\n # Save run_doc to pickle file in model directory\n run_doc_file_name = run_id + '.pickle'\n if not test_mode: # Don't save docs if in testing mode\n with open(os.path.join(self._models_dir, run_doc_file_name), 'wb') as f:\n pickle.dump(run_doc, f)\n self.log.info('Run Dictionary %s' % str(run_doc))\n # Delete the session to prevent GPU memory from getting full\n keras.backend.clear_session()\n # Optimizer minimizes validation loss\n return {'loss': val_loss, 'status': STATUS_OK}", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = 
label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += [mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 
'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics", "def combineModelsAndExport(builderSpec, nmsSpec, fileName, quantize=False):\n try:\n print(f\"Combine CoreMl model with nms and export model\")\n # Combine models to a single one\n pipeline = ct.models.pipeline.Pipeline(\n input_features=[\n (\"image\", ct.models.datatypes.Array(3, 460, 460)),\n (\"iouThreshold\", ct.models.datatypes.Double()),\n (\"confidenceThreshold\", ct.models.datatypes.Double()),\n ],\n output_features=[\"confidence\", \"coordinates\"],\n )\n\n # Required version (>= ios13) in order for mns to work\n pipeline.spec.specificationVersion = 4\n\n pipeline.add_model(builderSpec)\n pipeline.add_model(nmsSpec)\n\n pipeline.spec.description.input[0].ParseFromString(\n builderSpec.description.input[0].SerializeToString()\n )\n pipeline.spec.description.output[0].ParseFromString(\n nmsSpec.description.output[0].SerializeToString()\n )\n pipeline.spec.description.output[1].ParseFromString(\n nmsSpec.description.output[1].SerializeToString()\n )\n\n # Metadata for the model‚\n pipeline.spec.description.input[\n 1\n ].shortDescription = \"(optional) IOU Threshold override (Default: 0.6)\"\n pipeline.spec.description.input[\n 2\n ].shortDescription = \"(optional) Confidence Threshold override (Default: 0.4)\"\n pipeline.spec.description.output[\n 0\n ].shortDescription = \"Boxes \\xd7 Class confidence\"\n pipeline.spec.description.output[\n 1\n ].shortDescription = \"Boxes \\xd7 [x, y, width, height] (relative to image size)\"\n pipeline.spec.description.metadata.versionString = \"1.0\"\n pipeline.spec.description.metadata.shortDescription = \"yolov5\"\n pipeline.spec.description.metadata.author = \"Leon De Andrade\"\n 
pipeline.spec.description.metadata.license = \"\"\n\n model = ct.models.MLModel(pipeline.spec)\n model.save(fileName)\n\n if quantize:\n fileName16 = fileName.replace(\".mlmodel\", \"_16.mlmodel\")\n modelFp16 = ct.models.neural_network.quantization_utils.quantize_weights(\n model, nbits=16\n )\n modelFp16.save(fileName16)\n\n fileName8 = fileName.replace(\".mlmodel\", \"_8.mlmodel\")\n modelFp8 = ct.models.neural_network.quantization_utils.quantize_weights(\n model, nbits=8\n )\n modelFp8.save(fileName8)\n\n print(f\"CoreML export success, saved as {fileName}\")\n except Exception as e:\n print(f\"CoreML export failure: {e}\")", "def meta_train(tasks, model, args, device, method='random', meta_iters=10000, num_updates=5, meta_batch_size=5):\n # Define logging\n os.makedirs(args.save_path, exist_ok=True)\n writer = SummaryWriter(\n os.path.join(args.save_path, 'runs', '{}'.format(datetime.now()).replace(\":\", \"_\")))\n\n header = ' Time Task Iteration Loss Accuracy'\n log_template = '{:>10} {:>25} {:10.0f} {:10.6f} {:10.6f}'\n test_template = 'Test mean: {}, Test std: {}'\n\n print(header)\n start = time.time()\n\n # Define optimizers, lr schedulers and loss function\n optimizer_bert = AdamW(params=model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n optimizer = optim.Adam(params=chain(model.proto_net.encoder.mlp.parameters(),\n model.output_layer.parameters()),\n lr=args.lr)\n scheduler_bert = get_cosine_schedule_with_warmup(optimizer_bert, 200, meta_iters)\n scheduler = get_cosine_schedule_with_warmup(optimizer, 0, meta_iters)\n # ProtoNets always have CrossEntropy loss due to softmax output\n cross_entropy = nn.CrossEntropyLoss()\n\n print('Loading Tokenizer..')\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n special_tokens_dict = {'additional_special_tokens': [\"[MNT]\", \"[URL]\"]}\n\n num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n print('We have added', num_added_toks, 'tokens')\n model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. 
the length of the tokenizer.\n\n # setup task sampler and task model\n sampler = TaskSampler(tasks, method=method, custom_task_ratio=args.custom_task_ratio, supp_query_split=True)\n task_model = type(model)(args)\n task_model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n\n iterations = 0\n # Iterate over the data\n train_iter = sampler.get_iter('train', tokenizer, batch_size=args.batch_size, shuffle=True)\n model.train()\n\n # setup validation task and episodes for evaluation\n val_task = get_validation_task(args)\n episodes = torch.load(args.episodes)\n\n # dummy data to overwrite old values of task model output layer\n dummy_w = torch.randn((args.mlp_dims[-1], 2))\n dummy_b = torch.randn(2)\n\n average_query_loss = 0\n best_query_loss = 1e+9\n best_test_mean = -1\n best_test_last = -1\n convergence_tolerance_cnt = 0\n # outer loop (meta-iterations)\n for i in range(meta_iters):\n grads = []\n task_losses_inner = {}\n task_accuracies_inner = {}\n task_losses_outer = {}\n task_accuracies_outer = {}\n # inner loop (sample different tasks)\n for task_sample in range(meta_batch_size):\n # clone original model\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n task_model.to(device)\n task_model.train()\n\n # new optimizer for every new task model\n task_optimizer_bert = optim.SGD(params=task_model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n task_optimizer = optim.SGD(params=chain(task_model.proto_net.encoder.mlp.parameters(),\n task_model.output_layer.parameters()),\n lr=args.inner_lr)\n\n # prepare support and query set\n batch = next(train_iter)\n support = batch[:3]\n query = batch[3:]\n\n # setup output layer (via meta-model's prototype network)\n proto_embeddings = model.proto_net(support[0].to(device), attention_mask=support[2].to(device))\n prototypes = model.proto_net.calculate_centroids((proto_embeddings, support[1]), sampler.get_num_classes())\n W, b = task_model.calculate_output_params(prototypes.detach())\n task_model.initialize_classifier(W, b)\n\n # train some iterations on support set\n for update in range(num_updates):\n task_optimizer_bert.zero_grad()\n task_optimizer.zero_grad()\n predictions = task_model(support[0].to(device), attention_mask=support[2].to(device))\n task_loss = cross_entropy(predictions, support[1].long().squeeze().to(device))\n task_loss.backward()\n task_optimizer.step()\n task_optimizer_bert.step()\n\n # record task losses and accuracies for logging\n task_losses_inner[sampler.get_name()] = task_loss.item()\n task_accuracies_inner[sampler.get_name()] = sampler.calculate_accuracy(predictions, support[1].to(device))\n\n # trick to add prototypes back to computation graph\n W = 2 * prototypes + (W - 2 * prototypes).detach()\n b = -prototypes.norm(dim=1)**2 + (b + prototypes.norm(dim=1)**2).detach()\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # calculate gradients for meta update on the query set\n predictions = task_model(query[0].to(device), attention_mask=query[2].to(device))\n query_loss = cross_entropy(predictions, query[1].long().squeeze().to(device))\n query_loss.backward()\n\n # record task losses and accuracies for logging\n task_losses_outer[sampler.get_name()] = query_loss.item()\n task_accuracies_outer[sampler.get_name()] = sampler.calculate_accuracy(predictions, query[1].to(device))\n average_query_loss += query_loss.item()\n\n # register W and b parameters again to avoid error 
in weight update\n W = nn.Parameter(W)\n b = nn.Parameter(b)\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # save gradients of first task model\n if task_sample == 0:\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads.append(param.grad.clone())\n # add the gradients of all task samples\n else:\n p = 0\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads[p] += param.grad.clone()\n p += 1\n\n # perform meta update\n # first load/add the calculated gradients in the meta-model\n # (already contains gradients from prototype calculation)\n p = 0\n for param in model.parameters():\n if param.requires_grad and param.grad is not None:\n param.grad += grads[p]\n p += 1\n # update model parameters according to the gradients from inner loop (clear gradients afterwards)\n optimizer.step()\n optimizer_bert.step()\n scheduler.step()\n scheduler_bert.step()\n optimizer.zero_grad()\n optimizer_bert.zero_grad()\n\n iterations += 1\n if iterations % args.log_every == 0:\n average_query_loss /= (args.log_every*meta_batch_size)\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n iter_acc = sum(task_accuracies_outer.values()) / len(task_accuracies_outer.values())\n writer.add_scalar('Meta_Average/Loss/outer'.format(sampler.get_name()), iter_loss, iterations)\n writer.add_scalar('Meta_Average/Accuracy/outer'.format(sampler.get_name()), iter_acc, iterations)\n for t in tasks:\n task_name = t.get_name()\n if task_name in task_losses_inner.keys():\n writer.add_scalar('{}/Loss/inner'.format(task_name), task_losses_inner[task_name], iterations)\n writer.add_scalar('{}/Accuracy/inner'.format(task_name), task_accuracies_inner[task_name], iterations)\n writer.add_scalar('{}/Loss/outer'.format(task_name), task_losses_outer[task_name], iterations)\n writer.add_scalar('{}/Accuracy/outer'.format(task_name), task_accuracies_outer[task_name], iterations)\n print(log_template.format(\n str(timedelta(seconds=int(time.time() - start))),\n sampler.get_name(),\n iterations,\n iter_loss,\n iter_acc))\n\n # save best snapshot\n if average_query_loss < best_query_loss:\n best_query_loss = average_query_loss\n average_query_loss = 0\n snapshot_prefix = os.path.join(args.save_path, 'best_query')\n snapshot_path = (\n snapshot_prefix +\n '_loss_{:.5f}_iter_{}_model.pt'\n ).format(best_query_loss, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate in k shot fashion\n if iterations % args.eval_every == 0:\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n test_mean, test_std = k_shot_testing(task_model, episodes, val_task, device, num_updates=args.inner_updates,\n num_test_batches=args.num_test_batches)\n writer.add_scalar('{}/Acc'.format(val_task.get_name()), test_mean, iterations)\n writer.add_scalar('{}/STD'.format(val_task.get_name()), test_std, iterations)\n print(test_template.format(test_mean, test_std), flush=True)\n if test_mean > best_test_mean:\n best_test_mean = test_mean\n snapshot_prefix = os.path.join(args.save_path, 'best_test_{}'.format(val_task.get_name()))\n snapshot_path = (\n snapshot_prefix +\n '_acc_{:.5f}_iter_{}_model.pt'\n ).format(best_test_mean, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n 
for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n \n if test_mean > best_test_last:\n best_test_last = best_test_mean\n convergence_tolerance_cnt = 0\n else:\n convergence_tolerance_cnt += 1\n\n if convergence_tolerance_cnt == args.convergence_tolerance:\n break\n\n\n # saving redundant parameters\n # Save model checkpoints.\n if iterations % args.save_every == 0:\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = (\n snapshot_prefix +\n '_iter_{}_loss_{}_model.pt'\n ).format(iterations, iter_loss)\n logging.debug('Saving model...')\n model.save_model(snapshot_path)\n # Keep only the last snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n writer.close()", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def run_model(self):\n hparams = self.hparams\n\n # Build the child graph\n with tf.Graph().as_default(), tf.device(\n '/cpu:0' if FLAGS.use_cpu else '/gpu:0'):\n m, meval = self._build_models()\n\n # Figure out what epoch we are on\n starting_epoch = self._calc_starting_epoch(m)\n\n # Run the validation error right at the beginning\n valid_accuracy = self.eval_child_model(\n meval, self.data_loader, 'val')\n tf.logging.info('Before Training Epoch: {} Val Acc: {}'.format(\n starting_epoch, valid_accuracy))\n training_accuracy = None\n\n for curr_epoch in xrange(starting_epoch, hparams.num_epochs):\n\n # Run one training epoch\n training_accuracy = self._run_training_loop(m, curr_epoch)\n\n valid_accuracy = self.eval_child_model(\n meval, self.data_loader, 'val')\n tf.logging.info('Epoch: {} Valid Acc: {}'.format(\n curr_epoch, valid_accuracy))\n\n valid_accuracy, test_accuracy = self._compute_final_accuracies(\n meval)\n\n tf.logging.info(\n 'Train Acc: {} Valid Acc: {} Test Acc: {}'.format(\n training_accuracy, valid_accuracy, test_accuracy))", "def _setup_summaries(self, sess):\n # Output directory for models and summaries\n\n\n print(\"Writing to {}\\n\".format(os.path.abspath(self._log_dir)))\n\n train_summary_dir = os.path.join(self._log_dir, \"summaries\", \"train\")\n self._train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n val_summary_dir = os.path.join(self._log_dir, \"summaries\", \"validation\")\n self._val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)\n\n # Model checkpoints\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n self.checkpoint_dir = os.path.abspath(os.path.join(self._save_dir, \"checkpoints/\"))\n\n if not os.path.exists(self.checkpoint_dir):\n os.makedirs(self.checkpoint_dir)\n\n self._saver = tf.train.Saver(max_to_keep=10) # Save model after each epoch\n\n self.train_summary_op = tf.summary.merge(self._train_summaries)\n self.val_summary_op = tf.summary.merge(self._val_summaries)\n\n print(\"--------------------------------------------------\")\n print(\"\\ntensorboard --logdir {}\".format(os.path.abspath(self._log_dir)))\n print(\"\\ntensorboard --logdir {} --port 6007\".format(os.path.abspath(self.checkpoint_dir)))\n print(\"--------------------------------------------------\")", "def _main():\n\n # setup paths\n json_model_path = osp.join(FLAGS.input_dir, FLAGS.json_model_fname)\n weights_path = osp.join(FLAGS.input_dir, FLAGS.weights_fname)\n save_path = osp.splitext(json_model_path)[0][:-6] + \"graph_w\" + str(weights_path.split(\"_\")[-1][:-3]) + \".pb\"\n print(\"Loading Model: \" + json_model_path)\n print(\"Loading Weights: \" + weights_path)\n\n # Set keras to test phase\n k.set_learning_phase(0)\n\n # Load json and weights, then compile model\n with open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n model = model_from_json(loaded_model_json)\n model.load_weights(weights_path)\n model.compile(loss='mse', optimizer='sgd')\n\n # Freeze graph\n frozen_graph = freeze_session(k.get_session(), output_names=[out.op.name for out in model.outputs])\n\n # Write graph to protobuf file\n tf.train.write_graph(frozen_graph, \"model\", save_path, as_text=False)\n print(\"Written Graph to: \" + save_path)", "def learn(model: KW_Model,\n trainloader: DataLoader,\n testloader: DataLoader,\n optimizer: optim.Optimizer,\n nb_epoch: int,\n device: torch.device,\n eval_fn: Callable[[List[bool], List[Qid]], Dict[Qid, float]],\n mean_window: int = 50,\n entropy_lambda: float = 0.025,\n smt_lambda: float = 1.0,\n reinforce_lambda: float = 1.0,\n ) -> Tuple[nn.Module, Dict[str, List[torch.tensor]], Dict[str, List[torch.tensor]]]:\n print(\"Memory usage: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n past_rewards = {str(q_id.long().item()): deque(maxlen=mean_window)\n for _, _, q_ids, _ in chain(trainloader, testloader)\n for q_id in q_ids}\n \n logs = [\"reward\",\n \"scaled_entropy\",\n \"scaled_reinforce\",\n \"scaled_smt\",\n \"total_loss\",\n \"accuracy\"]\n train_logs = {log: list() for log in logs}\n test_logs = {log: list() for log in logs}\n del logs\n \n for epoch in range(nb_epoch):\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n print(f\"\\nEpoch {epoch}\")\n \n print(\"Begin epoch: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n model.train()\n for x, y, q_id, masks in trainloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n\n # entropy_lambda = min(1.01*entropy_lambda, 0.025)\n # reinforce_lambda = min(1.01*reinforce_lambda, 1.0)\n # smt_lambda = max(0.99*smt_lambda, 0.05)\n loss, reinforce_loss, entropy, smt_loss = losses\n optimizer.zero_grad()\n loss.backward()\n 
optimizer.step()\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().cpu().sum().tolist()\n nb_total += masks.cpu().sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {mean(running_loss): .3f} Rewa {mean(running_reward): .5f}\", end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n\n # Logs\n train_logs[\"reward\"].append(mean(running_reward))\n train_logs[\"scaled_entropy\"].append(mean(entropies))\n train_logs[\"scaled_reinforce\"].append(mean(reinforces))\n train_logs[\"scaled_smt\"].append(mean(smts))\n train_logs[\"total_loss\"].append(mean(running_loss))\n train_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n \n train_loss, train_reward = mean(running_loss), mean(running_reward)\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n model.eval()\n for x, y, q_id, masks in testloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n loss, reinforce_loss, entropy, smt_loss = losses\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().sum().tolist()\n nb_total += masks.sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {train_loss: .3f} Rewa {train_reward: .3f}\",\n f\"Te Loss{mean(running_loss): .3f} Rewa {mean(running_reward): .3f}\",\n end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n \n \n # Logs\n test_logs[\"reward\"].append(mean(running_reward))\n test_logs[\"scaled_entropy\"].append(mean(entropies))\n test_logs[\"scaled_reinforce\"].append(mean(reinforces))\n test_logs[\"scaled_smt\"].append(mean(smts))\n test_logs[\"total_loss\"].append(mean(running_loss))\n test_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n\n return model, train_logs, test_logs", "def main(u_net_settings):\n model = build_u_net(*u_net_settings)\n print(model.summary())", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n 
betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1", "def prepare_training_components(self):\n # Instantiate model objects.\n self.instantiate_model_objects()\n\n # Create checkpoint machinery to save/restore checkpoints.\n self.create_checkpoint_machinery()\n\n # Create summary file writer.\n self.summary_file_writer = tf.summary.create_file_writer(\n logdir=os.path.join(self.params[\"output_dir\"], \"summaries\"),\n name=\"summary_file_writer\"\n )", "def sample_model(model, x, y, params_init, model_loss='multi_class_linear_output' ,num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, inv_mass=None, jitter=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, debug=False, tau_out=1.,tau_list=None, store_on_GPU = True, desired_accept_rate=0.8, verbose = False):\n\n device = params_init.device\n params_shape_list = []\n params_flattened_list = []\n build_tau = False\n if tau_list is None:\n tau_list = []\n build_tau = True\n for weights in model.parameters():\n params_shape_list.append(weights.shape)\n params_flattened_list.append(weights.nelement())\n if build_tau:\n tau_list.append(torch.tensor(1.))\n\n log_prob_func = define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=normalizing_const, device = device)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n return sample(log_prob_func, params_init, num_samples=num_samples, num_steps_per_sample=num_steps_per_sample, step_size=step_size, burn=burn, jitter=jitter, inv_mass=inv_mass, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, jitter_max_tries=jitter_max_tries, sampler=sampler, integrator=integrator, metric=metric, debug=debug, desired_accept_rate=desired_accept_rate, store_on_GPU = store_on_GPU, verbose = verbose)", "def adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before 
it can be adjusted.\")\n\n # Adjusting twice causes an error.\n if self.is_model_adjusted:\n logging.debug(\"model_and_info.is_model_adjusted is already True\")\n\n if self._optimizer:\n raise ValueError(\"Create an optimizer only after creating and adjusting the model.\")\n\n self._model = ModelAndInfo._adjust_for_gpus(model=self._model,\n config=self.config,\n model_execution_mode=self.model_execution_mode)\n\n self.is_model_adjusted = True\n logging.debug(\"model_and_info.is_model_adjusted set to True\")", "def compute_stats(self, epoch, loader, setname):\n args = self.args\n self.model.eval()\n ndevbatches = len(self.dev_loader)\n logging.info(f\"Evaluating {ndevbatches} batches ...\")\n\n recons, gts = defaultdict(list), defaultdict(list)\n acquisition_machine_by_fname = dict()\n with torch.no_grad():\n for batch_idx, batch in enumerate(self.dev_loader):\n progress = epoch + batch_idx/ndevbatches\n logging_epoch = batch_idx % args.log_interval == 0\n logging_epoch_info = batch_idx % (2 * args.log_interval) == 0\n log = logging.info if logging_epoch_info else logging.debug\n\n self.start_of_test_batch_hook(progress, logging_epoch)\n\n batch = self.preprocess_data(batch)\n output, target = self.predict(batch)\n output = self.unnorm(output, batch)\n target = self.unnorm(target, batch)\n fname, slice = batch.fname, batch.slice\n\n for i in range(output.shape[0]):\n slice_cpu = slice[i].item()\n recons[fname[i]].append((slice_cpu, output[i].float().cpu().numpy()))\n gts[fname[i]].append((slice_cpu, target[i].float().cpu().numpy()))\n\n acquisition_type = batch.attrs_dict['acquisition'][i]\n machine_type = batch.attrs_dict['system'][i]\n acquisition_machine_by_fname[fname[i]] = machine_type + '_' + acquisition_type\n\n if logging_epoch or batch_idx == ndevbatches-1:\n gpu_memory_gb = torch.cuda.memory_allocated()/1000000000\n host_memory_gb = utils.host_memory_usage_in_gb()\n log(f\"Evaluated {batch_idx+1} of {ndevbatches} (GPU Mem: {gpu_memory_gb:2.3f}gb Host Mem: {gpu_memory_gb:2.3f}gb)\")\n sys.stdout.flush()\n\n if self.args.debug_epoch_stats:\n break\n del output, target, batch\n\n logging.debug(f\"Finished evaluating\")\n self.end_of_test_epoch_hook()\n\n recons = {\n fname: np.stack([pred for _, pred in sorted(slice_preds)])\n for fname, slice_preds in recons.items()\n }\n gts = {\n fname: np.stack([pred for _, pred in sorted(slice_preds)])\n for fname, slice_preds in gts.items()\n }\n\n nmse, psnr, ssims = [], [], []\n ssim_for_acquisition_machine = defaultdict(list)\n recon_keys = list(recons.keys()).copy()\n for fname in recon_keys:\n pred_or, gt_or = recons[fname].squeeze(1), gts[fname].squeeze(1)\n pred, gt = transforms.center_crop_to_smallest(pred_or, gt_or)\n del pred_or, gt_or\n\n ssim = evaluate.ssim(gt, pred)\n acquisition_machine = acquisition_machine_by_fname[fname]\n ssim_for_acquisition_machine[acquisition_machine].append(ssim)\n ssims.append(ssim)\n nmse.append(evaluate.nmse(gt, pred))\n psnr.append(evaluate.psnr(gt, pred))\n del gt, pred\n del recons[fname], gts[fname]\n\n if len(nmse) == 0:\n nmse.append(0)\n ssims.append(0)\n psnr.append(0)\n\n min_vol_ssim = np.argmin(ssims)\n min_vol = str(recon_keys[min_vol_ssim])\n logging.info(f\"Min vol ssims: {min_vol}\")\n sys.stdout.flush()\n\n del recons, gts\n\n acquisition_machine_losses = dict.fromkeys(self.dev_data.system_acquisitions, 0)\n for key, value in ssim_for_acquisition_machine.items():\n acquisition_machine_losses[key] = np.mean(value)\n\n losses = {'NMSE': np.mean(nmse),\n 'PSNR': np.mean(psnr),\n 'SSIM': 
np.mean(ssims),\n 'SSIM_var': np.var(ssims),\n 'SSIM_min': np.min(ssims),\n **acquisition_machine_losses}\n\n return losses", "def create_models(self):\r\n self.all_ratings = AllRatingsWithCommon(\r\n experts=self.users,\r\n objects=self.videos,\r\n output_features=self.features,\r\n name=\"prod\",\r\n )\r\n\r\n print_memory(stage=\"DPLF:ratings_nodata_created\")\r\n\r\n # creating models\r\n self.user_to_model = {\r\n user: FeaturelessPreferenceLearningModel(\r\n expert=user, all_ratings=self.all_ratings\r\n )\r\n for user in self.users\r\n }\r\n\r\n print_memory(stage=\"DPLF:models_created\")\r\n\r\n # before creating the aggregator, filling models with data\r\n self.user_to_size = {\r\n user: self.fill_model_data(self.user_to_model[user], user)\r\n for user in tqdmem(self.users, desc=\"fill_data\")\r\n }\r\n\r\n # virtual 'common' data\r\n fplm_common = FeaturelessPreferenceLearningModel(\r\n expert=AllRatingsWithCommon.COMMON_EXPERT, all_ratings=self.all_ratings\r\n )\r\n fplm_common.on_dataset_end()\r\n\r\n print_memory(stage=\"DPLF:data_filled\")\r\n\r\n # resetting the model given the data\r\n self.all_ratings.reset_model()\r\n\r\n print_memory(stage=\"DPLF:model_reset_ok\")\r\n\r\n # aggregating models\r\n self.aggregator = FeaturelessMedianPreferenceAverageRegularizationAggregator(\r\n models=[self.user_to_model[u] for u in self.users]\r\n )\r\n self.aggregator.certification_status = self.user_certified\r\n\r\n print_memory(stage=\"DPLF:aggregator_created\")", "def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = next(self.rollout_provider)\n batch = process_rollout(rollout, gamma=self.config.discount)\n\n should_compute_summary = (self.task == 0 \n and self.local_steps % self.config.summary_every == 0)\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.r: batch.r,\n self.w: batch.w,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(\n tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1", "def run(self):\n\n self._logger.debug(\"Starting Dummy Model: modelID=%s;\" % (self._modelID))\n\n # =========================================================================\n # Initialize periodic activities (e.g., for model result updates)\n # =========================================================================\n periodic = self._initPeriodicActivities()\n\n self._optimizedMetricLabel = self._optimizeKeyPattern\n self._reportMetricLabels = [self._optimizeKeyPattern]\n\n # =========================================================================\n # Create our top-level loop-control iterator\n # =========================================================================\n if self._iterations >= 0:\n iterTracker = iter(xrange(self._iterations))\n else:\n iterTracker = iter(itertools.count())\n\n # =========================================================================\n # This gets set in the unit tests. It tells the worker to sys exit\n # the first N models. 
This is how we generate orphaned models\n doSysExit = False\n if self._sysExitModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._sysExitModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n doSysExit = True\n\n if self._delayModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._delayModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n time.sleep(10)\n \n # DEBUG!!!! infinite wait if we have 50 models\n #if len(modelIDs) >= 50:\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n # while not jobCancel:\n # time.sleep(1)\n # jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]\n\n if self._errModelRange is not None:\n modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)\n modelIDs = [x[0] for x in modelAndCounters]\n modelIDs.sort()\n (beg,end) = self._errModelRange\n if self._modelID in modelIDs[int(beg):int(end)]:\n raise RuntimeError(\"Exiting with error due to errModelRange parameter\")\n\n # =========================================================================\n # Delay, if necessary\n if self._delay is not None:\n time.sleep(self._delay)\n\n # =========================================================================\n # Run it!\n # =========================================================================\n self._currentRecordIndex = 0\n while True:\n\n # =========================================================================\n # Check if the model should be stopped\n # =========================================================================\n\n # If killed by a terminator, stop running\n if self._isKilled:\n break\n\n # If job stops or hypersearch ends, stop running\n if self._isCanceled:\n break\n\n # If model is mature, stop running ONLY IF we are not the best model\n # for the job. Otherwise, keep running so we can keep returning\n # predictions to the user\n if self._isMature:\n if not self._isBestModel:\n self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED\n break\n else:\n self._cmpReason = self._jobsDAO.CMPL_REASON_EOF\n\n # =========================================================================\n # Get the the next record, and \"write it\"\n # =========================================================================\n try:\n self._currentRecordIndex = next(iterTracker)\n except StopIteration:\n break\n\n # \"Write\" a dummy output value. This is used to test that the batched\n # writing works properly\n\n self._writePrediction(ModelResult(None, None, None, None))\n\n periodic.tick()\n\n # =========================================================================\n # Compute wait times. 
See if model should exit\n # =========================================================================\n\n if self.__shouldSysExit(self._currentRecordIndex):\n sys.exit(1)\n\n # Simulate computation time\n if self._busyWaitTime is not None:\n time.sleep(self._busyWaitTime)\n self.__computeWaitTime()\n\n # Asked to abort after so many iterations?\n if doSysExit:\n sys.exit(1)\n\n # Asked to raise a jobFailException?\n if self._jobFailErr:\n raise utils.JobFailException(\"E10000\",\n \"dummyModel's jobFailErr was True.\")\n\n # =========================================================================\n # Handle final operations\n # =========================================================================\n if self._doFinalize:\n if not self._makeCheckpoint:\n self._model = None\n\n # Delay finalization operation\n if self._finalDelay is not None:\n time.sleep(self._finalDelay)\n\n self._finalize()\n\n self._logger.info(\"Finished: modelID=%r \"% (self._modelID))\n\n return (self._cmpReason, None)", "def represent():\n\tmodel.eval()\n\twith torch.no_grad():\n\n\t\tall_data = []\n\t\tall_targets = []\n\n\t\tfor batch_idx, (data, labels) in enumerate(nat_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float()+50) # +50 for nat data, for distinction between nat and syn\n\t\tfor batch_idx, (data, labels) in enumerate(syn_test_loader):\n\t\t\tall_data.append(data)\n\t\t\tall_targets.append(labels.float())\n\n\t\tall_data = torch.cat(all_data, 0) # Merges the list of tensors\n\t\tall_data = all_data.cuda()\n\t\tall_targets = torch.cat(all_targets, 0)\n\n\t\trepresentation = model.representation(all_data)\n\t\t\n\t\ttorch.save(representation, directory + \"/representations/repr\" + str(epoch) + \".pt\")\n\t\twith open(directory + \"/representations/tar\" + str(epoch) + \".log\", \"w\") as f:\n\t\t\tfor t in all_targets:\n\t\t\t\tf.write(str(t.item()) + \"\\n\")\n\n\t\t# Optional: Plotting of the UMAP in each represent()\n\t\t#sns.set(style='white', context='notebook', rc={'figure.figsize': (14, 10)})\n\t\t#reducer = umap.UMAP()\n\t\t#embedding = reducer.fit_transform(representation.cpu())\n\t\t# flatui = [\"#ff0000\", \"#000000\", \"#001800\", \"#003000\", \"#004800\", \"#006000\", \"#007800\", \"#009000\", \"#00a800\", \"#00c000\", \"#00d800\"]\n\t\t# plt.scatter(embedding[:, 0], embedding[:, 1], c=[sns.color_palette(flatui)[x] for x in all_targets.int()])\n\t\t#plt.scatter(embedding[:, 0], embedding[:, 1], c=all_targets.cpu())\n\t\t#plt.gca().set_aspect('equal', 'datalim')\n\t\t#plt.title('UMAP projection of cell data', fontsize=24);\n\t\t#plt.savefig(directory + \"/umap_\" + str(epoch) + \".png\")\n\t\t#plt.clf()", "def train(model, train_sampler, train_loader, test_sampler, test_loader,\n use_cuda, epochs, loss_func, optimizer_name, lr,\n batch_log_interval, hvd, smlb_out):\n console = smlb_out.log.console\n device = smlb_out.log.device\n\n # training history (validation only)\n with console.subproc('Creating training history'):\n loss_val_hist = torch.zeros(epochs, dtype=torch.float32)\n acc_val_hist = torch.zeros(epochs, dtype=torch.float32)\n\n # send to device\n if use_cuda:\n with console.subproc('Sending model and history to device'):\n model.cuda()\n loss_val_hist = loss_val_hist.cuda()\n acc_val_hist = acc_val_hist.cuda()\n\n # loss\n with console.subproc('Creating loss function'):\n loss_func = eval(f'nn.{loss_func}()')\n console.message(f'Loss function: {loss_func}')\n\n # optimizer\n with console.subproc('Creating optimizer'):\n console.message(f'Learning 
rate specified: {lr}')\n console.message(f'Reduction operation: {\"hvd.Average\"}')\n console.message(f'Learning rate will be scaled by a factor of '\n f'{hvd.size()} (hvd.size())')\n optimizer = eval(f'torch.optim.{optimizer_name}(model.parameters(), '\n f'lr={lr * hvd.size()})')\n console.message(f'Optimizer: {optimizer}')\n # Horovod: wrap optimizer with DistributedOptimizer\n optimizer = hvd.DistributedOptimizer(optimizer, op=hvd.Average)\n\n # Horovod: broadcast model and optimizer\n with console.subproc('Broadcasting model and optimizer'):\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n # ----------------------\n # Epoch loop starts here\n # ----------------------\n console.begin('**** EPOCH LOOP ****')\n device.begin('**** EPOCH LOOP ****')\n for epoch in range(epochs):\n # only log on device within epoch loop\n device.begin(f'Epoch: {epoch}')\n\n # -------------------\n # Training batch loop\n # -------------------\n device.begin('Training batch loop')\n # stamp train epoch in system monitor\n smlb_out.system.stamp_event(f'epoch {epoch}: train')\n # enter train mode\n model.train()\n # Horovod: set epoch to sampler for shuffling\n train_sampler.set_epoch(epoch)\n # batch loop\n for batch_idx, (batch_x, batch_y) in enumerate(train_loader):\n if use_cuda:\n batch_x, batch_y = batch_x.cuda(), batch_y.cuda()\n # forward, loss, acc\n pred_y = model(batch_x)\n loss = loss_func(pred_y, batch_y)\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if batch_idx % batch_log_interval == 0:\n # accuracy on batch\n with torch.no_grad():\n acc = compute_acc(pred_y, batch_y)\n # Horovod: use train_sampler to determine the number of\n # samples in this worker's partition\n device.message(\n '[{:5d}/{:5d} ({:3.0f}%)] loss={:f}, acc={:f}, '\n 'elapsed={:f} sec'.format(\n batch_idx * len(batch_x), len(train_sampler),\n 100 * batch_idx / len(train_loader), loss, acc,\n device.elapsed_shallowest))\n device.ended('Training batch loop')\n\n # ----------------------\n # Validation on test set\n # ----------------------\n device.begin('Validating on test set')\n # stamp validate epoch in system monitor\n smlb_out.system.stamp_event(f'epoch {epoch}: validate')\n # enter eval mode\n model.eval()\n # accumulate loss and acc\n loss_val = torch.zeros((1,), dtype=torch.float32)\n acc_val = torch.zeros((1,), dtype=torch.float32)\n if use_cuda:\n loss_val, acc_val = loss_val.cuda(), acc_val.cuda()\n for batch_x, batch_y in test_loader:\n if use_cuda:\n batch_x, batch_y = batch_x.cuda(), batch_y.cuda()\n # forward, loss, acc\n with torch.no_grad():\n pred_y = model(batch_x)\n loss_val += loss_func(pred_y, batch_y)\n acc_val += compute_acc(pred_y, batch_y) * len(pred_y)\n if use_cuda:\n loss_val, acc_val = loss_val.cpu(), acc_val.cpu()\n loss_val /= len(test_sampler)\n acc_val /= len(test_sampler)\n # average metrics across ranks and save to history\n with device.subproc('Averaging metrics across ranks (allreduce)'):\n loss_val_hist[epoch] = metric_average(loss_val, 'avg_loss', hvd)\n acc_val_hist[epoch] = metric_average(acc_val, 'avg_accuracy', hvd)\n # log device-wise and average metrics\n device.message('Metrics on rank: loss_val={:f}, acc_val={:f}'\n .format(loss_val.item(), acc_val.item()))\n device.message('Average metrics: loss_val={:f}, acc_val={:f}'\n .format(loss_val_hist[epoch], acc_val_hist[epoch]))\n device.ended('Validating on test set')\n\n # only show average on console\n console.message(f'Epoch {epoch:2d}: 
'\n f'loss_val={loss_val_hist[epoch]:f}, '\n f'acc_val={acc_val_hist[epoch]:f}, '\n f'elapsed={device.elapsed_shallowest:f} sec')\n device.ended(f'Epoch: {epoch}')\n device.ended('**** EPOCH LOOP ****')\n console.ended('**** EPOCH LOOP ****')\n\n # send model and data back to CPU\n if use_cuda:\n with console.subproc('Sending model and history back to cpu'):\n model.cpu()\n loss_val_hist = loss_val_hist.cpu()\n acc_val_hist = acc_val_hist.cpu()\n\n # return history\n return {'loss_val': loss_val_hist.numpy().tolist(),\n 'acc_val_hist': acc_val_hist.numpy().tolist()}", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def single_gpu_online_evaluation(\n model: Module,\n data_loader: DataLoader,\n metric: Union[str, Sequence[str]] = 'EPE') -> Dict[str, np.ndarray]:\n\n model.eval()\n metrics = metric if isinstance(metric, (type, list)) else [metric]\n result_metrics = defaultdict(list)\n\n prog_bar = mmcv.ProgressBar(len(data_loader))\n for data in data_loader:\n with torch.no_grad():\n batch_results = model(test_mode=True, **data)\n img_metas = data['img_metas'].data[0]\n batch_flow = []\n batch_flow_gt = []\n batch_valid = []\n # a batch of result and a batch of img_metas\n for i in range(len(batch_results)):\n result = batch_results[i]\n img_meta = img_metas[i]\n\n # result.keys() is 'flow' or ['flow_fw','flow_bw']\n # img_meta.keys() is 'flow_gt' or ['flow_fw_gt','flow_bw_gt']\n for k in result.keys():\n\n if img_meta.get(k + '_gt', None) is None:\n # img_meta does not have flow_bw_gt, so just check\n # the forward predicted.\n if k == 'flow_bw':\n continue\n elif k == 'flow_fw':\n batch_flow_gt.append(img_meta['flow_gt'])\n else:\n batch_flow_gt.append(img_meta[k + '_gt'])\n\n batch_flow.append(result[k])\n batch_valid.append(\n img_meta.get('valid', np.ones_like(result[k][..., 0])))\n\n batch_results_metrics = eval_metrics(batch_flow, batch_flow_gt,\n batch_valid, metrics)\n for i_metric in metrics:\n result_metrics[i_metric].append(\n batch_results_metrics[i_metric])\n\n prog_bar.update()\n\n for i_metric in metrics:\n if result_metrics.get(i_metric) is None:\n raise KeyError(f'Model cannot compute {i_metric}')\n result_metrics[i_metric] = np.array(result_metrics[i_metric]).mean()\n\n return result_metrics", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- 
Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def continueTraining(self,model): \n self.setOldModel(model)\n self.model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n\n # fit model to data\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)", "def run():\n # get arguments\n args = parse_args()\n assert args.batch_size % args.gpu_num == 0\n assert args.gru_hidden_size % 2 == 0\n\n # create a logger\n logger = logging.getLogger(\"GACM\")\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n check_path(args.save_dir)\n check_path(args.load_dir)\n check_path(args.result_dir)\n check_path(args.summary_dir)\n if args.log_dir:\n check_path(args.log_dir)\n file_handler = logging.FileHandler(args.log_dir + time.strftime('%Y-%m-%d-%H:%M:%S',time.localtime(time.time())) + '.txt')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n logger.info('Running with args : {}'.format(args))\n\n logger.info('Checking the directories...')\n for dir_path in [args.save_dir, args.result_dir, args.summary_dir]:\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n global Dataset\n global Agent\n logger.info('Agent version: {}.0'.format(args.agent_version))\n logger.info('Dataset version: {}.0'.format(args.dataset_version))\n logger.info('Checking the directories...')\n Dataset = importlib.import_module('dataset{}'.format(args.dataset_version)).Dataset\n Agent = importlib.import_module('Agent{}'.format(args.agent_version)).Agent\n \n if args.pretrain:\n pretrain(args)\n if args.train:\n train(args)\n if args.test:\n test(args)\n if args.rank:\n rank(args)\n if args.generate_synthetic_dataset:\n 
generate_synthetic_dataset(args)\n logger.info('run done.')", "def build_graph(self):\n tf.logging.info('Building graph...')\n t0 = time.time()\n\n self._add_placeholders()\n\n with tf.device(\"/gpu:%d\"%(config.gpu_selection)):\n self._add_seq2seq()\n\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n\n\n if self._mode == 'train':\n self._add_train_op()\n\n self._summaries = tf.summary.merge_all()\n\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)" ]
[ "0.78193945", "0.70014805", "0.6252593", "0.6235304", "0.61942273", "0.6128506", "0.608759", "0.5960813", "0.59513026", "0.59509206", "0.58809274", "0.58801025", "0.5874364", "0.5837946", "0.58353555", "0.5803747", "0.57844466", "0.5753908", "0.5749925", "0.57394", "0.57334524", "0.5730291", "0.5723675", "0.5723551", "0.5714144", "0.5712949", "0.5692779", "0.56628966", "0.5652885", "0.5649871", "0.56397444", "0.56347793", "0.5621697", "0.56064576", "0.56024784", "0.5601831", "0.5590464", "0.55842376", "0.55842376", "0.5569094", "0.55589193", "0.55413705", "0.5531317", "0.5520086", "0.55031145", "0.550202", "0.54930073", "0.54895484", "0.548377", "0.54824877", "0.5479255", "0.54764867", "0.54705536", "0.54702747", "0.5467068", "0.54656595", "0.54656595", "0.54492855", "0.5445617", "0.54426473", "0.54424196", "0.54378104", "0.5433539", "0.54329574", "0.54329574", "0.54313636", "0.54310393", "0.54268855", "0.54237044", "0.54152143", "0.54072475", "0.54070765", "0.54042625", "0.5403478", "0.5401277", "0.5399984", "0.53989136", "0.53984237", "0.53953457", "0.53936166", "0.53922623", "0.5380648", "0.53768325", "0.5367875", "0.53622437", "0.5361536", "0.5357702", "0.53524727", "0.53506273", "0.53396887", "0.5334884", "0.5334789", "0.53316855", "0.53310376", "0.5330445", "0.532972", "0.53284", "0.5326326", "0.53259104", "0.5324142" ]
0.5817216
15
Creates a model as per the config, and loads the parameters from the given checkpoint path. Also updates the checkpoint_epoch.
def try_create_mean_teacher_model_and_load_from_checkpoint(self) -> bool:
        self.create_mean_teacher_model()
        if self.checkpoint_path:
            # Load the stored model. If there is no checkpoint present, return immediately.
            return self.try_load_checkpoint_for_mean_teacher_model()
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_checkpoint(model_config, path):\n model = models.VisionTransformer(num_classes=1, **model_config)\n variables = model.init(\n jax.random.PRNGKey(0),\n jnp.ones([1, 16, 16, 3], jnp.float32),\n train=False,\n )\n _save(variables['params'], path)", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def 
init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = 
self._counter\n\n model_dict['metrics'] = self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if 
os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. 
Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_model(self, checkpoint_path):\n model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. 
If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def load_checkpoint(path: str, save_dir: str, cuda: bool = False, attention_viz: bool = False) -> nn.Module:\r\n # Load model and args\r\n state = torch.load(path, map_location=lambda storage, loc: storage)\r\n args, loaded_state_dict = state['args'], state['state_dict']\r\n\r\n # Update args with current args\r\n args.cuda = cuda\r\n args.attention_viz = attention_viz\r\n args.save_dir = save_dir\r\n\r\n model = build_model(args)\r\n model.load_state_dict(loaded_state_dict)\r\n\r\n if cuda:\r\n print('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n return model", "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n 
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name = None\n for name in dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model", "def create_model( session, batch_size ):\n model = linear_model.LinearModel(\n FLAGS.linear_size,\n FLAGS.num_layers,\n FLAGS.residual,\n FLAGS.batch_norm,\n FLAGS.max_norm,\n batch_size,\n FLAGS.learning_rate,\n FLAGS.origin_bc,\n summaries_dir,\n dtype=tf.float16 if FLAGS.use_fp16 else tf.float32)\n\n if FLAGS.load <= 0:\n # Create a new model from scratch\n print(\"Creating model with fresh parameters.\")\n session.run( tf.global_variables_initializer() )\n return model\n\n # Load a previously saved model\n ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename=\"checkpoint\")\n print( \"train_dir\", train_dir )\n\n if ckpt and ckpt.model_checkpoint_path:\n # Check if the specific cpixels = pixels / pixels[2,:]heckpoint exists\n if FLAGS.load > 0:\n if os.path.isfile(os.path.join(train_dir,\"checkpoint-{0}.index\".format(FLAGS.load))):\n ckpt_name = os.path.join( os.path.join(train_dir,\"checkpoint-{0}\".format(FLAGS.load)) )\n else:\n raise ValueError(\"Asked to load checkpoint {0}, but it does not seem to exist\".format(FLAGS.load))\n else:\n ckpt_name = os.path.basename( ckpt.model_checkpoint_path )\n\n print(\"Loading model {0}\".format( ckpt_name ))\n model.saver.restore( session, ckpt.model_checkpoint_path )\n return model\n else:\n print(\"Could not find checkpoint. 
Aborting.\")\n raise( ValueError, \"Checkpoint {0} does not seem to exist\".format( ckpt.model_checkpoint_path ) )\n\n return model", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def try_load_checkpoint_for_model(self) -> bool:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, 
test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n 
val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):\n config = copy.deepcopy(config) # ensure the reference model is not passed to tests\n\n save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))\n config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})\n\n trainer = Trainer(**config)\n trainer.fit()\n\n self.reference_model = trainer.state.model\n self.reference_folder = save_folder", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load 
actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def init_model(session, model):\n # If there is a checkpoint, load it\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n\n # Else initialize the variables\n else:\n if FLAGS.decode:\n input(\"You sure you want to talk to an untrained chatbot? Press Ctrl-C to stop, Return to continue \")\n print(\"Fine.\")\n\n print(\"Creating model with fresh parameters.\")\n session.run(tf.global_variables_initializer())", "def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" % num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if 
len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if 
\"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def create_checkpoint(model, save_dir, train_data):\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {\n 'model': model.name,\n 'classifier': model.classifier,\n 'class_to_idx': model.class_to_idx,\n 'state_dict': model.state_dict()\n }\n\n if save_dir and isdir(save_dir):\n torch.save(checkpoint, save_dir + 'checkpoint.pth')\n print('checkpoint created')\n else: \n print(\"Directory not found. 
Saving at current directory in checkpoint.pth\")\n torch.save(checkpoint, 'checkpoint.pth')", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model, **kwargs)\n self.path = model\n self.model = get_zennet()\n\n model_pth_path = osp.join(self.path, ModelFile.TORCH_MODEL_FILE)\n\n checkpoint = torch.load(model_pth_path, map_location='cpu')\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n self.model.load_state_dict(state_dict, strict=True)\n logger.info('load model done')", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n 
net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model 
loaded!\")", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def train(self, config: ConfigurationNode = None):\n if config is None:\n config = self.config\n # Create writable timestamp for easier record keeping\n timestamp = datetime.now().isoformat(sep=\"T\", timespec=\"auto\")\n name_timestamp = timestamp.replace(\":\", \"_\")\n\n # Start the mlflow run:\n mlflow.start_run(run_name=name_timestamp)\n\n # Check valid output path, set path from the path_cfg_override modules respectively\n assert config.OUTPUT_PATH != ''\n path_output = config.OUTPUT_PATH # output folder\n path_train = config.DATASET.TRAIN_DATA_PATH # training data folder\n path_val = config.DATASET.VAL_DATA_PATH # validation 
data folder\n\n # Make output dir and its parents if not exist.\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Make result folders if they do not exist.\n self.results_dir = (Path(path_output) / name_timestamp)\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n # Make backup folders if they do not exist.\n self.backup_dir = os.path.join(self.results_dir, 'model_backups')\n if not os.path.exists(self.backup_dir):\n os.makedirs(self.backup_dir)\n\n writer_tensorboard = SummaryWriter(log_dir=Path(self.results_dir / \"logs_tensorflow\"))\n\n # Now that CFG has been properly merged with new data along the way, time to dump a version of it into a string for trackability purposes.\n config.dump(stream=open(os.path.join(self.results_dir, f'config{name_timestamp}.yaml'), 'w'))\n\n # file path to store the state of the model.\n state_fpath = os.path.join(self.results_dir, f'model{name_timestamp}.pt')\n\n # ????\n perf_path = os.path.join(self.results_dir, f'trace{name_timestamp}.p')\n perf_trace = []\n\n # Load data, create the data loader objects from them.\n data_train = pickle.load(open(path_train, 'rb'))\n data_val = pickle.load(open(path_val, 'rb'))\n self.loader_train = build_data_loader(data_train, config.DATASET, True)\n self.loader_val = build_data_loader(data_val, config.DATASET, False)\n\n # Build the model using configue dict node\n self.model = build_model(config.MODEL)\n\n # Enable parallel multi GPU mode if the config specify it.\n if config.MODEL.PARALLEL:\n print(\"Utilized parallel processing\")\n self.model = torch.nn.DataParallel(self.model)\n\n current_epoch = 0\n\n # For resuming training (i.e. load checkpoint)\n if config.RESUME_PATH != \"\":\n checkpoint = torch.load(config.RESUME_PATH, map_location='cpu')\n current_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint[\"model_state\"])\n _ = self.model.cuda()\n\n # SOLVER EVALUATOR\n cfg_solver = config.MODEL.SOLVER\n\n # Build optimizer (between train/validation, using the solver portion of the configuration.\n optimizer = build_optimizer(self.model, cfg_solver)\n\n # Build evaluator (between train/validation, using the solver portion of the configuration.\n evaluator = build_evaluator(cfg_solver)\n\n evaluator.float().cuda()\n total_epochs = cfg_solver.TOTAL_EPOCHS\n\n\n # Main training epoch loop starts here.\n for epoch in range(current_epoch, total_epochs):\n\n # Train a single epoch\n self.train_epoch(epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard)\n\n mlflow.end_run()", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, 
dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not 
np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def build_graph_from_config(self, model_config, track_config, checkpoint_path):\n self.build_model()\n \n ema = tf.train.ExponentialMovingAverage(0)\n variables_to_restore = ema.variables_to_restore(moving_avg_variables=[])\n\n # Filter out State variables\n variables_to_restore_filterd = {}\n for key, value in variables_to_restore.items():\n if key.split('/')[1] != 'State':\n if \"alex_branch\" not in key:\n if \"vggf_branch\" not in key:\n variables_to_restore_filterd[key] = value\n \n saver = tf.train.Saver(variables_to_restore_filterd)\n \n\n if osp.isdir(checkpoint_path):\n #checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_path:\n raise ValueError(\"No checkpoint file found in: {}\".format(checkpoint_path))\n\n def _restore_fn(sess):\n logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n 
saver.restore(sess, checkpoint_path)\n logging.info(\"Successfully loaded checkpoint: %s\", os.path.basename(checkpoint_path))\n logging.info(\"Restore CANet...\")\n\n return _restore_fn", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def __init__(self, saved_model=None, serialize_input=True):\n assert saved_model\n self.saved_model_path = saved_model\n self.serialize_input = serialize_input\n logging.info(\"Reading checkpoint {}.\".format(saved_model))\n imported_model = tf.saved_model.load(saved_model)\n self.bleurt_model_ops = imported_model.signatures[\"serving_default\"]\n logging.info(\"BLEURT initialized.\")", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n 
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! Checkpoint Loading Failed \\n\\n\")", "def load_model(path='./model_checkpoint', name='tf_model'):\n with open(os.path.join(path, name + '.json')) as json_file:\n json_config = json_file.read()\n model = TensorModel(tf.keras.models.model_from_json(json_config))\n model._model.load_weights(os.path.join(path, name + '_weights.h5'))\n\n return model", "def try_create_model_load_from_checkpoint_and_adjust(self) -> bool:\n success = self.try_create_model_and_load_from_checkpoint()\n self.create_summary_and_adjust_model_for_gpus()\n return success", "def load_model_from_checkpoint(file, device):\r\n\r\n if device == 'cuda':\r\n # Load all tensors onto GPU\r\n map_location = lambda storage, loc: storage.cuda()\r\n else:\r\n # Load all tensors onto CPU\r\n map_location = lambda storage, loc: storage\r\n\r\n # Assuming model was trained and checkpoint saved on Linux, but predict.py inference is executed using Windows.\r\n # Then, it is required to implement the following quick fix, because otherwise the exception is raised:\r\n # \"NotImplementedError: cannot instantiate 'PosixPath' on your system\"\r\n # Credits to https://stackoverflow.com/questions/57286486/i-cant-load-my-model-because-i-cant-put-a-posixpath\r\n if type(file) == pathlib.WindowsPath:\r\n tmp_PosixPath = pathlib.PosixPath\r\n pathlib.PosixPath = pathlib.WindowsPath\r\n\r\n parameters = torch.load(file, map_location=map_location)\r\n\r\n # Restore default\r\n if type(file) == pathlib.WindowsPath:\r\n pathlib.WindowsPath = pathlib.PosixPath\r\n pathlib.PosixPath = tmp_PosixPath\r\n\r\n model = train.create_model(parameters)\r\n\r\n model.class_to_idx = parameters.get('train_datasets_class_to_idx')\r\n model.load_state_dict(parameters.get('state_dict'), strict=False)\r\n\r\n return model, parameters", "def create_checkpoint(self, name, path=''):\n\n\t\tnb_path = self._get_os_path(name, path)\n\t\tself.log.debug('creating checkpoint \"%s\" \"%s\" \"%s\"' % (path, name, nb_path))\n\t\t# only the one checkpoint ID:\n\t\tcheckpoint_id = u\"checkpoint\"\n\t\tcp_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\t\tself.log.debug(\"creating checkpoint for notebook %s\", name)\n\t\tif not key_exists(self.bucket, self.checkpoint_dir):\n\t\t\tnew_key_from_string(self.bucket, self.checkpoint_dir, '')\n\t\tself._copy(nb_path, cp_path)\n\n\t\t# return the checkpoint info\n\t\treturn self.get_checkpoint_model(checkpoint_id, name, path)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n 
self.model = PPO2(CnnPolicy, self.env, verbose=1)", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def load_model(config, batchmanager):\n \n # this function returns a dictionary mapping\n # name of the task (string) --> number of classes in the task (int)\n tasks = batchmanager.getTasksWithNClasses()\n # this \"tasks\" object is used to initialize the model (with the right output layers)\n model = MultiTaskBERT(device = config.device, tasks = tasks)\n\n if not config.untrained_baseline:\n\n # if we evaluate only, model MUST be loaded.\n if config.k_shot_only:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--k_shot_only` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n raise Exception()\n \n # if we saved the state dictionary, load it.\n elif config.resume:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading 
configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n 
lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def train_and_eval(config, babas_data):\n\n if config.resume_from_checkpoint is not None:\n try:\n if config.augment_background == 'background':\n bg = config.augment_background\n else:\n bg = None\n rfc = config.resume_from_checkpoint\n ic = config.include_validation\n print 'Loading saved config: %s' % config.saved_config\n config = np.load(config.saved_config).item()\n config.resume_from_checkpoint = rfc\n config.include_validation = ic\n if not hasattr(config, 'augment_background'):\n config.augment_background = 'constant'\n if not hasattr(config, 'background_folder'):\n config.background_folder = 'backgrounds'\n if bg is not None:\n print 'Overriding saved config to add kinect backgrounds to training.'\n config.augment_background = bg\n results_dir = rfc\n except:\n print 'Relying on default config file.'\n\n if babas_data: # Shitty naive training method\n config.tfrecord_dir = '/media/data_cifs/monkey_tracking/data_for_babas/tfrecords_from_babas'\n config.babas_tfrecord_dir = config.tfrecord_dir\n config.steps_before_validation = 20\n config.epochs = 2000\n config.convert_labels_to_pixel_space = False\n config.augment_background = 'constant'\n\n # Import your model\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n model_file = import_cnn(config.model_type)\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = '%s_%s' % (config.model_type, dt_stamp)\n if config.selected_joints is not None:\n dt_dataset = '_%s' % (config.selected_joints) + dt_dataset\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, dt_dataset)\n results_dir = os.path.join(config.npy_dir, dt_dataset)\n print 'Saving Dmurphy\\'s online updates to: %s' % results_dir\n dir_list = [config.train_checkpoint, config.summary_dir, results_dir]\n [tf_fun.make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, config.train_tfrecords)\n if config.babas_tfrecord_dir is not None:\n train_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.train_tfrecords)\n if config.include_validation or config.include_validation is None:\n val_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.val_tfrecords)\n else:\n train_babas_tfrecord_dir = None\n val_babas_tfrecord_dir = None\n\n if isinstance(config.include_validation, basestring):\n validation_data = config.include_validation\n elif config.include_validation == True:\n validation_data = os.path.join(\n config.tfrecord_dir,\n config.val_tfrecords)\n else:\n validation_data = None\n\n print 'Using training set: %s' % train_data\n print 'Using validation set: %s' % validation_data\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_data_dict = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n 
model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background=config.augment_background,\n background_folder=config.background_folder,\n randomize_background=config.randomize_background,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=train_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n train_data_dict['deconv_label_size'] = len(config.labels)\n\n val_data_dict = inputs(\n tfrecord_file=validation_data,\n batch_size=config.validation_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background='none',\n background_folder=config.background_folder,\n randomize_background=None,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=val_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n val_data_dict['deconv_label_size'] = len(config.labels)\n\n # Check output_shape\n if config.selected_joints is not None:\n print 'Targeting joint: %s' % config.selected_joints\n joint_shape = len(config.selected_joints) * config.keep_dims\n if (config.num_classes // config.keep_dims) > (joint_shape):\n print 'New target size: %s' % joint_shape\n config.num_classes = joint_shape\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n print 'Creating training graph:'\n model = model_file.model_struct(\n weight_npy_path=config.weight_npy_path)\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n rgb=train_data_dict['image'],\n target_variables=train_data_dict,\n train_mode=train_mode,\n batchnorm=config.batch_norm)\n train_mu, train_var = tf.nn.moments(train_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"train image mean\", train_mu)\n tf.summary.histogram(\"train image std\", tf.sqrt(train_var))\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv train', model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(model.deconv, axis=3), tf.float32), 3))\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n print 'Creating validation graph:'\n val_model = model_file.model_struct()\n val_model.build(\n 
rgb=val_data_dict['image'],\n target_variables=val_data_dict)\n\n # Calculate validation accuracy\n val_mu, val_var = tf.nn.moments(val_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"validation image mean\", val_mu)\n tf.summary.histogram(\"validation image std\", tf.sqrt(val_var))\n if 'label' in val_data_dict.keys():\n # val_score = tf.reduce_mean(\n # tf_fun.l2_loss(\n # val_model.output, val_data_dict['label']))\n if config.keep_dims == 3:\n z_mask = tf.expand_dims(tf.tile([1, 1, 0], [int(val_data_dict['label'].get_shape()[-1]) // 3]), axis=0)\n z_mask = tf.cast(z_mask, tf.float32)\n val_model.output = val_model.output * z_mask\n val_data_dict['label'] = val_data_dict['label'] * z_mask \n val_score = tf.reduce_mean(tf.nn.l2_loss(val_model.output - val_data_dict['label']))\n tf.summary.scalar(\"validation mse\", val_score)\n if 'fc' in config.aux_losses:\n tf.summary.image('FC val activations', val_model.final_fc)\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv val', val_model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(val_model.deconv, axis=3),\n tf.float32), 3))\n tf.summary.image(\n 'validation images',\n tf.cast(val_data_dict['image'], tf.float32))\n\n # Prepare the loss functions:::\n loss_list, loss_label = [], []\n if 'label' in train_data_dict.keys():\n # 1. Joint localization loss\n if config.calculate_per_joint_loss == 'thomas':\n label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton and joint':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n loss_label += ['skeleton loss']\n delta = model['output'] - train_data_dict['label']\n proc_weights = np.asarray(\n config.dim_weight)[None,:].repeat(\n len(config.joint_names), axis=0).reshape(1, -1)\n delta *= proc_weights\n # label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n # model=model,\n # train_data_dict=train_data_dict,\n # config=config,\n # y_key='label',\n # yhat_key='output')\n # loss_list += [label_loss]\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n else:\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n loss_label += ['combined head']\n for al in loss_helper.potential_aux_losses():\n loss_list, loss_label = loss_helper.get_aux_losses(\n loss_list=loss_list,\n loss_label=loss_label,\n train_data_dict=train_data_dict,\n model=model,\n aux_loss_dict=al,\n domain_adaptation=train_babas_tfrecord_dir)\n loss = tf.add_n(loss_list)\n\n # Add wd if necessary\n if config.wd_penalty is not None:\n _, l2_wd_layers = tf_fun.fine_tune_prepare_layers(\n tf.trainable_variables(), config.wd_layers)\n l2_wd_layers = [\n x for x in l2_wd_layers if 'biases' not in x.name]\n if config.wd_type == 'l1':\n loss += (config.wd_penalty * tf.add_n(\n [tf.reduce_sum(tf.abs(x)) for x in l2_wd_layers]))\n elif config.wd_type == 'l2':\n loss += (config.wd_penalty * tf.add_n(\n [tf.nn.l2_loss(x) for x in 
l2_wd_layers]))\n\n optimizer = loss_helper.return_optimizer(config.optimizer)\n optimizer = optimizer(config.lr)\n\n if hasattr(config, 'fine_tune_layers') and config.fine_tune_layers is not None:\n print 'Finetuning learning for: %s' % config.fine_tune_layers\n train_op, grads = tf_fun.finetune_learning(\n loss,\n trainables=tf.trainable_variables(),\n fine_tune_layers=config.fine_tune_layers,\n config=config\n )\n else:\n # Op to calculate every variable gradient\n grads = optimizer.compute_gradients(\n loss, tf.trainable_variables())\n # Op to update all variables according to their gradient\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads)\n\n # Summarize all gradients and weights\n [tf.summary.histogram(\n var.name + '/gradient', grad)\n for grad, var in grads if grad is not None]\n # train_op = optimizer.minimize(loss)\n\n # Summarize losses\n [tf.summary.scalar(lab, il) for lab, il in zip(\n loss_label, loss_list)]\n\n # Summarize images and l1 weights\n tf.summary.image(\n 'train images',\n tf.cast(train_data_dict['image'], tf.float32))\n tf_fun.add_filter_summary(\n trainables=tf.trainable_variables(),\n target_layer='conv1_1_filters')\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n tf.add_to_collection('output', model.output)\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Create list of variables to run through training model\n train_session_vars = {\n 'train_op': train_op,\n 'loss_value': loss,\n 'im': train_data_dict['image'],\n 'yhat': model.output,\n 'ytrue': train_data_dict['label']\n }\n if hasattr(model, 'deconv'):\n train_session_vars['deconv'] = model.deconv\n if hasattr(model, 'final_fc'):\n train_session_vars['fc'] = model.final_fc\n\n # Create list of variables to run through validation model\n val_session_vars = {\n 'val_acc': val_score,\n 'val_pred': val_model.output,\n 'val_ims': val_data_dict['image'],\n 'val_true': val_data_dict['label'],\n }\n\n # Create list of variables to save to numpys\n save_training_vars = [\n 'im',\n 'yhat',\n 'ytrue',\n 'yhat'\n ]\n\n for al in loss_helper.potential_aux_losses():\n if al.keys()[0] in train_data_dict.keys():\n y_key = '%s' % al.keys()[0]\n train_session_vars[y_key] = train_data_dict[al.values()[0]['y_name']]\n save_training_vars += [y_key]\n\n yhat_key = '%s_hat' % al.keys()[0]\n train_session_vars[yhat_key] = model[al.values()[0]['model_name']]\n save_training_vars += [yhat_key]\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, losses = 0, []\n num_joints = int(\n train_data_dict['label'].get_shape()[-1]) // config.keep_dims\n normalize_vec = tf_fun.get_normalization_vec(config, num_joints)\n if config.resume_from_checkpoint is not None:\n if '.ckpt' in config.resume_from_checkpoint:\n ckpt = config.resume_from_checkpoint\n 'Restoring specified checkpoint: %s' % config.resume_from_checkpoint\n else:\n ckpt = tf.train.latest_checkpoint(config.resume_from_checkpoint)\n print 'Evaluating checkpoint: %s' % ckpt\n saver.restore(sess, ckpt)\n try:\n while not 
coord.should_stop():\n start_time = time.time()\n train_out_dict = sess.run(train_session_vars.values())\n train_out_dict = {k: v for k, v in zip(\n train_session_vars.keys(), train_out_dict)}\n losses.append(train_out_dict['loss_value'])\n duration = time.time() - start_time\n assert not np.isnan(\n train_out_dict['loss_value']), 'Model diverged with loss = NaN'\n if step % config.steps_before_validation == 0:\n if validation_data is not False:\n val_out_dict = sess.run(\n val_session_vars.values())\n val_out_dict = {k: v for k, v in zip(\n val_session_vars.keys(), val_out_dict)}\n # if config.normalize_labels:\n # val_out_dict['val_pred'] *= normalize_vec\n # val_out_dict['val_true'] *= normalize_vec\n np.savez(\n os.path.join(\n results_dir, '%s_val_coors' % step),\n val_pred=val_out_dict['val_pred'],\n val_ims=val_out_dict['val_ims'],\n val_true=val_out_dict['val_true'],\n normalize_vec=normalize_vec)\n with open(\n os.path.join(\n results_dir, '%s_config.p' % step), 'wb') as fp:\n pickle.dump(config, fp)\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy attach 9177\n format_str = (\n '%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch) | '\n 'Validation l2 loss = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, train_out_dict['loss_value'],\n config.train_batch / duration, float(duration),\n val_out_dict['val_acc'],\n config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if config.normalize_labels:\n train_out_dict['yhat'] *= normalize_vec\n train_out_dict['ytrue'] *= normalize_vec\n [save_training_data(\n output_dir=results_dir,\n data=train_out_dict[k],\n name='%s_%s' % (k, step)) for k in save_training_vars]\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch)')\n print (format_str % (\n datetime.now(),\n step,\n train_out_dict['loss_value'],\n config.train_batch / duration,\n float(duration)))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' 
% (config.epochs, step))\n finally:\n coord.request_stop()\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%s_training_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_epoch_checkpoint(self, directory, epoch):\n chkpnt = torch.load(directory / f\"chkpnt_epoch{epoch:04d}.pth\")\n self.load_state_dict(chkpnt['model_state_dict'])", "def __init__(self, model_dir: str, *args, **kwargs):\n super().__init__(model_dir, *args, **kwargs)\n self.model = FRCRN(*args, **kwargs)\n model_bin_file = os.path.join(model_dir,\n ModelFile.TORCH_MODEL_BIN_FILE)\n if os.path.exists(model_bin_file):\n checkpoint = torch.load(\n model_bin_file, map_location=torch.device('cpu'))\n if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:\n # the new trained model by user is based on FRCRNDecorator\n self.load_state_dict(checkpoint['state_dict'])\n else:\n # The released model on Modelscope is based on FRCRN\n self.model.load_state_dict(checkpoint, strict=False)", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. 
Start training......')", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def setup_training(model, train_loader, valid_loader, hps):\r\n\r\n train_dir = os.path.join(hps.save_root, \"train\")\r\n if not os.path.exists(train_dir): os.makedirs(train_dir)\r\n\r\n if hps.restore_model != 'None':\r\n logger.info(\"[INFO] Restoring %s for training...\", hps.restore_model)\r\n bestmodel_file = os.path.join(train_dir, hps.restore_model)\r\n loader = ModelLoader()\r\n loader.load_pytorch(model, bestmodel_file)\r\n else:\r\n logger.info(\"[INFO] Create new model for training...\")\r\n\r\n run_training(model, train_loader, valid_loader, hps) # this is an infinite loop until interrupted\r", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if \"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def _save_model(self, checkpoint_dir):\n # Check whether the specified path exists or not\n isExist = os.path.exists(checkpoint_dir)\n\n if not isExist:\n # Create a new directory because it does not exist\n os.makedirs(checkpoint_dir)\n\n filename = self._get_checkpoint_name()\n path = checkpoint_dir + filename\n\n # Serialize the model checkpoint in to a Python Pickle file\n with open(path, 'wb') as f:\n pickle.dump(self._model, f)\n return path" ]
[ "0.77202636", "0.7588646", "0.7440869", "0.7204906", "0.71627265", "0.7153112", "0.7153112", "0.7096752", "0.7063371", "0.7042817", "0.7034828", "0.6975058", "0.69707805", "0.69557154", "0.69152653", "0.6887786", "0.6820848", "0.68042725", "0.67946786", "0.6771584", "0.6762465", "0.675391", "0.67403316", "0.6708518", "0.6686366", "0.66816217", "0.6679283", "0.66758716", "0.6670577", "0.6657054", "0.6651751", "0.6651429", "0.66511196", "0.6647377", "0.6644745", "0.6636243", "0.6636181", "0.6630305", "0.6626725", "0.6623121", "0.66225934", "0.6618249", "0.6616538", "0.66093624", "0.66044694", "0.65970784", "0.65946275", "0.6587542", "0.6585933", "0.6574459", "0.65616167", "0.6557188", "0.65475404", "0.6543599", "0.6537196", "0.6535007", "0.65204644", "0.6514639", "0.6513976", "0.65099424", "0.65099126", "0.65099126", "0.64938927", "0.64918244", "0.64895964", "0.6488237", "0.64868593", "0.64865357", "0.6480784", "0.64773977", "0.6466501", "0.64652777", "0.6453639", "0.6448046", "0.6445826", "0.6429533", "0.6419853", "0.6418043", "0.64152324", "0.6408688", "0.6391328", "0.6379987", "0.6373571", "0.63657904", "0.6355582", "0.6334412", "0.63329536", "0.6322521", "0.63189983", "0.6312858", "0.6305118", "0.6300038", "0.62980545", "0.6296586", "0.62689227", "0.6256852", "0.6245888", "0.6243168", "0.6242567", "0.6239272", "0.62377995" ]
0.0
-1
Creates a model as per the config, and loads the parameters from the given checkpoint path. The model is then adjusted for data parallelism and mixed precision. Also updates the checkpoint_epoch.
def try_create_mean_teacher_model_load_from_checkpoint_and_adjust(self) -> bool: success = self.try_create_mean_teacher_model_and_load_from_checkpoint() self.create_summary_and_adjust_mean_teacher_model_for_gpus() return success
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, 
f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = self._counter\n\n model_dict['metrics'] = self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def create_checkpoint(model_config, path):\n model = models.VisionTransformer(num_classes=1, **model_config)\n variables = model.init(\n jax.random.PRNGKey(0),\n jnp.ones([1, 16, 16, 3], jnp.float32),\n train=False,\n )\n _save(variables['params'], path)", "def init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = 
checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load(cls, model_path: str, sample_shape: tuple = None,\n checkpoint: str = None, **kwargs):", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. 
Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_model(self, ckpt_name=\"best_model.pth\"):\n path = \"/\".join(ckpt_name.split(\"/\")[:-1])\n chkpt = torch.load(ckpt_name)\n self.start_epoch = chkpt['epoch']\n self.best_metric = chkpt['best_metric']\n\n # fix the DataParallel caused problem with keys names\n if self.multi_gpu_flag:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=False)\n self.net.load_state_dict(new_state_dict)\n else:\n try:\n self.net.load_state_dict(chkpt['state_dict'])\n except:\n new_state_dict = fix_multigpu_chkpt_names(chkpt['state_dict'], drop=True)\n self.net.load_state_dict(new_state_dict)\n\n if self.load_optimizer_state:\n self.optimizer.load_state_dict(chkpt['optimizer'])\n logging.info(\"******** State loaded ********\")\n\n training_meta = pickle.load(open(f\"{path}/training_meta.pickle.dat\", \"rb\"))\n for k, v in training_meta.items():\n if k in self.__class__.__params:\n setattr(self, k, v)\n logging.info(\"******** Training params loaded ********\")", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n 
if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)", "def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = 
models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net", "def train(self, config: ConfigurationNode = None):\n if config is None:\n config = self.config\n # Create writable timestamp for easier record keeping\n timestamp = datetime.now().isoformat(sep=\"T\", timespec=\"auto\")\n name_timestamp = timestamp.replace(\":\", \"_\")\n\n # Start the mlflow run:\n mlflow.start_run(run_name=name_timestamp)\n\n # Check valid output path, set path from the path_cfg_override modules respectively\n assert config.OUTPUT_PATH != ''\n path_output = config.OUTPUT_PATH # output folder\n path_train = config.DATASET.TRAIN_DATA_PATH # training data folder\n path_val = config.DATASET.VAL_DATA_PATH # validation data folder\n\n # Make output dir and its parents if not exist.\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n\n # Make result folders if they do not exist.\n self.results_dir = (Path(path_output) / name_timestamp)\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n # Make backup folders if they do not exist.\n self.backup_dir = os.path.join(self.results_dir, 'model_backups')\n if not os.path.exists(self.backup_dir):\n os.makedirs(self.backup_dir)\n\n writer_tensorboard = SummaryWriter(log_dir=Path(self.results_dir / \"logs_tensorflow\"))\n\n # Now that CFG has been properly merged with new data along the way, time to dump a version of it into a string for trackability purposes.\n config.dump(stream=open(os.path.join(self.results_dir, f'config{name_timestamp}.yaml'), 'w'))\n\n # file path to store the state of the model.\n state_fpath = os.path.join(self.results_dir, f'model{name_timestamp}.pt')\n\n # ????\n perf_path = os.path.join(self.results_dir, f'trace{name_timestamp}.p')\n perf_trace = []\n\n # Load data, create the data loader objects from them.\n data_train = pickle.load(open(path_train, 'rb'))\n data_val = pickle.load(open(path_val, 'rb'))\n self.loader_train = build_data_loader(data_train, config.DATASET, True)\n self.loader_val = build_data_loader(data_val, config.DATASET, False)\n\n # Build the model using configue dict node\n self.model = build_model(config.MODEL)\n\n # Enable parallel multi GPU mode if the config specify it.\n if config.MODEL.PARALLEL:\n print(\"Utilized parallel processing\")\n self.model = torch.nn.DataParallel(self.model)\n\n current_epoch = 0\n\n # For resuming training (i.e. 
load checkpoint)\n if config.RESUME_PATH != \"\":\n checkpoint = torch.load(config.RESUME_PATH, map_location='cpu')\n current_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint[\"model_state\"])\n _ = self.model.cuda()\n\n # SOLVER EVALUATOR\n cfg_solver = config.MODEL.SOLVER\n\n # Build optimizer (between train/validation, using the solver portion of the configuration.\n optimizer = build_optimizer(self.model, cfg_solver)\n\n # Build evaluator (between train/validation, using the solver portion of the configuration.\n evaluator = build_evaluator(cfg_solver)\n\n evaluator.float().cuda()\n total_epochs = cfg_solver.TOTAL_EPOCHS\n\n\n # Main training epoch loop starts here.\n for epoch in range(current_epoch, total_epochs):\n\n # Train a single epoch\n self.train_epoch(epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard)\n\n mlflow.end_run()", "def load_model_trainer_states_from_checkpoint(self, checkpoint_path, model=None):\n import os\n\n if model is None:\n try:\n import cloudpickle\n except ImportError:\n raise ImportError(\"cloudpickle is required to load model class\")\n logger.info(\"Loading model class\")\n model = cloudpickle.load(open(os.path.join(checkpoint_path, \"model_class.pkl\"), \"rb\"))\n\n self.model = HFWrapper(model)\n logger.info(\"Loading weights of previously trained model\")\n # Restoring model weights\n self.model.load_state_dict(\n # torch.load(os.path.join(training_args.output_dir, \"pytorch_model.bin\"))\n torch.load(os.path.join(checkpoint_path, \"pytorch_model.bin\"))\n )\n # Restoring random state\n rng_file = os.path.join(checkpoint_path, \"rng_state.pth\")\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n # Restoring AMP scaler\n if self.use_amp:\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint_path, \"scaler.pt\")))", "def init_model(config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None):\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n 'but got {}'.format(type(config)))\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n config.model.pretrained = None\n config.model.train_cfg = None\n init_default_scope(config.get('default_scope', 'mmseg'))\n\n model = MODELS.build(config.model)\n if checkpoint is not None:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n dataset_meta = checkpoint['meta'].get('dataset_meta', None)\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint.get('meta', {}):\n # mmseg 1.x\n model.dataset_meta = dataset_meta\n elif 'CLASSES' in checkpoint.get('meta', {}):\n # < mmseg 1.x\n classes = checkpoint['meta']['CLASSES']\n palette = checkpoint['meta']['PALETTE']\n model.dataset_meta = {'classes': classes, 'palette': palette}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, classes and palette will be'\n 'set according to num_classes ')\n num_classes = model.decode_head.num_classes\n dataset_name 
= None\n for name in dataset_aliases.keys():\n if len(get_classes(name)) == num_classes:\n dataset_name = name\n break\n if dataset_name is None:\n warnings.warn(\n 'No suitable dataset found, use Cityscapes by default')\n dataset_name = 'cityscapes'\n model.dataset_meta = {\n 'classes': get_classes(dataset_name),\n 'palette': get_palette(dataset_name)\n }\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def train_model(config: dict, load_weights=None, resume_epoch=None):\n # Set GPU memory optimization\n try:\n physical_devices = tf.config.list_physical_devices(\"GPU\")\n for index in range(len(physical_devices)):\n try:\n tf.config.experimental.set_memory_growth(physical_devices[index], True)\n\n except Exception as err:\n print(\"[WARN]: Failed to set memory growth for {0}\".format(physical_devices[index]))\n print(\"[WARN]: Error\", err, \" .Skipping memory optimization\")\n\n except Exception as err:\n print(\"[WARN]: memory optimization failed. Error:\", err, \" . 
Skipping!\")\n\n # Set up random states\n np.random.seed(100)\n random.seed(100)\n tf.random.set_seed(100)\n\n # Get the required configurations\n no_of_epochs = config[\"no_of_epochs\"]\n steps_per_epoch = config[\"steps_per_epoch\"] if config[\"steps_per_epoch\"] > 0 else None\n\n train_batch_size = config[\"train_batch_size\"]\n val_batch_size = config[\"val_batch_size\"]\n test_batch_size = config[\"test_batch_size\"]\n\n model_name = config[\"model_name\"]\n\n # Create the dataset\n dataset_path = config[\"dataset_path\"]\n dataset_family = config[\"dataset_family\"]\n\n # get width and height\n image_width = config[\"image_width\"]\n image_height = config[\"image_height\"]\n initial_width = config[\"initial_width\"]\n initial_height = config[\"initial_height\"]\n\n model_name = model_name + \"_\" + dataset_family\n\n print(\"[INFO]: Using Configuration: \\n\", config)\n\n # Set up environments\n folders = [\n \"./outputs/model_save/{0}/checkpoints/\".format(model_name),\n \"./outputs/output_logs/{0}/csv_log/\".format(model_name),\n \"./outputs/model_save/{0}/best_model/\".format(model_name),\n \"./outputs/model_save/{0}/saved_model/\".format(model_name),\n \"./outputs/output_logs/{0}/graphs/\".format(model_name)\n ]\n print(\"[INFO]: Setting up folders: \")\n os_utilities.make_directories(paths=folders)\n\n # Load the dataset\n print(\"[INFO]: Loading dataset\")\n if dataset_family == \"CVC-ClinicDB\":\n X, y = du.get_cvc_clinic_datapath(dataset_path)\n elif dataset_family == \"Kvasir-Seg\":\n X, y = du.get_kvasir_seg_datapath(dataset_path)\n else:\n print(\"[ERROR]: {0} dataset family is unrecognized or not supported!\".format(dataset_family))\n raise NotImplementedError\n\n X_train, X_sided, y_train, y_sided = train_test_split(X, y, random_state=100, test_size=0.2)\n X_val, X_test, y_val, y_test = train_test_split(X_sided, y_sided, random_state=100, test_size=0.5)\n\n print(\"[INFO]: Training set size: \", len(X_train))\n print(\"[INFO]: Validation set size: \", len(X_val))\n print(\"[INFO]: Testing set size: \", len(X_test))\n\n print(\"[INFO]: Loading Training set\")\n train_datagen = du.DataGenerator(X_train,\n y_train,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=train_batch_size,\n dataset_family=dataset_family,\n initial_size=(initial_width, initial_height),\n aug_config_path=\"./augmentation_config.yaml\",\n shuffle=True)\n\n print(\"[INFO]: Loading Validation set\")\n val_datagen = du.DataGenerator(X_val,\n y_val,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=val_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Setting tf.data pipeline\")\n train_steps = len(train_datagen) if steps_per_epoch is None else int(steps_per_epoch)\n val_steps = len(val_datagen)\n\n train_dataset = train_datagen.get_tf_data()\n val_dataset = val_datagen.get_tf_data()\n\n # Get the model, loss and metrics\n print(\"[INFO]: Building the model - {0}\".format(model_name))\n model = models.ModelSelector(config)\n\n # Load the weights if available\n if load_weights is not None:\n print(\"[INFO]: Load the weights from {0}\".format(load_weights))\n model.load_weights(load_weights)\n\n # Setup Callbacks\n print(\"[INFO]: Setting up training Callbacks and Optimizers. 
Its almost done\")\n resume_epoch = 0 if resume_epoch is None else resume_epoch\n overwrite = True if resume_epoch == 0 else False\n\n train_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/train_log.csv\".format(model_name),\n overwrite=overwrite)\n valid_csv_logger = callbacks.CSVLogging(\"./outputs/output_logs/{0}/csv_log/valid_log.csv\".format(model_name),\n overwrite=overwrite)\n lr_reducer = callbacks.ReduceLROnPlateau(learning_rate=float(config[\"learning_rate\"]),\n patience=4,\n decay_rate=1E-1,\n delta=0.0001,\n min_lr=1E-7,\n mode=\"min\")\n\n model_save_path = \"./outputs/model_save/{0}/\".format(model_name)\n\n # Setup Optimizer\n optimizer = tf.keras.optimizers.Adam(learning_rate=float(config[\"learning_rate\"]))\n\n # Check for monitor\n monitor_variable = 100.0 # Initialize a start max\n\n print(\"[INFO]: Setting up metrics\")\n loss_avg = tf.keras.metrics.Mean(name=\"loss\")\n f1_score_metric = tf.keras.metrics.Mean(name=\"f1_score\")\n iou_coe_metric = tf.keras.metrics.Mean(name=\"iou_coe\")\n dice_coe_metric = tf.keras.metrics.Mean(name=\"dice_coe\")\n\n # Set up a custom train loop\n print(\"[INFO]: Beginning training loops\")\n # Iterate epoch wise\n for epoch in range(resume_epoch, no_of_epochs):\n\n print(\"Training {0}/{1}\".format(epoch + 1, no_of_epochs))\n\n try:\n # Training-loop == using batches\n train_loss, train_f1_score, train_iou, train_dice = train(config[\"model_name\"],\n model,\n train_dataset,\n train_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=optimizer,\n epoch=epoch + 1,\n total_epochs=no_of_epochs)\n\n train_tracker = {\n \"train_loss\": [train_loss],\n \"train_f1_score\": [train_f1_score],\n \"train_iou_coe\": [train_iou],\n \"train_dice_coe\": [train_dice]\n }\n\n # Validation loop == using batches\n val_loss, val_f1_score, val_iou, val_dice = train(config[\"model_name\"],\n model,\n val_dataset,\n val_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n val_tracker = {\n \"val_loss\": [val_loss],\n \"val_f1_score\": [val_f1_score],\n \"val_iou_coe\": [val_iou],\n \"val_dice_coe\": [val_dice]\n }\n\n model.save_weights(model_save_path + \"checkpoints/{0}_ckpt.h5\".format(model_name))\n print(\"[INFO]: Epoch {0}/{1} - \\nTrain evaluation: {2}, \\nValidation evaluation: {3}\".\n format(epoch + 1, no_of_epochs, train_tracker, val_tracker))\n train_csv_logger.log(train_tracker)\n valid_csv_logger.log(val_tracker)\n\n # Save the best model\n if monitor_variable > val_loss:\n monitor_variable = val_loss\n model.save_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n # LR Reduce\n lr_reducer.check_lr(monitor_variable=val_loss, optimizer=optimizer)\n\n except KeyboardInterrupt:\n print(\"[INFO]: Interrupted Training. Trying to save model\")\n model.save_weights(model_save_path + \"{0}_{1}_interrupted.h5\".format(model_name, epoch + 1))\n print(\"[INFO]: Attempting to run test on the best model so far!\")\n break\n\n except Exception as err:\n print(\"[ERROR]: Unexpected Critical Error: \", err)\n print(\"[ERROR]: Trying to save the weights\")\n model.save_weights(model_save_path + \"{0}_{1}_critical.h5\".format(model_name, epoch + 1))\n traceback.print_exc()\n sys.exit(2)\n\n print(\"[INFO]: Training completed. 
Saving model\")\n model.save_weights(model_save_path + \"saved_model/{0}.h5\".format(model_name))\n\n print(\"[INFO]: Testing model\")\n print(\"[INFO]: Loading Best saved model:\")\n model.load_weights(model_save_path + \"best_model/best_{0}.h5\".format(model_name))\n\n print(\"[INFO]: Loading the test set\")\n test_datagen = du.DataGenerator(X_test,\n y_test,\n image_size=(image_width, image_height),\n model_name=config[\"model_name\"],\n batch_size=test_batch_size,\n dataset_family=dataset_family,\n initial_size=None,\n aug_config_path=None,\n shuffle=False)\n\n print(\"[INFO]: Loading TF data\")\n test_dataset = test_datagen.get_tf_data()\n test_steps = len(test_datagen)\n\n print(\"[INFO]: Testing Initiated\")\n test_loss, test_f1_score, test_iou, test_dice = train(config[\"model_name\"],\n model,\n test_dataset,\n test_steps,\n metrics_tracker=(loss_avg,\n f1_score_metric,\n iou_coe_metric,\n dice_coe_metric),\n optimizer=None,\n epoch=1,\n total_epochs=1)\n test_tracker = {\n \"test_loss\": [test_loss],\n \"test_f1_score\": [test_f1_score],\n \"test_iou_coe\": [test_iou],\n \"test_dice_coe\": [test_dice]\n }\n print(\"[INFO]: Test Results: \\n\", test_tracker)\n with open(\"./outputs/output_logs/{0}/test_results.txt\".format(model_name), mode=\"w\") as f:\n f.write(\"Dumped Test Results for the model {0}\\n\".format(model_name))\n for k, v in test_tracker.items():\n f.write(\"{0} => {1}\\n\".format(k, v))\n\n print(\"[INFO]: Closing operations\")", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n 
tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, 
step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # state_dict of the pretrained model\n model_dict = self.model.state_dict() # state_dict of the model currently being trained\n \n if pretrained_dict.keys() != model_dict.keys(): # parameter adaptation is required\n print('Parameters are inconsistent, adapting model parameters ...')\n # Before merging (update), drop the parameters of pretrained_dict that are not needed.\n # Keys '0', '1' of the recognition-branch-only pretrained state_dict map to keys '2', '3' of the full model state_dict.\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # delete the original pretrained key-value pairs to avoid wrongly overwriting entries of the current model\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # update (merge) the model parameters\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistent, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def load_model(self, checkpoint_path):\n 
model = self.model_definition()\n model.load_weights(checkpoint_path)\n return model", "def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n 
steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. Start loading pre-trained model......')", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. 
Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())", "def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = 
key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" % num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model", "def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n if checkpoint is not None:\n _load_checkpoint_to_model(model, checkpoint)\n else:\n warnings.warn('Checkpoint is not loaded, and the inference '\n 'result is calculated by the randomly initialized '\n 'model!')", "def prepare_model_optimizer_and_scheduler(self):\n\n ###################################################################\n # MODEL PREPARATION\n # -----------------\n # - step 1: Initialize a random model from config\n # - step 2: Load model weights from checkpoint if any\n # - step 3: Move model to device (GPU)\n ###################################################################\n\n # Initialize a random model according to a specific config:\n # NOTE: here we load from a physical path instead of using a keyword\n # as compute nodes may not allow 
downloading from online hubs\n if self.is_character_bert:\n model_config = CharacterBertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'character-bert'))\n model = CharacterBertForPreTraining(model_config)\n else:\n model_config = BertConfig.from_pretrained(\n os.path.join(WORKDIR, 'data', 'bert-base-uncased'))\n model = BertForPreTraining(model_config)\n if self.is_main_process:\n logging.info(\n \"Initialized %s using Config:\\n%s\",\n \"CharacterBERT\" if self.is_character_bert else \"BERT\",\n model_config\n )\n\n # Load checkpoint if any:\n if not self.resume_pretraining:\n # CASE: no checkpoint -> training from scratch\n self.global_step = 0\n if self.is_main_process:\n logging.info(\"Pre-training from scratch (good luck!)\")\n else:\n if self.init_checkpoint:\n # CASE: load checkpoint from direct path\n self.global_step = 0\n init_checkpoint = self.init_checkpoint\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from specific checkpoint `%s`\",\n init_checkpoint\n )\n else:\n # CASE: load checkpoint from resume_step\n if self.is_main_process:\n logging.info(\n \"Resuming pre-training from step `%s`. \"\n \"Looking inside `output_directory` for checkpoints...\",\n self.resume_step\n )\n\n if self.resume_step == -1:\n # CASE: resume_step == -1, load latest checkpoint\n model_names = [\n fname\n for fname in os.listdir(self.output_directory)\n if fname.endswith(\".pt\")]\n assert model_names, \"Could not find any checkpoints to resume from.\"\n self.resume_step = max([\n int(x.split('.pt')[0].split('_')[1].strip())\n for x in model_names]) # TODO: find a better way for this\n if self.is_main_process:\n logging.info(\n \"Resuming from latest checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n else:\n # CASE: resume_step == X, load checkpoint: `ckpt_X.pt`\n if self.is_main_process:\n logging.info(\n \"Resuming from checkpoint: ckpt_%s.pt\",\n self.resume_step\n )\n self.global_step = self.resume_step\n init_checkpoint = os.path.join(\n self.output_directory, f\"ckpt_{self.resume_step}.pt\")\n\n # Load the actual checkpoint file\n self.checkpoint = torch.load(\n init_checkpoint, map_location=\"cpu\"\n )\n\n # NOTE: Keeping these lines below as a reminder that re-training on\n # a different domain with CharacterBERT requires changing the\n # output layer with a topK tokens matrix from the new domain.\n\n # # Case where we would retrain a general_domain CharacterBERT\n # # on the medical domain. 
Don't use the general domain output layer:\n # if self.is_medical_domain and self.is_character_bert and (not self.phase2):\n # model.load_state_dict(\n # {\n # k: v for (k, v) in self.checkpoint['model'].items()\n # # Don't load output matrix from general domain model\n # if not k.startswith('cls.predictions') # ignoring the old output layer\n # },\n # strict=False)\n # if self.is_main_process:\n # logging.warning(\n # \"Loaded model weights from `%s`, \"\n # \"but ignored the `cls.predictions` module.\",\n # init_checkpoint)\n\n # # General case: load weights from checkpoint\n # else:\n # model.load_state_dict(self.checkpoint['model'], strict=True)\n # if self.is_main_process:\n # logging.info('Loaded model weights from `%s`',\n # init_checkpoint)\n\n # General case: load weights from checkpoint\n model.load_state_dict(self.checkpoint['model'], strict=True)\n if self.is_main_process:\n logging.info('Loaded model weights from `%s`', init_checkpoint)\n\n # Deduce previous steps from phase1 when in phase2\n if self.phase2 and not self.init_checkpoint:\n self.global_step -= self.phase1_end_step\n\n if self.is_main_process:\n logging.info(\"Training will start at global_step=%s\", self.global_step)\n\n # Move model to GPU:\n model.to(self.device)\n if self.is_main_process:\n logging.info(\"Model was moved to device: %s\", self.device)\n\n ###################################################################\n # OPTIMIZER / SCHEDULER PREPARATION\n # ---------------------------------\n # - step 1: Define the optimizer (FusedLAMB w/ some weight decay)\n # - step 2: Define the learning rate scheduler (PolyWarmUpScheduler)\n ###################################################################\n\n # Initialize an optimizer:\n no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] # no weight decay\n optimizer_grouped_parameters = [\n {\n 'params': [\n param for name, param in model.named_parameters()\n if not any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.01\n },\n {\n 'params': [\n param for name, param in model.named_parameters()\n if any((nd in name) for nd in no_decay)],\n 'weight_decay': 0.0\n }\n ]\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.learning_rate)\n if self.is_main_process:\n logging.info(\"Using optimizer: %s\", optimizer)\n\n # Initialize a learning rate scheduler:\n self.lr_scheduler = PolyWarmUpScheduler(\n optimizer,\n warmup=self.warmup_proportion,\n total_steps=self.total_steps\n )\n if self.is_main_process:\n logging.info(\"Using scheduler: %s\", self.lr_scheduler)\n\n ###################################################################\n # OTHER PREPARATION STEPS\n # -----------------------\n # - step 1: Set up Mixed Precision training (fp16) if required\n # - step 2: Load optimizer stat from checkpoint if any\n # - step 2: Set up DataParallel\n ###################################################################\n\n # Set up fp16:\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Setting up `Almost FP16` Mixed Precision...\")\n if self.loss_scale == 0:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=\"dynamic\")\n else:\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O2\", loss_scale=self.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n\n # Load optimizer state from checkpoint\n if self.resume_pretraining:\n if self.is_main_process:\n logging.info(\"Loading optimizer state from checkpoint...\")\n if self.phase2 or self.init_checkpoint:\n keys = 
list(self.checkpoint['optimizer']['state'].keys())\n # Override hyperparameters from previous self.checkpoint\n for key in keys:\n self.checkpoint['optimizer']['state'][key]['step'] = self.global_step\n for i, _ in enumerate(self.checkpoint['optimizer']['param_groups']):\n self.checkpoint['optimizer']['param_groups'][i]['step'] = self.global_step\n self.checkpoint['optimizer']['param_groups'][i]['t_total'] = self.total_steps\n self.checkpoint['optimizer']['param_groups'][i]['warmup'] = self.warmup_proportion\n self.checkpoint['optimizer']['param_groups'][i]['lr'] = self.learning_rate\n if self.is_main_process:\n logging.info(\"Overwrote the following parameters with new values:\")\n logging.info(\"* step: %s\", self.global_step)\n logging.info(\"* t_total: %s\", self.total_steps)\n logging.info(\"* warmup: %s\", self.warmup_proportion)\n logging.info(\"* lr: %s\", self.learning_rate)\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n # Restore AMP master parameters\n if self.fp16:\n if self.is_main_process:\n logging.info(\"Restoring AMP master parameters (optimizer)...\")\n optimizer._lazy_init_maybe_master_weights()\n optimizer._amp_stash.lazy_init_called = True\n optimizer.load_state_dict(self.checkpoint['optimizer'])\n for param, saved_param in zip(amp.master_params(optimizer), self.checkpoint['master params']):\n param.data.copy_(saved_param.data)\n\n # Distribute model\n if self.training_is_distributed:\n if not self.allreduce_post_accumulation:\n model = DistributedDataParallel(\n model,\n message_size=250000000,\n gradient_predivide_factor=\\\n torch.distributed.get_world_size()\n )\n else:\n flat_dist_call(\n [param.data for param in model.parameters()],\n torch.distributed.broadcast,\n (0,)\n )\n elif self.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Set the values of self.model and self.optimizer\n self.model = model\n self.optimizer = optimizer", "def train_and_eval(config, babas_data):\n\n if config.resume_from_checkpoint is not None:\n try:\n if config.augment_background == 'background':\n bg = config.augment_background\n else:\n bg = None\n rfc = config.resume_from_checkpoint\n ic = config.include_validation\n print 'Loading saved config: %s' % config.saved_config\n config = np.load(config.saved_config).item()\n config.resume_from_checkpoint = rfc\n config.include_validation = ic\n if not hasattr(config, 'augment_background'):\n config.augment_background = 'constant'\n if not hasattr(config, 'background_folder'):\n config.background_folder = 'backgrounds'\n if bg is not None:\n print 'Overriding saved config to add kinect backgrounds to training.'\n config.augment_background = bg\n results_dir = rfc\n except:\n print 'Relying on default config file.'\n\n if babas_data: # Shitty naive training method\n config.tfrecord_dir = '/media/data_cifs/monkey_tracking/data_for_babas/tfrecords_from_babas'\n config.babas_tfrecord_dir = config.tfrecord_dir\n config.steps_before_validation = 20\n config.epochs = 2000\n config.convert_labels_to_pixel_space = False\n config.augment_background = 'constant'\n\n # Import your model\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n model_file = import_cnn(config.model_type)\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = '%s_%s' % (config.model_type, dt_stamp)\n if config.selected_joints is not None:\n dt_dataset = '_%s' % (config.selected_joints) + dt_dataset\n 
config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, dt_dataset)\n results_dir = os.path.join(config.npy_dir, dt_dataset)\n print 'Saving Dmurphy\\'s online updates to: %s' % results_dir\n dir_list = [config.train_checkpoint, config.summary_dir, results_dir]\n [tf_fun.make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, config.train_tfrecords)\n if config.babas_tfrecord_dir is not None:\n train_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.train_tfrecords)\n if config.include_validation or config.include_validation is None:\n val_babas_tfrecord_dir = os.path.join(\n config.babas_tfrecord_dir,\n config.val_tfrecords)\n else:\n train_babas_tfrecord_dir = None\n val_babas_tfrecord_dir = None\n\n if isinstance(config.include_validation, basestring):\n validation_data = config.include_validation\n elif config.include_validation == True:\n validation_data = os.path.join(\n config.tfrecord_dir,\n config.val_tfrecords)\n else:\n validation_data = None\n\n print 'Using training set: %s' % train_data\n print 'Using validation set: %s' % validation_data\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_data_dict = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background=config.augment_background,\n background_folder=config.background_folder,\n randomize_background=config.randomize_background,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=train_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n image_target_size_is_flipped=config.image_target_size_is_flipped)\n train_data_dict['deconv_label_size'] = len(config.labels)\n\n val_data_dict = inputs(\n tfrecord_file=validation_data,\n batch_size=config.validation_batch,\n im_size=config.resize,\n target_size=config.image_target_size,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n label_shape=config.num_classes,\n num_epochs=config.epochs,\n image_target_size=config.image_target_size,\n image_input_size=config.image_input_size,\n maya_conversion=config.maya_conversion,\n max_value=config.max_depth,\n normalize_labels=config.normalize_labels,\n aux_losses=config.aux_losses,\n selected_joints=config.selected_joints,\n joint_names=config.joint_order,\n num_dims=config.num_dims,\n keep_dims=config.keep_dims,\n mask_occluded_joints=config.mask_occluded_joints,\n background_multiplier=config.background_multiplier,\n augment_background='none',\n background_folder=config.background_folder,\n randomize_background=None,\n maya_joint_labels=config.labels,\n babas_tfrecord_dir=val_babas_tfrecord_dir,\n convert_labels_to_pixel_space=config.convert_labels_to_pixel_space,\n 
image_target_size_is_flipped=config.image_target_size_is_flipped)\n val_data_dict['deconv_label_size'] = len(config.labels)\n\n # Check output_shape\n if config.selected_joints is not None:\n print 'Targeting joint: %s' % config.selected_joints\n joint_shape = len(config.selected_joints) * config.keep_dims\n if (config.num_classes // config.keep_dims) > (joint_shape):\n print 'New target size: %s' % joint_shape\n config.num_classes = joint_shape\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n print 'Creating training graph:'\n model = model_file.model_struct(\n weight_npy_path=config.weight_npy_path)\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n rgb=train_data_dict['image'],\n target_variables=train_data_dict,\n train_mode=train_mode,\n batchnorm=config.batch_norm)\n train_mu, train_var = tf.nn.moments(train_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"train image mean\", train_mu)\n tf.summary.histogram(\"train image std\", tf.sqrt(train_var))\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv train', model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(model.deconv, axis=3), tf.float32), 3))\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n print 'Creating validation graph:'\n val_model = model_file.model_struct()\n val_model.build(\n rgb=val_data_dict['image'],\n target_variables=val_data_dict)\n\n # Calculate validation accuracy\n val_mu, val_var = tf.nn.moments(val_data_dict['image'], axes=[1, 2, 3])\n tf.summary.histogram(\"validation image mean\", val_mu)\n tf.summary.histogram(\"validation image std\", tf.sqrt(val_var))\n if 'label' in val_data_dict.keys():\n # val_score = tf.reduce_mean(\n # tf_fun.l2_loss(\n # val_model.output, val_data_dict['label']))\n if config.keep_dims == 3:\n z_mask = tf.expand_dims(tf.tile([1, 1, 0], [int(val_data_dict['label'].get_shape()[-1]) // 3]), axis=0)\n z_mask = tf.cast(z_mask, tf.float32)\n val_model.output = val_model.output * z_mask\n val_data_dict['label'] = val_data_dict['label'] * z_mask \n val_score = tf.reduce_mean(tf.nn.l2_loss(val_model.output - val_data_dict['label']))\n tf.summary.scalar(\"validation mse\", val_score)\n if 'fc' in config.aux_losses:\n tf.summary.image('FC val activations', val_model.final_fc)\n if 'deconv_image' in config.aux_losses:\n tf.summary.image('Deconv val', val_model.deconv)\n if 'deconv_label' in config.aux_losses:\n tf.summary.image(\n 'Deconv label train',\n tf.expand_dims(\n tf.cast(\n tf.argmax(val_model.deconv, axis=3),\n tf.float32), 3))\n tf.summary.image(\n 'validation images',\n tf.cast(val_data_dict['image'], tf.float32))\n\n # Prepare the loss functions:::\n loss_list, loss_label = [], []\n if 'label' in train_data_dict.keys():\n # 1. 
Joint localization loss\n if config.calculate_per_joint_loss == 'thomas':\n label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n elif config.calculate_per_joint_loss == 'skeleton and joint':\n label_loss = tf_fun.skeleton_loss(\n model=model,\n train_data_dict=train_data_dict,\n config=config,\n y_key='label',\n yhat_key='output')\n loss_list += [label_loss]\n loss_label += ['skeleton loss']\n delta = model['output'] - train_data_dict['label']\n proc_weights = np.asarray(\n config.dim_weight)[None,:].repeat(\n len(config.joint_names), axis=0).reshape(1, -1)\n delta *= proc_weights\n # label_loss, use_joints, joint_variance = tf_fun.thomas_l1_loss(\n # model=model,\n # train_data_dict=train_data_dict,\n # config=config,\n # y_key='label',\n # yhat_key='output')\n # loss_list += [label_loss]\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n else:\n loss_list += [tf.nn.l2_loss(\n model['output'] - train_data_dict['label'])]\n loss_label += ['combined head']\n for al in loss_helper.potential_aux_losses():\n loss_list, loss_label = loss_helper.get_aux_losses(\n loss_list=loss_list,\n loss_label=loss_label,\n train_data_dict=train_data_dict,\n model=model,\n aux_loss_dict=al,\n domain_adaptation=train_babas_tfrecord_dir)\n loss = tf.add_n(loss_list)\n\n # Add wd if necessary\n if config.wd_penalty is not None:\n _, l2_wd_layers = tf_fun.fine_tune_prepare_layers(\n tf.trainable_variables(), config.wd_layers)\n l2_wd_layers = [\n x for x in l2_wd_layers if 'biases' not in x.name]\n if config.wd_type == 'l1':\n loss += (config.wd_penalty * tf.add_n(\n [tf.reduce_sum(tf.abs(x)) for x in l2_wd_layers]))\n elif config.wd_type == 'l2':\n loss += (config.wd_penalty * tf.add_n(\n [tf.nn.l2_loss(x) for x in l2_wd_layers]))\n\n optimizer = loss_helper.return_optimizer(config.optimizer)\n optimizer = optimizer(config.lr)\n\n if hasattr(config, 'fine_tune_layers') and config.fine_tune_layers is not None:\n print 'Finetuning learning for: %s' % config.fine_tune_layers\n train_op, grads = tf_fun.finetune_learning(\n loss,\n trainables=tf.trainable_variables(),\n fine_tune_layers=config.fine_tune_layers,\n config=config\n )\n else:\n # Op to calculate every variable gradient\n grads = optimizer.compute_gradients(\n loss, tf.trainable_variables())\n # Op to update all variables according to their gradient\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads)\n\n # Summarize all gradients and weights\n [tf.summary.histogram(\n var.name + '/gradient', grad)\n for grad, var in grads if grad is not None]\n # train_op = optimizer.minimize(loss)\n\n # Summarize losses\n [tf.summary.scalar(lab, il) for lab, il in zip(\n loss_label, loss_list)]\n\n # Summarize images and l1 weights\n tf.summary.image(\n 'train images',\n tf.cast(train_data_dict['image'], tf.float32))\n tf_fun.add_filter_summary(\n trainables=tf.trainable_variables(),\n target_layer='conv1_1_filters')\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n tf.add_to_collection('output', model.output)\n\n # Initialize the graph\n sess = 
tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Create list of variables to run through training model\n train_session_vars = {\n 'train_op': train_op,\n 'loss_value': loss,\n 'im': train_data_dict['image'],\n 'yhat': model.output,\n 'ytrue': train_data_dict['label']\n }\n if hasattr(model, 'deconv'):\n train_session_vars['deconv'] = model.deconv\n if hasattr(model, 'final_fc'):\n train_session_vars['fc'] = model.final_fc\n\n # Create list of variables to run through validation model\n val_session_vars = {\n 'val_acc': val_score,\n 'val_pred': val_model.output,\n 'val_ims': val_data_dict['image'],\n 'val_true': val_data_dict['label'],\n }\n\n # Create list of variables to save to numpys\n save_training_vars = [\n 'im',\n 'yhat',\n 'ytrue',\n 'yhat'\n ]\n\n for al in loss_helper.potential_aux_losses():\n if al.keys()[0] in train_data_dict.keys():\n y_key = '%s' % al.keys()[0]\n train_session_vars[y_key] = train_data_dict[al.values()[0]['y_name']]\n save_training_vars += [y_key]\n\n yhat_key = '%s_hat' % al.keys()[0]\n train_session_vars[yhat_key] = model[al.values()[0]['model_name']]\n save_training_vars += [yhat_key]\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, losses = 0, []\n num_joints = int(\n train_data_dict['label'].get_shape()[-1]) // config.keep_dims\n normalize_vec = tf_fun.get_normalization_vec(config, num_joints)\n if config.resume_from_checkpoint is not None:\n if '.ckpt' in config.resume_from_checkpoint:\n ckpt = config.resume_from_checkpoint\n 'Restoring specified checkpoint: %s' % config.resume_from_checkpoint\n else:\n ckpt = tf.train.latest_checkpoint(config.resume_from_checkpoint)\n print 'Evaluating checkpoint: %s' % ckpt\n saver.restore(sess, ckpt)\n try:\n while not coord.should_stop():\n start_time = time.time()\n train_out_dict = sess.run(train_session_vars.values())\n train_out_dict = {k: v for k, v in zip(\n train_session_vars.keys(), train_out_dict)}\n losses.append(train_out_dict['loss_value'])\n duration = time.time() - start_time\n assert not np.isnan(\n train_out_dict['loss_value']), 'Model diverged with loss = NaN'\n if step % config.steps_before_validation == 0:\n if validation_data is not False:\n val_out_dict = sess.run(\n val_session_vars.values())\n val_out_dict = {k: v for k, v in zip(\n val_session_vars.keys(), val_out_dict)}\n # if config.normalize_labels:\n # val_out_dict['val_pred'] *= normalize_vec\n # val_out_dict['val_true'] *= normalize_vec\n np.savez(\n os.path.join(\n results_dir, '%s_val_coors' % step),\n val_pred=val_out_dict['val_pred'],\n val_ims=val_out_dict['val_ims'],\n val_true=val_out_dict['val_true'],\n normalize_vec=normalize_vec)\n with open(\n os.path.join(\n results_dir, '%s_config.p' % step), 'wb') as fp:\n pickle.dump(config, fp)\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy attach 9177\n format_str = (\n '%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch) | '\n 'Validation l2 loss = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, train_out_dict['loss_value'],\n config.train_batch / 
duration, float(duration),\n val_out_dict['val_acc'],\n config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if config.normalize_labels:\n train_out_dict['yhat'] *= normalize_vec\n train_out_dict['ytrue'] *= normalize_vec\n [save_training_data(\n output_dir=results_dir,\n data=train_out_dict[k],\n name='%s_%s' % (k, step)) for k in save_training_vars]\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.8f (%.1f examples/sec; '\n '%.3f sec/batch)')\n print (format_str % (\n datetime.now(),\n step,\n train_out_dict['loss_value'],\n config.train_batch / duration,\n float(duration)))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%s_training_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def train_and_evaluate(\n model_name: str,\n job_log_dir: Optional[str],\n multi_host_checkpointing: Optional[bool],\n maybe_use_persistence_checkpointing: bool,\n restore_checkpoint_dir: Optional[str],\n restore_checkpoint_step: Optional[int],\n eval_on_test: Optional[bool],\n checkpoint_todelete_subdir: Optional[str] = None) -> None:\n model_config = model_utils.get_model(model_name)()\n _write_params_file(model_config, job_log_dir)\n task_p = model_config.task()\n\n input_p = model_config.datasets()\n # Note that we modify input params below with runtime information, therefore\n # model_config.dataset() should not be called again as it won't have the\n # correct runtime information populated.\n for inp in input_p:\n if not isinstance(inp, base_input.BaseInputParams):\n raise 
ValueError('Expecting BaseInputParams from datasets(), got: '\n f'{inp.ToText()}')\n inp.num_infeed_hosts = jax.process_count()\n inp.infeed_host_index = jax.process_index()\n train_input_p = [v for v in input_p if v.is_training]\n if len(train_input_p) != 1:\n raise ValueError(\n f'Expecting exactly one training split. Got `{len(train_input_p)}`.')\n train_input_p = train_input_p[0]\n logging.info('train_input_p=%s', train_input_p.ToText())\n eval_input_p = None\n if eval_on_test:\n eval_input_p = [v for v in input_p if not v.is_training]\n\n checkpoint_type = checkpoints.retrieve_checkpoint_type(\n multi_host_checkpointing, maybe_use_persistence_checkpointing, task_p)\n\n checkpoint_manager = _create_checkpoint_manager(model_name, task_p,\n job_log_dir, checkpoint_type,\n checkpoint_todelete_subdir)\n\n if task_p.model.device_mesh is not None:\n train_and_evaluate_spmd_model(task_p, train_input_p, job_log_dir,\n checkpoint_manager, checkpoint_type,\n restore_checkpoint_dir,\n restore_checkpoint_step, eval_input_p)\n else:\n train_and_evaluate_pmap(task_p, train_input_p, job_log_dir,\n checkpoint_manager, restore_checkpoint_dir,\n restore_checkpoint_step, eval_input_p)", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def load_checkpoint(path: str, save_dir: str, cuda: bool = False, attention_viz: bool = False) -> nn.Module:\r\n # Load model and args\r\n state = torch.load(path, map_location=lambda storage, loc: storage)\r\n args, 
loaded_state_dict = state['args'], state['state_dict']\r\n\r\n # Update args with current args\r\n args.cuda = cuda\r\n args.attention_viz = attention_viz\r\n args.save_dir = save_dir\r\n\r\n model = build_model(args)\r\n model.load_state_dict(loaded_state_dict)\r\n\r\n if cuda:\r\n print('Moving model to cuda')\r\n model = model.cuda()\r\n\r\n return model", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def try_create_model_load_from_checkpoint_and_adjust(self) -> bool:\n success = self.try_create_model_and_load_from_checkpoint()\n self.create_summary_and_adjust_model_for_gpus()\n return success", "def try_load_checkpoint_for_model(self) -> bool:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if not self.checkpoint_path:\n raise ValueError(\"No checkpoint provided\")\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n epoch = ModelAndInfo._load_checkpoint(model=self._model,\n checkpoint_path=self.checkpoint_path,\n key_in_state_dict=ModelAndInfo.MODEL_STATE_DICT_KEY,\n use_gpu=self.config.use_gpu)\n\n logging.info(f\"Loaded model from checkpoint (epoch: {epoch})\")\n self.checkpoint_epoch = epoch\n return True", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_checkpoints(args, model): \n print('Loading the model checkpoints from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, 
'{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def train(model, infer_train, infer_val, load_checkpoint=None):\n\n global checkpoint_name\n print('Initialising {}'.format(cfg['experiment_name']))\n checkpoint_folder = 'checkpoints/{}/'.format(cfg['experiment_name'])\n\n if not os.path.exists(checkpoint_folder):\n os.makedirs(checkpoint_folder)\n\n tb_folder = 'tb/{}/'.format(cfg['experiment_name'])\n if not os.path.exists(tb_folder):\n os.makedirs(tb_folder)\n\n writer = SummaryWriter(logdir=tb_folder, flush_secs=30)\n optimiser = Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=cfg['weight_decay'])\n\n train_dataset = TweetDataset(dataset_type='train')\n train_loader = DataLoader(train_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=True, pin_memory=True)\n\n val_dataset = TweetDataset(dataset_type='val')\n val_loader = DataLoader(val_dataset, batch_size=cfg['batch_size'], num_workers=cfg['workers'],\n collate_fn=collate_function, shuffle=False, pin_memory=True)\n\n if load_checkpoint:\n checkpoint = torch.load(load_checkpoint)\n assert model.config == checkpoint['net_config'], \\\n \"The provided checkpoint has a different configuration, loading is impossible\"\n start_epoch = checkpoint['epoch'] + 1\n epochs = cfg['epochs'] + start_epoch\n step = checkpoint['step']\n model.load_state_dict(checkpoint['model'])\n optimiser.load_state_dict(checkpoint['optimiser'])\n print(\"Loaded the checkpoint at {}\".format(load_checkpoint))\n else:\n start_epoch, step = 0, 0\n epochs = cfg['epochs']\n\n init_loss = 0.\n avg_loss = AverageMeter()\n best_mae = 1e10\n\n print('Sanity val')\n val(model, val_loader, writer, 0, infer_val)\n model.train()\n\n print('Starting training')\n for epoch in range(start_epoch, epochs):\n loader_length = len(train_loader)\n epoch_start = time.time()\n\n for batch_idx, batch in enumerate(train_loader):\n optimiser.zero_grad()\n\n loss = infer_train(model, batch)\n loss.backward()\n\n if epoch == 0 and batch_idx == 0:\n init_loss = loss\n\n # logging\n elapsed = time.time() - epoch_start\n progress = batch_idx / loader_length\n est = datetime.timedelta(seconds=int(elapsed / progress)) if progress > 0.001 else '-'\n avg_loss.update(loss)\n suffix = '\\tloss {:.4f}/{:.4f}\\tETA [{}/{}]'.format(avg_loss.avg, init_loss,\n datetime.timedelta(seconds=int(elapsed)), est)\n printProgressBar(batch_idx, loader_length, suffix=suffix,\n prefix='Epoch [{}/{}]\\tStep [{}/{}]'.format(epoch, epochs - 1, batch_idx, loader_length))\n\n writer.add_scalar('Steps/train_loss', loss, step)\n\n # saving the model\n if step % cfg['checkpoint_every'] == 0:\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': 
model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n step += 1\n optimiser.step()\n\n # validating\n if step % cfg['val_every'] == 0:\n mae = val(model, val_loader, writer, step, infer_val)\n if mae < best_mae:\n best_mae = mae\n print('Best model with V{:.2f}'.format(best_mae))\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': batch_idx, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n '{}/best.pth'.format(checkpoint_folder))\n model.train()\n\n # end of epoch\n print('')\n writer.add_scalar('Epochs/train_loss', avg_loss.avg, epoch)\n avg_loss.reset()\n checkpoint_name = '{}/epoch_{}.pth'.format(checkpoint_folder, epoch)\n torch.save({'model': model.state_dict(), 'epoch': epoch, 'batch_idx': loader_length, 'step': step,\n 'optimiser': optimiser.state_dict(), 'train_config': cfg, 'net_config': model.config,\n 'dataset_config': DATASET_CONFIG},\n checkpoint_name)\n\n # finished training\n writer.close()\n print('Training finished :)')", "def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load_model(config, batchmanager):\n \n # this function returns a dictionary mapping\n # name of the task (string) --> number of classes in the task (int)\n tasks = batchmanager.getTasksWithNClasses()\n # this \"tasks\" object is used to initialize the model (with the right output layers)\n model = MultiTaskBERT(device = config.device, tasks = tasks)\n\n if not config.untrained_baseline:\n\n # if we evaluate only, model MUST be loaded.\n if config.k_shot_only:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except 
Exception:\n print(f\"WARNING: the `--k_shot_only` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n raise Exception()\n \n # if we saved the state dictionary, load it.\n elif config.resume:\n try :\n model.load_state_dict(torch.load(path_to_dicts(config), map_location = config.device))\n except Exception:\n print(f\"WARNING: the `--resume` flag was passed, but `{path_to_dicts(config)}` was NOT found!\")\n else:\n if os.path.exists(path_to_dicts(config)):\n print(f\"WARNING: `--resume` flag was NOT passed, but `{path_to_dicts(config)}` was found!\") \n\n return model", "def train_model_by_config(\n checkpoint: int,\n config: lmp.config.BaseConfig,\n dataset: lmp.dataset.BaseDataset,\n model: lmp.model.BaseRNNModel,\n optimizer: Union[\n torch.optim.SGD,\n torch.optim.Adam,\n ],\n tokenizer: lmp.tokenizer.BaseTokenizer,\n):\n # Create collate_fn for sampling.\n collate_fn = lmp.dataset.BaseDataset.create_collate_fn(\n tokenizer=tokenizer,\n max_seq_len=config.max_seq_len\n )\n\n # `torch` utility for sampling.\n data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=config.batch_size,\n shuffle=True,\n collate_fn=collate_fn\n )\n\n train_model(\n checkpoint=checkpoint,\n checkpoint_step=config.checkpoint_step,\n data_loader=data_loader,\n device=config.device,\n epoch=config.epoch,\n experiment=config.experiment,\n max_norm=config.max_norm,\n model=model,\n optimizer=optimizer,\n vocab_size=tokenizer.vocab_size\n )", "def load_model_from_checkpoint(file, device):\r\n\r\n if device == 'cuda':\r\n # Load all tensors onto GPU\r\n map_location = lambda storage, loc: storage.cuda()\r\n else:\r\n # Load all tensors onto CPU\r\n map_location = lambda storage, loc: storage\r\n\r\n # Assuming model was trained and checkpoint saved on Linux, but predict.py inference is executed using Windows.\r\n # Then, it is required to implement the following quick fix, because otherwise the exception is raised:\r\n # \"NotImplementedError: cannot instantiate 'PosixPath' on your system\"\r\n # Credits to https://stackoverflow.com/questions/57286486/i-cant-load-my-model-because-i-cant-put-a-posixpath\r\n if type(file) == pathlib.WindowsPath:\r\n tmp_PosixPath = pathlib.PosixPath\r\n pathlib.PosixPath = pathlib.WindowsPath\r\n\r\n parameters = torch.load(file, map_location=map_location)\r\n\r\n # Restore default\r\n if type(file) == pathlib.WindowsPath:\r\n pathlib.WindowsPath = pathlib.PosixPath\r\n pathlib.PosixPath = tmp_PosixPath\r\n\r\n model = train.create_model(parameters)\r\n\r\n model.class_to_idx = parameters.get('train_datasets_class_to_idx')\r\n model.load_state_dict(parameters.get('state_dict'), strict=False)\r\n\r\n return model, parameters", "def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(pretrain_model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % pretrain_model)\n ckpt = tf.train.get_checkpoint_state(pretrain_model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(pretrain_model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according 
to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(pretrain_model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(pretrain_model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n pretrain_model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n tf.logging.info(\"Copy the pre-trained model %s as the fine-tuned initialization\" % pretrain_model_checkpoint_path)\n\n import glob\n for filename in glob.glob(pretrain_model_checkpoint_path + \"*\"):\n bas = os.path.basename(filename).split(\"-\", 1)[0]\n ext = os.path.basename(filename).rsplit(\".\", 1)[1]\n shutil.copyfile(filename, os.path.join(target_model, bas + \"-0.\" + ext))\n\n with open(os.path.join(target_model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit(\"-\", 1)[0] + \"-0\"))\n return", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def create_reference_model(self, config, tmp_path_factory: pytest.TempPathFactory, *args):\n config = copy.deepcopy(config) # ensure the reference model is not passed to tests\n\n save_folder = tmp_path_factory.mktemp('{device}-{precision}'.format(**config))\n config.update({'save_interval': '1ep', 'save_folder': str(save_folder), 'save_filename': 'ep{epoch}.pt'})\n\n trainer = Trainer(**config)\n trainer.fit()\n\n self.reference_model = trainer.state.model\n self.reference_folder = save_folder", "def create_model( session, batch_size ):\n model = linear_model.LinearModel(\n FLAGS.linear_size,\n FLAGS.num_layers,\n FLAGS.residual,\n FLAGS.batch_norm,\n FLAGS.max_norm,\n batch_size,\n FLAGS.learning_rate,\n FLAGS.origin_bc,\n summaries_dir,\n dtype=tf.float16 if FLAGS.use_fp16 else tf.float32)\n\n if FLAGS.load <= 0:\n # Create a new model from scratch\n print(\"Creating model with fresh parameters.\")\n session.run( tf.global_variables_initializer() )\n return model\n\n # Load a previously saved model\n ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename=\"checkpoint\")\n print( \"train_dir\", train_dir )\n\n if ckpt and 
ckpt.model_checkpoint_path:\n # Check if the specific cpixels = pixels / pixels[2,:]heckpoint exists\n if FLAGS.load > 0:\n if os.path.isfile(os.path.join(train_dir,\"checkpoint-{0}.index\".format(FLAGS.load))):\n ckpt_name = os.path.join( os.path.join(train_dir,\"checkpoint-{0}\".format(FLAGS.load)) )\n else:\n raise ValueError(\"Asked to load checkpoint {0}, but it does not seem to exist\".format(FLAGS.load))\n else:\n ckpt_name = os.path.basename( ckpt.model_checkpoint_path )\n\n print(\"Loading model {0}\".format( ckpt_name ))\n model.saver.restore( session, ckpt.model_checkpoint_path )\n return model\n else:\n print(\"Could not find checkpoint. Aborting.\")\n raise( ValueError, \"Checkpoint {0} does not seem to exist\".format( ckpt.model_checkpoint_path ) )\n\n return model", "def main(cfg, logger):\n\n # Initialize parameters\n model_selection_metric = cfg['train']['model_selection_metric']\n\n if cfg['train']['model_selection_mode'] == 'maximize':\n model_selection_sign = 1\n elif cfg['train']['model_selection_mode'] == 'minimize':\n model_selection_sign = -1\n else:\n raise ValueError(\n 'model_selection_mode must be either maximize or minimize.')\n\n # Get data loader\n train_loader = make_data_loader(cfg, phase='train')\n val_loader = make_data_loader(cfg, phase='val')\n\n # Set up tensorboard logger\n tboard_logger = SummaryWriter(os.path.join(cfg['misc']['log_dir'], 'logs'))\n\n # Get model\n model = config.get_model(cfg)\n\n # Get optimizer and trainer\n optimizer = getattr(optim, cfg['optimizer']['alg'])(model.parameters(), lr=cfg['optimizer']['learning_rate'],\n weight_decay=cfg['optimizer']['weight_decay'])\n\n trainer = config.get_trainer(cfg, model, optimizer, tboard_logger)\n\n # Load pre-trained model if existing\n kwargs = {\n 'model': model,\n 'optimizer': optimizer,\n }\n\n checkpoint_io = CheckpointIO(cfg['misc']['log_dir'], initialize_from=cfg['model']['init_from'],\n initialization_file_name=cfg['model']['init_file_name'], **kwargs)\n\n try:\n load_dict = checkpoint_io.load('model.pt')\n except FileExistsError:\n load_dict = dict()\n\n epoch_it = load_dict.get('epoch_it', -1)\n it = load_dict.get('it', -1)\n\n metric_val_best = load_dict.get(\n 'loss_val_best', -model_selection_sign * np.inf)\n\n if metric_val_best == np.inf or metric_val_best == -np.inf:\n metric_val_best = -model_selection_sign * np.inf\n\n logger.info('Current best validation metric ({}): {:.5f}'.format(\n model_selection_metric, metric_val_best))\n\n # Training parameters\n stat_interval = cfg['train']['stat_interval']\n stat_interval = stat_interval if stat_interval > 0 else abs(\n stat_interval * len(train_loader))\n\n chkpt_interval = cfg['train']['chkpt_interval']\n chkpt_interval = chkpt_interval if chkpt_interval > 0 else abs(\n chkpt_interval * len(train_loader))\n\n val_interval = cfg['train']['val_interval']\n val_interval = val_interval if val_interval > 0 else abs(\n val_interval * len(train_loader))\n\n # Print model parameters and model graph\n nparameters = sum(p.numel() for p in model.parameters())\n # print(model)\n logger.info('Total number of parameters: {}'.format(nparameters))\n\n # Training loop\n while epoch_it < cfg['train']['max_epoch']:\n epoch_it += 1\n\n for batch in train_loader:\n it += 1\n loss = trainer.train_step(batch, it)\n tboard_logger.add_scalar('train/loss', loss, it)\n\n # Print output\n if stat_interval != 0 and (it % stat_interval) == 0 and it != 0:\n logger.info('[Epoch {}] it={}, loss={:.4f}'.format(\n epoch_it, it, loss))\n\n # Save checkpoint\n if 
(chkpt_interval != 0 and (it % chkpt_interval) == 0) and it != 0:\n logger.info('Saving checkpoint')\n checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,\n loss_val_best=metric_val_best)\n\n # Run validation\n if val_interval != 0 and (it % val_interval) == 0 and it != 0:\n eval_dict = trainer.evaluate(val_loader, it)\n\n metric_val = eval_dict[model_selection_metric]\n logger.info('Validation metric ({}): {:.4f}'.format(\n model_selection_metric, metric_val))\n\n for k, v in eval_dict.items():\n tboard_logger.add_scalar('val/{}'.format(k), v, it)\n\n if model_selection_sign * (metric_val - metric_val_best) > 0:\n metric_val_best = metric_val\n logger.info(\n 'New best model (loss {:.4f})'.format(metric_val_best))\n checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,\n loss_val_best=metric_val_best)\n\n # Quit after the maximum number of epochs is reached\n logger.info('Training completed after {} Epochs ({} it) with best val metric ({})={}'.format(\n epoch_it, it, model_selection_metric, metric_val_best))", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = 
ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_pretrained_model(self, model_path):\n # My eyes and my heart both hurt when writing this method\n\n # Only care about layer_types that have trainable parameters\n ltypes = ['BNData', 'ConvolutionData', 'HoleConvolutionData']\n\n def _get_layer_params(layer, ltype):\n\n if ltype == 'BNData': \n n_channels = layer.blobs[0].shape.dim[1]\n gamma = np.array(layer.blobs[0].data).reshape(n_channels)\n beta = np.array(layer.blobs[1].data).reshape(n_channels)\n mean = np.array(layer.blobs[2].data).reshape(n_channels)\n var = np.array(layer.blobs[3].data).reshape(n_channels)\n return [mean, var, gamma, beta]\n\n elif ltype in ['ConvolutionData', 'HoleConvolutionData']:\n is_bias = layer.convolution_param.bias_term\n shape = [int(d) for d in layer.blobs[0].shape.dim]\n weights = np.array(layer.blobs[0].data).reshape(shape)\n bias = []\n if is_bias:\n bias = np.array(layer.blobs[1].data).reshape(shape[0])\n return [weights, bias]\n \n elif ltype == 'InnerProduct':\n raise Exception(\"Fully connected layers {}, not supported\".format(ltype))\n\n else:\n raise Exception(\"Unkown layer type {}\".format(ltype))\n\n\n net = caffe_pb2.NetParameter()\n with open(model_path, 'rb') as model_file:\n net.MergeFromString(model_file.read())\n\n # dict formatted as -> key:<layer_name> :: value:<layer_type>\n layer_types = {}\n # dict formatted as -> key:<layer_name> :: value:[<list_of_params>]\n layer_params = {}\n\n for l in net.layer:\n lname = l.name\n ltype = l.type\n if ltype in ltypes:\n print(\"Processing layer {}\".format(lname))\n layer_types[lname] = ltype\n layer_params[lname] = _get_layer_params(l, ltype)\n\n # Set affine=False for all batchnorm modules\n def _no_affine_bn(module=None):\n if isinstance(module, nn.BatchNorm2d):\n module.affine = False\n\n if len([m for m in module.children()]) > 0:\n for child in module.children():\n _no_affine_bn(child)\n\n 
#_no_affine_bn(self)\n\n\n def _transfer_conv(layer_name, module):\n weights, bias = layer_params[layer_name]\n w_shape = np.array(module.weight.size())\n \n np.testing.assert_array_equal(weights.shape, w_shape)\n print(\"CONV: Original {} and trans weights {}\".format(w_shape,\n weights.shape))\n module.weight.data = torch.from_numpy(weights)\n\n if len(bias) != 0:\n b_shape = np.array(module.bias.size())\n np.testing.assert_array_equal(bias.shape, b_shape)\n print(\"CONV: Original {} and trans bias {}\".format(b_shape,\n bias.shape))\n module.bias.data = torch.from_numpy(bias)\n\n\n def _transfer_conv_bn(conv_layer_name, mother_module):\n conv_module = mother_module[0]\n bn_module = mother_module[1]\n \n _transfer_conv(conv_layer_name, conv_module)\n \n mean, var, gamma, beta = layer_params[conv_layer_name+'/bn']\n print(\"BN: Original {} and trans weights {}\".format(bn_module.running_mean.size(),\n mean.shape))\n bn_module.running_mean = torch.from_numpy(mean)\n bn_module.running_var = torch.from_numpy(var)\n bn_module.weight.data = torch.from_numpy(gamma)\n bn_module.bias.data = torch.from_numpy(beta)\n\n\n def _transfer_residual(prefix, block):\n block_module, n_layers = block[0], block[1]\n\n bottleneck = block_module.layers[0]\n bottleneck_conv_bn_dic = {prefix + '_1_1x1_reduce': bottleneck.cbr1.cbr_unit,\n prefix + '_1_3x3': bottleneck.cbr2.cbr_unit,\n prefix + '_1_1x1_proj': bottleneck.cb4.cb_unit,\n prefix + '_1_1x1_increase': bottleneck.cb3.cb_unit,}\n\n for k, v in bottleneck_conv_bn_dic.items():\n _transfer_conv_bn(k, v)\n\n for layer_idx in range(2, n_layers+1):\n residual_layer = block_module.layers[layer_idx-1]\n residual_conv_bn_dic = {'_'.join(map(str, [prefix, layer_idx, '1x1_reduce'])): residual_layer.cbr1.cbr_unit,\n '_'.join(map(str, [prefix, layer_idx, '3x3'])): residual_layer.cbr2.cbr_unit,\n '_'.join(map(str, [prefix, layer_idx, '1x1_increase'])): residual_layer.cb3.cb_unit,} \n \n for k, v in residual_conv_bn_dic.items():\n _transfer_conv_bn(k, v)\n\n\n convbn_layer_mapping = {'conv1_1_3x3_s2': self.convbnrelu1_1.cbr_unit,\n 'conv1_2_3x3': self.convbnrelu1_2.cbr_unit,\n 'conv1_3_3x3': self.convbnrelu1_3.cbr_unit,\n 'conv5_3_pool6_conv': self.pyramid_pooling.paths[0].cbr_unit, \n 'conv5_3_pool3_conv': self.pyramid_pooling.paths[1].cbr_unit,\n 'conv5_3_pool2_conv': self.pyramid_pooling.paths[2].cbr_unit,\n 'conv5_3_pool1_conv': self.pyramid_pooling.paths[3].cbr_unit,\n 'conv5_4': self.cbr_final.cbr_unit,}\n\n residual_layers = {'conv2': [self.res_block2, self.block_config[0]],\n 'conv3': [self.res_block3, self.block_config[1]],\n 'conv4': [self.res_block4, self.block_config[2]],\n 'conv5': [self.res_block5, self.block_config[3]],}\n\n # Transfer weights for all non-residual conv+bn layers\n for k, v in convbn_layer_mapping.items():\n _transfer_conv_bn(k, v)\n\n # Transfer weights for final non-bn conv layer\n _transfer_conv('conv6', self.classification)\n\n # Transfer weights for all residual layers\n for k, v in residual_layers.items():\n _transfer_residual(k, v)", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n 
last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_params(self, params):\n params.cp_latest_filename = \"latest_checkpoint_v\"+params.version\n params.cp_load_latest_filename = \"latest_checkpoint_v\"+params.cp_load_ver\n params.cp_load_dir = params.out_dir + params.cp_load_name+ \"/checkpoints/\"\n if not hasattr(params, \"model_out_dir\"):\n params.model_out_dir = params.out_dir + params.model_name\n params.cp_save_dir = params.model_out_dir + \"/checkpoints/\"\n params.log_dir = params.model_out_dir + \"/logfiles/\"\n params.save_dir = params.model_out_dir + \"/savefiles/\"\n params.disp_dir = params.model_out_dir + \"/vis/\"\n params.num_pixels = int(np.prod(params.data_shape))\n self.params = params\n self.params_loaded = True", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def load_model(self, model_path):\n # Check the model file exists\n if not os.path.isfile(model_path):\n raise ValueError(f\"The model file `{model_path}` is not exists or broken!\")\n\n checkpoint = torch.load(model_path)\n self.model_type = checkpoint['model_type']\n self.label2idx = checkpoint['label2idx']\n self.idx2label = checkpoint['idx2label']\n self.model.load_state_dict(checkpoint['model_state_dict'])\n self.model.to(self.device)", "def try_create_model_and_load_from_checkpoint(self) -> bool:\n self.create_model()\n if self.checkpoint_path:\n # Load the stored model. 
If there is no checkpoint present, return immediately.\n return self.try_load_checkpoint_for_model()\n return True", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def _load_checkpoint_to_net(config, network):\n if config.existed_ckpt:\n if config.existed_ckpt.endswith(\".npz\"):\n weights = np.load(config.existed_ckpt)\n else:\n weights = load_checkpoint(config.existed_ckpt)\n for param in network.trainable_params():\n weights_name = param.name\n if weights_name not in weights:\n raise ValueError(f\"Param {weights_name} is not found in ckpt file.\")\n\n if isinstance(weights[weights_name], Parameter):\n param.set_data(weights[weights_name].data)\n elif isinstance(weights[weights_name], Tensor):\n param.set_data(Tensor(weights[weights_name].asnumpy(), config.dtype))\n elif isinstance(weights[weights_name], np.ndarray):\n param.set_data(Tensor(weights[weights_name], config.dtype))\n else:\n param.set_data(weights[weights_name])\n else:\n for param in network.trainable_params():\n name = param.name\n value = param.data\n if isinstance(value, Tensor):\n if name.endswith(\".gamma\"):\n param.set_data(one_weight(value.asnumpy().shape))\n elif name.endswith(\".beta\") or name.endswith(\".bias\"):\n if param.data.dtype == \"Float32\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data((weight_variable(value.asnumpy().shape).astype(np.float16)))\n else:\n if param.data.dtype == \"Float32\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float32)))\n elif param.data.dtype == \"Float16\":\n param.set_data(Tensor(weight_variable(value.asnumpy().shape).astype(np.float16)))", "def build_graph_from_config(self, model_config, track_config, checkpoint_path):\n self.build_model()\n \n ema = tf.train.ExponentialMovingAverage(0)\n variables_to_restore = ema.variables_to_restore(moving_avg_variables=[])\n\n # Filter out State variables\n variables_to_restore_filterd = {}\n for key, value in variables_to_restore.items():\n if key.split('/')[1] != 'State':\n if \"alex_branch\" not in key:\n if \"vggf_branch\" not in key:\n variables_to_restore_filterd[key] = value\n \n saver = tf.train.Saver(variables_to_restore_filterd)\n \n\n if osp.isdir(checkpoint_path):\n #checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)\n if not checkpoint_path:\n raise ValueError(\"No checkpoint file found in: {}\".format(checkpoint_path))\n\n def _restore_fn(sess):\n logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n saver.restore(sess, checkpoint_path)\n logging.info(\"Successfully loaded checkpoint: %s\", os.path.basename(checkpoint_path))\n logging.info(\"Restore CANet...\")\n\n return _restore_fn", "def 
load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def train(self, resume_from_checkpoint: Optional[Union[str, bool]] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, **kwargs):\n\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n args = self.args\n\n self.is_in_train = True\n\n # do_train is not a reliable argument, as it might not be set and .train() still called, so\n # the following is a workaround:\n if args.fp16_full_eval and not args.do_train:\n self.model = self.model.to(args.device)\n\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n if len(kwargs) > 0:\n raise TypeError(f\"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:\n resume_from_checkpoint = get_last_checkpoint(args.output_dir)\n if resume_from_checkpoint is None:\n raise ValueError(f\"No valid checkpoint found in output directory ({args.output_dir})\")\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if self.place_model_on_device:\n self.model = 
self.model.to(args.device)\n self.model_wrapped = self.model\n\n # Keeping track whether we can len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n else:\n # see __init__. max_steps is set when the dataset has no __len__\n max_steps = args.max_steps\n num_train_epochs = int(args.num_train_epochs)\n num_update_steps_per_epoch = max_steps\n\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n model = self._wrap_model(self.model_wrapped)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # Train!\n world_size = 1 # number of processes in parallel\n\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * world_size\n num_examples = (self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, \"trainer_state.json\")):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (len(epoch_iterator) if train_dataset_is_sized else args.max_steps * 
args.gradient_accumulation_steps)\n self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args, self.state, self.control)\n\n tr_loss += self.custom_training_step(model, inputs)\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n # Optimizer step\n optimizer_was_run = True\n self.optimizer.step()\n\n if optimizer_was_run:\n self.lr_scheduler.step()\n\n model.zero_grad()\n\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\")\n\n # We load the model state dict on the CPU to avoid an OOM error.\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location=\"cpu\")\n # If the model is on the GPU, it still works!\n self.model.load_state_dict(state_dict)\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)", "def run(self):\n # Get the checkpoint file\n print('loading checkpoint file ...')\n cp = torch.load(self.cfg.work_dir + '/latest.pth')\n print('done')\n\n print('loading state dictionary ...')\n # Initialize network first as separate modules so we can access WFCOS\n backbone = build_backbone(self.cfg.model.backbone).cuda()\n neck = build_neck(self.cfg.model.neck).cuda()\n head = build_head(self.cfg.model.bbox_head).cuda()\n\n # Load the state dicts\n backbone_state = OrderedDict()\n neck_state = OrderedDict()\n head_state = OrderedDict()\n\n for key in cp['state_dict'].keys():\n if 'backbone' in key:\n backbone_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n elif 'neck' in key:\n neck_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n elif 'bbox_head' in key:\n head_state[key.split('.', 1)[1]] = cp['state_dict'][key]\n\n backbone.load_state_dict(backbone_state)\n neck.load_state_dict(neck_state)\n head.load_state_dict(head_state)\n\n # Set to eval mode\n backbone.eval()\n neck.eval()\n head.eval()\n\n print('done')\n\n print('starting inference validation run ...')\n for i, (img, cls) in enumerate(self.loader):\n out = backbone(img)\n out = neck(out)\n out = head(out)\n\n img_metas = [{'img_shape': (640, 800),\n 'scale_factor': 1}]\n bboxes = head.get_bboxes(out[0], out[1], out[2], img_metas,\n self.cfg.test_cfg)\n pass\n print('done')", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def try_and_init_from(self, path):\n log.info(\"Loading weights from foreign checkpoint {}\".format(path))\n if not os.path.exists(path):\n raise ValueError(\"Checkpoint {} does not exist\".format(path))\n\n chkpt = th.load(path, map_location=th.device(\"cpu\"))\n if 
\"model\" not in chkpt.keys() or chkpt[\"model\"] is None:\n raise ValueError(\"{} has no model saved\".format(path))\n\n mdl = chkpt[\"model\"]\n for n, p in self.model.named_parameters():\n if n in mdl:\n p2 = mdl[n]\n if p2.shape != p.shape:\n log.warning(\"Parameter {} ignored, checkpoint size does not match: {}, should be {}\".format(n, p2.shape, p.shape))\n continue\n log.debug(\"Parameter {} copied\".format(n))\n p.data.copy_(p2)\n else:\n log.warning(\"Parameter {} ignored, not found in source checkpoint.\".format(n))\n\n log.info(\"Weights loaded from foreign checkpoint {}\".format(path))", "def initialize_model_from_cfg(args, gpu_id=0):\n model = model_builder.Generalized_RCNN()\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_caffe2_detectron_weights(model, args.load_detectron)\n\n model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)\n\n return model", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def train_model(\n checkpoint: int,\n checkpoint_step: int,\n data_loader: torch.utils.data.DataLoader,\n device: torch.device,\n epoch: int,\n experiment: str,\n max_norm: float,\n model: lmp.model.BaseRNNModel,\n optimizer: Union[\n torch.optim.SGD,\n torch.optim.Adam,\n ],\n vocab_size: int\n):\n # Set experiment output folder.\n file_dir = f'{lmp.path.DATA_PATH}/{experiment}'\n\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n\n # Set experiment log folder.\n writer = torch.utils.tensorboard.SummaryWriter(\n f'{lmp.path.DATA_PATH}/log/{experiment}'\n )\n\n # Define objective function.\n criterion = torch.nn.CrossEntropyLoss()\n\n # Step = number of updates.\n # Every update must increment `step`.\n step = 0\n\n # Set model to train mode.\n model.train()\n\n # Clean up gradient in model parameters.\n model.zero_grad()\n\n # Initialize total loss.\n total_loss = 0.0\n\n for cur_epoch in range(epoch):\n\n epoch_iterator = tqdm(\n data_loader,\n desc=f'epoch: {cur_epoch}, loss: {0:.6f}'\n )\n\n for x, y in epoch_iterator:\n # Increment step for each update.\n step += 1\n\n # Continue training from previous checkpoint step.\n if step < checkpoint:\n 
continue\n\n # Put tensors on to specified device (CPU or GPU). Reshape `y` into\n # shape (B x S) for cross-entropy.\n # x.size = (B, S)\n # y.size = (B x S)\n x = x.to(device)\n y = y.reshape(-1).to(device)\n\n # Forward pass.\n # pred_y_logits.size = (B, S, V)\n pred_y_logits = model(x)\n\n # Reshape `pred_y_logits` into shape (B x S, V) for cross-entropy.\n pred_y_logits = pred_y_logits.reshape(-1, vocab_size)\n\n # Perform cross-entropy.\n loss = criterion(pred_y_logits, y)\n\n # Calculate total loss.\n total_loss += loss.item()\n\n # Log loss.\n epoch_iterator.set_description(\n f'epoch: {cur_epoch}, loss: {loss.item():.6f}'\n )\n\n # Backward pass.\n loss.backward()\n\n # Perform gradient clipping to avoid gradient explosion.\n torch.nn.utils.clip_grad_norm_(\n model.parameters(),\n max_norm\n )\n\n # Gradient descent.\n optimizer.step()\n\n # `torch` required manually clean up gradient.\n optimizer.zero_grad()\n\n # Save checkpoint for each `checkpoint_step`.\n if step % checkpoint_step == 0:\n torch.save(\n model.state_dict(),\n os.path.join(\n file_dir,\n f'model-{step}.pt'\n )\n )\n torch.save(\n optimizer.state_dict(),\n os.path.join(\n file_dir,\n f'optimizer-{step}.pt'\n )\n )\n # Log average loss.\n writer.add_scalar(\n f'{experiment}/loss',\n total_loss / checkpoint_step,\n step\n )\n total_loss = 0.0\n\n # Save last checkpoint.\n torch.save(\n model.state_dict(),\n os.path.join(\n file_dir,\n f'model-{step}.pt'\n )\n )\n torch.save(\n optimizer.state_dict(),\n os.path.join(\n file_dir,\n f'optimizer-{step}.pt'\n )\n )", "def train(self, config, **kwargs):\n\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n outputdir = Path(\n config_parameters['outputpath'], config_parameters['model'],\n \"{}_{}\".format(\n datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%m'),\n uuid.uuid1().hex[:8]))\n # Early init because of creating dir\n checkpoint_handler = ModelCheckpoint(\n outputdir,\n 'run',\n n_saved=1,\n require_empty=False,\n create_dir=True,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n save_as_state_dict=False,\n score_name='loss')\n logger = utils.getfile_outlogger(Path(outputdir, 'train.log'))\n logger.info(\"Storing files in {}\".format(outputdir))\n # utils.pprint_dict\n utils.pprint_dict(config_parameters, logger.info)\n logger.info(\"Running on device {}\".format(DEVICE))\n labels_df = pd.read_csv(config_parameters['trainlabel'], sep=' ')\n labels_df['encoded'], encoder = utils.encode_labels(\n labels=labels_df['bintype'])\n train_df, cv_df = utils.split_train_cv(labels_df)\n\n transform = utils.parse_transforms(config_parameters['transforms'])\n utils.pprint_dict({'Classes': encoder.classes_},\n logger.info,\n formatter='pretty')\n utils.pprint_dict(transform, logger.info, formatter='pretty')\n if 'sampler' in config_parameters and config_parameters[\n 'sampler'] == 'MinimumOccupancySampler':\n # Asserts that each \"batch\" contains at least one instance\n train_sampler = dataset.MinimumOccupancySampler(\n np.stack(train_df['encoded'].values))\n\n sampling_kwargs = {\"sampler\": train_sampler, \"shuffle\": False}\n elif 'shuffle' in config_parameters and config_parameters['shuffle']:\n sampling_kwargs = {\"shuffle\": True}\n else:\n sampling_kwargs = {\"shuffle\": False}\n\n logger.info(\"Using Sampler {}\".format(sampling_kwargs))\n\n colname = config_parameters.get('colname', ('filename', 'encoded')) #\n trainloader = dataset.getdataloader(\n train_df,\n config_parameters['traindata'],\n transform=transform,\n 
batch_size=config_parameters['batch_size'],\n colname=colname, # For other datasets with different key names\n num_workers=config_parameters['num_workers'],\n **sampling_kwargs)\n cvdataloader = dataset.getdataloader(\n cv_df,\n config_parameters['traindata'],\n transform=None,\n shuffle=False,\n colname=colname, # For other datasets with different key names\n batch_size=config_parameters['batch_size'],\n num_workers=config_parameters['num_workers'])\n if 'pretrained' in config_parameters and config_parameters[\n 'pretrained'] is not None:\n model = models.load_pretrained(config_parameters['pretrained'],\n outputdim=len(encoder.classes_))\n else:\n model = getattr(models, config_parameters['model'],\n 'LightCNN')(inputdim=trainloader.dataset.datadim,\n outputdim=len(encoder.classes_),\n **config_parameters['model_args'])\n\n if config_parameters['optimizer'] == 'AdaBound':\n try:\n import adabound\n optimizer = adabound.AdaBound(\n model.parameters(), **config_parameters['optimizer_args'])\n except ImportError:\n logger.info(\n \"Adabound package not found, install via pip install adabound. Using Adam instead\"\n )\n config_parameters['optimizer'] = 'Adam'\n config_parameters['optimizer_args'] = {\n } # Default adam is adabount not found\n else:\n optimizer = getattr(\n torch.optim,\n config_parameters['optimizer'],\n )(model.parameters(), **config_parameters['optimizer_args'])\n\n utils.pprint_dict(optimizer, logger.info, formatter='pretty')\n utils.pprint_dict(model, logger.info, formatter='pretty')\n if DEVICE.type != 'cpu' and torch.cuda.device_count() > 1:\n logger.info(\"Using {} GPUs!\".format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n criterion = torch.nn.CrossEntropyLoss().to(DEVICE)\n model = model.to(DEVICE)\n\n precision = Precision()\n recall = Recall()\n f1_score = (precision * recall * 2 / (precision + recall)).mean()\n metrics = {\n 'Loss': Loss(criterion),\n 'Precision': precision.mean(),\n 'Recall': recall.mean(),\n 'Accuracy': Accuracy(),\n 'F1': f1_score,\n }\n\n # batch contains 3 elements, X,Y and filename. 
Filename is only used\n # during evaluation\n def _prep_batch(batch, device=DEVICE, non_blocking=False):\n x, y, _ = batch\n return (convert_tensor(x, device=device,\n non_blocking=non_blocking),\n convert_tensor(y, device=device,\n non_blocking=non_blocking))\n\n train_engine = create_supervised_trainer(model,\n optimizer=optimizer,\n loss_fn=criterion,\n prepare_batch=_prep_batch,\n device=DEVICE)\n inference_engine = create_supervised_evaluator(\n model, metrics=metrics, prepare_batch=_prep_batch, device=DEVICE)\n\n RunningAverage(output_transform=lambda x: x).attach(\n train_engine, 'run_loss') # Showing progressbar during training\n pbar = ProgressBar(persist=False)\n pbar.attach(train_engine, ['run_loss'])\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n patience=3,\n factor=0.1)\n\n @inference_engine.on(Events.COMPLETED)\n def update_reduce_on_plateau(engine):\n val_loss = engine.state.metrics['Loss']\n if 'ReduceLROnPlateau' == scheduler.__class__.__name__:\n scheduler.step(val_loss)\n else:\n scheduler.step()\n\n early_stop_handler = EarlyStopping(\n patience=5,\n score_function=lambda engine: -engine.state.metrics['Loss'],\n trainer=train_engine)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n early_stop_handler)\n inference_engine.add_event_handler(Events.EPOCH_COMPLETED,\n checkpoint_handler, {\n 'model': model,\n 'encoder': encoder,\n 'config': config_parameters,\n })\n\n @train_engine.on(Events.EPOCH_COMPLETED)\n def compute_validation_metrics(engine):\n inference_engine.run(cvdataloader)\n results = inference_engine.state.metrics\n output_str_list = [\n \"Validation Results - Epoch : {:<5}\".format(engine.state.epoch)\n ]\n for metric in metrics:\n output_str_list.append(\"{} {:<5.3f}\".format(\n metric, results[metric]))\n logger.info(\" \".join(output_str_list))\n pbar.n = pbar.last_print_n = 0\n\n train_engine.run(trainloader, max_epochs=config_parameters['epochs'])\n return outputdir", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model, **kwargs)\n self.path = model\n self.model = get_zennet()\n\n model_pth_path = osp.join(self.path, ModelFile.TORCH_MODEL_FILE)\n\n checkpoint = torch.load(model_pth_path, map_location='cpu')\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n\n self.model.load_state_dict(state_dict, strict=True)\n logger.info('load model done')", "def train_model(config, environ, train_data, test_data, trainval_data=None):\n np.random.seed(0)\n if not hasattr(config, \"seed\"):\n tf.set_random_seed(1234)\n log.info(\"Setting tensorflow random seed={:d}\".format(1234))\n else:\n log.info(\"Setting tensorflow random seed={:d}\".format(config.seed))\n tf.set_random_seed(config.seed)\n if environ.verbose:\n verbose_level = 0\n else:\n verbose_level = 2\n\n if trainval_data is None:\n trainval_data = train_data\n\n log.info(\"Environment: {}\".format(environ.__dict__))\n log.info(\"Config: {}\".format(config.__dict__))\n\n save_folder = os.path.join(environ.save_folder, environ.exp_id)\n logs_folder = os.path.join(environ.logs_folder, environ.exp_id)\n with log.verbose_level(verbose_level):\n exp_logger = ExperimentLogger(logs_folder)\n\n if not hasattr(config, \"seed\"):\n data_seed = 0\n else:\n data_seed = config.seed\n\n # Gets data iterators.\n train_iter = get_iter(\n train_data,\n batch_size=config.batch_size,\n shuffle=True,\n cycle=True,\n prefetch=config.prefetch,\n seed=data_seed,\n num_worker=25,\n queue_size=500)\n trainval_iter = 
get_iter(\n train_data,\n batch_size=config.batch_size,\n shuffle=True,\n cycle=True,\n prefetch=config.prefetch,\n num_worker=10,\n queue_size=200)\n test_iter = get_iter(\n test_data,\n batch_size=config.batch_size,\n shuffle=False,\n cycle=False,\n prefetch=config.prefetch,\n num_worker=10,\n queue_size=200)\n\n # Builds models.\n log.info(\"Building models\")\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=None):\n with tf.device(environ.device):\n if config.model.startswith(\"resnet\"):\n m = ResNetModel(config, is_training=True)\n else:\n m = CNNModel(config, is_training=True)\n\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True):\n with tf.device(environ.device):\n if config.model.startswith(\"resnet\"):\n mvalid = ResNetModel(config, is_training=False)\n else:\n mvalid = CNNModel(config, is_training=False)\n\n # Initializes variables.\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n def train_step():\n \"\"\"Train step.\"\"\"\n batch = train_iter.next()\n feed_data = {m.input: batch[\"img\"], m.label: batch[\"label\"]}\n cost, ce, _ = sess.run([m.cost, m.cross_ent, m.train_op],\n feed_dict=feed_data)\n return ce\n\n def evaluate(data_iter, nbatches):\n \"\"\"Runs evaluation.\"\"\"\n num_correct = 0.0\n count = 0\n if nbatches == -1:\n iter_ = data_iter\n else:\n iter_ = range(nbatches)\n\n for bb in iter_:\n if nbatches == -1:\n batch = bb\n else:\n batch = data_iter.next()\n feed_data = {mvalid.input: batch[\"img\"]}\n y = sess.run(mvalid.output, feed_dict=feed_data)\n pred_label = np.argmax(y, axis=1)\n num_correct += np.sum(\n np.equal(pred_label, batch[\"label\"]).astype(float))\n count += pred_label.size\n acc = (num_correct / count)\n return acc\n\n def save():\n \"\"\"Snapshots a model.\"\"\"\n if not os.path.isdir(save_folder):\n os.makedirs(save_folder)\n config_file = os.path.join(save_folder, \"conf.json\")\n environ_file = os.path.join(save_folder, \"env.json\")\n with open(config_file, \"w\") as f:\n f.write(config.to_json())\n with open(environ_file, \"w\") as f:\n f.write(environ.to_json())\n log.info(\"Saving to {}\".format(save_folder))\n saver.save(\n sess,\n os.path.join(save_folder, \"model.ckpt\"),\n global_step=m.global_step)\n\n def train():\n \"\"\"Train loop.\"\"\"\n lr = config.base_learn_rate\n lr_decay_steps = config.lr_decay_steps\n max_train_iter = config.max_train_iter\n m.assign_lr(sess, lr)\n\n if environ.verbose:\n loop = range(max_train_iter)\n else:\n loop = pb.get(max_train_iter)\n\n for niter in loop:\n # decrease learning rate\n if len(lr_decay_steps) > 0:\n if (niter + 1) == lr_decay_steps[0]:\n lr *= 0.1\n m.assign_lr(sess, lr)\n lr_decay_steps.pop(0)\n ce = train_step()\n if (niter + 1) % config.disp_iter == 0 or niter == 0:\n exp_logger.log_train_ce(niter, ce)\n if (niter + 1) % config.valid_iter == 0 or niter == 0:\n acc = evaluate(trainval_iter, 10)\n exp_logger.log_train_acc(niter, acc)\n test_iter.reset()\n acc = evaluate(test_iter, -1)\n log.info(\"Experment ID {}\".format(environ.exp_id))\n exp_logger.log_valid_acc(niter, acc)\n if (niter + 1) % config.save_iter == 0:\n save()\n test_iter.reset()\n acc = evaluate(test_iter, -1)\n return acc\n\n acc = train()\n return acc", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not 
os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state" ]
[ "0.77288234", "0.7422351", "0.73331386", "0.7309896", "0.7146994", "0.7120653", "0.709593", "0.70095086", "0.6994309", "0.6936953", "0.6916758", "0.6907432", "0.6901133", "0.685239", "0.6851834", "0.682932", "0.6783703", "0.6779041", "0.6770681", "0.6748651", "0.6746794", "0.67368805", "0.6731563", "0.67264324", "0.670968", "0.6671762", "0.66528815", "0.66475177", "0.6645826", "0.6637641", "0.66101646", "0.66010445", "0.66010445", "0.6596317", "0.6593572", "0.65878546", "0.65843034", "0.65838987", "0.6548627", "0.6546977", "0.6546659", "0.6541168", "0.6533427", "0.65101206", "0.65097", "0.65077376", "0.64917064", "0.64886", "0.64727765", "0.6471805", "0.64422655", "0.6441939", "0.644122", "0.6437587", "0.6434152", "0.6431633", "0.6426752", "0.6425945", "0.63933456", "0.6392295", "0.63907516", "0.6390588", "0.6386796", "0.63828975", "0.63773906", "0.6375171", "0.6371313", "0.6359675", "0.6358668", "0.6352871", "0.6350757", "0.6350401", "0.63464296", "0.63437796", "0.63432944", "0.63346267", "0.6334598", "0.63186693", "0.63083464", "0.63072896", "0.6300133", "0.6299943", "0.6299492", "0.62972635", "0.62901527", "0.6288992", "0.6287761", "0.6283841", "0.6283273", "0.6277323", "0.62664723", "0.62640643", "0.6259985", "0.6251159", "0.6251159", "0.6250616", "0.624678", "0.6227893", "0.6219129", "0.62175655", "0.621257" ]
0.0
-1
Creates a torch optimizer for the given model, and stores it as an instance variable in the current object.
def create_optimizer(self) -> None:
    # Make sure model is created before we create optimizer
    if self._model is None:
        raise ValueError("Model checkpoint must be created before optimizer checkpoint can be loaded.")
    # Select optimizer type
    if self.config.optimizer_type in [OptimizerType.Adam, OptimizerType.AMSGrad]:
        self._optimizer = torch.optim.Adam(self._model.parameters(), self.config.l_rate,
                                           self.config.adam_betas, self.config.opt_eps,
                                           self.config.weight_decay,
                                           amsgrad=self.config.optimizer_type == OptimizerType.AMSGrad)
    elif self.config.optimizer_type == OptimizerType.SGD:
        self._optimizer = torch.optim.SGD(self._model.parameters(), self.config.l_rate,
                                          self.config.momentum,
                                          weight_decay=self.config.weight_decay)
    elif self.config.optimizer_type == OptimizerType.RMSprop:
        self._optimizer = RMSprop(self._model.parameters(), self.config.l_rate, self.config.rms_alpha,
                                  self.config.opt_eps, self.config.weight_decay, self.config.momentum)
    else:
        raise NotImplementedError(f"Optimizer type {self.config.optimizer_type.value} is not implemented")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore\n pass", "def build_optimizer(model: nn.Module, args: Namespace) -> Optimizer:\n params = [{'params': model.parameters(), 'lr': args.init_lr, 'weight_decay': 0}]\n\n return Adam(params)", "def get_optimizer(args, model: torch.nn.Module):\n return Adam(model.parameters(), lr=args.lr)", "def optimizer_creator(model, config):\n return torch.optim.SGD(model.parameters(), lr=config.get(\"lr\", 1e-4))", "def __init__(self, model, optimizer=tf.compat.v1.train.AdamOptimizer, optimizer_arguments={}):\n \n self.model = model\n self.train_optimizer = optimizer(**optimizer_arguments).minimize(self.model.trn_loss)\n self.train_reg_optimizer = optimizer(**optimizer_arguments).minimize(self.model.reg_loss)", "def create_optimizer(optimizer_name, model, config):\n if optimizer_name == 'adadelta':\n return torch.optim.Adadelta(model.parameters(),\n lr=config['adadelta_lr'],\n rho=config['adadelta_rho'],\n weight_decay=config['adadelta_weight_decay'],\n eps=config['adadelta_eps'])\n else:\n raise Exception('Optimizer \\'{}\\' not supported.'.format(optimizer_name))", "def create_train_op(self, model, learning_rate):\n if self.optim_type == 'adagrad':\n optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'adadelta':\n optimizer = torch.optim.Adadelta(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'rprop':\n optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate, weight_decay=self.args.weight_decay)\n elif self.optim_type == 'sgd':\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=self.args.momentum,\n weight_decay=self.args.weight_decay)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n return optimizer", "def build_torch_optimizer(model, opt):\n params = [p for p in model.parameters() if p.requires_grad]\n if opt.optim == 'sgd':\n optimizer = optim.SGD(params, lr=opt.learning_rate)\n elif opt.optim == 'adagrad':\n optimizer = optim.Adagrad(\n params,\n lr=opt.learning_rate,\n initial_accumulator_value=opt.adagrad_accumulator_init)\n elif opt.optim == 'adadelta':\n optimizer = optim.Adadelta(params, lr=opt.learning_rate)\n elif opt.optim == 'adam':\n optimizer = optim.Adam(\n params,\n lr=opt.learning_rate,\n betas=(opt.adam_beta1, opt.adam_beta2),\n weight_decay=opt.learning_rate_decay,\n eps=1e-9)\n else:\n raise ValueError('Invalid optimizer type: ' + opt.optim)\n\n return optimizer", "def get_optimizer(self, model):\n optimizer = scaffold_optimizer.ScaffoldOptimizer(\n model.parameters(),\n lr=Config().trainer.learning_rate,\n momentum=Config().trainer.momentum,\n weight_decay=Config().trainer.weight_decay)\n\n optimizer.server_update_direction = self.server_update_direction\n optimizer.client_update_direction = self.client_update_direction\n optimizer.client_id = self.client_id\n optimizer.update_flag = True\n\n return optimizer", "def create_model_optimizer(net,alpha):\n optimizer = chainer.optimizers.Adam(alpha=alpha)\n optimizer.setup(net)\n return optimizer", "def create_optimizer(model: torch.nn.Module, optimizer_name: str, learning_rate: float, weight_decay: float = 0.0,\n momentum: float = 0.0) -> torch.optim.Optimizer:\n if optimizer_name.lower() == \"adadelta\":\n 
optimizer = torch.optim.Adadelta(model.parameters(), learning_rate, weight_decay=weight_decay)\n elif optimizer_name.lower() == \"adam\":\n optimizer = torch.optim.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)\n elif optimizer_name.lower() == \"sgd\":\n optimizer = torch.optim.SGD(model.parameters(), learning_rate, momentum,\n weight_decay=weight_decay)\n elif optimizer_name.lower() == \"rmsprop\":\n optimizer = torch.optim.RMSprop(model.parameters(), learning_rate, weight_decay=weight_decay,\n momentum=momentum)\n else:\n raise ValueError(\"SystemLog: The optimizer must be Adadelta, Adam, SGD, or RMSprop \"\n \"(optimizer: {})\".format(optimizer_name))\n\n return optimizer", "def get_optimizer(model):\n optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.5, 0.999))\n return optimizer", "def optimizer_setup(model, params):\n if params.optimizer == 'adam':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_adam(model, params)\n else:\n optimizer = optimizer_handler.plain_adam(model, params)\n elif params.optimizer == 'sgd':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_sgd(model, params)\n else:\n optimizer = optimizer_handler.plain_sgd(model, params)\n\n if params.zero_bn_bias_decay:\n optimizer = zero_wdcay_bn_bias(optimizer)\n\n return optimizer", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def optimizer_factory(config, model):\n params = model.parameters()\n\n optimizer = config[\"loss\"].get(\"optimizer\", \"Adam\")\n lr = config[\"loss\"].get(\"lr\", 1e-3)\n momentum = config[\"loss\"].get(\"momentum\", 0.9)\n\n if optimizer == \"SGD\":\n return optim.SGD(params, lr=lr, momentum=momentum)\n elif optimizer == \"Adam\":\n return optim.Adam(params, lr=lr)\n else:\n raise NotImplementedError()", "def make_optimizer(model, lr, opt, weight_decay):\n optimizers = {\n 'adam': optim.Adam,\n 'adamax': optim.Adamax,\n 'rmsprop': optim.RMSprop,\n }\n\n optimizer = optimizers[opt](model.parameters(), lr=lr,\n weight_decay=weight_decay)\n\n return optimizer", "def init_optimizer(model, config, exact_layers=None):\n opt_type = config.optimizer\n if exact_layers:\n logger.info('Learning exact layers, number=%d', len(exact_layers))\n parameters = []\n for i, layer in enumerate(exact_layers):\n if isinstance(layer, tuple) and len(layer) == 2:\n layer, multiplier = layer\n init_multiplier = 1\n elif isinstance(layer, tuple) and len(layer) == 3:\n layer, init_multiplier, multiplier = layer\n else:\n multiplier = 1\n init_multiplier = 1\n lr = config.lr * multiplier\n init_lr = config.lr * multiplier * init_multiplier\n logger.info('Layer=%d, lr=%.5f', i, init_lr)\n parameters.append({'params': layer.parameters(), 'lr': init_lr, 'after_warmup_lr': lr})\n else:\n logger.info('Optimizing all parameters, lr=%.5f', config.lr)\n parameters = model.parameters()\n\n if opt_type == 'sgd':\n optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)\n elif opt_type == 'adam':\n optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)\n elif opt_type == 'yf':\n optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,\n clip_thresh=0.1)\n else:\n raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )\n return optimizer", "def __init__(self, model, adv_func, dist_func, attack_lr=1e-2,\n init_weight=10., max_weight=80., binary_step=10, 
num_iter=500):\n\n self.model = model.cuda()\n self.model.eval()\n\n self.adv_func = adv_func\n self.dist_func = dist_func\n self.attack_lr = attack_lr\n self.init_weight = init_weight\n self.max_weight = max_weight\n self.binary_step = binary_step\n self.num_iter = num_iter", "def create_optimizer(self, run_configuration, rng=None):\n @functools.partial(jax.jit, static_argnums=(1, 2))\n def create_model(rng, example, model_cls):\n with flax.deprecated.nn.attention.Cache().mutate() as cache_def:\n _, initial_params = model_cls.init(\n rng,\n example,\n cache=cache_def)\n model = flax.deprecated.nn.Model(model_cls, initial_params)\n return model, cache_def\n\n config = self.config\n dataset = run_configuration.dataset_info.dataset\n\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n learning_rate = config.opt.learning_rate\n example = self.as_example(next(iter(dataset)))\n model_cls = run_configuration.model\n model, unused_cache_def = create_model(rng, example, model_cls)\n return optimizer_utils.create_optimizer(model, learning_rate)", "def initialize_optimizer(model, args):\n parameters = [p for p in model.parameters() if p.requires_grad]\n if args.optimizer == 'sgd':\n optimizer = optim.SGD(parameters, args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adam':\n optimizer = optim.Adam(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adamax':\n optimizer = optim.Adamax(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n elif args.optimizer == 'adagrad':\n optimizer = optim.Adagrad(parameters, args.learning_rate,\n weight_decay=args.weight_decay)\n scheduler = ReduceLROnPlateau(optimizer, 'min', verbose=True)\n return optimizer, scheduler", "def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:\n norm_module_types = (\n torch.nn.BatchNorm1d,\n torch.nn.BatchNorm2d,\n torch.nn.BatchNorm3d,\n torch.nn.SyncBatchNorm,\n # NaiveSyncBatchNorm inherits from BatchNorm2d\n torch.nn.GroupNorm,\n torch.nn.InstanceNorm1d,\n torch.nn.InstanceNorm2d,\n torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm,\n torch.nn.LocalResponseNorm,\n )\n params: List[Dict[str, Any]] = []\n memo: Set[torch.nn.parameter.Parameter] = set()\n for module in model.modules():\n for key, value in module.named_parameters(recurse=False):\n if not value.requires_grad:\n continue\n # Avoid duplicating parameters\n if value in memo:\n continue\n memo.add(value)\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if isinstance(module, norm_module_types):\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM\n elif key == \"bias\":\n # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0\n # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer\n # hyperparameters are by default exactly the same as for regular\n # weights.\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)\n return optimizer", "def __init__(self, model, src_vocab, tgt_vocab):\n self.max_length = 120\n if torch.cuda.is_available():\n self.model = model.cuda()\n else:\n self.model = model.cpu()\n self.model.eval()\n self.src_vocab = src_vocab\n self.tgt_vocab = tgt_vocab", "def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, 
max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model", "def get_optimizer(model, lr, transfer_optim):\n\n # different otpimizer lr for transfer to reuse low level features\n if transfer_optim:\n if isinstance(model, UNetRegressionModel):\n optimizer = torch.optim.Adam([{'params':\n list(model.msd.inc.parameters()) +\n list(model.msd.down1.parameters()) +\n list(model.msd.down2.parameters()), 'lr': 1e-6},\n {'params': \n list(model.msd.down3.parameters()) +\n list(model.msd.down4.parameters()) +\n list(model.msd.up1.parameters()), 'lr': 1e-5},\n {'params': \n list(model.msd.up2.parameters()) + list(model.msd.up3.parameters()) +\n list(model.msd.up4.parameters()) + list(model.msd.outc.parameters()), 'lr': 1e-4},\n ])\n\n else:\n params = list(model.msd.parameters())\n # case: MSD_d30\n if len(params) < 40:\n optimizer = torch.optim.Adam([{'params': params[1:10], 'lr':1e-6},\n {'params': params[:0]+ params[10:20], 'lr':1e-5},\n {'params': params[20:], 'lr':1e-4},\n ])\n # case: MSD_d80\n else:\n optimizer = torch.optim.Adam([{'params': params[1:20], 'lr':1e-6},\n {'params': params[:0]+ params[20:40], 'lr':1e-5},\n {'params': params[40:], 'lr':1e-4},\n ])\n else:\n optimizer = torch.optim.Adam(model.msd.parameters(), lr)\n\n return optimizer", "def __init__(self, model_class, model=None, env=None, exploration=None,\n gamma=0.99, memory_size=100000, batch_size=64, target_update_frequency=1000, saving_dir=None):\n self.model_class = model_class\n self.env = env\n self.exploration = exploration\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.policy_net = None\n self.target_net = None\n self.optimizer = None\n if model:\n self.policy_net = model\n self.target_net = copy.deepcopy(self.policy_net)\n self.policy_net = self.policy_net.to(self.device)\n self.target_net = self.target_net.to(self.device)\n self.target_net.eval()\n self.optimizer = optim.Adam(self.policy_net.parameters(), lr=0.0001)\n self.memory = ReplayMemory(memory_size)\n self.batch_size = batch_size\n self.gamma = gamma\n self.target_update = target_update_frequency\n self.steps_done = 0\n self.episodes_done = 0\n self.episode_rewards = []\n self.episode_lengths = []\n self.saving_dir = saving_dir\n\n self.state = None", "def optimizerFactory(hybridModel, params):\n\n if params['optim']['name'] == 'adam':\n return torch.optim.Adam(\n hybridModel.parameters(),lr=params['optim']['lr'], \n betas=(0.9, 0.999), eps=1e-08, \n weight_decay=params['optim']['weight_decay'], amsgrad=False\n )\n elif params['optim']['name'] == 'sgd': \n return torch.optim.SGD(\n hybridModel.parameters(), lr=params['optim']['lr'], \n momentum=params['optim']['momentum'], weight_decay=params['optim']['weight_decay']\n )\n \n else:\n raise NotImplemented(f\"Optimizer {params['optim']['name']} not implemented\")", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer', optimizer)", "def fetch_optimizer(args, model):\n optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)\n\n scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps + 100,\n pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')\n\n return optimizer, scheduler", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n 
model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def __init__(self, model: torch.nn.Module) -> None:\n self.model = copy.deepcopy(model)\n self.model.eval().to(device=DEVICE)\n self.rule_layer_map: List[Tuple[List[str], rules.LrpRule,\n Dict[str, Union[torch.Tensor, float]]]] = []\n self.input_nchw: Optional[torch.Tensor] = None\n self.label_idx_n: Optional[torch.Tensor] = None\n self.relevance_scores_nchw: Optional[torch.Tensor] = None\n self.explained_class_indices: Optional[torch.Tensor] = None", "def create_optimizer(self, context, optimizer, host):\n pass", "def create_optimizer(parameters, optimizer_class, optim_params, model_name='model'):\n opt = optimizer_class(parameters, **optim_params)\n if special_parameters.load_model:\n _load_optimizer(opt, model_name)\n return opt", "def define_model_and_optimizer(self):\r\n print(\"* Defining model and optimizer.\", flush=True)\r\n job_dir = self.C.job_dir\r\n\r\n if self.C.restart:\r\n print(\"-- Loading model from previous saved state.\", flush=True)\r\n self.restart_epoch = util.get_restart_epoch()\r\n self.model = torch.load(f\"{job_dir}model_restart_{self.restart_epoch}.pth\")\r\n\r\n print(\r\n f\"-- Backing up as \"\r\n f\"{job_dir}model_restart_{self.restart_epoch}_restarted.pth.\",\r\n flush=True,\r\n )\r\n shutil.copyfile(\r\n f\"{job_dir}model_restart_{self.restart_epoch}.pth\",\r\n f\"{job_dir}model_restart_{self.restart_epoch}_restarted.pth\",\r\n )\r\n\r\n else:\r\n print(\"-- Initializing model from scratch.\", flush=True)\r\n self.model = models.initialize_model()\r\n\r\n self.restart_epoch = 0\r\n\r\n start_epoch = self.restart_epoch + 1\r\n end_epoch = start_epoch + self.C.epochs\r\n\r\n print(\"-- Defining optimizer.\", flush=True)\r\n self.optimizer = torch.optim.Adam(\r\n params=self.model.parameters(),\r\n lr=self.C.init_lr,\r\n weight_decay=self.C.weight_decay,\r\n )\r\n\r\n return start_epoch, end_epoch", "def _build_model(self, model):\n model = model(self.state_dim, n_actions=self.n_actions)\n model.compile(loss=self._huber_loss,\n optimizer=optimizers.Adam(lr=self.learning_rate))\n return model", "def __init__(self, model_name='vgg16'):\n trainer = Trainer(model_name=model_name)\n self.model = trainer.model\n self.model_save_dir = trainer.model_save_dir\n self.model_name = model_name", "def get_adv_optimizer(self, mode: str) -> torch.optim.Optimizer:\n pass", "def get_optimizer(cfg, model_parameters):\r\n op_params = cfg.copy()\r\n del op_params['name']\r\n\r\n optimizer = {\r\n 'sgd': torch.optim.SGD,\r\n 'adam': torch.optim.Adam\r\n }[cfg.name]\r\n return optimizer(model_parameters, **op_params)", "def create_engine(model, optimizer, loss, device):\n model.to(device)\n\n def _update(engine, batch):\n data, label = batch\n num_channels = 1 if len(data.shape) == 2 else data.shape[1]\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n label = label.float()\n\n model.train()\n model.zero_grad()\n optimizer.zero_grad()\n\n output = model(data)\n output = output.view(-1, num_channels, output.shape[-1])\n _loss = loss(output, label)\n _loss.backward()\n optimizer.step()\n\n return _loss.item()\n\n return Engine(_update)", "def run_onnx_optimizer(onnx_model):\n try:\n onnx_polish_model = onnx.utils.polish_model\n except AttributeError:\n pass\n else:\n return onnx_polish_model(onnx_model)\n\n try:\n # pylint: disable=import-outside-toplevel\n import onnxoptimizer\n except ImportError:\n pass\n else:\n return onnxoptimizer.optimize(onnx_model)\n\n return 
onnx_model", "def __init__(self,\n model: Type[torch.nn.Module],\n optimizer: Type[optim.PyroOptim] = None,\n loss: Type[infer.ELBO] = None,\n enumerate_parallel: bool = False,\n seed: int = 1,\n **kwargs: Union[str, float]\n ) -> None:\n pyro.clear_param_store()\n set_deterministic_mode(seed)\n self.device = kwargs.get(\n \"device\", 'cuda' if torch.cuda.is_available() else 'cpu')\n if optimizer is None:\n lr = kwargs.get(\"lr\", 1e-3)\n optimizer = optim.Adam({\"lr\": lr})\n if loss is None:\n if enumerate_parallel:\n loss = infer.TraceEnum_ELBO(\n max_plate_nesting=1, strict_enumeration_warning=False)\n else:\n loss = infer.Trace_ELBO()\n guide = model.guide\n if enumerate_parallel:\n guide = infer.config_enumerate(guide, \"parallel\", expand=True) \n self.svi = infer.SVI(model.model, guide, optimizer, loss=loss)\n self.loss_history = {\"training_loss\": [], \"test_loss\": []}\n self.current_epoch = 0", "def __init__(self, model: Module, settings: PGActorSettings) -> None:\n super().__init__(settings)\n\n final_layer, self.network = finalize_module(model, from_numpy(self.state_space.sample()),\n self._num_policy_params)\n self.settings.optimizer.add_param_group({\"params\": final_layer.parameters()})", "def __init__(self, name, model, log_dir, lr, lr_decay_step, adam=False):\n\t\tsuper(Trainer, self).__init__(self.update_model)\n\t\tself.model = model\n\t\t# tqdm\n\t\tProgressBar(persist=True).attach(self)\n\t\t# Optimizer\n\t\tparams = [p for p in model.parameters() if p.requires_grad]\n\t\tif adam:\n\t\t\tself.optimizer = torch.optim.Adam(params, lr=lr)\n\t\telse:\n\t\t\tself.optimizer = torch.optim.SGD(params, lr=lr, momentum=0.9)\n\t\t# Scheduler\n\t\tif lr_decay_step > 0:\n\t\t\tself.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=lr_decay_step, gamma=0.1)\n\t\t\tself.add_event_handler(Events.EPOCH_COMPLETED, lambda e: e.scheduler.step())\n\t\telse:\n\t\t\tself.scheduler = None\n\t\t# Terminate if nan values found\n\t\tself.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())\n\t\t# Tensorboard logging\n\t\tself.tb_logger = TensorboardLogger(log_dir=os.path.join(log_dir, name))\n\t\tself.add_event_handler(Events.COMPLETED, lambda x: self.tb_logger.close())\n\t\tself.tb_logger.attach(self,\n\t\t log_handler=OptimizerParamsHandler(self.optimizer),\n\t\t event_name=Events.EPOCH_COMPLETED)\n\t\tself.tb_logger.attach(self,\n\t\t log_handler=OutputHandler(tag='training', output_transform=lambda x: {\n\t\t\t 'rpn_box_loss': round(self.state.output['loss_rpn_box_reg'].item(), 4),\n\t\t\t 'rpn_cls_loss': round(self.state.output['loss_objectness'].item(), 4),\n\t\t\t 'roi_box_loss': round(self.state.output['loss_box_reg'].item(), 4),\n\t\t\t 'roi_cls_loss': round(self.state.output['loss_classifier'].item(), 4)\n\t\t }),\n\t\t event_name=Events.EPOCH_COMPLETED)\n\t\t# Run on GPU (cuda) if available\n\t\tif torch.cuda.is_available():\n\t\t\ttorch.cuda.set_device(int(get_free_gpu()))\n\t\t\tmodel.cuda(torch.cuda.current_device())", "def __init__(self, model: Callable, q: Callable, loss: Callable,\n optimizer: jax_optim.Optimizer, initial_params: Dict):\n self.model = model\n self.q = q\n self.loss = loss\n self.optimizer = optimizer\n self.optimizer_state = self.optimizer.init_fn(initial_params)\n self.step = 0", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer'+str(self._optimizer_counter), optimizer)\n self._optimizer_counter += 1\n # optimizer indexing : optimizer 0 is the optimizer for 
layer 0", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def init_optimizer(network, config):\n # define optimizer and loss\n if config.optimizer == 'adadelta':\n opt = torch.optim.Adadelta(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n elif config.optimizer == 'adam':\n opt = torch.optim.Adam(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n elif config.optimizer == 'rmsprop':\n opt = torch.optim.RMSprop(network.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n return opt", "def make_optimizer(self):\r\n # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]\r\n if self.flags.optim == 'Adam':\r\n op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'RMSprop':\r\n op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'SGD':\r\n op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n else:\r\n raise Exception(\"Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben\")\r\n return op", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def compile_optimizer(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.cfg.learning_rate)\n\n return optimizer", "def _inst_optimizer(self):\n optimizer = Optimizers(self.m_cfg['configs']['lr_politics']['optimizer']).value\n lr_schedule = self.m_cfg['configs']['lr_politics']['lr']\n opt = optimizer(learning_rate=lr_schedule)\n return opt", "def create_optimizer(net, optimizer_state_dict, learning_rate, device='cuda'):\n # define optimizer\n optimizer = optim.Adam([{\n 'params': net.net.parameters(),\n 'initial_lr': learning_rate\n }])\n # load optimizer checkpoint if available\n if optimizer_state_dict is not None:\n target_device = 'cpu' if device == 'cpu' else 'cuda'\n # load the optimizer weights\n optimizer.load_state_dict(optimizer_state_dict)\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = getattr(v, target_device)()\n return optimizer", "def init_optimizer_for_pruning(cls, optimizer):\n assert (cls.__optimizer is None), \"ASP has initialized optimizer already.\"\n assert (cls.__calculate_mask is not None), \"Called 
ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning.\"\n\n # store pointer to original optimizer step method\n cls.__optimizer = optimizer\n cls.__optimizer.__step = optimizer.step\n\n def __step(opt_self, *args, **kwargs):\n # prune gradients before step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.grad.mul_(mask)\n # call original optimizer step method\n rval = opt_self.__step(*args, **kwargs)\n # prune parameters after step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.mul_(mask)\n return rval\n cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)", "def get_optimizer(self, optimizer, params):\n # Reference: https://pytorch.org/docs/stable/optim.html#per-parameter-options\n optimizer = getattr(optim, optimizer, \"Adam\")(\n [\n {'params': self.shared_conv.parameters()},\n {'params': self.detector.parameters()},\n # {'params': self.recognizer.parameters()},\n ],\n **params\n )\n\n return optimizer", "def _get_optim(\n self,\n model: nn.Module,\n optim_class: Type[torch.optim.Optimizer],\n multi_tensor: bool,\n ) -> torch.optim.Optimizer:\n param_groups = self._get_param_groups(model)\n return optim_class(param_groups, lr=5e-3, foreach=multi_tensor)", "def __init__(self, model: Editable, loss_function, error_function=classification_error, opt=None,\n stability_coeff=0.01, editability_coeff=0.01, max_norm=None, **kwargs):\n opt = opt if opt is not None else torch.optim.Adam(model.parameters())\n super().__init__(model, loss_function=loss_function, opt=opt, error_function=error_function, **kwargs)\n self.stability_coeff, self.editability_coeff, self.max_norm = stability_coeff, editability_coeff, max_norm", "def __init__(self, *, model, criterion, optimizer, dataloader, logdir='.', storage='storage.hdf5',\n transformation=lambda x: x, loss_decay=0.95, split_sample=None):\n\n self.model = model\n self.criterion = criterion\n self.optimizer = optimizer\n self.dataloader = dataloader\n self.logdir = logdir\n self.storage = os.path.join(logdir, storage) if storage is not None else None\n self.transformation = transformation\n self.cuda = bool(next(model.parameters()).is_cuda)\n self.dtype = next(model.parameters()).dtype\n self.loss_decay = loss_decay\n self.epochs = 0\n self.steps = 0\n self.split_sample = split_sample if callable(split_sample) else self._split_sample\n\n # register base handlers\n self.events = {event: [] for event in events.event_list}\n self.register_event_handler(events.AFTER_TRAINING, self.save, directory=self.logdir)\n self.register_event_handler(events.AFTER_TRAINING, self.close_storage)\n self.register_event_handler(events.EACH_STEP, TrainingLoss(), monitor=(storage is not None))\n\n super().__init__()\n\n # unify backward call for all types of optimizer\n if not hasattr(self.optimizer, 'backward'):\n setattr(self.optimizer, 'backward', self._backward)", "def __init__(self, state_dim, action_dim, learning_rate, weight_decay):\n self.dynamics_net = ForwardModel(state_dim, action_dim)\n self.rewards_net = RewardModel(state_dim, action_dim)\n self.done_net = RewardModel(state_dim, action_dim)\n\n self.dyn_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.reward_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)\n self.done_optimizer = tfa_optimizers.AdamW(\n learning_rate=learning_rate, weight_decay=weight_decay)", "def 
inject(self, model):\n if not hasattr(model, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n\n model._check_trainable_weights_consistency()\n\n if model.train_function is None:\n inputs = (model._feed_inputs +\n model._feed_targets +\n model._feed_sample_weights)\n if model._uses_dynamic_learning_phase():\n inputs += [K.learning_phase()]\n fast_params = model._collected_trainable_weights\n\n with K.name_scope('training'):\n with K.name_scope(model.optimizer.__class__.__name__):\n training_updates = model.optimizer.get_updates(\n params=fast_params,\n loss=model.total_loss)\n slow_params = [K.variable(p) for p in fast_params]\n fast_updates = (model.updates +\n training_updates +\n model.metrics_updates)\n\n slow_updates, copy_updates = [], []\n for p, q in zip(fast_params, slow_params):\n slow_updates.append(K.update(q, q + self.alpha * (p - q)))\n copy_updates.append(K.update(p, q))\n\n # Gets loss and metrics. Updates weights at each call.\n fast_train_function = K.function(\n inputs,\n [model.total_loss] + model.metrics_tensors,\n updates=fast_updates,\n name='fast_train_function',\n **model._function_kwargs)\n\n def F(inputs):\n self.count += 1\n R = fast_train_function(inputs)\n if self.count % self.k == 0:\n K.batch_get_value(slow_updates)\n K.batch_get_value(copy_updates)\n return R\n\n #### REM : C'est pas super propre ca comme manière de faire\n #### Tu rompts l'encapsulation de la classe \n model.train_function = F", "def get_unet_model(self):\n # create optimizer instance\n config = {\n 'class_name': self.optimizer,\n 'config': self.optimizer_params}\n optimizer = get_optimizer(config)\n\n self.model = unet(optimizer=optimizer,\n loss=self.loss,\n metrics=self.metrics,\n input_size=self.input_size,\n pretrained_weights=self.pretrained_weights)", "def set_lr(model: Union[tf.keras.Model, torch.nn.Module], lr: float, weight_decay: Optional[float] = None):\n assert hasattr(model, \"fe_compiled\") and model.fe_compiled, \"set_lr only accept models from fe.build\"\n if isinstance(model, tf.keras.Model):\n # when using decoupled weight decay like SGDW or AdamW, weight decay factor needs to change together with lr\n # see https://www.tensorflow.org/addons/api_docs/python/tfa/optimizers/DecoupledWeightDecayExtension for detail\n if isinstance(model.current_optimizer, tfa.optimizers.DecoupledWeightDecayExtension) or hasattr(\n model.current_optimizer, \"inner_optimizer\") and isinstance(\n model.current_optimizer.inner_optimizer, tfa.optimizers.DecoupledWeightDecayExtension):\n if weight_decay is None:\n weight_decay = tf.keras.backend.get_value(model.current_optimizer.weight_decay) * lr / get_lr(model)\n tf.keras.backend.set_value(model.current_optimizer.weight_decay, weight_decay)\n tf.keras.backend.set_value(model.current_optimizer.lr, lr)\n elif isinstance(model, torch.nn.Module):\n for param_group in model.current_optimizer.param_groups:\n param_group['lr'] = lr\n else:\n raise ValueError(\"Unrecognized model instance {}\".format(type(model)))", "def setup_optims(self):\n lr = self.train_config['lr']\n b1 = self.train_config['b1']\n b2 = self.train_config['b2']\n weight_decay = self.train_config['weight_decay']\n self.opt = torch.optim.Adam(self.network.parameters(), lr=lr, betas=(b1, b2),\n weight_decay=weight_decay)", "def __init__(self,\n model=None,\n train_dataset=None,\n eval_dataset=None,\n optimizer=None,\n criterion=None,\n cpu: bool = False):\n\n # import torch for initialization\n torch = import_optional_dependency(\"torch\")\n\n # 
============== basic parameters ============== #\n # the device that used to train models, which can automatically set\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and not cpu else \"cpu\")\n # the optimizer of training\n self.optimizer = optimizer\n # the neural network model\n self.model = model.to(self.device) if model else None\n # the criterion of training\n self.criterion = criterion.to(self.device) if criterion else None\n # the dataset for training\n self.train_dataset = train_dataset\n # the dataset for evaluation\n self.eval_dataset = eval_dataset\n # the training process would show information if self.info is True\n self.info = True\n\n # ============== the parameters of training ============== #\n # the loss average meter for every epoch\n self.epoch_loss = AverageMeter()\n # the counter for training\n self.epoch = 0\n # training process for iteration\n self.batch_process = None", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def convert_to_torch_amp(model: nn.Module,\n optimizer: Optimizer,\n criterion: Optional[_Loss] = None,\n amp_config: Optional[Config] = None):\n model = TorchAMPModel(model)\n if amp_config is None:\n amp_config = dict()\n optimizer = TorchAMPOptimizer(optimizer, **amp_config)\n if criterion:\n criterion = TorchAMPLoss(criterion)\n return model, optimizer, criterion", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def __init__(\n self,\n model: nn.Module,\n branch_losses: Dict[str, int],\n branch_loss_params: Dict[str, Dict[str, Any]] = None,\n branch_metrics: Dict[str, List[str]] = None,\n optimizer: str = \"adam\",\n lookahead: bool = False,\n optim_params: Dict[str, Dict[str, Any]] 
= None,\n scheduler: str = \"reduce_on_plateau\",\n scheduler_params: Dict[str, Any] = None,\n log_freq: int = 100,\n **kwargs,\n ) -> None:\n super().__init__()\n self.model = model\n self.heads = model.heads\n self.aux_key = model.aux_key\n self.inst_key = model.inst_key\n\n self.optimizer = optimizer\n self.optim_params = optim_params\n self.scheduler = scheduler\n self.scheduler_params = scheduler_params\n self.lookahead = lookahead\n\n self.branch_losses = branch_losses\n self.branch_metrics = branch_metrics\n self.branch_loss_params = branch_loss_params\n self.log_freq = log_freq\n\n self._validate_branch_args()\n self.save_hyperparameters(ignore=\"model\")\n\n self.criterion = self.configure_loss()\n metrics = self.configure_metrics()\n self.train_metrics = deepcopy(metrics)\n self.val_metrics = deepcopy(metrics)\n self.test_metrics = deepcopy(metrics)", "def load_model(model_path: str) -> object:\n model = torch.load(model_path)\n model.eval()\n return model", "def _make_model(self):\n self._model = tf.estimator.Estimator(model_fn=self.model_fn,\n model_dir=self.model_dir,\n config=self._config,\n params=self._params,\n )", "def optimizer_setup(optimizer_class: Type[torch.optim.Optimizer], **hyperparameters) -> \\\n Callable[[torch.nn.Module], torch.optim.Optimizer]:\n\n def f(model):\n return optimizer_class(model.parameters(), **hyperparameters)\n\n return f", "def setup_trainer(model, train_dir, train_loader, val_loader,\n property_map, exclude=[]):\n hooks = build_hooks(train_dir=train_dir, property_map=property_map)\n\n trainable_params = filter(lambda p: p.requires_grad, model.parameters())\n trainable_params = filter(lambda p: p not in exclude, trainable_params)\n\n optim = build_optimizer(trainable_params=trainable_params)\n loss_fn = build_loss(property_map=property_map)\n trainer = Trainer(train_dir, model, loss_fn, optim, train_loader,\n val_loader, hooks=hooks)\n return trainer", "def model_creator(config):\n return nn.Linear(1, 1)", "def setup_optimizer(params, config):\n if config.METHOD == 'AdaMod':\n return AdaMod(params,\n lr=config.LR,\n betas=(config.BETA, 0.999),\n beta3=config.BETA3,\n weight_decay=config.WD)\n elif config.METHOD == 'Adam':\n return torch.optim.Adam(params,\n lr=config.LR,\n betas=(config.BETA, 0.999),\n weight_decay=config.WD)\n elif config.METHOD == 'SGD':\n return torch.optim.SGD(params,\n lr=config.LR,\n momentum=config.MOMENTUM,\n dampening=config.DAMPENING,\n weight_decay=config.WD,\n nesterov=config.NESTEROV)\n else:\n raise NotImplementedError", "def get_optimizer(model, learning_rate, name=\"SGD\"):\n \n optimizer_dict = {\"SGD\": optim.SGD(model.parameters(), lr=learning_rate,\n weight_decay=0.0001, momentum=0.9),\n \"Adam\": optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=0.0001),\n \"RMSProp\": optim.RMSprop(model.parameters(), lr=learning_rate,\n weight_decay=0.0001)}\n return optimizer_dict[name]", "def __init__(self,\n verbosity=1,\n model=None,\n path=None,\n prefix=None,\n **kwargs):\n if K.BACKEND == 'tensorflow' and tf is not None:\n min_version = tf.__version__.split(\".\")[1]\n maj_version = tf.__version__.split(\".\")[0]\n if maj_version in [\"2\"] and min_version in [\"3\", \"4\"]:\n raise NotImplementedError(f\"\"\"\n Not implemented due to a bug in tensorflow as shown here https://github.com/tensorflow/tensorflow/issues/44646\n You can use functional API instead by using\n from ai4water.functional import Model\n instead of \n from ai4water import Model\n Or change the tensorflow version. 
Current version is {tf.__version__}. \n \"\"\")\n\n tf_kwargs = {}\n for arg in ['inputs', 'outputs']:\n if arg in kwargs:\n tf_kwargs[arg] = kwargs[arg]\n\n self._go_up = False\n\n MODEL.__init__(self, **tf_kwargs)\n\n self._go_up = True\n BaseModel.__init__(self,\n prefix=prefix,\n path=path,\n verbosity=verbosity,\n model=model,\n **kwargs)\n\n self.config['backend'] = K.BACKEND\n\n if torch is not None:\n from .models._torch import Learner\n self.torch_learner = Learner(\n model=self,\n batch_size=self.config['batch_size'],\n num_epochs=self.config['epochs'],\n shuffle=self.config['shuffle'],\n to_monitor=self.config['monitor'],\n patience=self.config['patience'],\n path=self.path,\n use_cuda=False,\n wandb_config=self.config['wandb_config'],\n verbosity=self.verbosity\n )\n\n if self.category == \"DL\":\n self.initialize_layers(self.config['model']['layers'])\n\n if K.BACKEND == 'tensorflow':\n outs = self.call(self._input_lyrs(), run_call=False)\n setattr(self, 'output_lyrs', outs)\n self._go_up = False # do not reinitiate BaseModel and other upper classes\n\n maj_ver = int(tf.__version__.split('.')[0])\n min_ver = int(tf.__version__.split('.')[1][0])\n # in tf versions >= 2.5, we don't need to specify inputs and outputs as keyword arguments\n if maj_ver>1 and min_ver>=5:\n MODEL.__init__(self, self._input_lyrs(), self.output_lyrs)\n else:\n MODEL.__init__(self, inputs=self._input_lyrs(), outputs=self.output_lyrs)\n\n self.build(self._get_dummy_input_shape()) # will initialize ML models or build NNs", "def train(model, optimizer: torch.optim, data: torch_geometric.data.Data):\n model.train()\n optimizer.zero_grad()\n F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()\n optimizer.step()\n\n model.eval()", "def __init__(self, model):\n self.output_weights = model.get_layer(\"output\").get_weights()[0]\n self.cam_model = Model(inputs=model.input, outputs=(model.get_layer(\"activation\").output, model.get_layer(\"output\").output))", "def __init__(self,\n forward_models,\n forward_model_optim=tf.keras.optimizers.Adam,\n forward_model_lr=0.001):\n\n super().__init__()\n self.forward_models = forward_models\n self.bootstraps = len(forward_models)\n\n # create optimizers for each model in the ensemble\n self.forward_model_optims = [\n forward_model_optim(learning_rate=forward_model_lr)\n for i in range(self.bootstraps)]", "def create_custom_supervised_trainer(model, optimizer, loss_fn, metrics={}, device=None, prepare_batch=None):\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n if not prepare_batch:\n x, y = _prepare_batch(batch, device=device)\n else:\n x, y = prepare_batch(batch, device=device)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item(), y_pred, y\n\n def _metrics_transform(output):\n return output[1], output[2]\n\n engine = Engine(_update)\n\n for name, metric in metrics.items():\n metric._output_transform = _metrics_transform\n metric.attach(engine, name)\n\n return engine", "def model_compile(self, model, optimiser, loss, metrics, learning_rate = None):\n\n if optimiser == \"sgd\":\n model.compile(optimizer = SGD(lr = learning_rate),\n loss = loss, metrics = metrics)\n elif optimiser == \"rmsprop\":\n model.compile(optimizer = RMSprop(lr = learning_rate),\n loss = loss, metrics = metrics)\n else:\n model.compile(optimizer = optimiser, loss = loss,\n metrics = metrics)", "def get_optimizer(model, lr_method, lr_rate):\n lr_method_name = lr_method\n\n # initialize optimizer 
function\n if lr_method_name == 'sgd':\n optimizer = optim.SGD(model.parameters(), lr=lr_rate, momentum=0.9)\n # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)\n elif lr_method_name == 'adagrad':\n optimizer = optim.Adagrad(model.parameters(), lr=lr_rate)\n # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n elif lr_method_name == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=lr_rate)\n # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.001)\n else:\n raise Exception('unknown optimization method.')\n\n return optimizer # , scheduler", "def _set_optimizer(self):\n\n if self.optimizer_name == 'Adam':\n self.optimizer = optim.Adam(self.net.parameters(),\n lr=self.learning_rate,\n betas=self.betas,\n eps=1e-8,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'SGD_Nesterov':\n self.optimizer = optim.SGD(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay,\n nesterov=True)\n elif self.optimizer_name == 'RMSprop':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n momentum=self.momentum,\n weight_decay=self.weight_decay)\n elif self.optimizer_name == 'Adagrad':\n self.optimizer = optim.Adagrad(self.net.parameters(),\n lr=self.learning_rate,\n weight_decay=self.weight_decay)\n else:\n print(\"Optimizer '\" + self.optimizer_name + \"' not implemented.\")", "def __init__(self, model_path: str = None):\n\n self.img_size = 128\n self.tranform = transforms.Compose(\n [transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor()]\n )\n if model_path:\n self.__load_model(path=model_path)\n else:\n print(\"Provide a path to trained model!\")", "def add_optimizer(self):\n \n with tf.variable_scope(\"optimizer\"):\n\n # Define optimizer and minimize loss\n if self.OPTIM == \"RMSProp\":\n self.optimizer = tf.train.RMSPropOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"GD\":\n self.optimizer = tf.train.GradientDescentOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n \n elif self.OPTIM == \"Adam\":\n self.optimizer = tf.train.AdamOptimizer(self.LEARN_RATE).\\\n minimize(self.cost)\n\n # Merge all summaries for tensorboard\n #self.tbsummaries = tf.summary.merge_all()", "def get_task_optimizer(self) -> torch.optim.Optimizer:\n pass", "def __init__(self,\n model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n optimizer: torch.optim.Optimizer,\n n_epochs: int,\n labels2ind: Dict[str, int],\n scheduler: Optional[torch.optim.lr_scheduler.LambdaLR] = None,\n device: str = 'cpu',\n clipping: Optional[Union[int, float]] = None,\n accumulate_grad_every: int = 1,\n print_every: int = 10,\n print_val_mistakes: bool = False,\n output_dir: str = './'):\n\n self.tokenizer = tokenizer\n self.model = model\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.n_epochs = n_epochs\n self.labels2ind = labels2ind\n self.inds2labels = {v: k for k, v in self.labels2ind.items()}\n self.device = device\n self.clipping = clipping\n self.accumulate_grad_every = accumulate_grad_every\n self.print_every = print_every\n self.print_val_mistakes = print_val_mistakes\n self.output_dir = output_dir\n\n os.makedirs(self.output_dir, exist_ok=True)", "def get_optimizer(train_model, params):\n # TODO: Why are we looking for an entry 
in a list instead of directly matching the string?\n if params.optimizer in [\"adam\"]:\n param_optimizer = list(train_model.named_parameters())\n # Set weight decay of bias and LayerNorm.weight to zero.\n param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(\n nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(\n nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if params.fp16:\n print(\"SystemLog: Loading Apex and building the FusedAdam optimizer.\")\n try:\n from apex import amp\n from apex.optimizers import FusedAdam\n from apex.amp import _amp_state\n except:\n raise ImportError(\n \"SystemLog: Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n if params.amp_opt_lvl not in ['O0', 'O1', 'O2', 'O3']:\n raise ValueError(\"SystemLog: %s amp_opt_level is not supported\" % params.amp_opt_lvl)\n\n print(\"SystemLog: Using %s opt_level for Amp\" % params.amp_opt_lvl)\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=params.learning_rate,\n bias_correction=False)\n if params.loss_scale == 0:\n train_model, optimizer = amp.initialize(train_model, optimizer, opt_level=params.amp_opt_lvl, loss_scale=\"dynamic\")\n else:\n train_model, optimizer = amp.initialize(train_model, optimizer, opt_level=params.amp_opt_lvl, loss_scale=params.loss_scale)\n amp._amp_state.loss_scalers[0]._loss_scale = 2**20\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=params.learning_rate,\n warmup=params.warmup_proportion,\n t_total=params.total_training_steps)\n else:\n # TODO: These parameters should be made configurable through global parameters/function arguments\n optimizer = torch.optim.Adadelta(train_model.parameters(), lr=params.learning_rate, rho=0.95, eps=1e-08, weight_decay=0)\n\n return train_model, optimizer", "def get_optimizer(start_epoch, factor, opt_config, model):\n optimizer = __optimizers[opt_config['optimizer']]\n opt_params = {'parameter_list': model.parameters()}\n for key in opt_config.keys():\n if key == 'learning_rate':\n lr = opt_config[key]\n bo = [val*factor for val in lr['bound']]\n decay_lr = fluid.dygraph.PiecewiseDecay(bo, lr['value'], start_epoch*factor)\n opt_params['learning_rate'] = decay_lr\n elif key == 'weight_decay':\n regularization = fluid.regularizer.L2Decay(regularization_coeff=opt_config[key])\n opt_params['regularization'] = regularization\n elif key == 'momentum':\n opt_params['momentum'] = opt_config[key]\n\n return optimizer(**opt_params)", "def load_model(model_name, environment_name):\n ray.init(ignore_reinit_error=True)\n # Fetch the specified model trainer.\n model_module = importlib.import_module(\n \"ray.rllib.agents.\" + alg2module[model_name])\n # Load the trainer and return.\n trainer = getattr(model_module, model_name + 'Trainer')\n\n env = None\n if type(environment_name) == str:\n if \"MiniGrid\" in environment_name:\n import gym_minigrid.wrappers as gmw\n # Need to adjust observation space.\n minigrid_env = gym.make(environment_name)\n env = gmw.ImgObsWrapper(minigrid_env)\n else:\n env = gym.make(environment_name)\n else:\n env = environment_name\n\n return trainer(env=environment_name)", "def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.parameters(),\n lr=self.hparams.learning_rate,\n weight_decay=self.hparams.weight_decay,\n 
)\n return optimizer", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def convert_to_torch_script(model, input_size):\n model.eval()\n\n # An example input you would normally provide to your model's forward() method.\n example = torch.rand(1, 3, input_size, input_size)\n\n # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\n traced_script_module = torch.jit.trace(model, example)\n\n return traced_script_module", "def train(train_loader : torch.utils.data.DataLoader, model : nn.Module, criterion : nn.Module, optimizer : torch.optim.Optimizer) -> logger.Result:", "def set_optimizer(self, config):\r\n self.optimizer = optim.Adam(self.net.parameters(), config.lr)\r\n self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, config.lr_decay)", "def load_model(model: nn.Module, model_args: dict, model_weights: str, device: torch.device):\n model = model(**model_args)\n state_dict = torch.load(model_weights, map_location=device)\n model.load_state_dict(state_dict[\"model\"])\n return model", "def load(model_path: str):\n model = torch.load(model_path)\n model.eval()\n return model", "def __init__(\n self, weights_path: Optional[str], max_frames: int = 0, **model_kwargs,\n ):\n super().__init__()\n max_frames = int(max_frames)\n if max_frames < 0:\n raise ValueError(f\"max_frames {max_frames} cannot be negative\")\n self.max_frames = max_frames\n self.model, self.optimizer = make_model(\n weights_path=weights_path, **model_kwargs\n )", "def select_optimizer(opt, model):\n\n if opt == 'msgd':\n return optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n elif opt == 'adam':\n return optim.Adam(model.parameters(), lr=0.001)\n elif opt == 'rmsprop':\n return optim.RMSprop(model.parameters(), lr=0.001)\n elif opt == 'adagrad':\n return optim.Adagrad(model.parameters(), lr=0.001)\n else:\n return optim.SGD(model.parameters(), lr=0.001)", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def _init_optimizer(self, optimizer):\n if optimizer == \"rmsprop\":\n self.optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)\n elif optimizer == \"adagrad\":\n self.optimizer = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-06)\n elif optimizer == \"adadelta\":\n self.optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06)\n elif optimizer == \"adam\":\n self.optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n elif optimizer == \"adamax\":\n self.optimizer = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08) \n elif hasattr(optimizer, __call__):\n self.optimizer = optimizer\n else:\n print \"Error: unsupported optimizer %s\"%optimizer\n sys.exit(0)", "def 
optim_cls(self) -> type:\n\n return getattr(torch.optim, self._params.get(\"optimizer\", \"Adam\"))" ]
[ "0.75656193", "0.7248476", "0.7094529", "0.70669323", "0.6984369", "0.68603814", "0.67578644", "0.66976243", "0.6690973", "0.66653234", "0.66625184", "0.66552", "0.6637654", "0.66292614", "0.6535859", "0.65118015", "0.6485002", "0.6414784", "0.6320487", "0.6311424", "0.6306251", "0.61640453", "0.61441946", "0.61401016", "0.6127151", "0.6099292", "0.60934687", "0.6090151", "0.6079634", "0.60610604", "0.60554343", "0.60450107", "0.5977532", "0.59358186", "0.5917297", "0.5915897", "0.59027565", "0.58937645", "0.5862995", "0.58558595", "0.5843563", "0.58394915", "0.5831794", "0.57926697", "0.5783491", "0.57375854", "0.57089233", "0.5702956", "0.57006794", "0.5697089", "0.5679795", "0.56763273", "0.56739414", "0.56324714", "0.56317294", "0.56090605", "0.55861694", "0.5580019", "0.5579856", "0.5579526", "0.5575561", "0.55751354", "0.55649763", "0.55331266", "0.5526492", "0.5523804", "0.55183876", "0.5508744", "0.55018747", "0.5491707", "0.54802734", "0.5461728", "0.54499745", "0.5447595", "0.5441311", "0.543569", "0.542988", "0.5426924", "0.542245", "0.54076505", "0.54060507", "0.54000556", "0.5398662", "0.53774637", "0.5368346", "0.53679585", "0.535337", "0.5350745", "0.53373", "0.5336137", "0.53358835", "0.5321679", "0.5318685", "0.5314642", "0.5309358", "0.530587", "0.5302316", "0.52985936", "0.52985936", "0.5296311" ]
0.70478994
4
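A minimal illustrative sketch (not part of the dataset rows; the function name, default optimizer name, and learning rate are assumed) of the pattern several of the retrieved snippets above use: resolving a torch.optim class by name and instantiating it over a model's parameters.

import torch

def build_optimizer(model: torch.nn.Module, name: str = "Adam", lr: float = 1e-3) -> torch.optim.Optimizer:
    # Resolve the optimizer class by name, e.g. torch.optim.Adam or torch.optim.SGD.
    optim_cls = getattr(torch.optim, name)
    # Instantiate it over the model's trainable parameters.
    return optim_cls(model.parameters(), lr=lr)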
Loads a checkpoint of an optimizer.
def try_load_checkpoint_for_optimizer(self) -> bool:
    if self._optimizer is None:
        raise ValueError("Optimizer must be created before optimizer checkpoint can be loaded.")

    if not self.checkpoint_path:
        logging.warning("No checkpoint path provided.")
        return False

    if not self.checkpoint_path.is_file():
        logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')
        return False

    logging.info(f"Loading checkpoint {self.checkpoint_path}")
    checkpoint = ModelAndInfo.read_checkpoint(self.checkpoint_path, self.config.use_gpu)

    try:
        state_dict = checkpoint[ModelAndInfo.OPTIMIZER_STATE_DICT_KEY]
    except KeyError:
        logging.error(f"Key {ModelAndInfo.OPTIMIZER_STATE_DICT_KEY} not found in checkpoint")
        return False

    self._optimizer.load_state_dict(state_dict)

    logging.info(f"Loaded optimizer from checkpoint (epoch: {checkpoint[ModelAndInfo.EPOCH_KEY]})")
    self.checkpoint_epoch = checkpoint[ModelAndInfo.EPOCH_KEY]
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 
'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in 
checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if \"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status = ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def load_checkpoint(self, filename, load_optim=True):\n extra_state, optim_history, last_optim_state = \\\n utils.load_model_state(filename, self.get_model())\n\n if last_optim_state is not None:\n # rebuild optimizer after loading model, since params may have changed\n #self.optimizer = optim.build_optimizer(self.args, self.model.parameters())\n self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)\n\n if load_optim:\n self._optim_history = optim_history\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n if last_optim['criterion_name'] == self.criterion.__class__.__name__:\n self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])\n if last_optim['optimizer_name'] == self.optimizer.__class__.__name__:\n self.optimizer.load_state_dict(last_optim_state)\n\n 
self._num_updates = last_optim['num_updates']\n\n return extra_state", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def LoadProgramState(self, restored_checkpoint_path=None, sess=None):\n pass", "def _resume_checkpoint(self, resume_path, model, optimizer):\n if not resume_path:\n return model, optimizer\n\n self.logger.info(f'Loading checkpoint: {resume_path}')\n checkpoint = torch.load(resume_path)\n model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from \"\n \"that of checkpoint. Optimizer parameters not being resumed.\")\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(f'Checkpoint \"{resume_path}\" loaded')\n return model, optimizer", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def load_ckpt(self, name=None):\r\n name = name if name == 'latest' else \"ckpt_epoch{}\".format(name)\r\n load_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if not os.path.exists(load_path):\r\n raise ValueError(\"Checkpoint {} not exists.\".format(load_path))\r\n\r\n checkpoint = torch.load(load_path)\r\n print(\"Checkpoint loaded from {}\".format(load_path))\r\n if isinstance(self.net, nn.DataParallel):\r\n self.net.module.load_state_dict(checkpoint['model_state_dict'])\r\n else:\r\n self.net.load_state_dict(checkpoint['model_state_dict'])\r\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\r\n self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\r\n self.clock.restore_checkpoint(checkpoint['clock'])", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n return torch.load(checkpoint_path, *args, **kwargs)", "def load_checkpoint_train(cpdir, model, optimizer):\n start_epoch = 0\n start_global_step = 0\n if cpdir is not None:\n start_global_step, start_epoch = load_checkpoint(\n cpdir, model, optimizer)\n start_global_step += 1\n start_epoch += 1\n return start_global_step, start_epoch", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] 
Failed to find a checkpoint, Exception!\")\n return False, 0", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, **kwargs) for worker in self.remote_workers])", "def opt_from_checkpoint(\n checkpoint_path: str,\n config_path: Optional[str] = None,\n extra_bindings=tuple([])\n) -> Optimizer:\n\n if config_path is None:\n config_path = \"/\".join(checkpoint_path.split(\"/\")[:-1]) + \"/config.gin\"\n\n logging.info(\"Restoring configs from: %s\", config_path)\n with gin.unlock_config():\n scope = f\"opt_from_checkpoint__{str(uuid.uuid4()).replace('-', '_')}\"\n with gin.config_scope(None):\n with gin.config_scope(scope):\n if config_path:\n with file_open(config_path, \"rb\") as f:\n content = bytes(f.read()).decode(\"utf-8\")\n\n # gin writes out multi line sometimes, undo this.\n content = content.replace(\"\\\\\\n\", \"\")\n\n def maybe_add_scope(c):\n # filter out train as this overlaps with outer_training.\n if c.startswith(\"#\"):\n return None\n if \"=\" in c:\n return scope + \"/\" + c\n return c\n\n bindings = [maybe_add_scope(c) for c in content.split(\"\\n\")]\n bindings = [b for b in bindings if b]\n bindings = bindings + [maybe_add_scope(c) for c in extra_bindings]\n\n logging.info(\"Parsing bindings\")\n for b in bindings:\n logging.info(b)\n print(b)\n gin.parse_config(bindings, skip_unknown=True)\n\n configurable = gin.query_parameter(f\"{scope}/run_train.lopt\")\n if isinstance(configurable, gin.config._UnknownConfigurableReference): # pylint: disable=protected-access\n raise ValueError(\"Gin couldn't find the learned optimizer in current\"\n \" imports. 
Did you forget to import the module?\")\n\n # with summary.summary_scope(\"opt_from_checkpoint\"):\n lopt = configurable.configurable.wrapped()\n theta = lopt.init(jax.random.PRNGKey(0))\n logging.info(f\"Restoring checkpoint {checkpoint_path}\") # pylint: disable=logging-fstring-interpolation\n ckpt = ParameterCheckpoint(theta, \"\", 0)\n ckpt = load_state(checkpoint_path, ckpt)\n opt = lopt.opt_fn(ckpt.params)\n return opt\n # wrapped = _GinScopeClass(opt, scope)\n # For now, just add the lopt to the returned class.\n # TODO(lmetz) change this api to return a more structured class?\n # wrapped.lopt = lopt\n # return wrapped # type: ignore", "def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch", "def try_create_optimizer_and_load_from_checkpoint(self) -> bool:\n self.create_optimizer()\n if self.checkpoint_path:\n return self.try_load_checkpoint_for_optimizer()\n return True", "def reload_checkpoint(self):\n checkpoint_path = os.path.join(self.params.dump_path, 'checkpoint.pth')\n if not os.path.isfile(checkpoint_path):\n if self.params.reload_checkpoint == '':\n return\n else:\n checkpoint_path = self.params.reload_checkpoint\n assert os.path.isfile(checkpoint_path)\n logger.warning(\"Reloading checkpoint from %s ...\" % checkpoint_path)\n data = torch.load(checkpoint_path, map_location='cpu')\n\n # reload model parameters\n for name in self.MODEL_NAMES:\n getattr(self, name).load_state_dict(data[name])\n\n # reload optimizers\n for name in self.optimizers.keys():\n if False: # AMP checkpoint reloading is buggy, we cannot do that - TODO: fix - https://github.com/NVIDIA/apex/issues/250\n logger.warning(\"Reloading checkpoint optimizer %s ...\" % name)\n else: # instead, we only reload current iterations / learning rates\n logger.warning(\"Not reloading checkpoint optimizer %s.\" % name)\n for group_id, param_group in enumerate(self.optimizers[name].param_groups):\n if 'num_updates' not in param_group:\n logger.warning(\"No 'num_updates' for optimizer %s.\" % name)\n continue\n logger.warning(\"Reloading 'num_updates' and 'lr' for optimizer %s.\" % name)\n param_group['num_updates'] = data['%s_optimizer' % name]['param_groups'][group_id]['num_updates']\n param_group['lr'] = self.optimizers[name].get_lr_for_step(param_group['num_updates'])\n\n # reload main metrics\n self.epoch = data['epoch'] + 1\n self.n_total_iter = data['n_total_iter']\n self.best_metrics = data['best_metrics']\n self.best_stopping_criterion = data['best_stopping_criterion']\n logger.warning(\"Checkpoint reloaded. 
Resuming at epoch %i / iteration %i ...\" % (self.epoch, self.n_total_iter))", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir==\"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir == \"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load checkpoint from %s. Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))", "def load(self, checkpoint_dir):\n print(\"\\nReading Checkpoints.....\\n\\n\")\n model_dir = \"%s\" % (\"cnn\") # give the model name by label_size\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n \n # Check the checkpoint is exist\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\n self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\n else:\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_model(agent, optimizer, model_file):\n checkpoint = torch.load(model_file)\n episode = checkpoint['epoch']\n agent.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n return episode", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n if self.with_cuda:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(torch.device('cuda'))\n self.train_logger = checkpoint['logger']\n #self.config = checkpoint['config']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def _resume_checkpoint(self, resume_path):\n resume_path = str(resume_path)\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n\n # load architecture params from checkpoint.\n if checkpoint['config']['model'] != self.config['model']:\n self.logger.warning(\"Warning: Architecture configuration given in config file is different from that of \"\n \"checkpoint. This may yield an exception while state_dict is being loaded.\")\n self.model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['trainer']['optimizer']['type'] != self.config['trainer']['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(\"Checkpoint loaded. 
Resume from epoch {}\".format(self.start_epoch))", "def load_model(model, checkpoint_path: str): \r\n checkpoint = torch.load(checkpoint_path)\r\n model.load_state_dict(checkpoint['model'])\r\n epoch = checkpoint['epoch']\r\n print('Loaded model from {}, epoch {}'.format(checkpoint_path, epoch))", "def restore(self, checkpoint_path):\n start_time = time.time()\n latest_checkpoint = train_util.get_latest_chekpoint(checkpoint_path)\n if latest_checkpoint is not None:\n checkpoint = tf.train.Checkpoint(model=self)\n checkpoint.restore(latest_checkpoint).expect_partial()\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n logging.info('Loading model took %.1f seconds', time.time() - start_time)\n else:\n logging.info('Could not find checkpoint to load at %s, skipping.',\n checkpoint_path)", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_checkpoint(checkpoint_dir, epoch, iteration):\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n if not os.path.isfile(path):\n raise Exception(\"Checkpoint in epoch %d doesn't exist :sob:\" % epoch)\n\n checkpoint = torch.load(path)\n start_epoch = checkpoint['epoch']\n state_dict = checkpoint['state_dict']\n start_iteration = checkpoint['iteration']\n\n assert iteration == start_iteration\n return start_epoch, start_iteration, state_dict", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = 
checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.train_logger = checkpoint['logger']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.mnt_best = checkpoint['monitor_best']\n\n # load model params from checkpoint.\n if checkpoint['config']['name'] != self.config['name']:\n self.logger.warning(\n 'Warning: Architecture configuration given in config file is different from that of checkpoint. ' + \\\n 'This may yield an exception while state_dict is being loaded.')\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed. \n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning('Warning: Optimizer type given in config file is different from that of checkpoint. ' + \\\n 'Optimizer parameters not being resumed.')\n self.optimizer.load_state_dict(checkpoint['model_optimizer'])\n\n # load scheduler state from checkpoint only when scheduler type is not changed\n if checkpoint['config']['scheduler']['type'] != self.config['scheduler']['type']:\n self.logger.warning('Warning: Scheduler type given in config file is different from that of checkpoint. 
' + \\\n 'Scheduler parameters not being resumed.')\n self.scheduler.load_state_dict(checkpoint['model_scheduler'])\n\n self.train_logger = checkpoint['logger']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))", "def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n # 将参数全部放入GPU\n if self.with_cuda:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(self.device)\n self.train_logger = checkpoint['logger']\n self.config = checkpoint['config']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))", "def load_checkpoint(model, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n _load_model(model.module if type(model) is torch.nn.DataParallel else model, model_name, path=path, reload=True)", "def load_from_checkpoint(self, chkpt, section=None):\n if section is None:\n section = self.name\n self.load_state_dict(chkpt[section])", "def load_G(self, G_checkpoint):\n checkpoint = torch.load(G_checkpoint)\n self.G.load_state_dict(checkpoint['gnet'])\n self.gen_optim.load_state_dict(checkpoint['gopt'])", "def load_ckpt(args):\n net = model.Model.load_from_checkpoint(args.ckpt)\n net = net.eval().requires_grad_(False).to(args.device)\n return net", "def load_snapshot(device, net, snapshot_name, optimizer=None):\n\ttry:\n\t\tcheckpoint = torch.load(snapshot_name+'.pth', map_location=device)\n\t\tnet.load_state_dict(checkpoint['model_state_dict'])\n\t\tif optimizer:\n\t\t\trestore_optimizer(optimizer, checkpoint)\n\texcept:\n\t\tcheckpoint = None\t\n\treturn checkpoint", "def load_checkpoint(checkpoint_file: pl.Path) -> Optional[Dict[str, Any]]:\n if checkpoint_file.exists():\n logger.info(f\"Loading checkpoint {checkpoint_file}.\")\n checkpoint = torch.load(str(checkpoint_file))\n logger.info(f\"Done loading checkpoint from epoch {checkpoint['epoch']}.\")\n else:\n logger.warning(f\"No {checkpoint_file} checkpoint file found. 
Starting normal.\")\n return checkpoint", "def load_checkpoint(self, checkpoint_path: Union[str, Path]) -> Dict[str, Any]:\n # TODO: move to CheckpointIO\n torch.cuda.empty_cache()\n checkpoint_path = inject_model_parallel_rank(checkpoint_path)\n return self.checkpoint_io.load_checkpoint(checkpoint_path)", "def load_training(saver, session, load_dir):\n if tf.gfile.Exists(load_dir):\n ckpt = tf.train.get_checkpoint_state(load_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(session, ckpt.model_checkpoint_path)\n prev_step = extract_step(ckpt.model_checkpoint_path)\n else:\n tf.gfile.DeleteRecursively(load_dir)\n tf.gfile.MakeDirs(load_dir)\n prev_step = 0\n else:\n tf.gfile.MakeDirs(load_dir)\n prev_step = 0\n return prev_step", "def load_checkpoint(path: str, use_cuda: bool = True) -> dict:\n assert os.path.isfile(path), \"Checkpoint %s not found\" % path\n checkpoint = torch.load(path, map_location=\"cuda\" if use_cuda else \"cpu\")\n return checkpoint", "def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])", "def load_eval(saver, session, load_dir):\n saver.restore(session, load_dir)\n print('model loaded successfully')\n return extract_step(load_dir)", "def load_checkpoint(checkpoint_directory,\n session):\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n # filter variables if needed.\n print(variables)\n saver_ob = tf.train.Saver(variables, max_to_keep=0)\n os.makedirs(checkpoint_directory, exist_ok=True)\n # verify if we don't have a checkpoint saved directly\n step = 0\n ckpt = tf.train.get_checkpoint_state(checkpoint_directory)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n model_checkpoint_path = ckpt.model_checkpoint_path\n saver_ob.restore(session, model_checkpoint_path)\n step = int(model_checkpoint_path.rsplit('-', 1)[1])\n print('Model loaded = ', step)\n return saver_ob, step", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def load_state(net, optimizer, scheduler, model_no=0, load_best=False):\n logger.info(\"Initializing model and optimizer states...\")\n base_path = \"./data/\"\n checkpoint_path = os.path.join(base_path,\"test_checkpoint_%d.pth.tar\" % model_no)\n best_path = os.path.join(base_path,\"test_model_best_%d.pth.tar\" % model_no)\n start_epoch, best_pred, 
checkpoint = 0, 0, None\n if (load_best == True) and os.path.isfile(best_path):\n checkpoint = torch.load(best_path)\n logger.info(\"Loaded best model.\")\n elif os.path.isfile(checkpoint_path):\n checkpoint = torch.load(checkpoint_path)\n logger.info(\"Loaded checkpoint model.\")\n if checkpoint != None:\n start_epoch = checkpoint['epoch']\n best_pred = checkpoint['best_acc']\n net.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n logger.info(\"Loaded model and optimizer.\") \n return start_epoch, best_pred", "def load_checkpoint(checkpoint_path):\n flat_checkpoint_dict = flatten_checkpoint(\n parse_checkpoint(checkpoint_path), keep_empty_nodes=True)\n return flat_checkpoint_dict", "def load_D( self, D_checkpoint):\n checkpoint = torch.load(D_checkpoint)\n self.D.load_state_dict(checkpoint['dnet'])\n self.dis_optim.load_state_dict(checkpoint['gopt'])", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")", "def load_checkpoint(fpath):\n if fpath is None:\n raise ValueError('File path is None')\n if not osp.exists(fpath):\n raise FileNotFoundError('File is not found at \"{}\"'.format(fpath))\n map_location = None if torch.cuda.is_available() else 'cpu'\n try:\n checkpoint = torch.load(fpath, map_location=map_location)\n except UnicodeDecodeError:\n pickle.load = partial(pickle.load, encoding=\"latin1\")\n pickle.Unpickler = partial(pickle.Unpickler, encoding=\"latin1\")\n checkpoint = torch.load(\n fpath, pickle_module=pickle, map_location=map_location\n )\n except Exception:\n print('Unable to load checkpoint from \"{}\"'.format(fpath))\n raise\n return checkpoint", "def load_states(self, checkpoint):\n raise NotImplementedError()", "def _load_checkpoint(cls, model: DeviceAwareModule, checkpoint_path: Path,\n key_in_state_dict: str, use_gpu: bool) -> int:\n logging.info(f\"Loading checkpoint {checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(checkpoint_path, use_gpu)\n\n try:\n state_dict = checkpoint[key_in_state_dict]\n except KeyError:\n logging.error(f\"Key {key_in_state_dict} not found in checkpoint\")\n return False\n\n if isinstance(model, torch.nn.DataParallel):\n result = model.module.load_state_dict(state_dict, strict=False)\n else:\n result = model.load_state_dict(state_dict, strict=False)\n\n if result.missing_keys:\n logging.warning(f\"Missing keys in model checkpoint: {result.missing_keys}\")\n if result.unexpected_keys:\n logging.warning(f\"Unexpected keys in model checkpoint: {result.unexpected_keys}\")\n\n return checkpoint[ModelAndInfo.EPOCH_KEY]", "def load(self, sess, step=None):\n if step==None:\n ckpt_path = tf.train.latest_checkpoint(self.model.ckpt_dir)\n else:\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model-'+str(step))\n self.saver.restore(sess, ckpt_path)\n step = tf.train.global_step(sess, self.gstep)\n print('Load model at step {} from check point {}.'.format(step, ckpt_path))", "def load_checkpoint(self, session: tf.Session):\n if 
not _load_checkpoint(session, str(self.info.checkpoint_path)):\n tf_util.init_variables(session)\n return False\n else:\n return True", "def load(self, model_path):\n # TODO: include new params based on ConfigEnum\n checkpoint = torch.load(model_path)\n\n self.image_size = checkpoint['image_size']\n self.device = checkpoint['device']\n self.fp16 = checkpoint['fp16']\n self.accumulate_grad_steps = checkpoint['accumulate_grad_steps']\n self.experiment_id = checkpoint['experiment_id']\n self.experiment_tag = checkpoint['experiment_tag']\n self.seed = checkpoint['seed']\n self.train_batch_size = checkpoint['train_batch_size']\n self.valid_batch_size = checkpoint['valid_batch_size']\n self.test_batch_size = checkpoint['test_batch_size']\n self.dataloader_num_workers = checkpoint['dataloader_num_workers']\n self.train_dataloader_shuffle = checkpoint['train_dataloader_shuffle']\n self.optimizer_type = checkpoint['optimizer_type']\n self.optimizer_params = checkpoint['optimizer_params']\n self.scheduler_type = checkpoint['scheduler_type']\n self.scheduler_params = checkpoint['scheduler_params']\n self.step_scheduler_after = checkpoint['step_scheduler_after']\n self.step_scheduler_metric = checkpoint['step_scheduler_metric']\n self.compute_train_loss_after = checkpoint['compute_train_loss_after']\n self.compute_train_metric_after = checkpoint['compute_train_metric_after']\n self.compute_valid_loss_after = checkpoint['compute_valid_loss_after']\n self.compute_valid_metric_after = checkpoint['compute_valid_metric_after']\n self.training_stopping_criteria = checkpoint['training_stopping_criteria']\n self.stopping_criteria_params = checkpoint['stopping_criteria_params']\n self.max_epoch = checkpoint['max_epoch']\n self.train_on_all_data = checkpoint['train_on_all_data']\n self.validate_after = checkpoint['validate_after']\n self.validation_steps = checkpoint['validation_steps']\n self.run_lr_range_test= checkpoint['run_lr_range_test']\n self.sleep_in_epochs = checkpoint['sleep_in_epochs']\n self.sleep_time = checkpoint['sleep_time']\n self.checkpoint_epochs = checkpoint['checkpoint_epochs']\n\n self._best_score = checkpoint['_best_score']\n self._current_score = checkpoint['_current_score']\n self._counter = checkpoint['_counter']\n self.metrics = checkpoint['metrics']\n self.current_epoch = checkpoint['current_epoch']\n self.current_train_batch = checkpoint['current_train_batch']\n self.current_valid_batch = checkpoint['current_valid_batch']\n self.num_train_samples = checkpoint['num_train_samples']\n self.num_train_iterations = checkpoint['num_train_iterations']\n self.checkpoint_snapshot = checkpoint['checkpoint_snapshot'] \n\n # initialize optimizer, scheduler, and gradient scaler\n self.configure_optimizers()\n self.configure_schedulers()\n \n if self.fp16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n if self.model:\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.to(self.device)\n\n if self.optimizer:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n if self.scheduler:\n self.scheduler.load_state_dict(checkpoint['scheduler'])\n\n #if self.scaler:\n # self.scaler.load_state_dict(checkpoint['scaler'])", "def load_state_dict(self, checkpoint):\n self.net.load_state_dict(checkpoint['Net'])\n self.optimizer.load_state_dict(checkpoint['Optimizer'])\n\n if ADVERSARIAL_FLAG:\n self.adv_net.load_state_dict(checkpoint['AdvNet'])\n self.adv_optimizer.load_state_dict(checkpoint['AdvOptimizer'])\n\n self.history = checkpoint['History']\n self.stats = checkpoint['Stats']\n\n # 
The following loops are used to fix a bug that was\n # discussed here: https://github.com/pytorch/pytorch/issues/2830\n # (it is supposed to be fixed in recent PyTorch version)\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.net.device)\n if ADVERSARIAL_FLAG:\n for adv_state in self.adv_optimizer.state.values():\n for k, v in adv_state.items():\n if isinstance(v, torch.Tensor):\n adv_state[k] = v.to(self.adv_net.device)", "def load(self, path):\n\n filename = self.__path(path, prefix=None)\n chkpt = th.load(filename, map_location=\"cpu\") # TODO: check behavior\n\n if self.model is not None and chkpt[\"model\"] is not None:\n log.debug(\"Loading model state dict\")\n self.model.load_state_dict(chkpt[\"model\"])\n\n if \"optimizers\" in chkpt.keys():\n if self.optimizers is not None and chkpt[\"optimizers\"] is not None:\n try:\n for opt, state in zip(self.optimizers,\n chkpt[\"optimizers\"]):\n log.debug(\"Loading optimizers state dict for %s\", opt)\n opt.load_state_dict(state)\n except:\n # We do not raise an error here, e.g. in case the user simply\n # changes optimizer\n log.warning(\"Could not load optimizer state dicts, \"\n \"starting from scratch\")\n\n if \"schedulers\" in chkpt.keys():\n if self.schedulers is not None and chkpt[\"schedulers\"] is not None:\n try:\n for s, state in zip(self.schedulers,\n chkpt[\"schedulers\"]):\n log.debug(\"Loading scheduler state dict for %s\", s)\n s.load_state_dict(state)\n except:\n log.warning(\"Could not load scheduler state dicts, \"\n \"starting from scratch\")\n\n log.debug(\"Loaded checkpoint \\\"{}\\\"\".format(filename))\n return tuple(chkpt[k] for k in [\"extras\", \"meta\"])", "def load_actor(self, checkpoint):\n \n model = torch.load(checkpoint)\n self.actor_local.load_state_dict(model)", "def load_checkpoint(self, checkpoint_filepath=None, verbose=True):\n if checkpoint_filepath:\n print('loading', checkpoint_filepath, flush=True)\n optimistic_restore(self._session, checkpoint_filepath, verbose=verbose)\n return True\n else:\n checkpoints = retrieve_all_checkpoints(self._checkpoints_path) + retrieve_all_checkpoints(self._recovery_checkpoints_path)\n\n if checkpoints:\n last_checkpoint = sorted(checkpoints)[-1][1]\n print('loading', last_checkpoint, flush=True)\n optimistic_restore(self._session, last_checkpoint, verbose=verbose)\n return True\n else:\n print('nothing to restore. 
no checkpoint found.', flush=True)\n return False", "def load_weights(self, checkpoint_path, sess=None):\n\n if sess is None:\n sess = tf.get_default_session()\n assert sess is not None\n\n saver = tf.train.Saver(self.variables_to_restore)\n saver.restore(sess, checkpoint_path)", "def load_weights(self, checkpoint_path, sess=None):\n\n if sess is None:\n sess = tf.get_default_session()\n assert sess is not None\n\n saver = tf.train.Saver(self.variables_to_restore)\n saver.restore(sess, checkpoint_path)", "def load_weights(self, checkpoint_path, sess=None):\n\n if sess is None:\n sess = tf.get_default_session()\n assert sess is not None\n\n saver = tf.train.Saver(self.variables_to_restore)\n saver.restore(sess, checkpoint_path)", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path" ]
[ "0.83664644", "0.80974585", "0.80771816", "0.80259275", "0.7999482", "0.79903316", "0.7857532", "0.7640271", "0.75701135", "0.748829", "0.7443241", "0.74228936", "0.7389801", "0.7375046", "0.7372049", "0.7315069", "0.7312009", "0.72855306", "0.725681", "0.7247519", "0.72201985", "0.72201985", "0.72147804", "0.7209309", "0.71791726", "0.7172253", "0.71158737", "0.7098844", "0.70984185", "0.7015792", "0.70148826", "0.70143586", "0.7003345", "0.7003345", "0.70000845", "0.6930754", "0.69023585", "0.6900019", "0.6882023", "0.68749577", "0.68699276", "0.6853341", "0.68064564", "0.68055785", "0.6797791", "0.6789168", "0.67880154", "0.6779343", "0.67526346", "0.6739233", "0.6728988", "0.6724793", "0.6701432", "0.6689497", "0.666077", "0.6658363", "0.6636807", "0.6628691", "0.6610083", "0.6576368", "0.6575941", "0.65691924", "0.6563615", "0.6562063", "0.6562063", "0.6540874", "0.6534417", "0.6487161", "0.64807385", "0.6464343", "0.6454532", "0.64472795", "0.6441939", "0.64327425", "0.64193445", "0.6413526", "0.6407017", "0.6399171", "0.6387763", "0.637766", "0.63550097", "0.6342925", "0.6338328", "0.63344276", "0.6327162", "0.6327162", "0.63226485", "0.6314014", "0.63058704", "0.63001436", "0.62904066", "0.6287508", "0.626691", "0.62554336", "0.625303", "0.62445664", "0.6229745", "0.6229745", "0.6229745", "0.6228728" ]
0.7403394
12
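A minimal illustrative sketch (hypothetical file path argument and checkpoint keys; not taken from the row above) of the standard PyTorch pattern this query and document describe: persisting the optimizer's state_dict alongside the model and restoring both before resuming training.

import torch

def save_checkpoint(path: str, model: torch.nn.Module, optimizer: torch.optim.Optimizer, epoch: int) -> None:
    # Store model and optimizer state together so training can resume from the same epoch.
    torch.save({
        "epoch": epoch,
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }, path)

def load_checkpoint(path: str, model: torch.nn.Module, optimizer: torch.optim.Optimizer) -> int:
    # Restore both state dicts and return the epoch recorded in the checkpoint.
    checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint["model_state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    return checkpoint["epoch"]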
Creates an optimizer and loads its state from a checkpoint.
def try_create_optimizer_and_load_from_checkpoint(self) -> bool:
    self.create_optimizer()
    if self.checkpoint_path:
        return self.try_load_checkpoint_for_optimizer()
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_checkpoint(self, checkpoint: Dict[str, OrderedDict]):\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n self.optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n return self", "def create_optimizer(self) -> None:\n # Make sure model is created before we create optimizer\n if self._model is None:\n raise ValueError(\"Model checkpoint must be created before optimizer checkpoint can be loaded.\")\n\n # Select optimizer type\n if self.config.optimizer_type in [OptimizerType.Adam, OptimizerType.AMSGrad]:\n self._optimizer = torch.optim.Adam(self._model.parameters(), self.config.l_rate,\n self.config.adam_betas, self.config.opt_eps, self.config.weight_decay,\n amsgrad=self.config.optimizer_type == OptimizerType.AMSGrad)\n elif self.config.optimizer_type == OptimizerType.SGD:\n self._optimizer = torch.optim.SGD(self._model.parameters(), self.config.l_rate, self.config.momentum,\n weight_decay=self.config.weight_decay)\n elif self.config.optimizer_type == OptimizerType.RMSprop:\n self._optimizer = RMSprop(self._model.parameters(), self.config.l_rate, self.config.rms_alpha,\n self.config.opt_eps,\n self.config.weight_decay, self.config.momentum)\n else:\n raise NotImplementedError(f\"Optimizer type {self.config.optimizer_type.value} is not implemented\")", "def load_checkpoint(path, model, optimizer=None, reset_optimizer=True):\n print(\"Load checkpoint from: {}\".format(path))\n state_dict, optimizer_state = _load(path)\n\n model.load_dict(state_dict)\n if not reset_optimizer and optimizer is not None:\n if optimizer_state is not None:\n print(\"[loading] Load optimizer state from {}\".format(path))\n optimizer.load(optimizer_state)\n\n return model", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path)\n try:\n model.load_state_dict(state['model_state_dict'])\n except BaseException as e:\n print('Failed to do something: ' + str(e))\n\n if optimizer is not None:\n try:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n except Exception as e:\n print(e)\n\n return state", "def load_checkpoint(checkpoint, model, optimizer=None):\n model_state_dict, optimizer_state_dict = torch.load(checkpoint)\n model.load_state_dict(model_state_dict)\n\n if optimizer is not None:\n optimizer.load_state_dict(optimizer_state_dict)", "def opt_from_checkpoint(\n checkpoint_path: str,\n config_path: Optional[str] = None,\n extra_bindings=tuple([])\n) -> Optimizer:\n\n if config_path is None:\n config_path = \"/\".join(checkpoint_path.split(\"/\")[:-1]) + \"/config.gin\"\n\n logging.info(\"Restoring configs from: %s\", config_path)\n with gin.unlock_config():\n scope = f\"opt_from_checkpoint__{str(uuid.uuid4()).replace('-', '_')}\"\n with gin.config_scope(None):\n with gin.config_scope(scope):\n if config_path:\n with file_open(config_path, \"rb\") as f:\n content = bytes(f.read()).decode(\"utf-8\")\n\n # gin writes out multi line sometimes, undo this.\n content = content.replace(\"\\\\\\n\", \"\")\n\n def maybe_add_scope(c):\n # filter out train as this overlaps with outer_training.\n if c.startswith(\"#\"):\n return None\n if \"=\" in c:\n return scope + \"/\" + c\n return c\n\n bindings = [maybe_add_scope(c) for c in content.split(\"\\n\")]\n bindings = [b for b in bindings if b]\n bindings = bindings + [maybe_add_scope(c) for c in extra_bindings]\n\n logging.info(\"Parsing bindings\")\n for b in bindings:\n 
logging.info(b)\n print(b)\n gin.parse_config(bindings, skip_unknown=True)\n\n configurable = gin.query_parameter(f\"{scope}/run_train.lopt\")\n if isinstance(configurable, gin.config._UnknownConfigurableReference): # pylint: disable=protected-access\n raise ValueError(\"Gin couldn't find the learned optimizer in current\"\n \" imports. Did you forget to import the module?\")\n\n # with summary.summary_scope(\"opt_from_checkpoint\"):\n lopt = configurable.configurable.wrapped()\n theta = lopt.init(jax.random.PRNGKey(0))\n logging.info(f\"Restoring checkpoint {checkpoint_path}\") # pylint: disable=logging-fstring-interpolation\n ckpt = ParameterCheckpoint(theta, \"\", 0)\n ckpt = load_state(checkpoint_path, ckpt)\n opt = lopt.opt_fn(ckpt.params)\n return opt\n # wrapped = _GinScopeClass(opt, scope)\n # For now, just add the lopt to the returned class.\n # TODO(lmetz) change this api to return a more structured class?\n # wrapped.lopt = lopt\n # return wrapped # type: ignore", "def _load(checkpoint_path):\n state_dict, optimizer_state = dg.load_persistables(dirname=checkpoint_path)\n return state_dict, optimizer_state", "def load_checkpoint(checkpoint_path, model, optimizer=None,\n model_key='model_state_dict', optimizer_key='optimizer_state_dict'):\n if not os.path.exists(checkpoint_path):\n raise IOError(f\"Checkpoint '{checkpoint_path}' does not exist\")\n\n state = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(state[model_key])\n\n if optimizer is not None:\n optimizer.load_state_dict(state[optimizer_key])\n\n return state", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint)\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(checkpoint, model, optimizer=None):\n if not os.path.exists(checkpoint):\n raise (\"File doesn't exist {}\".format(checkpoint))\n checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n\n if optimizer:\n optimizer.load_state_dict(checkpoint['optim_dict'])\n\n return checkpoint", "def load_checkpoint(checkpoint_path, model, optimizer=None):\n if not os.path.exists(checkpoint_path):\n raise IOError(\"Checkpoint '{}' does not exist\".format(checkpoint_path))\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else 'cpu')\n state = torch.load(checkpoint_path, map_location=\"cuda:0\")\n model.load_state_dict(state['model_state_dict'])\n\n if optimizer is not None:\n optimizer.load_state_dict(state['optimizer_state_dict'])\n\n return state", "def try_load_checkpoint_for_optimizer(self) -> bool:\n\n if self._optimizer is None:\n raise ValueError(\"Optimizer must be created before optimizer checkpoint can be loaded.\")\n\n if not self.checkpoint_path:\n logging.warning(\"No checkpoint path provided.\")\n return False\n\n if not self.checkpoint_path.is_file():\n logging.warning(f'No checkpoint found at {self.checkpoint_path} current working dir {os.getcwd()}')\n return False\n\n logging.info(f\"Loading checkpoint {self.checkpoint_path}\")\n checkpoint = ModelAndInfo.read_checkpoint(self.checkpoint_path, self.config.use_gpu)\n\n try:\n state_dict = checkpoint[ModelAndInfo.OPTIMIZER_STATE_DICT_KEY]\n except KeyError:\n logging.error(f\"Key {ModelAndInfo.OPTIMIZER_STATE_DICT_KEY} not found in checkpoint\")\n return False\n\n 
self._optimizer.load_state_dict(state_dict)\n\n logging.info(f\"Loaded optimizer from checkpoint (epoch: {checkpoint[ModelAndInfo.EPOCH_KEY]})\")\n self.checkpoint_epoch = checkpoint[ModelAndInfo.EPOCH_KEY]\n return True", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def load(self, path):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])", "def LoadProgramState(self, restored_checkpoint_path=None, sess=None):\n pass", "def create_optimizer(net, optimizer_state_dict, learning_rate, device='cuda'):\n # define optimizer\n optimizer = optim.Adam([{\n 'params': net.net.parameters(),\n 'initial_lr': learning_rate\n }])\n # load optimizer checkpoint if available\n if optimizer_state_dict is not None:\n target_device = 'cpu' if device == 'cpu' else 'cuda'\n # load the optimizer weights\n optimizer.load_state_dict(optimizer_state_dict)\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = getattr(v, target_device)()\n return optimizer", "def load_model_from_checkpoint(self, path: str):\n ckpt = torch.load(path, map_location='cpu')\n self.net_q.encoder.load_state_dict(ckpt['encoder'])\n self.net_q.head.load_state_dict(ckpt['head'])\n self.net_ps.load_state_dict(ckpt['net_ps'])\n self.net_k.load_state_dict(ckpt['net_k'])\n self.queue.load_state_dict(ckpt['queue'])\n self.optimizer.load_state_dict(ckpt['optimizer'])\n if 'scheduler' in ckpt:\n self.scheduler.load_stae_dict(ckpt['scheduler'])\n self.move_optimizer_states(self.optimizer, self.local_rank)", "def load_checkpoint(self, checkpoint_path, continue_from_epoch=True):\n print(\"Loading checkpoint: {}\".format(checkpoint_path))\n state = torch.load(checkpoint_path)\n self.model.load_state_dict(state['state_dict'])\n self.optimizer.load_state_dict(state['optim_dict'])\n\n if continue_from_epoch:\n self.epoch = state['epoch']", "def create_optimizer(parameters, optimizer_class, optim_params, model_name='model'):\n opt = optimizer_class(parameters, **optim_params)\n if special_parameters.load_model:\n _load_optimizer(opt, model_name)\n return opt", "def load_state_dict(self, checkpoint):\n self.net.load_state_dict(checkpoint['Net'])\n self.optimizer.load_state_dict(checkpoint['Optimizer'])\n\n if ADVERSARIAL_FLAG:\n self.adv_net.load_state_dict(checkpoint['AdvNet'])\n self.adv_optimizer.load_state_dict(checkpoint['AdvOptimizer'])\n\n self.history = checkpoint['History']\n self.stats = checkpoint['Stats']\n\n # The following loops are used to fix a bug that was\n # discussed here: https://github.com/pytorch/pytorch/issues/2830\n # (it is supposed to be fixed in recent PyTorch version)\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(self.net.device)\n if ADVERSARIAL_FLAG:\n for adv_state in self.adv_optimizer.state.values():\n for k, v in adv_state.items():\n if isinstance(v, torch.Tensor):\n adv_state[k] = v.to(self.adv_net.device)", "def load_model(agent, optimizer, model_file):\n checkpoint = torch.load(model_file)\n episode = checkpoint['epoch']\n agent.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n return episode", "def load_checkpoint(self, filename, load_optim=True):\n extra_state, optim_history, 
last_optim_state = \\\n utils.load_model_state(filename, self.get_model())\n\n if last_optim_state is not None:\n # rebuild optimizer after loading model, since params may have changed\n #self.optimizer = optim.build_optimizer(self.args, self.model.parameters())\n self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)\n\n if load_optim:\n self._optim_history = optim_history\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n if last_optim['criterion_name'] == self.criterion.__class__.__name__:\n self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])\n if last_optim['optimizer_name'] == self.optimizer.__class__.__name__:\n self.optimizer.load_state_dict(last_optim_state)\n\n self._num_updates = last_optim['num_updates']\n\n return extra_state", "def _create_optimizer(self):\n\n with tf.name_scope(\"optimizer\"):\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def create_optimizer(self, context, optimizer, host):\n pass", "def _load_state_dict(optimizer, state: dict) -> None:\n if is_scheduler(optimizer):\n optimizer.load_state_dict(state[\"scheduler\"])\n optimizer.optimizer.load_state_dict(state[\"optimizer\"])\n else:\n optimizer.load_state_dict(state)", "def load_state(net, optimizer, scheduler, model_no=0, load_best=False):\n logger.info(\"Initializing model and optimizer states...\")\n base_path = \"./data/\"\n checkpoint_path = os.path.join(base_path,\"test_checkpoint_%d.pth.tar\" % model_no)\n best_path = os.path.join(base_path,\"test_model_best_%d.pth.tar\" % model_no)\n start_epoch, best_pred, checkpoint = 0, 0, None\n if (load_best == True) and os.path.isfile(best_path):\n checkpoint = torch.load(best_path)\n logger.info(\"Loaded best model.\")\n elif os.path.isfile(checkpoint_path):\n checkpoint = torch.load(checkpoint_path)\n logger.info(\"Loaded checkpoint model.\")\n if checkpoint != None:\n start_epoch = checkpoint['epoch']\n best_pred = checkpoint['best_acc']\n net.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n logger.info(\"Loaded model and optimizer.\") \n return start_epoch, best_pred", "def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)", "def load_ckp(checkpoint_fpath, model, optimizer, device):\n\n checkpoint = torch.load(checkpoint_fpath,map_location=device)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n valid_acc = checkpoint['valid_acc'] \n return model, optimizer, checkpoint['epoch'], valid_acc", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def load_model(path, model, optimizer):\n print(\"LOADING MODEL...\")\n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n status 
= ckpt.restore(tf.train.latest_checkpoint(path))\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir, \n max_to_keep=3 \n )\n return model, optimizer, ckpt, ckpt_manager", "def testSaveAndLoad(self):\n layers = (2, 3)\n net_options = {\"layers\": layers, \"initializer\": \"zeros\"}\n num_unrolls = 2\n num_epochs = 1\n\n problem = problems.simple()\n\n # Original optimizer.\n with tf.Graph().as_default() as g1:\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options=net_options))\n minimize_ops = optimizer.meta_minimize(problem, 3)\n\n with self.test_session(graph=g1) as sess:\n sess.run(tf.global_variables_initializer())\n train(sess, minimize_ops, 1, 2)\n\n # Save optimizer.\n tmp_dir = tempfile.mkdtemp()\n save_result = optimizer.save(sess, path=tmp_dir)\n net_path = next(iter(save_result))\n\n # Retrain original optimizer.\n cost, x = train(sess, minimize_ops, num_unrolls, num_epochs)\n\n # Load optimizer and retrain in a new session.\n with tf.Graph().as_default() as g2:\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options=net_options,\n net_path=net_path))\n minimize_ops = optimizer.meta_minimize(problem, 3)\n\n with self.test_session(graph=g2) as sess:\n sess.run(tf.global_variables_initializer())\n cost_loaded, x_loaded = train(sess, minimize_ops, num_unrolls, num_epochs)\n\n # The last cost should be the same.\n self.assertAlmostEqual(cost, cost_loaded, places=3)\n self.assertAlmostEqual(x[0], x_loaded[0], places=3)\n\n # Cleanup.\n os.remove(net_path)\n os.rmdir(tmp_dir)", "def on_stage_start(self, state: _State):\n optimizer = state.get_attr(\n key=\"optimizer\", inner_key=self.optimizer_key\n )\n assert optimizer is not None\n self._optimizer = optimizer", "def build_trainer(restore_state=None, train_policies=None, config=None):\n \n print(\"Using config\")\n print(config)\n cls = PPOTrainer\n trainer = cls(config=config)\n env = trainer.workers.local_worker().env\n if restore_state is not None:\n trainer.restore_from_object(restore_state)\n return trainer", "def load_checkpoint(tag, params, model):\r\n file_name = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n logger.info(\"Load checkpoint from %s\" % file_name)\r\n if os.path.exists(file_name):\r\n checkpoint = torch.load(file_name, map_location='cpu')\r\n params.training_id = checkpoint['training_id']\r\n logger.info(checkpoint['training_id'])\r\n model.global_step = checkpoint['global_step']\r\n model.load_state_dict(checkpoint['model'])\r\n for i, optimizer in enumerate(model.optimizers):\r\n optimizer.load_state_dict(checkpoint['optimizers'][i])\r\n else:\r\n raise Exception(\"Checkpoint not found.\")", "def load(self, path):\n file = open(path, 'rb')\n state = pickle.load(file)\n\n self.opt_state = optimizers.pack_optimizer_state(state)", "def _load_optimizer(self):\n # loss function\n with tf.variable_scope(\"forward\"):\n self.loss_fwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_fwd,\n self.labels, self.weights, self.vocab_size)\n\n # optimizer\n # self.optimizer_fwd = tf.train.MomentumOptimizer(self.learning_rate,\n # self.momentum)\n self.optimizer_fwd = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train_op_fwd = self.optimizer_fwd.minimize(self.loss_fwd)\n\n with tf.variable_scope(\"backward\"):\n self.loss_bwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_bwd,\n self.labels, self.weights, self.vocab_size)\n\n # optimizer\n # self.optimizer_bwd = 
tf.train.MomentumOptimizer(self.learning_rate,\n # self.momentum)\n self.optimizer_bwd = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.train_op_bwd = self.optimizer_bwd.minimize(self.loss_bwd)", "def load_pretrained(model, fname, optimizer=None):\n if os.path.isfile(fname):\n print(\"=> loading checkpoint '{}'\".format(fname))\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint['state_dict'])\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint['optimizer'])\n return model, optimizer, checkpoint['epoch']\n else:\n return model\n else:\n raise Exception(\"=> no checkpoint found at '{}'\".format(fname))", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def _resume_checkpoint(self, resume_path, model, optimizer):\n if not resume_path:\n return model, optimizer\n\n self.logger.info(f'Loading checkpoint: {resume_path}')\n checkpoint = torch.load(resume_path)\n model.load_state_dict(checkpoint['state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed.\n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from \"\n \"that of checkpoint. 
Optimizer parameters not being resumed.\")\n else:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n self.logger.info(f'Checkpoint \"{resume_path}\" loaded')\n return model, optimizer", "def _create_train_op(self):\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)", "def _create_train_op(self):\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n self.train_op = self.optimizer.minimize(self.loss)", "def init_optimizer(self, kvstore='local', optimizer='sgd',\n optimizer_params=(('learning_rate', 0.01),), force_init=False):\n assert self.binded and self.params_initialized\n\n if self.optimizer_initialized and not force_init:\n self.logger.warning('optimizer already initialized, ignoring...')\n return\n\n if self._params_dirty:\n self._sync_params_from_devices()\n\n (kvstore, update_on_kvstore) = \\\n mx.model._create_kvstore(kvstore, len(self._context), self._arg_params)\n\n batch_size = self._exec_group.batch_size\n if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:\n batch_size *= kvstore.num_workers\n rescale_grad = 1.0 / batch_size\n\n\n idx2name = {}\n if update_on_kvstore:\n idx2name.update(enumerate(self._exec_group.param_names))\n else:\n for k in range(len(self._context)):\n idx2name.update({i*len(self._context)+k: n\n for i, n in enumerate(self._exec_group.param_names)})\n name2idx = {}\n for k, v in idx2name.items():\n if v not in name2idx:\n name2idx[v] = []\n name2idx[v].append(k)\n\n if isinstance(optimizer, str):\n optimizer_params = dict(optimizer_params)\n if 'rescale_grad' not in optimizer_params:\n optimizer_params['rescale_grad'] = rescale_grad\n optimizer = mx.optimizer.create(optimizer,\n sym=self.symbol, param_idx2name=idx2name,\n **optimizer_params)\n else:\n assert isinstance(optimizer, mx.optimizer.Optimizer)\n if optimizer.rescale_grad != rescale_grad:\n #pylint: disable=no-member\n warnings.warn(\n \"Optimizer created manually outside Module but rescale_grad \" +\n \"is not normalized to 1.0/batch_size/num_workers (%s vs. %s). 
\"%(\n optimizer.rescale_grad, rescale_grad) +\n \"Is this intended?\", stacklevel=2)\n if len(optimizer.idx2name):\n warnings.warn(\"The idx2name of the optimizer is overwrote by ModuleEXT\")\n # overwrite optimizer.idx2name\n optimizer.idx2name = idx2name.copy()\n\n self._param_idx2name = idx2name \n self._param_name2idx = name2idx\n self._optimizer = optimizer\n self._kvstore = kvstore\n self._update_on_kvstore = update_on_kvstore\n self._updater = None\n\n if kvstore:\n if self._compression_params:\n kvstore.set_gradient_compression(self._compression_params)\n # copy initialized local parameters to kvstore\n _initialize_kvstore(kvstore=kvstore,\n param_arrays=self._exec_group.param_arrays,\n arg_params=self._arg_params,\n param_names=self._param_names,\n update_on_kvstore=update_on_kvstore)\n if update_on_kvstore:\n kvstore.set_optimizer(self._optimizer)\n else:\n self._updater = mx.optimizer.get_updater(optimizer)\n\n self.optimizer_initialized = True\n\n if self._preload_opt_states is not None:\n self.load_optimizer_states(self._preload_opt_states)\n self._preload_opt_states = None", "def load_checkpoint(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.get_latest_path()\n\n if os.path.isfile(checkpoint_path):\n key = 'cuda' if torch.cuda.is_available() else 'cpu'\n checkpoint = torch.load(checkpoint_path, map_location=key)\n self.network.load_state_dict(checkpoint['network'])\n self.network_target.load_state_dict(checkpoint['network_target'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('checkpoint loaded at {}'.format(checkpoint_path))\n else:\n raise OSError(\"Checkpoint file not found.\")", "def create_orttrainer_and_save_checkpoint_bart(device, trainer_opts, checkpoint_dir, state_dict_key_name='state_dict', use_lamb=True, seed=1, learning_rate=0.1):\n torch.manual_seed(seed)\n set_seed(seed)\n\n ort_trainer_opts = orttrainer.ORTTrainerOptions(trainer_opts)\n optim_config = optim.LambConfig(lr=learning_rate) if use_lamb else optim.AdamConfig(lr=learning_rate)\n model, model_desc = _load_bart_model()\n trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=ort_trainer_opts)\n\n # load dummy optimizer state as we are not going to run real training\n dummy_init_state = generate_dummy_optim_state(model, optim_config)\n init_state = copy.deepcopy(dummy_init_state)\n trainer.load_state_dict(dummy_init_state)\n\n # run an eval step to innitialize the graph\n src_tokens, prev_output_tokens, target = generate_random_input_from_bart_model_desc(model_desc, seed = seed)\n trainer.eval_step(src_tokens, prev_output_tokens, target)\n\n # save current model parameters as a checkpoint\n if checkpoint_dir:\n if _is_model_parallel_run(ort_trainer_opts):\n _save(trainer, checkpoint_dir, state_dict_key_name, world_rank=ort_trainer_opts.distributed.world_rank)\n # save the initial complete model and optimizer states\n if ort_trainer_opts.distributed.world_rank == 0:\n init_state['model'] = {'full_precision': dict()}\n for initializer in model.graph.initializer:\n init_state['model']['full_precision'][initializer.name] = numpy_helper.to_array(initializer)\n with open(os.path.join(checkpoint_dir, 'expected_state_dict.pkl'), \"wb\") as f:\n pickle.dump(init_state, f)\n else:\n _save(trainer, checkpoint_dir, state_dict_key_name)", "def load_checkpoint(self, checkpoint: str, **kwargs) -> None:\n with open(checkpoint, \"rb\") as f:\n state = SafePickle.load(f)\n\n state_id = ray.put(state)\n ray.get([worker.set_state.remote(state_id, 
**kwargs) for worker in self.remote_workers])", "def load_model(self, checkpoint):\n print(f'Load parameters from {checkpoint}')\n epoch = re.match(r\"[0-9]*\", os.path.basename(checkpoint)).group(0)\n self.epoch_i = int(epoch)\n self.model.load_state_dict(torch.load(checkpoint))", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def unpack_checkpoint(\n self,\n checkpoint: Dict,\n model=None,\n criterion=None,\n optimizer=None,\n scheduler=None,\n **kwargs,\n ) -> None:\n super().unpack_checkpoint(\n checkpoint,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n **kwargs,\n )\n\n # NOTE: propper way to load state, docs:\n # https://nvidia.github.io/apex/amp.html#checkpointing\n if \"amp\" in checkpoint:\n amp.load_state_dict(checkpoint[\"amp\"])", "def load(loadname, checkpoint=None):\n ckpt_dir = \"./models/tf_ckpt_\" + loadname + \"/\"\n if checkpoint is not None:\n status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))\n status.assert_consumed()\n print(\"Loaded checkpoint\")\n else:\n print(\"Not Loading any checkpoint\")\n print(\"Starting training from initial configuration\")", "def load_optimizers(self, epoch):\n for i, optimizer in enumerate(self.optimizers):\n load_filename = '{0}_optimizer_{1}.pth'.format(epoch, i)\n load_path = os.path.join(self.save_dir, load_filename)\n print('loading the optimizer from {0}'.format(load_path))\n state_dict = torch.load(load_path)\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n optimizer.load_state_dict(state_dict)", "def train(self, num_epochs, state_transition_model, save_weights_path='./stored_data/weights/policy_network/policy_network',\n load_weights=False, load_weights_path=None):\n with self.sess as sess: \n stm_weights_path = ('./stored_data/weights/state_transition_model/state_transition_model_'\n + self.energy_system.electricity_pricing_model + '.h5')\n state_transition_model.load_weights(stm_weights_path)\n\n starting_state = tf.placeholder(tf.float32, shape=(None, self.num_state_variables), name='starting_state')\n normalized_starting_state_vals = self._get_normalized_starting_state_values_and_set_battery_soc()\n print(\"\\nNow creating computational graph\")\n print(\"Available memory in GB:\", psutil.virtual_memory().available / 1000000000.0)\n average_reward_per_rollout_op, average_electricity_cost_per_rollout_op = self.simulate_epoch(starting_state, state_transition_model)\n print(\"\\nNow creating optimizer op\")\n print(\"Available memory in GB:\", psutil.virtual_memory().available / 1000000000.0)\n train_op = tf.train.AdamOptimizer().minimize(-average_reward_per_rollout_op, \n var_list=[self.model.trainable_variables],\n name='policy_optimizer')\n print(\"\\nNow merging sumaries\")\n print(\"Available memory in GB:\", psutil.virtual_memory().available / 1000000000.0)\n merged_tensorboard_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter('./stored_data/Tensorboard/policy_network/' + self.energy_system.electricity_pricing_model + '/')\n #writer.add_graph(sess.graph)\n\n print(\"\\nNow initializing variables\")\n print(\"Available memory 
in GB:\", psutil.virtual_memory().available / 1000000000.0)\n self._initialize_local_and_global_variables()\n self.training_average_electricity_cost_in_euros = np.zeros(num_epochs)\n self.training_average_reward = np.zeros(num_epochs)\n\n if load_weights:\n self.load_weights(load_weights_path)\n\n feed_dict = {starting_state: normalized_starting_state_vals}\n print(\"\\nNow running first epoch\")\n print(\"Available memory in GB:\", psutil.virtual_memory().available / 1000000000.0)\n for epoch in range(num_epochs):\n op_list = [train_op, average_reward_per_rollout_op, average_electricity_cost_per_rollout_op, merged_tensorboard_summary]\n _, epoch_average_reward, epoch_average_electricity_cost, epoch_summary = sess.run(op_list, feed_dict=feed_dict)\n self.training_average_electricity_cost_in_euros[epoch] = epoch_average_electricity_cost\n self.training_average_reward[epoch] = epoch_average_reward\n if epoch % 10 == 0 or epoch == num_epochs-1:\n writer.add_summary(epoch_summary, epoch)\n print(\"Finished \" + str(epoch+1) + \" epochs out of \" + str(num_epochs)\n + \". Electricity cost: %.3f euros\" % epoch_average_electricity_cost)\n if epoch >= 99 and (epoch + 1) % 50 == 0:\n self.model.save_weights(save_weights_path + '_'\n + self.energy_system.electricity_pricing_model + '_'\n + str(epoch + 1)\n + '.h5')\n\n if num_epochs > 0:\n self.save_weights_and_monitoring_data(save_weights_path)\n\n self.plot_progress()", "def load_checkpoint(path):\n\n # Get the model name\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Load in checkpoint\n checkpoint = torch.load(path)\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.classifier = checkpoint['classifier']\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n # Make sure to set parameters as not trainable\n for param in model.parameters():\n param.requires_grad = False\n model.fc = checkpoint['fc']\n\n # Load in the state dict\n model.load_state_dict(checkpoint['state_dict'])\n\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n total_trainable_params = sum(\n p.numel() for p in model.parameters() if p.requires_grad)\n print(f'{total_trainable_params:,} total gradient parameters.')\n\n # Move to gpu\n if multi_gpu:\n model = nn.DataParallel(model)\n\n if train_on_gpu:\n model = model.to('cuda')\n\n # Model basics\n model.class_to_idx = checkpoint['class_to_idx']\n model.idx_to_class = checkpoint['idx_to_class']\n model.epochs = checkpoint['epochs']\n\n # Optimizer\n optimizer = checkpoint['optimizer']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n return model, optimizer", "def train(config, alg, checkpoint=None):\n agent = get_agent_class(alg)(config=config, env=\"retro-v0\")\n if checkpoint is not None:\n try:\n agent.restore(checkpoint)\n print(f\"Resumed checkpoint {checkpoint}\")\n except:\n print(\"Checkpoint not found: restarted policy network from scratch\")\n else:\n print(\"Started policy network from scratch\")\n\n for i in range(1000000):\n # Perform one iteration of training the policy with the algorithm\n result = agent.train()\n print(pretty_print(result))\n\n if i % 50 == 0:\n checkpoint = agent.save()\n print(\"checkpoint saved at\", checkpoint)", "def load_checkpoint_train(cpdir, model, optimizer):\n 
start_epoch = 0\n start_global_step = 0\n if cpdir is not None:\n start_global_step, start_epoch = load_checkpoint(\n cpdir, model, optimizer)\n start_global_step += 1\n start_epoch += 1\n return start_global_step, start_epoch", "def reload_checkpoint(self):\n checkpoint_path = os.path.join(self.params.dump_path, 'checkpoint.pth')\n if not os.path.isfile(checkpoint_path):\n if self.params.reload_checkpoint == '':\n return\n else:\n checkpoint_path = self.params.reload_checkpoint\n assert os.path.isfile(checkpoint_path)\n logger.warning(\"Reloading checkpoint from %s ...\" % checkpoint_path)\n data = torch.load(checkpoint_path, map_location='cpu')\n\n # reload model parameters\n for name in self.MODEL_NAMES:\n getattr(self, name).load_state_dict(data[name])\n\n # reload optimizers\n for name in self.optimizers.keys():\n if False: # AMP checkpoint reloading is buggy, we cannot do that - TODO: fix - https://github.com/NVIDIA/apex/issues/250\n logger.warning(\"Reloading checkpoint optimizer %s ...\" % name)\n else: # instead, we only reload current iterations / learning rates\n logger.warning(\"Not reloading checkpoint optimizer %s.\" % name)\n for group_id, param_group in enumerate(self.optimizers[name].param_groups):\n if 'num_updates' not in param_group:\n logger.warning(\"No 'num_updates' for optimizer %s.\" % name)\n continue\n logger.warning(\"Reloading 'num_updates' and 'lr' for optimizer %s.\" % name)\n param_group['num_updates'] = data['%s_optimizer' % name]['param_groups'][group_id]['num_updates']\n param_group['lr'] = self.optimizers[name].get_lr_for_step(param_group['num_updates'])\n\n # reload main metrics\n self.epoch = data['epoch'] + 1\n self.n_total_iter = data['n_total_iter']\n self.best_metrics = data['best_metrics']\n self.best_stopping_criterion = data['best_stopping_criterion']\n logger.warning(\"Checkpoint reloaded. 
Resuming at epoch %i / iteration %i ...\" % (self.epoch, self.n_total_iter))", "def _restore_checkpoint(self, checkpoint_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path)\n pretrained_dict = checkpoint['state_dict'] # 预训练模型的state_dict\n model_dict = self.model.state_dict() # 当前用来训练的模型的state_dict\n \n if pretrained_dict.keys() != model_dict.keys(): # 需要进行参数的适配\n print('Parameters are inconsistant, adapting model parameters ...')\n # 在合并前(update),需要去除pretrained_dict一些不需要的参数\n # 只含有识别分支的预训练模型参数字典中键'0', '1'对应全模型参数字典中键'2', '3'\n pretrained_dict['2'] = transfer_state_dict(pretrained_dict['0'], model_dict['2'])\n pretrained_dict['3'] = transfer_state_dict(pretrained_dict['1'], model_dict['3'])\n del pretrained_dict['0'] # 把原本预训练模型中的键值对删掉,以免错误地更新当前模型中的键值对\n del pretrained_dict['1']\n model_dict.update(pretrained_dict) # 更新(合并)模型的参数\n self.model.load_state_dict(model_dict)\n else:\n print('Parameters are consistant, load state dict directly ...')\n self.model.load_state_dict(checkpoint['state_dict'])\n # self.optimizer.load_state_dict(checkpoint['optimizer'])\n # if self.with_cuda:\n # for state in self.optimizer.state.values():\n # for k, v in state.items():\n # if isinstance(v, torch.Tensor):\n # state[k] = v.cuda(self.device)", "def create_or_load_model(model, model_dir, session, name):\n latest_ckpt = tf.train.latest_checkpoint(model_dir)\n if latest_ckpt:\n start_time = time.time()\n # It only takes a few seconds to initialize all variables.\n session.run(tf.global_variables_initializer())\n logging.info(\n \"Initialize %s model with fresh parameters before loading variables \"\n \"from the checkpoint, time %.2fs\", name,\n time.time() - start_time)\n model = load_model(model, latest_ckpt, session, name)\n else:\n start_time = time.time()\n session.run(tf.global_variables_initializer())\n session.run(tf.tables_initializer())\n utils.print_out(\" created %s model with fresh parameters, time %.2fs\" %\n (name, time.time() - start_time))\n\n global_step = model.global_step.eval(session=session)\n return model, global_step", "def load_states(self, checkpoint):\n raise NotImplementedError()", "def from_checkpoint(\n checkpoint: Union[str, Checkpoint],\n policy_ids: Optional[Container[PolicyID]] = None,\n policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,\n policies_to_train: Optional[\n Union[\n Container[PolicyID],\n Callable[[PolicyID, Optional[SampleBatchType]], bool],\n ]\n ] = None,\n ) -> \"Algorithm\":\n checkpoint_info = get_checkpoint_info(checkpoint)\n\n # Not possible for (v0.1) (algo class and config information missing\n # or very hard to retrieve).\n if checkpoint_info[\"checkpoint_version\"] == version.Version(\"0.1\"):\n raise ValueError(\n \"Cannot restore a v0 checkpoint using `Algorithm.from_checkpoint()`!\"\n \"In this case, do the following:\\n\"\n \"1) Create a new Algorithm object using your original config.\\n\"\n \"2) Call the `restore()` method of this algo object passing it\"\n \" your checkpoint dir or AIR Checkpoint object.\"\n )\n elif checkpoint_info[\"checkpoint_version\"] < version.Version(\"1.0\"):\n raise ValueError(\n \"`checkpoint_info['checkpoint_version']` in `Algorithm.from_checkpoint\"\n \"()` must be 1.0 or later! 
You are using a checkpoint with \"\n f\"version v{checkpoint_info['checkpoint_version']}.\"\n )\n\n # This is a msgpack checkpoint.\n if checkpoint_info[\"format\"] == \"msgpack\":\n # User did not provide unserializable function with this call\n # (`policy_mapping_fn`). Note that if `policies_to_train` is None, it\n # defaults to training all policies (so it's ok to not provide this here).\n if policy_mapping_fn is None:\n # Only DEFAULT_POLICY_ID present in this algorithm, provide default\n # implementations of these two functions.\n if checkpoint_info[\"policy_ids\"] == {DEFAULT_POLICY_ID}:\n policy_mapping_fn = AlgorithmConfig.DEFAULT_POLICY_MAPPING_FN\n # Provide meaningful error message.\n else:\n raise ValueError(\n \"You are trying to restore a multi-agent algorithm from a \"\n \"`msgpack` formatted checkpoint, which do NOT store the \"\n \"`policy_mapping_fn` or `policies_to_train` \"\n \"functions! Make sure that when using the \"\n \"`Algorithm.from_checkpoint()` utility, you also pass the \"\n \"args: `policy_mapping_fn` and `policies_to_train` with your \"\n \"call. You might leave `policies_to_train=None` in case \"\n \"you would like to train all policies anyways.\"\n )\n\n state = Algorithm._checkpoint_info_to_algorithm_state(\n checkpoint_info=checkpoint_info,\n policy_ids=policy_ids,\n policy_mapping_fn=policy_mapping_fn,\n policies_to_train=policies_to_train,\n )\n\n return Algorithm.from_state(state)", "def create_train_state(\n config, rng, learning_rate_fn, example_batch\n):\n model, variables, metric_collector = create_model(config, rng, example_batch)\n params = variables['params']\n parameter_overview.log_parameter_overview(params)\n tx = train_utils.create_optimizer(config, learning_rate_fn)\n\n state = train_state.TrainState.create(\n apply_fn=model.apply,\n params=variables['params'],\n tx=tx,\n )\n return model, state, metric_collector", "def load_checkpoint(ckpt_path):\n checkpoint = None\n if ckpt_path:\n logger.info(\"Loading checkpoint from %s\" % ckpt_path)\n checkpoint = torch.load(ckpt_path, map_location=torch.device(\"cpu\"))\n\n if \"model\" in checkpoint.keys():\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.b_2\", r\"\\1.layer_norm\\2.bias\", s\n )\n s = re.sub(\n r\"(.*)\\.layer_norm((_\\d+)?)\\.a_2\", r\"\\1.layer_norm\\2.weight\", s\n )\n return s\n\n checkpoint[\"model\"] = {\n fix_key(k): v for k, v in checkpoint[\"model\"].items()\n }\n # Force add_ffnbias to True if bias found in model w_1 keys\n for key in checkpoint[\"model\"].keys():\n if \"w_1.bias\" in key:\n checkpoint[\"opt\"].add_ffnbias = True\n\n if not hasattr(checkpoint[\"opt\"], \"num_kv\"):\n checkpoint[\"opt\"].num_kv = 0\n if not hasattr(checkpoint[\"opt\"], \"add_ffnbias\"):\n checkpoint[\"opt\"].add_ffnbias = False\n if not hasattr(checkpoint[\"opt\"], \"parallel_residual\"):\n checkpoint[\"opt\"].parallel_residual = False\n if not hasattr(checkpoint[\"opt\"], \"shared_layer_norm\"):\n checkpoint[\"opt\"].shared_layer_norm = False\n if not hasattr(checkpoint[\"opt\"], \"use_ckpting\"):\n checkpoint[\"opt\"].use_ckpting = []\n if not hasattr(checkpoint[\"opt\"], \"relative_positions_buckets\"):\n checkpoint[\"opt\"].relative_positions_buckets = 0\n if not hasattr(checkpoint[\"opt\"], \"parallel_mode\"):\n checkpoint[\"opt\"].parallel_mode = \"data_parallel\"\n if not hasattr(checkpoint[\"opt\"], \"norm_eps\"):\n checkpoint[\"opt\"].norm_eps = 1e-6\n\n # fix v2 compatibility\n if 
\"generator\" in checkpoint.keys() and checkpoint[\"generator\"]:\n if \"0.weight\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"weight\"] = checkpoint[\"generator\"].pop(\n \"0.weight\"\n )\n if \"0.bias\" in checkpoint[\"generator\"]:\n checkpoint[\"generator\"][\"bias\"] = checkpoint[\"generator\"].pop(\"0.bias\")\n # end of patch for backward compatibility\n\n return checkpoint", "def load(self):\r\n checkpoint = torch.load(self.checkpoint_path,\r\n map_location=self.device)\r\n self.load_state_dict(checkpoint)\r\n del checkpoint", "def initialize_optimization(self):\n\n if self.FLAGS.optimizer == \"Adam\" :\n self.solver = tf.train.AdamOptimizer(\n learning_rate = self.learning_rate,\n beta1 = self.FLAGS.beta1,\n beta2 = self.FLAGS.beta2)\n else:\n print(\"ERROR: Cannot handle optimizer type {}!!!\".format(self.FLAGS.optimizer))\n raise RuntimeError\n \n # batch normalization in tensorflow requires this extra dependency\n # this is required to update the moving mean and moving variance variables\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n self.update = self.solver.minimize(self.loss, global_step=self.global_step)", "def importOptimizer():\n module_path = os.path.join(path, \"optimization\")\n module_path = os.path.join(module_path, \"optimizer.py\")\n optimizer_class = importClass(\"Optimizer\", \"optimizer\", module_path)\n return optimizer_class", "def create_orttrainer_and_load_checkpoint(device, trainer_opts, checkpoint_dir, use_lamb=True, seed=1, learning_rate=0.1):\n torch.manual_seed(seed)\n set_seed(seed)\n\n # PyTorch transformer model setup \n optim_config = optim.LambConfig(lr=learning_rate) if use_lamb else optim.AdamConfig(lr=learning_rate)\n model, model_desc, loss_fn, batcher_fn, train_data, _, _ = _load_pytorch_transformer_model(device)\n trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=loss_fn, options=orttrainer.ORTTrainerOptions(trainer_opts))\n\n # load checkpoint into trainer\n checkpoint_file_name = 'checkpoint*.ortcp'\n checkpoint_files = glob.glob(os.path.join(checkpoint_dir, checkpoint_file_name))\n trainer.load_checkpoint(*checkpoint_files)\n\n # run an eval step to innitialize the graph\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, 0)\n trainer.eval_step(data, targets)\n\n return trainer.state_dict(), model", "def _get_init_fn():\n exclusions = []\n if FLAGS.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in FLAGS.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Fine-tuning from {}'.format(checkpoint_path))\n\n return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)", "def optimizer_factory(config, model):\n params = model.parameters()\n\n optimizer = config[\"loss\"].get(\"optimizer\", \"Adam\")\n lr = config[\"loss\"].get(\"lr\", 1e-3)\n momentum = config[\"loss\"].get(\"momentum\", 0.9)\n\n if optimizer == \"SGD\":\n return optim.SGD(params, lr=lr, momentum=momentum)\n elif optimizer == \"Adam\":\n return optim.Adam(params, lr=lr)\n else:\n raise 
NotImplementedError()", "def load(self):\n checkpoint = torch.load(self.checkpoint_path,\n map_location=self.net.device)\n self.load_state_dict(checkpoint)\n del checkpoint", "def load(self, checkpoint_dir=None):\n\n if checkpoint_dir is None:\n checkpoint_dir = FLAGS.checkpoint_dir\n\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n return self.load_from_path(checkpoint_dir)", "def _inst_optimizer(self):\n optimizer = Optimizers(self.m_cfg['configs']['lr_politics']['optimizer']).value\n lr_schedule = self.m_cfg['configs']['lr_politics']['lr']\n opt = optimizer(learning_rate=lr_schedule)\n return opt", "def build_optimizer(optimizer_config, params, name=None):\n\n if optimizer_config.name == 'rms_prop_optimizer':\n\n optimizer = paddle.optimizer.RMSProp(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n rho=optimizer_config.decay,\n momentum=optimizer_config.momentum_optimizer_value,\n epsilon=optimizer_config.epsilon,\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer_config.name =='momentum_optimizer':\n\n optimizer = paddle.optimizer.SGD(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer_config.name =='adam_optimizer':\n\n optimizer = paddle.optimizer.Adam(\n parameters = params,\n learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),\n weight_decay=optimizer_config.weight_decay)\n\n if optimizer is None:\n raise ValueError('Optimizer %s not supported.' % optimizer_config.name)\n\n if optimizer_config.use_moving_average:\n raise ValueError('paddle don\\'t support moving average')\n if name is None:\n # assign a name to optimizer for checkpoint system\n optimizer.name = optimizer_config.name\n else:\n optimizer.name = name\n return optimizer", "def load_G(self, G_checkpoint):\n checkpoint = torch.load(G_checkpoint)\n self.G.load_state_dict(checkpoint['gnet'])\n self.gen_optim.load_state_dict(checkpoint['gopt'])", "def load_sharded_optimizer(self, optimizer: Optimizer, index_file_path: str, size_per_shard: int):\n raise NotImplementedError(\"Sharded optimizer checkpoint is not supported yet.\")", "def create_optimizer(optimizer_name, model, config):\n if optimizer_name == 'adadelta':\n return torch.optim.Adadelta(model.parameters(),\n lr=config['adadelta_lr'],\n rho=config['adadelta_rho'],\n weight_decay=config['adadelta_weight_decay'],\n eps=config['adadelta_eps'])\n else:\n raise Exception('Optimizer \\'{}\\' not supported.'.format(optimizer_name))", "def create_orttrainer_and_load_checkpoint_bart(device, trainer_opts, checkpoint_dir, use_lamb=True, seed=1, learning_rate=0.1):\n torch.manual_seed(seed)\n set_seed(seed)\n\n # model setup\n optim_config = optim.LambConfig(lr=learning_rate) if use_lamb else optim.AdamConfig(lr=learning_rate)\n model, model_desc = _load_bart_model()\n trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, options=orttrainer.ORTTrainerOptions(trainer_opts))\n\n # load checkpoint into trainer\n checkpoint_file_name = 'checkpoint*.ortcp'\n checkpoint_files = glob.glob(os.path.join(checkpoint_dir, checkpoint_file_name))\n trainer.load_checkpoint(*checkpoint_files)\n\n # run an eval step to innitialize the graph\n src_tokens, prev_output_tokens, target = generate_random_input_from_bart_model_desc(model_desc, seed = seed)\n trainer.eval_step(src_tokens, prev_output_tokens, target)\n\n expected_state_dict = None\n fname = 
os.path.join(checkpoint_dir, 'expected_state_dict.pkl')\n if os.path.isfile(fname):\n with open(fname, \"rb\") as f:\n expected_state_dict = pickle.load(f)\n\n return trainer.state_dict(), expected_state_dict, model", "def optimizer_setup(model, params):\n if params.optimizer == 'adam':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_adam(model, params)\n else:\n optimizer = optimizer_handler.plain_adam(model, params)\n elif params.optimizer == 'sgd':\n if params.freeze_backbone:\n optimizer = optimizer_handler.layer_specific_sgd(model, params)\n else:\n optimizer = optimizer_handler.plain_sgd(model, params)\n\n if params.zero_bn_bias_decay:\n optimizer = zero_wdcay_bn_bias(optimizer)\n\n return optimizer", "def init_optimizer(self, state_dict=None, use_gpu=True):\n if self.args.fix_embeddings:\n self.network.embedder.src_word_embeddings.fix_word_lut()\n self.network.embedder.tgt_word_embeddings.fix_word_lut()\n\n if self.args.optimizer == 'sgd':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.SGD(parameters,\n self.args.learning_rate,\n momentum=self.args.momentum,\n weight_decay=self.args.weight_decay)\n\n elif self.args.optimizer == 'adam':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.Adam(parameters,\n self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n elif self.args.optimizer == 'adamW':\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n self.optimizer = optim.AdamW(parameters,\n self.args.learning_rate,\n weight_decay=self.args.weight_decay)\n\n else:\n raise RuntimeError('Unsupported optimizer: %s' % self.args.optimizer)\n\n if state_dict is not None:\n self.optimizer.load_state_dict(state_dict)\n # FIXME: temp soln - https://github.com/pytorch/pytorch/issues/2830\n if use_gpu:\n for state in self.optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.cuda()", "def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]", "def load_D( self, D_checkpoint):\n checkpoint = torch.load(D_checkpoint)\n self.D.load_state_dict(checkpoint['dnet'])\n self.dis_optim.load_state_dict(checkpoint['gopt'])", "def _create_train_op(self):\n self.lr = self.learning_rate\n # global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.constant(value=self.learning_rate, shape=[], dtype=tf.float32)\n learning_rate =tf.train.exponential_decay(learning_rate,self.global_step,2*self.num_warm_up,0.96,staircase=True,name=\"exponential_decay\")\n\n # Implements linear warmup. 
I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if self.num_warm_up:\n global_steps_int = tf.cast(self.global_step, tf.int32)\n warmup_steps_int = tf.constant(self.num_warm_up, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = self.learning_rate * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n self.current_learning_rate = learning_rate\n if self.optim_type == 'adagrad':\n self.optimizer = tf.train.AdagradOptimizer(self.lr)\n elif self.optim_type == 'adam':\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n elif self.optim_type == 'rprop':\n self.optimizer = tf.train.RMSPropOptimizer(self.lr)\n elif self.optim_type == 'sgd':\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr)\n elif self.optim_type == \"bert\":\n self.optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9,\n beta_2=0.999, epsilon=1e-6,\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\n else:\n raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))\n\n self.logger.info(\"applying optimize %s\" % self.optim_type)\n if self.clip_weight:\n # clip_weight\n tvars = tf.trainable_variables()\n grads = tf.gradients(self.loss, tvars)\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=self.max_norm_grad)\n grad_var_pairs = zip(grads, tvars)\n train_op = self.optimizer.apply_gradients(grad_var_pairs, name='apply_grad', global_step=self.global_step)\n new_global_step = self.global_step + 1\n train_op = tf.group(train_op, [self.global_step.assign(new_global_step)])\n self.train_op = train_op\n else:\n self.train_op = self.optimizer.minimize(self.loss, global_step=self.global_step)", "def from_checkpoints(\n self,\n restore_cfgs: Sequence[RestoreCheckpointConfig],\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None,\n ) -> Iterator[train_state_lib.TrainState]:\n\n def _restore_path(path, cfg):\n restore_checkpointer = cfg.checkpointer_cls(\n train_state=self.global_train_state_shape,\n partitioner=self._partitioner,\n checkpoints_dir='', # unused for restore\n dataset_iterator=ds_iter if cfg.restore_dataset else None,\n restore_dtype=jnp.dtype(cfg.dtype) if cfg.dtype else None)\n\n from_tensorflow = gfile.exists(path + '.index')\n if from_tensorflow and cfg.state_transformation_fns:\n raise ValueError('Cannot initialize from a TensorFlow checkpoint using '\n '`state_transformation_fns`.')\n if from_tensorflow:\n logging.info('Initializing parameters from TensorFlow checkpoint %s',\n path)\n return restore_checkpointer.restore_from_tf_checkpoint(\n path, strict=cfg.strict)\n\n else:\n if cfg.fallback_to_scratch:\n if not cfg.state_transformation_fns:\n raise ValueError('`state_transformation_fns` must be provided with '\n '`fallback_to_scratch`')\n if init_rng is None:\n raise ValueError('An `init_rng` must be provided with '\n '`fallback_to_scratch`')\n fallback_state = self.from_scratch(init_rng).state_dict()\n else:\n fallback_state = None\n\n logging.info('Initializing parameters from specific T5X checkpoint %s',\n path)\n return restore_checkpointer.restore(\n path=path,\n 
state_transformation_fns=cfg.state_transformation_fns,\n fallback_state=fallback_state)\n\n for restore_cfg in restore_cfgs:\n paths = ([restore_cfg.path]\n if isinstance(restore_cfg.path, str) else restore_cfg.path)\n if restore_cfg.mode == 'specific':\n logging.info('Restoring specific checkpoint(s): %s', paths)\n for path in paths:\n yield _restore_path(path, restore_cfg)\n return\n elif restore_cfg.mode in ('all', 'latest'):\n for ckpt_dir in paths:\n if not gfile.isdir(ckpt_dir):\n raise ValueError(\n 'Checkpoint path(s) must be valid directories when using '\n \"restore mode 'all' or 'latest'.\")\n # Check if this is a TensorFlow checkpoint dir.\n tf_ckpt_state = tf.train.get_checkpoint_state(ckpt_dir)\n\n if tf_ckpt_state:\n ckpt_paths = tf_ckpt_state.all_model_checkpoint_paths\n else:\n ckpt_paths = [\n os.path.join(ckpt_dir, f'checkpoint_{step}')\n for step in checkpoints.all_steps(ckpt_dir)\n ]\n if not ckpt_paths:\n logging.info('No checkpoints found in specified directory: %s',\n ckpt_dir)\n continue\n if restore_cfg.mode == 'latest':\n logging.info('Restoring latest T5X checkpoint.')\n ckpt_paths = ckpt_paths[-1:]\n logging.info('Restoring checkpoints for path(s): %s', ckpt_paths)\n for ckpt_path in ckpt_paths:\n yield _restore_path(ckpt_path, restore_cfg)\n return\n else:\n raise ValueError(\n f'Unsupported checkpoint restore mode: {restore_cfg.mode}')", "def load_from_path(self, checkpoint_dir):\n\n vars = self.save_var_names\n saver = tf.train.Saver(vars)\n\n def load_aux(ckpt_path):\n \"\"\"Helper function to not repeat the same code in the following lines.\"\"\"\n\n ckpt_name = os.path.basename(ckpt_path)\n saver.restore(self.sess, ckpt_path)\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\", ckpt_name)).group(0))\n self.counter = counter\n print(\" [*] Loaded {}\".format(ckpt_name))\n return True, counter\n\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n try:\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n return load_aux(os.path.join(checkpoint_dir, ckpt_name))\n else:\n print(\n \" [!] Failed to find a checkpoint within directory {}\".format(\n FLAGS.ckpt_path))\n return False, 0\n except:\n print(\" [!] 
Failed to find a checkpoint, Exception!\")\n return False, 0", "def define_model_and_optimizer(self):\r\n print(\"* Defining model and optimizer.\", flush=True)\r\n job_dir = self.C.job_dir\r\n\r\n if self.C.restart:\r\n print(\"-- Loading model from previous saved state.\", flush=True)\r\n self.restart_epoch = util.get_restart_epoch()\r\n self.model = torch.load(f\"{job_dir}model_restart_{self.restart_epoch}.pth\")\r\n\r\n print(\r\n f\"-- Backing up as \"\r\n f\"{job_dir}model_restart_{self.restart_epoch}_restarted.pth.\",\r\n flush=True,\r\n )\r\n shutil.copyfile(\r\n f\"{job_dir}model_restart_{self.restart_epoch}.pth\",\r\n f\"{job_dir}model_restart_{self.restart_epoch}_restarted.pth\",\r\n )\r\n\r\n else:\r\n print(\"-- Initializing model from scratch.\", flush=True)\r\n self.model = models.initialize_model()\r\n\r\n self.restart_epoch = 0\r\n\r\n start_epoch = self.restart_epoch + 1\r\n end_epoch = start_epoch + self.C.epochs\r\n\r\n print(\"-- Defining optimizer.\", flush=True)\r\n self.optimizer = torch.optim.Adam(\r\n params=self.model.parameters(),\r\n lr=self.C.init_lr,\r\n weight_decay=self.C.weight_decay,\r\n )\r\n\r\n return start_epoch, end_epoch", "def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])", "def load(cls, path):\n print(f'load checkpoint from {path}')\n if torch.cuda.is_available():\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME))\n model = torch.load(os.path.join(path, cls.MODEL_NAME))\n else:\n resume_checkpoint = torch.load(os.path.join(path, cls.TRAINER_STATE_NAME), map_location=lambda storage, loc: storage)\n model = torch.load(os.path.join(path, cls.MODEL_NAME), map_location=lambda storage, loc: storage)\n \n # model.flatten_parameters() # make RNN parameters contiguous\n optimizer = resume_checkpoint['optimizer']\n return Checkpoint(\n model=model, \n optimizer=optimizer,\n epoch=resume_checkpoint['epoch'],\n path=path\n )", "def load_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n raise Exception('No checkpoint directory <%s>' % checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n self.model.load_state_dict(torch.load(path, self.device))\r\n self.update()", "def __init__(self, optimizer):\n super(ShardedOptimizer, self).__init__(optimizer, name=\"ShardedOptimizer\")", "def load_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Load checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n 
logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)", "def create_optimizer(hparams):\n\n if hparams.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=hparams.learning_rate, momentum=hparams.momentum)\n elif hparams.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(\n learning_rate=hparams.learning_rate)\n elif hparams.optimizer == 'adadelta':\n optimizer = tf.train.AdadeltaOptimizer(\n hparams.learning_rate)\n elif hparams.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n hparams.learning_rate)\n elif hparams.optimizer == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(\n hparams.learning_rate, momentum=hparams.momentum)\n\n return optimizer", "def load_model (checkpoint_path, model, opt_fn=None, loss_fn=None, epoch=None):\n\n if not os.path.exists(checkpoint_path):\n raise Exception (\"The {} does not exist!\".format(checkpoint_path))\n\n ckpt = torch.load(checkpoint_path)\n model.load_state_dict(ckpt['model_state_dict'])\n\n if opt_fn is not None and loss_fn is not None:\n opt_fn.load_state_dict(ckpt['optimizer_state_dict'])\n epoch = ckpt['epoch']\n loss_fn = ckpt['loss']\n return model, opt_fn, loss_fn, epoch\n else:\n return model", "def _get_init_fn():\n if FLAGS.checkpoint_path is None:\n return None\n\n # Warn the user if a checkpoint exists in the train_dir. Then we'll be\n # ignoring the checkpoint anyway.\n if tf.train.latest_checkpoint(FLAGS.train_dir):\n tf.logging.info(\n 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n % FLAGS.train_dir)\n return None\n\n exclusions = []\n if FLAGS.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in FLAGS.checkpoint_exclude_scopes.split(',')]\n\n # TODO(sguada) variables.filter_variables()\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Fine-tuning from %s' % checkpoint_path)\n\n return slim.assign_from_checkpoint_fn(\n checkpoint_path,\n variables_to_restore,\n ignore_missing_vars=FLAGS.ignore_missing_vars)", "def _get_init_fn():\n if FLAGS.checkpoint_path is None:\n return None\n\n # Warn the user if a checkpoint exists in the train_dir. 
Then we'll be\n # ignoring the checkpoint anyway.\n if tf.train.latest_checkpoint(FLAGS.train_dir):\n tf.logging.info(\n 'Ignoring --checkpoint_path because a checkpoint already exists in %s'\n % FLAGS.train_dir)\n return None\n\n exclusions = []\n if FLAGS.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in FLAGS.checkpoint_exclude_scopes.split(',')]\n\n # TODO(sguada) variables.filter_variables()\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Fine-tuning from %s' % checkpoint_path)\n\n return slim.assign_from_checkpoint_fn(\n checkpoint_path,\n variables_to_restore,\n ignore_missing_vars=FLAGS.ignore_missing_vars)", "def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,\n batch_norm, l1_factor, l2_factor, optimizer):\n basename = get_basename(name, split_num)\n model_filename_fmt = get_model_filename_fmt(basename)\n model_filename = model_filename_fmt.format(epoch=resume_from_epoch)\n\n checkpoint_path = os.path.join(checkpoint_dir, model_filename)\n\n if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):\n\n click.secho(f\"Found model checkpoint '{checkpoint_path}'. \"\n f\"Resuming from epoch {resume_from_epoch}.\", fg='green')\n\n model = load_model(checkpoint_path)\n\n initial_epoch = resume_from_epoch\n\n else:\n\n click.secho(f\"Could not load model checkpoint '{checkpoint_path}' \"\n \"or `resume_from_epoch == 0`. Building new model.\",\n fg='yellow')\n\n model = build_model(output_dim=1, batch_norm=batch_norm,\n kernel_regularizer=l1_l2(l1_factor, l2_factor))\n # optimizer = Adam(beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n initial_epoch = 0\n\n return model, initial_epoch", "def resume(model, optimizer, gpu, filepath='./saves/checkpoint.pth.tar'):\n assert isinstance(filepath, str)\n\n if gpu:\n f = torch.load(filepath)\n else:\n f = torch.load(filepath, map_location=lambda storage, loc: storage)\n epoch = f['epoch']\n losses = f['losses']\n seq_len = f['seq_len']\n\n model.load_state_dict(f['state_dict'])\n optimizer.load_state_dict(f['optimizer'])\n return model, optimizer, epoch, losses, seq_len", "def load_from_checkpoint(results_dir, load_fn, args):\n ckpt_dir = os.path.join(results_dir, \"tb\", \"version_0\", \"checkpoints\")\n files = os.listdir(ckpt_dir)\n assert len(files) > 0, \"Checkpoint directory is empty\"\n ckpt_path = os.path.join(ckpt_dir, files[-1])\n model = load_fn(checkpoint_path=ckpt_path, args=args)\n return model", "def init_optimizer(model, config, exact_layers=None):\n opt_type = config.optimizer\n if exact_layers:\n logger.info('Learning exact layers, number=%d', len(exact_layers))\n parameters = []\n for i, layer in enumerate(exact_layers):\n if isinstance(layer, tuple) and len(layer) == 2:\n layer, multiplier = layer\n init_multiplier = 1\n elif isinstance(layer, tuple) and len(layer) == 3:\n layer, init_multiplier, multiplier = layer\n else:\n multiplier = 1\n init_multiplier = 1\n lr = config.lr * multiplier\n init_lr = config.lr * multiplier * init_multiplier\n logger.info('Layer=%d, lr=%.5f', i, init_lr)\n parameters.append({'params': layer.parameters(), 'lr': 
init_lr, 'after_warmup_lr': lr})\n else:\n logger.info('Optimizing all parameters, lr=%.5f', config.lr)\n parameters = model.parameters()\n\n if opt_type == 'sgd':\n optimizer = torch.optim.SGD(parameters, config.lr, momentum=config.momentum, weight_decay=config.weight_decay)\n elif opt_type == 'adam':\n optimizer = torch.optim.Adam(parameters, lr=config.lr, weight_decay=config.weight_decay)\n elif opt_type == 'yf':\n optimizer = YFOptimizer(parameters, config.lr, mu=config.momentum, weight_decay=config.weight_decay,\n clip_thresh=0.1)\n else:\n raise TypeError, 'Unknown optimizer type=%s' % (opt_type, )\n return optimizer", "def load_checkpoint(self, session, model_dir):\n assert self.params.cp_load == True, (\"cp_load must be set to true to load a checkpoint\")\n self.loader.restore(session, model_dir)", "def load_checkpoint(filepath):\n checkpoint = torch.load(filepath)\n \n arch = checkpoint['arch']\n if arch == 'vgg':\n model = models.vgg16(pretrained=True)\n elif arch == 'densenet':\n model = models.densenet121(pretrained=True) \n \n model.class_to_idx = checkpoint['class_to_idx']\n model.classifier = checkpoint['classifier']\n model.classifier.load_sate_dict = checkpoint['classifier_state_dict']\n model.optimizer = checkpoint['optimizer_state_dict']\n model.input_size = checkpoint['input_size']\n model.output_size = checkpoint['output_size']\n \n return model", "def state_create(recipe, stage, devtype):\n\n # subsequent `.load_state_dict()` automatically moves to device and casts\n # `model` stage in a stage are allowed to be None\n model = get_model(recipe, **stage[\"model\"]).to(**devtype)\n\n # base lr is specified in the optimizer settings\n optim = get_optimizer(model, stage[\"optimizer\"])\n\n # name-id mapping for copying optimizer states\n mapper = {k: id(p) for k, p in model.named_parameters()}\n\n return State(model, optim, mapper)" ]
[ "0.7107619", "0.70649505", "0.7064646", "0.69969", "0.6977983", "0.6892563", "0.6793151", "0.67687976", "0.673699", "0.6716268", "0.65814966", "0.650589", "0.64456534", "0.64456534", "0.6368876", "0.632732", "0.6312547", "0.6301847", "0.62795025", "0.6267788", "0.623797", "0.61970675", "0.6188866", "0.6160548", "0.61562544", "0.61470646", "0.61243194", "0.6072098", "0.6072091", "0.6048436", "0.6034581", "0.60323876", "0.6016112", "0.6015076", "0.6009018", "0.6002419", "0.5990809", "0.5974894", "0.59277564", "0.59171706", "0.59171706", "0.5892049", "0.5838952", "0.5836216", "0.5823521", "0.58004516", "0.57544154", "0.57544154", "0.5753728", "0.57502097", "0.5740976", "0.5733053", "0.57278365", "0.57276464", "0.57227236", "0.5707192", "0.56827104", "0.56801975", "0.5676075", "0.5670331", "0.56604284", "0.56260026", "0.5621544", "0.56213075", "0.56176025", "0.5612432", "0.56096023", "0.56047535", "0.55953526", "0.5592341", "0.5589361", "0.5583856", "0.5582378", "0.55680716", "0.55661315", "0.5561057", "0.55473334", "0.55380076", "0.5526939", "0.55258477", "0.55244076", "0.5524308", "0.5519089", "0.5508652", "0.55036366", "0.5496245", "0.5490582", "0.5490566", "0.5476506", "0.5473886", "0.54663295", "0.54622054", "0.54600763", "0.5450367", "0.5446924", "0.5446162", "0.54386723", "0.54309535", "0.5416919", "0.5414193" ]
0.71238905
0
Saves a checkpoint of the current model and optimizer parameters in the specified folder and uploads it to the output blob storage of the current run context. The checkpoint's name for epoch 123 would be 123_checkpoint.pth.tar.
def save_checkpoint(self, epoch: int) -> Path: logging.getLogger().disabled = True model_state_dict = self.model.module.state_dict() \ if isinstance(self.model, torch.nn.DataParallel) else self.model.state_dict() checkpoint_file_path = self.config.get_path_to_checkpoint(epoch) checkpoint_file_path.parent.mkdir(exist_ok=True, parents=True) info_to_store = { ModelAndInfo.EPOCH_KEY: epoch, ModelAndInfo.MODEL_STATE_DICT_KEY: model_state_dict, ModelAndInfo.OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict() } if self.config.compute_mean_teacher_model: assert self.mean_teacher_model is not None # for mypy, getter has this built in mean_teacher_model_state_dict = self.mean_teacher_model.module.state_dict() \ if isinstance(self.mean_teacher_model, torch.nn.DataParallel) \ else self.mean_teacher_model.state_dict() info_to_store[ModelAndInfo.MEAN_TEACHER_STATE_DICT_KEY] = mean_teacher_model_state_dict torch.save(info_to_store, checkpoint_file_path) logging.getLogger().disabled = False logging.info(f"Saved model checkpoint for epoch {epoch} to {checkpoint_file_path}") return checkpoint_file_path
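A minimal usage sketch (an assumption for illustration, not part of the stored example: the trainer object, num_epochs and train_one_epoch helper are hypothetical names): the method above would typically be driven once per epoch from a training loop.

# Hypothetical driver for the save_checkpoint method shown above.
# `trainer`, `num_epochs` and `train_one_epoch` are assumed names, not taken from the source.
for epoch in range(1, num_epochs + 1):
    train_one_epoch(trainer.model, trainer.optimizer)      # assumed training step for one epoch
    checkpoint_path = trainer.save_checkpoint(epoch)       # writes e.g. .../123_checkpoint.pth.tar for epoch 123
    print(f"Checkpoint for epoch {epoch} written to {checkpoint_path}")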
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):\n\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n return", "def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"`epoch` != save_dict's `start_epoch`\"\n assert iteration == save_dict['iteration'], \"`iteration` != save_dict's `start_iteration`\"\n if os.path.isfile(path):\n print(\"Overwrite checkpoint in epoch %d, iteration %d :exclamation:\" % (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"Fail to save checkpoint\")\n \n print(\"Checkpoint %s saved :heavy_check_mark:\" % (str(epoch) + '.' + str(iteration) + '.ckpt'))", "def save_checkpoint(ckpt_dir, model, optim, scheduler, epoch, global_step):\n states = {\n 'model': model.state_dict(),\n 'optim': optim.state_dict(),\n 'epoch': epoch,\n 'global_step': global_step\n }\n if scheduler is not None:\n states['scheduler'] = scheduler.state_dict()\n ckpt_path = os.path.join(ckpt_dir, '[ep-{:02d}]giter-{}.ckpt'.format(epoch, global_step))\n torch.save(states, ckpt_path)\n\n return ckpt_path", "def save(self, checkpoint_dir, step):\n model_name = \"CNN.model\"\n model_dir = \"%s\" % (\"cnn\")\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)", "def save_checkpoint(self, checkpoint_dir):\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n\r\n path = os.path.join(checkpoint_dir, 'model.pt')\r\n torch.save(self.model.state_dict(), path)", "def save_model_checkpoint(model, optimizer, global_step, epoch_info, file_name):\n output = {\n \"model\" : model.state_dict(),\n \"optimizer\" : optimizer.state_dict(),\n \"global_step\" : global_step + 1,\n \"epoch_info\" : epoch_info\n }\n torch.save(output, file_name)", "def save_checkpoint(args,state, is_best, filename=\"checkpoint.pth.tar\"):\n directory = \"runs/%s-net/\" % (args.name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\n epoch = state['epoch']\n\n filename = directory + filename\n torch.save(state, filename)\n\n if is_best:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_best.pth.tar\")\n\n if epoch==0 or epoch==2:\n shutil.copyfile(filename, \"runs/%s-net/\" % (args.name) + \"model_epoch_%d.pth.tar\" % epoch )", "def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)", "def save_checkpoint(state, is_best, epoch, args, filename='checkpoint.pth'):\n if not os.path.exists(args.save_folder):\n os.makedirs(args.save_folder)\n filename = args.save_folder + str(epoch) + '_' + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, args.save_folder + 'model_best.pth')", "def save_checkpoint(model, epoch, checkpoint_dir, stats):\n state = {\n \"epoch\": epoch,\n \"state_dict\": model.state_dict(),\n \"stats\": stats,\n }\n\n filename = os.path.join(checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(epoch))\n torch.save(state, filename)", "def save(self, epoch=None, note=None):\n\n checkpoint_encoder = {\n 'type': \"transformer\",\n 'model': 
self.model.encoder.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_encoder['settings'].telegram:\n del checkpoint_encoder['settings'].telegram\n\n checkpoint_decoder = {\n 'type': \"transformer\",\n 'model': self.model.decoder.state_dict(),\n 'generator': self.model.generator.state_dict(),\n 'epoch': epoch,\n 'settings': self.opt\n }\n\n if checkpoint_decoder['settings'].telegram:\n del checkpoint_decoder['settings'].telegram\n\n if not note:\n note = \"\"\n\n # make sure a path is specified prior to saving the files.\n if self.opt.save_model:\n ready_to_save = False\n if self.opt.save_mode == \"all\":\n model_name = \"_\" + str(note)\n ready_to_save = True\n else:\n # assumes self.opt.save_mode = \"best\"\n if self.valid_accs[-1] >= max(self.valid_accs):\n model_name = \"\"\n ready_to_save = True\n if self.opt.verbose:\n print(\n ' - [Info] The checkpoint file has been updated.')\n if ready_to_save:\n encoder_name = \"encoder\" + model_name + \".chkpt\"\n decoder_name = \"decoder\" + model_name + \".chkpt\"\n # setup directory to save this at.\n encoder_filepath = os.path.join(\n self.opt.directory, encoder_name)\n decoder_filepath = os.path.join(\n self.opt.directory, decoder_name)\n torch.save(checkpoint_encoder, encoder_filepath)\n torch.save(checkpoint_decoder, decoder_filepath)\n else:\n if not self.save_trip:\n if self.opt.verbose:\n print(\n \" - [Warning]: the model is not specified to save.\")\n self.save_trip = True", "def checkpoint_save(self, epoch, model, label=None, checkpoint=None, path=\"\"):\n\n if label is None:\n label = f\"checkpoint-{epoch}\"\n else:\n label = f\"{label}-checkpoint-{epoch}\"\n\n if checkpoint is None:\n pass\n elif checkpoint == -1:\n Potentials.save(model=model, label=label, path=path)\n elif epoch % checkpoint == 0:\n Potentials.save(model=model, label=label, path=path)", "def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')", "def save_checkpoint(self, model_path=None):\n # TODO: include new params based on ConfigEnum\n if not os.path.isdir(path_checkpoints_dir):\n os.mkdir(path_checkpoints_dir)\n if model_path is None:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"saved the model at {model_path}\") \n model_state_dict = self.model.state_dict()\n if self.optimizer is not None:\n opt_state_dict = self.optimizer.state_dict()\n else:\n opt_state_dict = None\n if self.scheduler is not None:\n sch_state_dict = self.scheduler.state_dict()\n else:\n sch_state_dict = None\n \n if self.scaler is not None:\n amp_grad_scaler = self.scaler.state_dict()\n else:\n amp_grad_scaler = None\n\n model_dict = {}\n model_dict[\"state_dict\"] = model_state_dict\n model_dict[\"optimizer\"] = opt_state_dict\n model_dict[\"scheduler\"] = sch_state_dict\n model_dict['scaler'] = amp_grad_scaler\n model_dict['image_size'] = self.image_size\n model_dict['device'] = self.device\n model_dict['fp16'] = self.fp16\n model_dict['accumulate_grad_steps'] = self.accumulate_grad_steps\n\n model_dict['experiment_id'] = self.experiment_id\n model_dict['experiment_tag'] = self.experiment_tag\n\n model_dict['seed'] = self.seed\n\n model_dict['train_batch_size'] = self.train_batch_size\n model_dict['valid_batch_size'] = self.valid_batch_size\n 
model_dict['test_batch_size'] = self.test_batch_size\n model_dict['dataloader_num_workers'] = self.dataloader_num_workers\n model_dict['train_dataloader_shuffle'] = self.train_dataloader_shuffle\n\n model_dict['optimizer_type'] = self.optimizer_type\n model_dict['optimizer_params'] = self.optimizer_params\n\n model_dict['scheduler_type'] = self.scheduler_type\n model_dict['scheduler_params'] = self.scheduler_params\n model_dict['step_scheduler_after'] = self.step_scheduler_after\n model_dict['step_scheduler_metric'] = self.step_scheduler_metric\n\n model_dict['compute_train_loss_after'] = self.compute_train_loss_after\n model_dict['compute_train_metric_after'] = self.compute_train_metric_after\n model_dict['compute_valid_loss_after'] = self.compute_valid_loss_after\n model_dict['compute_valid_metric_after'] = self.compute_valid_metric_after\n\n model_dict['training_stopping_criteria'] = self.training_stopping_criteria\n model_dict['stopping_criteria_params'] = self.stopping_criteria_params\n model_dict['max_epoch'] = self.max_epoch\n model_dict['train_on_all_data'] = self.train_on_all_data\n model_dict['validate_after'] = self.validate_after\n model_dict['validation_steps'] = self.validation_steps\n model_dict['run_lr_range_test'] = self.run_lr_range_test\n model_dict['sleep_in_epochs'] = self.sleep_in_epochs\n model_dict['sleep_time'] = self.sleep_time\n model_dict['checkpoint_epochs'] = self.checkpoint_epochs\n\n model_dict['_best_score'] = self._best_score\n model_dict['_current_score'] = self._current_score\n model_dict['_counter'] = self._counter\n\n model_dict['metrics'] = self.metrics\n model_dict['current_epoch'] = self.current_epoch\n model_dict['current_train_batch'] = self.current_train_batch\n model_dict['current_valid_batch'] = self.current_valid_batch\n\n model_dict['num_train_samples'] = self.num_train_samples\n model_dict['num_train_iterations'] = self.num_train_iterations\n model_dict['checkpoint_snapshot'] = self.checkpoint_snapshot \n torch.save(model_dict, model_path)", "def save_checkpoint(tag, params, model):\r\n os.makedirs(os.path.join(\"saved_models\", params.path), exist_ok=True)\r\n state = {\r\n 'training_id': params.training_id,\r\n 'global_step': model.global_step,\r\n 'model': model.state_dict(),\r\n 'optimizers': [optimizer.state_dict() for optimizer in model.optimizers]\r\n }\r\n fn = os.path.join(\"saved_models\", params.path, tag + \".pt\")\r\n torch.save(state, fn)", "def save_checkpoint(self, model, optimizers):\n\n def _save(path, model, optimizers):\n if not os.path.exists(path):\n os.makedirs(path)\n # TODO: put everything on CPU first\n torch.save(model.state_dict(), os.path.join(path, 'model.ckpt'))\n torch.save(tuple([optimizer.opt.state_dict() for optimizer in optimizers]),\n os.path.join(path, 'opt.ckpt'))\n\n if (self.epoch % self._save_iter) == 0:\n # we're at a save iteration\n ckpt_path = os.path.join(self.log_path, 'checkpoints', str(self.epoch))\n _save(ckpt_path, model, optimizers)\n\n if self._best_epoch:\n # overwrite the best model\n ckpt_path = os.path.join(self.log_path, 'checkpoints', 'best')\n _save(ckpt_path, model, optimizers)\n self._best_epoch = False", "def save_checkpoint(model, path):\n\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Basic details\n checkpoint = {\n 'class_to_idx': model.class_to_idx,\n 'idx_to_class': model.idx_to_class,\n 'epochs': model.epochs,\n }\n\n # Extract the final classifier and the state dictionary\n if 
model_name == 'vgg16':\n # Check to see if model was parallelized\n if multi_gpu:\n checkpoint['classifier'] = model.module.classifier\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['classifier'] = model.classifier\n checkpoint['state_dict'] = model.state_dict()\n\n elif model_name == 'resnet50':\n if multi_gpu:\n checkpoint['fc'] = model.module.fc\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['fc'] = model.fc\n checkpoint['state_dict'] = model.state_dict()\n\n # Add the optimizer\n checkpoint['optimizer'] = model.optimizer\n checkpoint['optimizer_state_dict'] = model.optimizer.state_dict()\n\n # Save the data to the path\n torch.save(checkpoint, path)", "def save_model_checkpoint(base_name, model, ep, opt):\n # Save only the model params\n model_name = os.path.join(base_name, \"i3d_ep\"+str(ep)+\"_\"+opt+\".pt\")\n\n torch.save(model.state_dict(), model_name)\n print(\"Model saved to disk... : {}\".format(model_name))", "def save(self, experiment_dir):\n date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\n if self.eval_results is not None:\n # print(self.eval_results)\n assert isinstance(self.eval_results, dict)\n # present the dict in str form\n # res_str = ''.join(''.join(str(x) for x in tup) for tup in self.eval_results.items())\n\n self._path = os.path.join(\n experiment_dir, self.CHECKPOINT_DIR_NAME, date_time,\n )\n path = self._path\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n torch.save(\n {'epoch': self.epoch, 'optimizer': self.optimizer},\n os.path.join(path, self.TRAINER_STATE_NAME)\n )\n torch.save(self.model, os.path.join(path, self.MODEL_NAME))\n\n # save parameters to txt\n txt_file = open(os.path.join(path, self.PARAMETERS), \"w\")\n\n txt_file.write(f\"ckpt name: '{date_time}'\\n\")\n txt_file.write(f\"epoch: {self.epoch}\\n\")\n\n if self.eval_results is not None: \n for key, value in self.eval_results.items():\n txt_file.write(str(key)+': '+str(value)+'\\n')\n # if 'acc' in self.eval_results:\n # txt_file.write(f\"acc: {self.eval_results['acc']}\\n\")\n # if 'p' in self.eval_results:\n # txt_file.write(f\"p: {self.eval_results['p']}\\n\")\n # if 'r' in self.eval_results:\n # txt_file.write(f\"r: {self.eval_results['r']}\\n\")\n # if 'f1' in self.eval_results:\n # txt_file.write(f\"f1: {self.eval_results['f1']}\\n\")\n \n txt_file.close()\n\n return path", "def save_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n os.makedirs(model_dir, exist_ok=True)\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n model_dict = {'net_state_dict': self.net.state_dict(),\n 'use_cuda': self.use_cuda}\n\n print(\"Saving model to {}\".format(model_file))\n torch.save(model_dict, model_file)", "def save_checkpoint(self, name=''):\n self.checkpoint_path.mkdir(exist_ok=True)\n if name:\n path = self.checkpoint_path / f'{name}_{self.epoch}.tar'\n else:\n path = self.checkpoint_path / f'{self.epoch}.tar'\n torch.save(self.get_state(), path)", "def save(self, checkpoint_dir, step):\n\n model_name = \"CSGAN.model\"\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)", "def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n data = dict()\n data[\"inst\"] = \"save\"\n data[\"folder\"] = folder\n 
data[\"filename\"] = filename\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done", "def save_checkpoint(model, state, is_best, checkpoint):\n state_filepath = os.path.join(checkpoint, 'last.pth.tar')\n model_filepath = os.path.join(checkpoint, 'last_model.pth')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n else:\n print(\"Checkpoint Directory exists! \")\n torch.save(state, state_filepath)\n torch.save(model, model_filepath)\n if is_best:\n shutil.copyfile(state_filepath, os.path.join(checkpoint, 'best.pth.tar'))\n shutil.copyfile(model_filepath, os.path.join(checkpoint, 'best_model.pth'))", "def saveCheckpoint(acc, epoch, model, train_hist):\r\n print('Saving..')\r\n state = {\r\n 'model': model,\r\n 'acc': acc,\r\n 'epoch': epoch,\r\n 'rng_state': torch.get_rng_state(),\r\n 'train_hist': train_hist\r\n }\r\n if not os.path.isdir('checkpoint'): # save to checkpoint directory\r\n os.mkdir('checkpoint')\r\n torch.save(state, './checkpoint/ckpt' + '_' + str(epoch+1))", "def save_checkpoint(dir, state, is_best, filename='checkpoint.pth.tar'):\n directory = \"%s/\" % (dir)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, '%s/' %\n (dir) + 'model_best.pth.tar')", "def save_checkpoint(state: dict, is_best: bool, filename: str = 'checkpoint.pth.tar', args: Namespace = None):\n directory = f\"runs/{args.name}/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, f'runs/{args.name}/model_best.pth.tar')", "def save(self, checkpoint) -> None:\r\n self.model.save(checkpoint)", "def _save_model(self, checkpoint_dir):\n # Check whether the specified path exists or not\n isExist = os.path.exists(checkpoint_dir)\n\n if not isExist:\n # Create a new directory because it does not exist\n os.makedirs(checkpoint_dir)\n\n filename = self._get_checkpoint_name()\n path = checkpoint_dir + filename\n\n # Serialize the model checkpoint in to a Python Pickle file\n with open(path, 'wb') as f:\n pickle.dump(self._model, f)\n return path", "def _save(self, tmp_checkpoint_dir):\n checkpoint_path = os.path.join(tmp_checkpoint_dir, \"model_weights\")\n self.model.save_weights(checkpoint_path, save_format=\"tf\")\n return tmp_checkpoint_dir", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\"%(args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')", "def create_checkpoint(model, save_dir, train_data):\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {\n 'model': model.name,\n 'classifier': model.classifier,\n 'class_to_idx': model.class_to_idx,\n 'state_dict': model.state_dict()\n }\n\n if save_dir and isdir(save_dir):\n torch.save(checkpoint, save_dir + 'checkpoint.pth')\n print('checkpoint created')\n else: \n print(\"Directory not found. 
Saving at current directory in checkpoint.pth\")\n torch.save(checkpoint, 'checkpoint.pth')", "def save_checkpoint(epoch, outdir, model, mapper, optimizer, criterion,\n filename='checkpoint.OWE.pth.tar'):\n filename = outdir / filename\n logger.info(\"Saving checkpoint to {}.\".format(filename))\n torch.save({'epoch': epoch,\n 'model': model.state_dict(),\n 'mapper': mapper.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, str(filename))\n if max(criterion) == criterion[-1]:\n best_name = str(outdir / 'best_checkpoint.OWE.pth.tar')\n shutil.copyfile(str(filename), best_name)\n logger.info(\"Saved best checkpoint to {}.\".format(best_name))", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"runs/%s/\" % (args.name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'runs/%s/' % (args.name) + 'model_best.pth.tar')", "def save_checkpoint(checkpoint_dir, model_encoder_noisy_clean, model_encoder_noisy_noise, model_decoder_noisy,\n model_encoder_clean, model_decoder_clean, model_encoder_noise, model_decoder_noise,\n model_classifier, min_eval_loss_melsp_y_dB, min_eval_loss_melsp_y_dB_std,\n iter_idx, min_idx, optimizer, numpy_random_state, torch_random_state, iterations, model_spkidtr=None):\n model_encoder_noisy_clean.cpu()\n model_encoder_noisy_noise.cpu()\n model_decoder_noisy.cpu()\n model_encoder_clean.cpu()\n model_decoder_clean.cpu()\n model_encoder_noise.cpu()\n model_decoder_noise.cpu()\n model_classifier.cpu()\n checkpoint = {\n \"model_encoder_noisy_clean\": model_encoder_noisy_clean.state_dict(),\n \"model_encoder_noisy_noise\": model_encoder_noisy_noise.state_dict(),\n \"model_decoder_noisy\": model_decoder_noisy.state_dict(),\n \"model_encoder_clean\": model_encoder_clean.state_dict(),\n \"model_decoder_clean\": model_decoder_clean.state_dict(),\n \"model_encoder_noise\": model_encoder_noise.state_dict(),\n \"model_decoder_noise\": model_decoder_noise.state_dict(),\n \"model_classifier\": model_classifier.state_dict(),\n \"min_eval_loss_melsp_y_dB\": min_eval_loss_melsp_y_dB,\n \"min_eval_loss_melsp_y_dB_std\": min_eval_loss_melsp_y_dB_std,\n \"iter_idx\": iter_idx,\n \"min_idx\": min_idx,\n \"optimizer\": optimizer.state_dict(),\n \"numpy_random_state\": numpy_random_state,\n \"torch_random_state\": torch_random_state,\n \"iterations\": iterations}\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n torch.save(checkpoint, checkpoint_dir + \"/checkpoint-%d.pkl\" % iterations)\n model_encoder_noisy_clean.cuda()\n model_encoder_noisy_noise.cuda()\n model_decoder_noisy.cuda()\n model_encoder_clean.cuda()\n model_decoder_clean.cuda()\n model_encoder_noise.cuda()\n model_decoder_noise.cuda()\n model_classifier.cuda()\n logging.info(\"%d-iter checkpoint created.\" % iterations)", "def save(self, checkpoint_path: str):\r\n raise NotImplementedError", "def _save(trainer, checkpoint_dir, state_dict_key_name, world_rank=None):\n\n # save current model parameters as a checkpoint\n makedir(checkpoint_dir)\n checkpoint_file_name = 'checkpoint{}.ortcp'.format('' if world_rank is None else str(world_rank))\n trainer.save_checkpoint(os.path.join(checkpoint_dir, checkpoint_file_name))\n state_dict = trainer.state_dict()\n with open(os.path.join(checkpoint_dir, state_dict_key_name+'.pkl'), \"wb\") as f:\n pickle.dump({state_dict_key_name : state_dict}, f)", "def save_checkpoint(self, checkpoint_path='checkpoint.pth'):\n # 
Move the model back to the cpu so it can be loaded onto machines\n # without gpu's as well.\n self.model.to('cpu')\n\n checkpoint = {\n 'model_architecture': self.model_architecture,\n 'input_size': self.input_size,\n 'output_size': self.output_size,\n 'hidden_layers': self.hidden_layers,\n 'learn_rate': self.learn_rate,\n 'drop_p': self.drop_p,\n 'class_to_idx': self.model.class_to_idx,\n 'current_epoch': self.model.current_epoch,\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'model_state_dict': self.model.state_dict()\n }\n torch.save(checkpoint, checkpoint_path)", "def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n directory = \"checkoutpoint/%s/\" % args.name\n if not os.path.exists(directory):\n os.makedirs(directory)\n filename = directory + filename\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'checkoutpoint/%s/' % args.name + 'model_best.pth.tar')", "def _save_model(self, epoch, batch, logs):\n self.save(self._get_file_path(epoch, batch, logs))", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n f\"Checkpoint directory does not exists. Creating {checkpoint_dir}\")\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, f'epoch{state[\"epoch\"]}_checkpoint.pytorch')\n log_info(f\"Saving last checkpoint to '{last_file_path}'\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(f\"Saving best checkpoint to '{best_file_path}'\")\n shutil.copyfile(last_file_path, best_file_path)", "def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)", "def save_checkpoint(filename, epoch, model, optimizer=None, best_score=0):\n torch.save({\n 'model' : model.state_dict(),\n 'optim' : optimizer.state_dict() if optimizer is not None else None,\n 'epoch' : epoch,\n 'best_score' : best_score\n }, filename)", "def save_checkpoint(model, save_path):\n torch.save(model.state_dict(), save_path)", "def save(self, model_dir, step, epoch, is_best=False):\n if model_dir is None:\n return\n save_checkpoint(self._model, self._optimizer, step, epoch, model_dir,\n keep_every_n=self._keep_every_n, is_best=is_best)", "def checkpoint(self, epoch, losses, path):\n dct = {'epoch': epoch, \n 'losses': losses, \n 'model_state_dict': self.TrajectoryAutoencoder.state_dict()}\n torch.save(dct, path)", "def save_checkpoint(self, checkpoint_info):\n torch.save(checkpoint_info, os.path.join(self.checkpoint_path, self.checkpoint_file))", "def save_model(self, epoch):\n ckpt_path = os.path.join(self.config.save_path, f'{epoch}.pkl')\n print(f'Save parameters to {ckpt_path}')\n torch.save(self.model.state_dict(), ckpt_path)", "def save_checkpoint(self):\n checkpoin_path = self.get_checkpoint_path()\n _logger.info('Save checkpoint ignored by tuner, checkpoint path: %s', checkpoin_path)", "def create_checkpoint(model_config, path):\n model = models.VisionTransformer(num_classes=1, **model_config)\n variables = model.init(\n jax.random.PRNGKey(0),\n jnp.ones([1, 16, 16, 3], jnp.float32),\n train=False,\n )\n _save(variables['params'], path)", "def save_checkpoint(self, checkpoint_dir: str) -> None:\n state = self.__getstate__()\n\n # Extract policy states from worker state (Policies get their own\n # checkpoint sub-dirs).\n policy_states = {}\n if \"worker\" in state and 
\"policy_states\" in state[\"worker\"]:\n policy_states = state[\"worker\"].pop(\"policy_states\", {})\n\n # Add RLlib checkpoint version.\n if self.config._enable_learner_api:\n state[\"checkpoint_version\"] = CHECKPOINT_VERSION_LEARNER\n else:\n state[\"checkpoint_version\"] = CHECKPOINT_VERSION\n\n # Write state (w/o policies) to disk.\n state_file = os.path.join(checkpoint_dir, \"algorithm_state.pkl\")\n with open(state_file, \"wb\") as f:\n pickle.dump(state, f)\n\n # Write rllib_checkpoint.json.\n with open(os.path.join(checkpoint_dir, \"rllib_checkpoint.json\"), \"w\") as f:\n json.dump(\n {\n \"type\": \"Algorithm\",\n \"checkpoint_version\": str(state[\"checkpoint_version\"]),\n \"format\": \"cloudpickle\",\n \"state_file\": state_file,\n \"policy_ids\": list(policy_states.keys()),\n \"ray_version\": ray.__version__,\n \"ray_commit\": ray.__commit__,\n },\n f,\n )\n\n # Write individual policies to disk, each in their own sub-directory.\n for pid, policy_state in policy_states.items():\n # From here on, disallow policyIDs that would not work as directory names.\n validate_policy_id(pid, error=True)\n policy_dir = os.path.join(checkpoint_dir, \"policies\", pid)\n os.makedirs(policy_dir, exist_ok=True)\n policy = self.get_policy(pid)\n policy.export_checkpoint(policy_dir, policy_state=policy_state)\n\n # if we are using the learner API, save the learner group state\n if self.config._enable_learner_api:\n learner_state_dir = os.path.join(checkpoint_dir, \"learner\")\n self.learner_group.save_state(learner_state_dir)", "def _save_checkpoint(self, epoch, is_best=False):\n arch = type(self.model).__name__\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'config': self.config\n }\n filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))\n torch.save(state, filename)\n self.logger.info(\"Saving checkpoint: {} ...\".format(filename))\n if is_best:\n best_path = str(self.checkpoint_dir / 'model_best.pth')\n torch.save(state, best_path)\n self.logger.info(\"Saving current best: model_best.pth ...\")", "def save_checkpoint(state, model_name=None):\n \n if not model_name: model_name = f\"model_date_{date_time_str}.pth\"\n torch.save(state, osj(out_path, model_name))", "def save_checkpoint(state, is_best, checkpoint_dir, logger=None):\n\n def log_info(message):\n if logger is not None:\n logger.info(message)\n\n if not os.path.exists(checkpoint_dir):\n log_info(\n \"Checkpoint directory does not exists. 
Creatding {}\".format(checkpoint_dir))\n os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n log_info(\"Saving last checkpoint\")\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n log_info(\"Saving best checkpoint\")\n shutil.copyfile(last_file_path, best_file_path)", "def save_model(self, dir=\"\", **kwargs):\n ckpt_fn = os.path.join(dir, f\"model.pkl\")\n torch.save(\n {\n \"global_step\": self.global_step_,\n \"epoch\": self.epoch_,\n \"model\": self.net_.state_dict(),\n \"optimizer\": self.optimizer_.state_dict(),\n \"sampler_state\": self.sampler.state_dict(),\n \"model_samples\": list(self.model_samples_),\n \"ais_state\": self.ais_loss.state_dict(),\n \"replay_prob\": self.replay_prob,\n \"max_replay\": self.max_replay,\n },\n ckpt_fn,\n )\n return ckpt_fn", "def save_checkpoint(model, filename, optimizer=None, meta=None):\n if meta is None:\n meta = {}\n elif not isinstance(meta, dict):\n raise TypeError(f'meta must be a dict or None, but got {type(meta)}')\n meta.update(mmcv_version=mmcv.__version__, time=time.asctime())\n\n if is_module_wrapper(model):\n model = model.module\n\n if hasattr(model, 'CLASSES') and model.CLASSES is not None:\n # save class name to the meta\n meta.update(CLASSES=model.CLASSES)\n\n checkpoint = {\n 'meta': meta,\n 'state_dict': weights_to_cpu(get_state_dict(model))\n }\n # save optimizer state dict in the checkpoint\n if isinstance(optimizer, Optimizer):\n checkpoint['optimizer'] = optimizer.state_dict()\n elif isinstance(optimizer, dict):\n checkpoint['optimizer'] = {}\n for name, optim in optimizer.items():\n checkpoint['optimizer'][name] = optim.state_dict()\n\n if filename.startswith('pavi://'):\n try:\n from pavi import modelcloud\n from pavi.exception import NodeNotFoundError\n except ImportError as e:\n raise ImportError(\n 'Please install pavi to load checkpoint from modelcloud.') from e\n model_path = filename[7:]\n root = modelcloud.Folder()\n model_dir, model_name = osp.split(model_path)\n try:\n model = modelcloud.get(model_dir)\n except NodeNotFoundError:\n model = root.create_training_model(model_dir)\n with TemporaryDirectory() as tmp_dir:\n checkpoint_file = osp.join(tmp_dir, model_name)\n with open(checkpoint_file, 'wb') as f:\n torch.save(checkpoint, f)\n f.flush()\n model.create_file(checkpoint_file, name=model_name)\n else:\n mmcv.mkdir_or_exist(osp.dirname(filename))\n # immediately flush buffer\n with open(filename, 'wb') as f:\n torch.save(checkpoint, f)\n f.flush()", "def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")", "def save_checkpoint(filename, model, state=None):\n if not state:\n torch.save(model.state_dict(), os.path.join('checkpoints/', filename))\n else:\n _state = {\n 'epoch': state['epoch'],\n 'state_dict': state['state_dict'].state_dict(),\n 'optimizer': state['optimizer'].state_dict()\n }\n\n torch.save(_state, os.path.join('checkpoints/', filename))", "def save_checkpoint(state, is_best, checkpoint_dir):\n\n if not os.path.exists(checkpoint_dir):\n 
os.mkdir(checkpoint_dir)\n\n last_file_path = os.path.join(checkpoint_dir, 'last_checkpoint.pytorch')\n torch.save(state, last_file_path)\n if is_best:\n best_file_path = os.path.join(checkpoint_dir, 'best_checkpoint.pytorch')\n shutil.copyfile(last_file_path, best_file_path)", "def checkpoint_model(PATH, ckpt_id, model, epoch, last_global_step,\r\n last_global_data_samples, **kwargs):\r\n checkpoint_state_dict = {\r\n 'epoch': epoch,\r\n 'last_global_step': last_global_step,\r\n 'last_global_data_samples': last_global_data_samples\r\n }\r\n # Add extra kwargs too\r\n checkpoint_state_dict.update(kwargs)\r\n\r\n success = model.network.save_checkpoint(PATH, ckpt_id,\r\n checkpoint_state_dict)\r\n status_msg = 'checkpointing: PATH={}, ckpt_id={}'.format(PATH, ckpt_id)\r\n if success:\r\n logging.info(f\"Success {status_msg}\")\r\n else:\r\n logging.warning(f\"Failure {status_msg}\")\r\n return", "def save_checkpoint(checkpoint_dir, model_encoder_melsp, model_decoder_melsp, model_encoder_excit, model_decoder_excit,\n model_classifier, model_post, min_eval_loss_melsp_dB, min_eval_loss_melsp_dB_std, min_eval_loss_melsp_cv,\n min_eval_loss_melsp, min_eval_loss_melsp_dB_src_trg, min_eval_loss_melsp_dB_src_trg_std, min_eval_loss_laplace,\n min_eval_loss_laplace_cv, iter_idx, min_idx, optimizer, numpy_random_state, torch_random_state,\n iterations, model_spkidtr=None):\n model_encoder_melsp.cpu()\n model_decoder_melsp.cpu()\n model_encoder_excit.cpu()\n model_decoder_excit.cpu()\n model_classifier.cpu()\n model_post.cpu()\n if model_spkidtr is not None:\n model_spkidtr.cpu()\n checkpoint = {\n \"model_encoder_melsp\": model_encoder_melsp.state_dict(),\n \"model_decoder_melsp\": model_decoder_melsp.state_dict(),\n \"model_encoder_excit\": model_encoder_excit.state_dict(),\n \"model_decoder_excit\": model_decoder_excit.state_dict(),\n \"model_classifier\": model_classifier.state_dict(),\n \"model_spkidtr\": model_spkidtr.state_dict(),\n \"model_post\": model_post.state_dict(),\n \"min_eval_loss_melsp_dB\": min_eval_loss_melsp_dB,\n \"min_eval_loss_melsp_dB_std\": min_eval_loss_melsp_dB_std,\n \"min_eval_loss_melsp_cv\": min_eval_loss_melsp_cv,\n \"min_eval_loss_melsp\": min_eval_loss_melsp,\n \"min_eval_loss_melsp_dB_src_trg\": min_eval_loss_melsp_dB_src_trg,\n \"min_eval_loss_melsp_dB_src_trg_std\": min_eval_loss_melsp_dB_src_trg_std,\n \"min_eval_loss_laplace\": min_eval_loss_laplace,\n \"min_eval_loss_laplace_cv\": min_eval_loss_laplace_cv,\n \"iter_idx\": iter_idx,\n \"min_idx\": min_idx,\n \"optimizer\": optimizer.state_dict(),\n \"numpy_random_state\": numpy_random_state,\n \"torch_random_state\": torch_random_state,\n \"iterations\": iterations}\n else:\n checkpoint = {\n \"model_encoder_melsp\": model_encoder_melsp.state_dict(),\n \"model_decoder_melsp\": model_decoder_melsp.state_dict(),\n \"model_encoder_excit\": model_encoder_excit.state_dict(),\n \"model_decoder_excit\": model_decoder_excit.state_dict(),\n \"model_classifier\": model_classifier.state_dict(),\n \"model_post\": model_post.state_dict(),\n \"min_eval_loss_melsp_dB\": min_eval_loss_melsp_dB,\n \"min_eval_loss_melsp_dB_std\": min_eval_loss_melsp_dB_std,\n \"min_eval_loss_melsp_cv\": min_eval_loss_melsp_cv,\n \"min_eval_loss_melsp\": min_eval_loss_melsp,\n \"min_eval_loss_melsp_dB_src_trg\": min_eval_loss_melsp_dB_src_trg,\n \"min_eval_loss_melsp_dB_src_trg_std\": min_eval_loss_melsp_dB_src_trg_std,\n \"min_eval_loss_laplace\": min_eval_loss_laplace,\n \"min_eval_loss_laplace_cv\": min_eval_loss_laplace_cv,\n \"iter_idx\": 
iter_idx,\n \"min_idx\": min_idx,\n \"optimizer\": optimizer.state_dict(),\n \"numpy_random_state\": numpy_random_state,\n \"torch_random_state\": torch_random_state,\n \"iterations\": iterations}\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n torch.save(checkpoint, checkpoint_dir + \"/checkpoint-%d.pkl\" % iterations)\n model_encoder_melsp.cuda()\n model_decoder_melsp.cuda()\n model_encoder_excit.cuda()\n model_decoder_excit.cuda()\n model_classifier.cuda()\n model_post.cuda()\n if model_spkidtr is not None:\n model_spkidtr.cuda()\n logging.info(\"%d-iter checkpoint created.\" % iterations)", "def save(self, file_prefix, options=None):\n options = options or checkpoint_options.CheckpointOptions()\n\n # IMPLEMENTATION DETAILS: most clients should skip.\n #\n # Suffix for any well-formed \"checkpoint_prefix\", when sharded.\n # Transformations:\n # * Users pass in \"save_path\" in save() and restore(). Say \"myckpt\".\n # * checkpoint_prefix gets fed <save_path><sharded_suffix>.\n #\n # Example:\n # During runtime, a temporary directory is first created, which contains\n # files\n #\n # <train dir>/myckpt_temp/\n # part-?????-of-?????{.index, .data-00000-of-00001}\n #\n # Before .save() finishes, they will be (hopefully, atomically) renamed to\n #\n # <train dir>/\n # myckpt{.index, .data-?????-of-?????}\n #\n # Filesystems with eventual consistency (such as S3), don't need a\n # temporary location. Using a temporary directory in those cases might\n # cause situations where files are not available during copy.\n #\n # Users only need to interact with the user-specified prefix, which is\n # \"<train dir>/myckpt\" in this case. Save() and Restore() work with the\n # prefix directly, instead of any physical pathname. (On failure and\n # subsequent restore, an outdated and orphaned temporary directory can be\n # safely removed.)\n with ops.device(\"CPU\"):\n sharded_suffix = array_ops.where(\n string_ops.regex_full_match(file_prefix, \"^s3://.*\"),\n constant_op.constant(\".part\"),\n constant_op.constant(\"_temp/part\"))\n tmp_checkpoint_prefix = string_ops.string_join(\n [file_prefix, sharded_suffix])\n registered_paths = {\n saver_name: registered_saver_filename(file_prefix, saver_name)\n for saver_name in self._registered_savers\n }\n\n def save_fn():\n saved_prefixes = []\n # Save with the registered savers. These run before default savers due to\n # the API contract.\n for saver_name, (save_fn, _) in self._registered_savers.items():\n maybe_saved_prefixes = save_fn(registered_paths[saver_name])\n if maybe_saved_prefixes is not None:\n flattened_saved_prefixes = nest.flatten(maybe_saved_prefixes)\n if not all(\n tensor_util.is_tf_type(x) and x.dtype == dtypes.string\n for x in flattened_saved_prefixes):\n raise ValueError(\n \"Registered saver must return a (maybe empty) list of \"\n f\"string type tensors. 
Got {maybe_saved_prefixes}.\")\n saved_prefixes.extend(flattened_saved_prefixes)\n\n # (Default saver) Save with single device savers.\n num_shards = len(self._single_device_savers)\n sharded_saves = []\n num_shards_tensor = constant_op.constant(num_shards, name=\"num_shards\")\n last_device = None\n for shard, (device, saver) in enumerate(\n sorted(self._single_device_savers.items())):\n last_device = device\n with ops.device(saveable_object_util.set_cpu0(device)):\n shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,\n num_shards_tensor)\n saved_prefixes.append(shard_prefix)\n with ops.device(device):\n # _SingleDeviceSaver will use the CPU device when necessary, but\n # initial read operations should be placed on the SaveableObject's\n # device.\n sharded_saves.append(saver.save(shard_prefix, options))\n\n with ops.control_dependencies(sharded_saves):\n # Merge on the io_device if specified, otherwise co-locates the merge op\n # with the last device used.\n merge_device = (\n options.experimental_io_device or\n saveable_object_util.set_cpu0(last_device))\n with ops.device(merge_device):\n # V2 format write path consists of a metadata merge step. Once\n # merged, attempts to delete the temporary directory,\n # \"<user-fed prefix>_temp\".\n return gen_io_ops.merge_v2_checkpoints(\n saved_prefixes, file_prefix, delete_old_dirs=True)\n\n # Since this will causes a function re-trace on each save, limit this to the\n # cases where it is needed: eager and when there are multiple tasks/single\n # device savers. Note that the retrace is needed to ensure we pickup the\n # latest values of options like experimental_io_device.\n if context.executing_eagerly() and len(self._single_device_savers) > 1:\n # Explicitly place the identity op on the first device.\n @def_function.function(jit_compile=False)\n def tf_function_save():\n save_fn()\n tf_function_save()\n else:\n return save_fn()", "def save_model (model, folder_path, epoch, opt_fn, loss_fn, is_best, multi_gpu=False, verbose=False):\n\n last_check_path = os.path.join(folder_path, 'last-checkpoint')\n best_check_path = os.path.join(folder_path, 'best-checkpoint')\n\n if not os.path.exists(last_check_path):\n if verbose:\n print ('last-checkpoint folder does not exist. I am creating it!')\n os.mkdir(last_check_path)\n else:\n if verbose:\n print ('last-checkpoint folder exist! Perfect, I will just use it.')\n\n if not os.path.exists(best_check_path):\n if verbose:\n print('best-checkpoint folder does not exist. I am creating it!')\n os.mkdir(best_check_path)\n else:\n if verbose:\n print('best-checkpoint folder exist! 
Perfect, I will just use it.')\n\n info_to_save = {\n 'epoch': epoch,\n 'model_state_dict': model.module.state_dict() if multi_gpu else model.state_dict(),\n 'optimizer_state_dict': opt_fn.state_dict(),\n 'loss': loss_fn,\n }\n\n torch.save(info_to_save, os.path.join(last_check_path, \"last-checkpoint.pth\"))\n\n if is_best:\n torch.save(info_to_save, os.path.join(best_check_path, 'best-checkpoint.pth'))", "def _save(self, step):\n\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path,global_step=step)", "def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )", "def save_checkpoint(state, is_best, checkpoint):\n filepath = os.path.join(checkpoint, 'last.pth.tar')\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.mkdir(checkpoint)\n torch.save(state, filepath)\n # 如果是最好的checkpoint则以best为文件名保存\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))", "def save(self):\n\n pattern = '{}_{}_{}ep.pt' if self.checkpoint_filename_pattern is None else self.checkpoint_filename_pattern\n filename = pattern.format('sherlock1', time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n self.monitors['loss_train'].num_epochs)\n full_filename = self.full_path(filename)\n c = {\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'monitors': self.monitors,\n 'parent': self.parent,\n 'args': vars(args) # convert args to dict\n }\n torch.save(c, full_filename)\n if not args.tuning and args.delete and self.last_checkpoint is not None:\n os.remove(self.last_checkpoint)\n self.last_checkpoint = full_filename\n return filename", "def save_checkpoint(self, name):\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'best_metrics': self.scores\n }\n\n logger.warning(\"Saving model parameters ...\")\n data['model'] = self.encoder.model.state_dict()\n data['classifier'] = self.proj\n data['dico_id2word'] = self.data['dico'].id2word\n data['dico_word2id'] = self.data['dico'].word2id\n data['dico_counts'] = self.data['dico'].counts\n # print(self.encoder.pretrain_params)\n data['params'] = self.encoder.pretrain_params.update({k: v for k, v in self.params.__dict__.items()})\n\n torch.save(data, path)", "def back_up(self, epoch):\n K.set_value(self._ckpt_saved_epoch, epoch)\n # Save the model plus CKPT_SAVED_EPOCH variable.\n if self.write_checkpoint_manager.save():\n distributed_file_utils.remove_temp_dirpath(\n self.write_checkpoint_manager.directory,\n None) #self._model.distribute_strategy)", "def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = 
[w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint", "def save_checkpoint(model, optimizer=None, model_name='model', validation_id=None):\n path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)\n\n print_debug('Saving checkpoint: ' + path)\n\n model = model.module if type(model) is torch.nn.DataParallel else model\n\n checkpoint = {\n 'model_state_dict': model.state_dict()\n }\n\n if optimizer is not None:\n checkpoint['optimizer_state_dict'] = optimizer.state_dict()\n\n torch.save(checkpoint, path)", "def saveCheckpoint(self):\n time_stamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n state_filename = os.path.join(self.saving_dir, 'checkpoint.' + time_stamp + '.pth.tar')\n mem_filename = os.path.join(self.saving_dir, 'memory.' + time_stamp + '.pth.tar')\n state = self.getSavingState()\n memory = {\n 'memory': self.memory\n }\n torch.save(state, state_filename)\n torch.save(memory, mem_filename)", "def save_checkpoint(self, path: str, **kwargs):\n if self.distributed:\n encoder = self.net_q.module.encoder\n head = self.net_q.module.head\n else:\n encoder = self.net_q.encoder\n head = self.net_q.head\n\n ckpt = {\n 'encoder': encoder.state_dict(),\n 'head': head.state_dict(),\n 'net_ps': self.net_ps.state_dict(),\n 'net_k': self.net_k.state_dict(),\n 'queue': self.queue.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'scheduler': self.scheduler.state_dict(),\n }\n if kwargs:\n ckpt.update(kwargs)\n torch.save(ckpt, path)", "def export(self, last_checkpoint, output_dir):\r\n with tf.Session(graph=tf.Graph()) as sess:\r\n inputs, outputs = self.build_prediction_image_graph()\r\n init_op = tf.global_variables_initializer()\r\n sess.run(init_op)\r\n trained_saver = tf.train.Saver()\r\n trained_saver.restore(sess, last_checkpoint)\r\n \r\n predict_signature_def = build_signature(inputs, outputs)\r\n # Create a saver for writing SavedModel training checkpoints.\r\n build = builder.SavedModelBuilder(\r\n os.path.join(output_dir, 'saved_model_image_in'))\r\n build.add_meta_graph_and_variables(\r\n sess, [tag_constants.SERVING],\r\n signature_def_map={\r\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\r\n predict_signature_def\r\n },\r\n assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))\r\n self.has_exported_image_in = True\r\n build.save()", "def save(self, sess):\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model')\n if not os.path.exists(self.model.ckpt_dir):\n os.makedirs(self.model.ckpt_dir)\n self.saver.save(sess, ckpt_path, global_step=self.gstep)", "def save_folder(date_time, sfid, logs_folder, checkpoints_folder):\n date_now = str(date_time.date())\n time_now = str(date_time.time())\n sf = \"saved_models/\" + date_now + \"_\" + time_now + \"_\" \\\n + os.path.basename(__file__).split('.')[0] + '_' + sfid\n if not os.path.isdir(sf):\n os.makedirs(sf)\n\n lf = sf +'/' + logs_folder\n if not os.path.isdir(lf):\n os.makedirs(lf)\n chkf = sf +'/' +checkpoints_folder\n if not os.path.isdir(chkf):\n os.makedirs(chkf)\n\n\n return sf, lf, chkf", "def save_checkpoint_manual(model: LFADS, path: str):\n model_wts = [v.numpy() for v in model.trainable_variables]\n optim_wts = model.optimizer.get_weights()\n checkpoint = {\"model\": model_wts, \"optimizer\": optim_wts}\n with open(path, \"wb\") as fout:\n pickle.dump(checkpoint, fout)", "def save_checkpoint(self, name, 
include_optimizers=True):\n if not self.params.is_master:\n return\n path = os.path.join(self.params.dump_path, '%s.pth' % name)\n logger.info(\"Saving %s to %s ...\" % (name, path))\n\n data = {\n 'epoch': self.epoch,\n 'n_total_iter': self.n_total_iter,\n 'best_metrics': self.best_metrics,\n 'best_stopping_criterion': self.best_stopping_criterion,\n }\n\n for name in self.MODEL_NAMES:\n logger.warning(\"Saving %s parameters ...\" % name)\n data[name] = getattr(self, name).state_dict()\n if include_optimizers:\n for name in self.optimizers.keys():\n logger.warning(\"Saving %s optimizer ...\" % name)\n data['%s_optimizer' % name] = self.optimizers[name].state_dict()\n\n # data['dico_id2word'] = self.data['dico'].id2word\n # data['dico_word2id'] = self.data['dico'].word2id\n # data['dico_counts'] = self.data['dico'].counts\n data['params'] = {k: v for k, v in self.params.__dict__.items()}\n\n torch.save(data, path)", "def train_and_save(self, checkpoint_dir):\n dataset = self._read_dataset(self._train_dataset_path)\n features, labels = self.get_features_and_labels(dataset)\n self._model.partial_fit(features, labels, classes=self._classes)\n checkpoint_path = self._save_model(checkpoint_dir)\n return checkpoint_path", "def save_keras_checkpoint(keras_model: tf.keras.Model,\n save_path: Text,\n save_format: Text = 'ckpt'\n ):\n if save_format == 'ckpt':\n checkpoint = tf.train.Checkpoint(model=keras_model)\n manager = tf.train.CheckpointManager(checkpoint,\n directory=save_path,\n max_to_keep=1)\n manager.save()\n else:\n keras_model.save(save_path, save_format=save_format)", "def write_checkpoint(self, checkpoint_id, best=False):\n assert self.output_dir is not None\n checkpoint_dir = os.path.join(self.output_dir, 'checkpoints')\n fname = self.get_model_fname(self.model)\n checkpoint_file = ''\n if best:\n checkpoint_file = 'model_checkpoint_%s.best.pth.tar' % ( fname )\n else:\n checkpoint_file = 'model_checkpoint_%s_%03i.pth.tar' % ( fname, checkpoint_id )\n os.makedirs(checkpoint_dir, exist_ok=True)\n torch.save(dict(model=self.model.state_dict()),\n os.path.join(checkpoint_dir, checkpoint_file))", "def save_ckpt(self, name=None):\r\n if name is None:\r\n save_path = os.path.join(self.model_dir, \"ckpt_epoch{}.pth\".format(self.clock.epoch))\r\n print(\"Checkpoint saved at {}\".format(save_path))\r\n else:\r\n save_path = os.path.join(self.model_dir, \"{}.pth\".format(name))\r\n if isinstance(self.net, nn.DataParallel):\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.module.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n else:\r\n torch.save({\r\n 'clock': self.clock.make_checkpoint(),\r\n 'model_state_dict': self.net.cpu().state_dict(),\r\n 'optimizer_state_dict': self.optimizer.state_dict(),\r\n 'scheduler_state_dict': self.scheduler.state_dict(),\r\n }, save_path)\r\n self.net.cuda()", "def save_model(file_name, ep, model, optimizer):\n\n torch.save({\n 'epoch': ep,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, file_name) \n \n return", "def save_checkpoint(state, is_best, file_path, file_name='checkpoint.pth.tar'):\n\n save_path = file_path + '/' + file_name\n torch.save(state, save_path)\n if is_best:\n shutil.copyfile(save_path, file_path + '/model_best.pth.tar')", "def save_model(self, path, name, epoch=\"best\"):\n\n # Checks if the save directory exists and if not creates it.\n 
os.makedirs(path, exist_ok=True)\n\n # Saves the model to the save directory.\n torch.save(self.state_dict(), os.path.join(path, f\"{name}_cnn_{str(epoch)}.pt\"))", "def write_checkpoint(self, session):\n base_save_path = self.params.cp_save_dir+self.params.model_name+\"_v\"+self.params.version\n full_save_path = self.full_saver.save(session,\n save_path=base_save_path,\n global_step=self.global_step,\n latest_filename=self.params.cp_latest_filename)\n self.logger.log_info(\"Full model saved in file %s\"%full_save_path)\n return base_save_path", "def save_session(self):\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n current_step = tf.train.global_step(self.session, self.global_step)\n path = self.saver.save(self.session, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))", "def save(self, model, ema_model, optimizer, epoch, step, best_wer,\n is_best=False):\n rank = 0\n if dist.is_initialized():\n dist.barrier()\n rank = dist.get_rank()\n\n if rank != 0:\n return\n\n # Checkpoint already saved\n if not is_best and epoch in self.tracked:\n return\n\n unwrap_ddp = lambda model: getattr(model, 'module', model)\n state = {\n 'epoch': epoch,\n 'step': step,\n 'best_wer': best_wer,\n 'state_dict': unwrap_ddp(model).state_dict(),\n 'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict() if self.use_amp else None,\n }\n\n if is_best:\n fpath = os.path.join(\n self.save_dir, f\"{self.model_name}_best_checkpoint.pt\")\n else:\n fpath = os.path.join(\n self.save_dir, f\"{self.model_name}_epoch{epoch}_checkpoint.pt\")\n\n print_once(f\"Saving {fpath}...\")\n torch.save(state, fpath)\n\n if not is_best:\n # Remove old checkpoints; keep milestones and the last two\n self.tracked[epoch] = fpath\n for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):\n try:\n os.remove(self.tracked[epoch])\n except:\n pass\n del self.tracked[epoch]", "def save(self, epoch: int, path: str = 'model.pt'):\n state_dict = {\n 'model_state_dict': self.state_dict(),\n 'epoch': epoch,\n 'ac_optim_dict': self.actor_optimizer.state_dict(),\n 'critic_optim_dict': self.critic_optimizer.state_dict()\n }\n\n torch.save(state_dict, path)", "def save_checkpoint(state, filename):\n torch.save(state, filename) # save checkpoint", "def save_checkpoint(state, filename='checkpoint.pth.tar'):\n torch.save(state, filename)", "def save_model(args,model,epoch):\n path='./model_'+args.name\n if not os.path.exists(path):\n os.mkdir(path)\n model_name='checkpoint_epoch={}'.format(epoch)\n filepath=os.path.join(path,model_name)\n torch.save(model.state_dict(), filepath)", "def checkpoint(iteration, G, D, opts):\n ckpt_path = os.path.join(opts.checkpoint_dir, 'ckpt_{:06d}.pth.tar'.format(iteration))\n torch.save({'G': G.state_dict(),\n 'D': D.state_dict(),\n 'iter': iteration}, \n ckpt_path)", "def save_model(self, path, name, epoch=\"best\"):\n\n # Checks if the save directory exists and if not creates it.\n os.makedirs(path, exist_ok=True)\n\n # Saves the model to the save directory.\n torch.save(self.state_dict(), os.path.join(path, f\"{name}_sn_{str(epoch)}.pt\"))", "def _save_params(self, output_folder: str, 
checkpoint: int):\n arg_params, aux_params = self.module.get_params() # sync aux params across devices\n self.module.set_params(arg_params, aux_params)\n self.params = arg_params\n params_base_fname = C.PARAMS_NAME % checkpoint\n self.save_params_to_file(os.path.join(output_folder, params_base_fname))", "def save_checkpoint(self, model):\n # print(f\"save model {self.save_model_path}\")\n torch.save(model.state_dict(), self.save_model_path)", "def save(self, directory: Optional[str] = None, epoch: Optional[int] = None, filename: Optional[str] = None):\n if directory is None and self.save_dir:\n directory = self.save_dir\n\n if directory is None:\n if filename is None:\n raise ValueError(\"The argument `directory` or `filename` must be specified.\")\n else:\n path = filename\n else:\n directory_path = Path(directory)\n if not directory_path.exists():\n directory_path.mkdir(parents=True)\n\n if epoch is None:\n epoch_str = ctime().replace(\" \", \"_\")\n else:\n epoch_str = str(epoch)\n\n if not filename:\n filename = \"enchanter_checkpoints_epoch_{}.pth\".format(epoch_str)\n\n path = str(directory_path / filename)\n\n checkpoint = self.save_checkpoint()\n torch.save(checkpoint, path)\n\n model_name = self.model_name()\n self.experiment.log_model(model_name, str(path))", "def save_checkpoint(model, is_best, filename='./model/checkpoint.pth.tar'):\n if is_best:\n torch.save(model.state_dict(), filename) # save checkpoint\n else:\n print (\"=> Validation Accuracy did not improve\")", "def save_model(net, epoch, opt):\r\n if opt.multi_gpu == True:\r\n if epoch % opt.save_by_epoch == 0:\r\n torch.save(net.module, './model/epoch%d_batchsize%d.pth' % (epoch, opt.batch_size))\r\n print('The trained model is successfully saved at epoch %d' % (epoch))\r\n else:\r\n if epoch % opt.save_by_epoch == 0:\r\n torch.save(net, './model/epoch%d_batchsize%d.pth' % (epoch, opt.batch_size))\r\n print('The trained model is successfully saved at epoch %d' % (epoch))", "def save(self, folder):\n self.generator.save_weights('%s/generator.h5'%folder)\n self.critic.save_weights('%s/critic.h5'%folder)" ]
[ "0.73279613", "0.7212068", "0.7110705", "0.70985615", "0.7074814", "0.7049468", "0.7041478", "0.7008442", "0.7005684", "0.6975668", "0.6948612", "0.6930357", "0.69280964", "0.68722343", "0.6871008", "0.686374", "0.6840294", "0.67784363", "0.6768815", "0.6761419", "0.6749226", "0.67384505", "0.6732112", "0.6720184", "0.67102444", "0.6705523", "0.6703562", "0.66891444", "0.6683254", "0.6676604", "0.66725546", "0.666312", "0.6655545", "0.66503006", "0.66315603", "0.6630564", "0.66268355", "0.66161317", "0.66081226", "0.66043687", "0.65996456", "0.65916884", "0.65914434", "0.65832615", "0.6582654", "0.6577316", "0.6575192", "0.657489", "0.6568904", "0.6567614", "0.6552253", "0.65474266", "0.65202785", "0.65146106", "0.6510118", "0.6502318", "0.6501114", "0.6490838", "0.6488277", "0.64878345", "0.647564", "0.6465131", "0.6461828", "0.64509416", "0.6445251", "0.6442845", "0.64353824", "0.64222074", "0.6419493", "0.64114046", "0.6406433", "0.640558", "0.63988614", "0.6395137", "0.63782674", "0.6364677", "0.6336641", "0.6334147", "0.6323091", "0.6320751", "0.6313641", "0.63102895", "0.6294189", "0.6290109", "0.62840044", "0.62817144", "0.6278738", "0.6278187", "0.6263699", "0.6256394", "0.62560624", "0.62558943", "0.6254891", "0.62507635", "0.6224765", "0.62199455", "0.6214326", "0.621228", "0.6206325", "0.6203553" ]
0.7023109
7
Initialize the weights of a Pytorch module.
def init_weights(m: Union[torch.nn.Conv3d, torch.nn.BatchNorm3d]) -> None: import torch if isinstance(m, torch.nn.Conv3d): torch.nn.init.normal_(m.weight, 0, 0.01) elif isinstance(m, torch.nn.BatchNorm3d): torch.nn.init.constant_(m.weight, 1) torch.nn.init.constant_(m.bias, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):\n if isinstance(module, nn.Linear):\n if name.startswith('head'):\n nn.init.zeros_(module.weight)\n nn.init.constant_(module.bias, head_bias)\n else:\n if flax:\n # Flax defaults\n lecun_normal_(module.weight)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n else:\n # like MLP init in vit (my original init)\n nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n if 'mlp' in name:\n nn.init.normal_(module.bias, std=1e-6)\n else:\n nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Conv2d):\n lecun_normal_(module.weight)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.ones_(module.weight)\n nn.init.zeros_(module.bias)\n elif hasattr(module, 'init_weights'):\n # NOTE if a parent module contains init_weights method, it can override the init of the\n # child modules as this will be called in depth-first order.\n module.init_weights()", "def init_weights(self, module):\n if isinstance(module, (torch.nn.Linear, torch.nn.Embedding, torch.nn.LayerNorm)):\n module.weight.data.normal_(mean=0.0, std=self.initializer_range)\n if isinstance(module, (torch.nn.Linear, torch.nn.LayerNorm)) and module.bias is not None:\n module.bias.data.zero_()", "def _init_esim_weights(module):\n if isinstance(module, nn.Linear):\n nn.init.xavier_uniform_(module.weight.data)\n nn.init.constant_(module.bias.data, 0.0)\n\n elif isinstance(module, nn.LSTM):\n nn.init.xavier_uniform_(module.weight_ih_l0.data)\n nn.init.orthogonal_(module.weight_hh_l0.data)\n nn.init.constant_(module.bias_ih_l0.data, 0.0)\n nn.init.constant_(module.bias_hh_l0.data, 0.0)\n hidden_size = module.bias_hh_l0.data.shape[0] // 4\n module.bias_hh_l0.data[hidden_size:(2*hidden_size)] = 1.0\n\n if (module.bidirectional):\n nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)\n nn.init.orthogonal_(module.weight_hh_l0_reverse.data)\n nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)\n nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)\n module.bias_hh_l0_reverse.data[hidden_size:(2*hidden_size)] = 1.0", "def initialise_weights(self): \n \n def initialise_process(param):\n \n \"\"\"\n Initialises weights of a given parameter following either Xavier or Kaiming uniform or normal processes.\n \n : param (torch.Tensor):\n \n \"\"\"\n \n if self._initialisation_process == 'xavier_uniform':\n tnni.xavier_uniform_(param.data)\n elif self._initialisation_process == 'xavier_normal':\n tnni.xavier_normal_(param.data)\n elif self._initialisation_process == 'kaiming_uniform':\n tnni.kaiming_uniform_(param.data)\n elif self._initialisation_process == 'kaiming_normal':\n tnni.kaiming_normal_(param.data)\n \n if self._initialisation_process is not None:\n for m in self.modules():\n # Embedding\n if type(m) is nn.Embedding:\n tnni.normal_(self.embedding.weight)\n # RNN\n elif type(m) in [nn.GRU, nn.LSTM, nn.RNN]: \n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n initialise_process(param)\n #torch.nn.init.kaiming_normal_(param.data)\n elif 'weight_hh' in name:\n tnni.orthogonal_(param.data)\n elif 'bias' in name:\n # Bias initialised with zero will get the bias from\n # the forget gate\n param.data.fill_(0.0)\n param.data[self._hidden_size:self.directions*self._hidden_size].fill_(1.0)\n # Attention linear layer\n elif type(m) is nn.Linear:\n for name, param in m.named_parameters():\n if 'weight' in name:\n 
initialise_process(param.data)\n elif 'bias' in name:\n param.data.normal_()", "def _init_esim_weights(module):\n if isinstance(module, nn.Linear):\n nn.init.xavier_uniform_(module.weight.data)\n nn.init.constant_(module.bias.data, 0.0)\n\n elif isinstance(module, nn.LSTM):\n nn.init.xavier_uniform_(module.weight_ih_l0.data)\n nn.init.orthogonal_(module.weight_hh_l0.data)\n nn.init.constant_(module.bias_ih_l0.data, 0.0)\n nn.init.constant_(module.bias_hh_l0.data, 0.0)\n hidden_size = module.bias_hh_l0.data.shape[0] // 4\n module.bias_hh_l0.data[hidden_size:(2 * hidden_size)] = 1.0\n\n if module.bidirectional:\n nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)\n nn.init.orthogonal_(module.weight_hh_l0_reverse.data)\n nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)\n nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)\n module.bias_hh_l0_reverse.data[hidden_size:(2 * hidden_size)] = 1.0", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.01)\n elif isinstance(m, nn.BatchNorm2d):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def weights_init(mod):\n classname = mod.__class__.__name__\n if classname.find('Conv') != -1:\n mod.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n mod.weight.data.normal_(1.0, 0.02)\n mod.bias.data.fill_(0)", "def init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):\n if isinstance(module, nn.Linear):\n if name.startswith('head'):\n nn.init.zeros_(module.weight)\n nn.init.constant_(module.bias, head_bias)\n elif name.startswith('pre_logits'):\n lecun_normal_(module.weight)\n nn.init.zeros_(module.bias)\n else:\n if jax_impl:\n nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n if 'mlp' in name:\n nn.init.normal_(module.bias, std=1e-6)\n else:\n nn.init.zeros_(module.bias)\n else:\n trunc_normal_(module.weight, std=.02)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif jax_impl and isinstance(module, nn.Conv2d):\n # NOTE conv was left to pytorch default in my original init\n lecun_normal_(module.weight)\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n elif isinstance(module, 
(nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):\n nn.init.zeros_(module.bias)\n nn.init.ones_(module.weight)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def init_weights(self):\n with torch.no_grad():\n self._init_weights()", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def init_params(module):\n\n if isinstance(module, torch.nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.bias is not None:\n module.bias.data.zero_()\n if isinstance(module, torch.nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()", "def initialize_weights(self):\n weights_initializer.WeightsInitializer.initialize_layer_or_model(\n self._batch)", "def init_weights(self):\n if isinstance(self.pretrained, str):\n logger = get_root_logger()\n logger.info(f'load model from: {self.pretrained}')\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.01)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(model):\n ...", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def _init(module):\n classname = module.__class__.__name__\n if classname.find('Conv') != -1:\n try:\n nn.init.xavier_uniform_(module.weight.data)\n module.bias.data.fill_(0) # May fail.\n except AttributeError:\n pass", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def 
init_weights_(self):\n raise NotImplementedError", "def _initialize_weights(self):\n pass", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def setWeightInitializer(self,weights):\n self.init_w = weights", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def init_weights(self) -> None:\n nn.init.kaiming_normal_(self._U)\n nn.init.kaiming_normal_(self._W)\n nn.init.kaiming_normal_(self._V)\n\n nn.init.normal_(self._b)", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_weights(module, negative_slope=0):\n if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.kaiming_normal_(module.weight.data, negative_slope)\n module.bias.data.zero_()", "def weights_init(m):\n if type(m) == torch.nn.Linear:\n m.weight.data.normal_(0.0, 0.02)\n m.bias.data.fill_(0)", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)", "def init_weights(module: nn.Module, gain: float = 1) -> None:\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def init_weights(self, load_weights=None):\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member", "def _initialize_weights(self, inputs):\n if self.data_init:\n self._data_dep_init(inputs)\n else:\n self._init_norm()\n self._initialized = True", "def initialize_weights(self):\n tf.nest.map_structure(\n weights_initializer.WeightsInitializer.initialize_layer_or_model,\n self._layer_nest)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()", "def init_weights(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n return net", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = 
get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')", "def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0.1, std=0.01)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(m):\n\tif type(m) == nn.Linear:\n\t\ttorch.nn.init.xavier_normal(m.weight)\n\t\tm.bias.data.fill_(0.01)", "def init_weight(self):\n init_bn(self.norm0)", "def init_weights(self, pretrained: Optional[str] = None) -> None:\n if isinstance(pretrained, str):\n logger = MMLogger.get_current_instance()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n if self.zero_init_residual:\n\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError('pretrained must be a str or None')", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, 
nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) / np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def init_weights(self):\n self.transformer.init_weights()\n if self.loss_cls.use_sigmoid:\n bias_init = bias_init_with_prob(0.01)\n for m in self.cls_branches:\n nn.init.constant_(m[-1].bias, bias_init)", "def init_weights(self, pretrained=None):\n if pretrained is not None:\n self.pretrained = pretrained\n self.backbone.init_weights(self.pretrained)\n self.mesh_head.init_weights()\n if self.with_gan:\n self.discriminator.init_weights()", "def weight_initialization(m: nn.Module) -> None:\n if type(m) == nn.Linear:\n # nn.init.xavier_uniform_(m.weight, np.sqrt(2.)) # gain is sqrt(2) because we use ReLU\n nn.init.kaiming_uniform_(m.weight, a=np.sqrt(5))\n # nn.init.uniform(m.weight, 0., 0.)", "def init_weights(self):\n self._q_neuron.h(self._weights) \n self._q_neuron.x(self._weights)", "def init_weights(network):\n for m in network.modules():\n # normal convblock and skip convblock initialisation\n if isinstance(m, (ConvBlock, ConvBlockSkip)):\n if network.weights_init == 'normal':\n torch.nn.init.normal_(m.conv1.weight)\n torch.nn.init.normal_(m.conv2.weight)\n elif network.weights_init == 'orthogonal':\n torch.nn.init.orthogonal_(m.conv1.weight)\n torch.nn.init.orthogonal_(m.conv2.weight)\n elif network.weights_init == 'xavier_uniform':\n torch.nn.init.xavier_uniform_(m.conv1.weight)\n torch.nn.init.xavier_uniform_(m.conv2.weight)\n elif network.weights_init == 'xavier_normal':\n torch.nn.init.xavier_normal_(m.conv1.weight)\n torch.nn.init.xavier_normal_(m.conv2.weight)\n elif network.weights_init == 'kaiming_uniform':\n torch.nn.init.kaiming_uniform_(m.conv1.weight)\n torch.nn.init.kaiming_uniform_(m.conv2.weight)\n elif network.weights_init == 'kaiming_normal':\n torch.nn.init.kaiming_normal_(m.conv1.weight)\n torch.nn.init.kaiming_normal_(m.conv2.weight)\n m.conv1.bias.data.fill_(0.0)\n m.conv2.bias.data.fill_(0.0)\n # fixup block initialisation (see fixup paper for details)\n elif isinstance(m, 
ConvBlockFixup):\n nn.init.normal_(m.conv1.weight, mean=0, std=np.sqrt(\n 2 / (m.conv1.weight.shape[0] * np.prod(m.conv1.weight.shape[2:]))) * network.nb_conv_blocks ** (-0.5))\n nn.init.constant_(m.conv2.weight, 0)\n # linear layers\n elif isinstance(m, nn.Linear):\n if network.use_fixup:\n nn.init.constant_(m.weight, 0)\n elif network.weights_init == 'normal':\n torch.nn.init.normal_(m.weight)\n elif network.weights_init == 'orthogonal':\n torch.nn.init.orthogonal_(m.weight)\n elif network.weights_init == 'xavier_uniform':\n torch.nn.init.xavier_uniform_(m.weight)\n elif network.weights_init == 'xavier_normal':\n torch.nn.init.xavier_normal_(m.weight)\n elif network.weights_init == 'kaiming_uniform':\n torch.nn.init.kaiming_uniform_(m.weight)\n elif network.weights_init == 'kaiming_normal':\n torch.nn.init.kaiming_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n # LSTM initialisation\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n if network.weights_init == 'normal':\n torch.nn.init.normal_(param.data)\n elif network.weights_init == 'orthogonal':\n torch.nn.init.orthogonal_(param.data)\n elif network.weights_init == 'xavier_uniform':\n torch.nn.init.xavier_uniform_(param.data)\n elif network.weights_init == 'xavier_normal':\n torch.nn.init.xavier_normal_(param.data)\n elif network.weights_init == 'kaiming_uniform':\n torch.nn.init.kaiming_uniform_(param.data)\n elif network.weights_init == 'kaiming_normal':\n torch.nn.init.kaiming_normal_(param.data)\n elif 'weight_hh' in name:\n if network.weights_init == 'normal':\n torch.nn.init.normal_(param.data)\n elif network.weights_init == 'orthogonal':\n torch.nn.init.orthogonal_(param.data)\n elif network.weights_init == 'xavier_uniform':\n torch.nn.init.xavier_uniform_(param.data)\n elif network.weights_init == 'xavier_normal':\n torch.nn.init.xavier_normal_(param.data)\n elif network.weights_init == 'kaiming_uniform':\n torch.nn.init.kaiming_uniform_(param.data)\n elif network.weights_init == 'kaiming_normal':\n torch.nn.init.kaiming_normal_(param.data)\n elif 'bias' in name:\n param.data.fill_(0.0)\n return network", "def init_parameters(module: nn.Module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n # todo: check if fan_out is valid\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def weights_init(model):\n classname = model.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(model.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(model.weight.data, 1.0, 0.02)\n nn.init.constant_(model.bias.data, 0)", "def weights_init(self, m):\n if isinstance(m, nn.Conv1d):\n torch.nn.init.xavier_uniform(m.weight.data)\n m.bias.data.fill_(0.0)\n if isinstance(m, nn.LSTM):\n torch.nn.init.xavier_uniform(m.weight_ih_l0)\n torch.nn.init.orthogonal(m.weight_hh_l0)\n for names in m._all_weights:\n for name in filter(lambda n: \"bias\" in n, names):\n bias = getattr(m, name)\n n = bias.size(0)\n start, end = n // 4, n // 2\n bias.data[start:end].fill_(1.0)\n m.bias_ih_l0.data.fill_(0.0)", "def initialize_weights(model: nn.Module, transformer: bool = False) -> None:\n for param in model.parameters():\n if param.dim() == 1:\n if transformer:\n nn.init.constant_(param, 0.01)\n else:\n nn.init.constant_(param, 0)\n else:\n nn.init.xavier_normal_(param)", "def initialize_weights(m):\n if isinstance(m, nn.Linear) 
or isinstance(m, nn.ConvTranspose2d):\n init.xavier_uniform(m.weight.data)", "def init_weights(self):\n\n super().init_weights()\n\n init_type = None if self.init_cfg is None else self.init_cfg.get(\n 'type', None)\n if init_type != 'Pretrained' and self.with_tsa:\n for module in [\n self.fusion.feat_fusion, self.fusion.spatial_attn1,\n self.fusion.spatial_attn2, self.fusion.spatial_attn3,\n self.fusion.spatial_attn4, self.fusion.spatial_attn_l1,\n self.fusion.spatial_attn_l2, self.fusion.spatial_attn_l3,\n self.fusion.spatial_attn_add1\n ]:\n kaiming_init(\n module.conv,\n a=0.1,\n mode='fan_out',\n nonlinearity='leaky_relu',\n bias=0,\n distribution='uniform')", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def weights_init(m):\n if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif(type(m) == nn.BatchNorm2d):\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)", "def init_weights(self):\n \n self.w = np.random.randn(self.D) / np.sqrt(self.D)", "def init_weights(self, init_w=3e-3):\n self.l3.weight.data.uniform_(-init_w, init_w)\n self.l3.bias.data.uniform_(-init_w, init_w)", "def init_weights(self, init_w=3e-3):\n self.l3.weight.data.uniform_(-init_w, init_w)\n self.l3.bias.data.uniform_(-init_w, init_w)", "def _init_weights(self):\n nn.init.xavier_normal_(self.out.weight)", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def init_weights(self, pretrained=None):\n self.online_backbone.init_weights() # backbone\n for online_neck in self.online_necks:\n online_neck.init_weights(init_linear='kaiming') # projection\n\n for param_ol, param_tgt in zip(self.online_backbone.parameters(),\n self.target_backbone.parameters()):\n param_tgt.data.copy_(param_ol.data)\n for param_ol, param_tgt in zip(self.online_necks.parameters(),\n self.target_necks.parameters()):\n param_tgt.data.copy_(param_ol.data)\n # init the predictor in the head\n for head in self.heads:\n head.init_weights()", "def init_weights(m):\n if isinstance(m, nn.Conv2d):\n # Note that there is no bias due to BN\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))\n elif isinstance(m, nn.BatchNorm2d):\n zero_init_gamma = cfg.BN.ZERO_INIT_FINAL_GAMMA\n zero_init_gamma = hasattr(m, \"final_bn\") and m.final_bn and zero_init_gamma\n m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(mean=0.0, std=0.01)\n m.bias.data.zero_()", "def init_weights(self, pretrained=None):\n if pretrained is not None:\n self.pretrained = pretrained\n self.backbone.init_weights(self.pretrained)\n if self.with_necks:\n for neck in self.necks:\n if hasattr(neck, 'init_weights'):\n neck.init_weights()\n for head in 
self.heads:\n if hasattr(head, 'init_weights'):\n head.init_weights()", "def initialize_NN(self, m):\n\n if type(m) == nn.Linear:\n nn.init.xavier_uniform_(m.weight)\n # print(m.weight)", "def initialize_weights(self, weights_initializer, bias_initializer):\n wshapes = [\n [self.input_size, self.hidden_size[0]],\n [self.hidden_size[0], self.hidden_size[1]],\n [self.hidden_size[1], self.output_size]\n ]\n\n bshapes = [\n [1, self.hidden_size[0]],\n [1, self.hidden_size[1]],\n [1, self.output_size]\n ]\n\n self.weights = [init_weights(s, weights_initializer) for s in wshapes]\n self.biases = [init_weights(s, bias_initializer) for s in bshapes]\n\n self.trainable_variables = self.weights + self.biases", "def init_weights(self):\r\n constant_init(self.sampling_offsets, 0.)\r\n thetas = torch.arange(\r\n self.num_heads,\r\n dtype=torch.float32) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n for i in range(self.num_points):\r\n grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n self._is_init = True", "def init_weights(self, pretrained=None):\n if pretrained is not None:\n self.pretrained = pretrained\n for modal in self.modality:\n getattr(self.backbone, modal).init_weights(self.pretrained.get(modal, None))\n if hasattr(self, 'neck'):\n self.neck.init_weights()\n if hasattr(self, 'cls_head'):\n self.cls_head.init_weights()", "def init_weights(self, dims):\n self.W = np.random.normal(size=dims) * 0.0001", "def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.proprio_encoder = nn.Sequential(\n nn.Linear(8, 32),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(32, 64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(64, 128),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(128, 2 * self.z_dim),\n nn.LeakyReLU(0.1, inplace=True),\n )\n\n if initailize_weights:\n init_weights(self.modules())", "def __init__(self, weights):\n self._weights = weights", "def init_bert_params(module):\n\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.bias is not None:\n module.bias.data.zero_()\n if isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n if isinstance(module, MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def init_weights(model, fc_init_std=0.01):\n for m in model.modules():\n if isinstance(m, nn.Conv3d):\n \"\"\"\n Follow the initialization method proposed in:\n {He, Kaiming, et al.\n \"Delving deep into rectifiers: Surpassing human-level\n performance on imagenet classification.\"\n arXiv preprint arXiv:1502.01852 (2015)}\n \"\"\"\n c2_msra_fill(m, nonlinearity=('relu', 'leaky_relu')[0])\n # 
c2_xavier_fill(m)\n # nn.init.xavier_normal_(m.weight)\n # nn.init.xavier_uniform_(m.weight)\n # if m.bias is not None: # pyre-ignore\n # nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.InstanceNorm3d):\n m.weight.data.fill_(1.0)\n m.bias.data.zero_()\n if isinstance(m, nn.Linear): # This assumes nn.Linear is the final layers\n # TODO check to see if this is effective in this architecture since the final is a conv3d\n m.weight.data.normal_(mean=0.0, std=fc_init_std)\n m.bias.data.zero_()", "def InitWeights(self):\n self.w = -1 + 2 * np.random.rand(self.num_of_inputs,)\n self.w0 = -1 + 2 * np.random.rand()", "def _set_weights(self):\n self.Wc = torch.mm(self.TP.T, self.W).mm(self.TP)", "def weights_init(m):\n if (\n isinstance(m, nn.Linear)\n or isinstance(m, nn.EmbeddingBag)\n or isinstance(m, nn.Embedding)\n or isinstance(m, SparseLinear)\n ):\n nn.init.xavier_normal_(m.weight)", "def _initialize_weights(self):\n for _, cell in self.cells_and_names():\n if isinstance(cell, nn.Conv2d):\n cell.weight.set_data(orthogonal(cell.weight.shape, 0.6))\n if cell.bias is not None:\n cell.bias.set_data(\n init.initializer(init.Constant(0.01), cell.bias.shape,\n cell.bias.dtype))", "def init_weights2(net):\n\tfor m in net.modules():\n\t\tif isinstance(m, nn.Conv2d):\n\t\t\tnn.init.xavier_uniform_(m.weight)\n\t\t\tif m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\t\n\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\tnn.init.constant_(m.weight, 1)\n\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\n\t\telif isinstance(m, nn.Linear):\n\t\t\tnn.init.xavier_uniform_(m.weight)\n\n\t\t\tif m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\n\treturn net", "def init_weights(net, init_gain=0.02):\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n init.normal_(m.weight.data, 0.0, init_gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network')\n net.apply(init_func) # apply the initialization function <init_func>", "def initialize_weights(self, seed=None):\r\n if seed!=None:\r\n np.random.seed(seed)\r\n self.weights = np.random.randn(self.number_of_nodes,self.input_dimensions)", "def _init_modules(self, pretrained_weights=None):\n if pretrained_weights is None:\n if cfg.MODEL.LOAD_PRETRAINED_BACKBONE_WEIGHTS:\n print(\"\\n-------------------------------------------\")\n print(\"Load pre-trained ImageNet weights\")\n print(\"\\n-------------------------------------------\")\n weight_utils.load_caffe2_pretrained_weights(self, cfg.MODEL.PRETRAINED_BACKBONE_WEIGHTS)\n return\n\n pretrained_detectron = torch.load(pretrained_weights)\n\n if cfg.RPN.RPN_ON:\n load_layers = ['Conv_Body', 'RPN']\n else:\n load_layers = ['Conv_Body']\n\n mapping, _ = self.detectron_weight_mapping()\n state_dict = {}\n ckpt = pretrained_detectron['model']\n for name in ckpt:\n if name.split('.')[0] in load_layers:\n if mapping[name]:\n state_dict[name] = ckpt[name]\n self.load_state_dict(state_dict, strict=False)\n del pretrained_detectron\n torch.cuda.empty_cache()", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\n 
self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def init_weights(self):\r\n self.embedding.weight.data.uniform_(-0.1, 0.1)\r\n self.fc.bias.data.fill_(0)\r\n self.fc.weight.data.uniform_(-0.1, 0.1)", "def initialize_weights(self, seed=None):\n if seed != None:\n np.random.seed(seed)\n self.weights = np.random.randn(self.number_of_nodes, self.input_dimensions)", "def init_model(self, model_init):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if model_init == 'he_fout':\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif model_init == 'he_fin':\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n else:\n raise NotImplementedError\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n stdv = 1.0 / math.sqrt(m.weight.size(1))\n m.weight.data.uniform_(-stdv, stdv)\n if m.bias is not None:\n m.bias.data.zero_()", "def __init__(self, num_parameters=1, init=0.25):\n super(PReLU, self).__init__()\n self.num_parameters = num_parameters\n self.weight = Parameter(Tensor(num_parameters).fill_(init))", "def initWeights(self):\n self.weights = []\n self.bias = []\n for i, dim in enumerate(self.dimensions[1:]):\n self.weights.append(np.random.uniform(-1,1,(self.dimensions[i],dim)))\n self.bias.append(np.random.uniform(-1,1,dim))", "def init_weights(self, pretrained=None):\n\n super(EncoderDecoder_gan, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n self.backbone_gan.init_weights(pretrained=pretrained)\n self.decode_head.init_weights()\n # init GAN\n # self.discriminator.init_weight()\n # self.G_head.init_weights()\n if self.with_auxiliary_head:\n if isinstance(self.auxiliary_head, nn.ModuleList):\n for aux_head in self.auxiliary_head:\n aux_head.init_weights()\n else:\n self.auxiliary_head.init_weights()" ]
[ "0.77734", "0.7753587", "0.76126516", "0.76039255", "0.75937414", "0.7557966", "0.75483835", "0.75483835", "0.75483835", "0.7541337", "0.75397253", "0.7527526", "0.74981326", "0.7479258", "0.7456155", "0.74487233", "0.74385613", "0.74289656", "0.74223495", "0.74188006", "0.7390666", "0.7355017", "0.7322928", "0.73041874", "0.7290801", "0.725247", "0.7250708", "0.7203141", "0.71963996", "0.718846", "0.71807146", "0.7161181", "0.71259385", "0.71217954", "0.71107715", "0.70876", "0.7076905", "0.70622504", "0.70560426", "0.70481575", "0.70409447", "0.70243335", "0.70156145", "0.69991356", "0.69991356", "0.69991356", "0.69991356", "0.69818896", "0.6961957", "0.69595605", "0.6943721", "0.6929089", "0.6927725", "0.6908174", "0.689139", "0.688329", "0.68746215", "0.6872657", "0.68484735", "0.6821757", "0.6798332", "0.6789858", "0.6719415", "0.6713503", "0.66906285", "0.66906285", "0.6664038", "0.66605675", "0.66605675", "0.66525155", "0.6652108", "0.6633657", "0.663193", "0.662998", "0.66137195", "0.6600654", "0.65849996", "0.6577606", "0.6558788", "0.65549135", "0.65521544", "0.65400124", "0.6528775", "0.6524785", "0.6498938", "0.64972377", "0.6496245", "0.64940524", "0.6488828", "0.64747876", "0.6463073", "0.6456957", "0.64418703", "0.64418703", "0.64418703", "0.6437399", "0.64202416", "0.6416855", "0.64064837", "0.64038074", "0.6401574" ]
0.0
-1
Generates a human readable summary of the present segmentation model, writes it to logging.info, and stores the ModelSummary object inside the argument `model`.
def summary_for_segmentation_models(config: ModelConfigBase, model: DeviceAwareModule) -> None: assert isinstance(model, BaseModel) crop_size = config.crop_size if isinstance(crop_size, int): crop_size = (crop_size, crop_size, crop_size) try: model.generate_model_summary(crop_size, log_summaries_to_files=config.log_summaries_to_files) except AttributeError as e: logging.warning(f"summary_for_segmentation_models failed with exception {e}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:\n random_state = RandomStateSnapshot.snapshot_random_state()\n # There appears to be a bug in apex, where previous use (in training for example) causes problems\n # when another model is later built on the CPU (for example, before loading from a checkpoint)\n # https://github.com/NVIDIA/apex/issues/694\n # Hence, move the model to the GPU before doing model summary.\n if config.use_gpu:\n model = model.cuda()\n if isinstance(config, ScalarModelBase):\n # To generate the model summary, read the first item of the dataset. Then use the model's own\n # get_model_input function to convert the dataset item to input tensors, and feed them through the model.\n train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)\n train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))\n model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs\n # The model inputs may already be converted to float16, assuming that we would do mixed precision.\n # However, the model is not yet converted to float16 when this function is called, hence convert back to float32\n summary = ModelSummary(model)\n summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)\n elif config.is_segmentation_model:\n summary_for_segmentation_models(config, model)\n assert model.summarizer\n summary = model.summarizer # type: ignore\n else:\n raise ValueError(\"Don't know how to generate a summary for this type of model?\")\n RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)\n random_state.restore_random_state()", "def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")", "def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))", "def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))", "def save_summary(model, model_name, stage_no):\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x))\n short_model_summary = \"\\n\".join(stringlist)\n \n with open(eval_path+\"{}_model_summary_stage_{}.txt\".format(model_name, stage_no), \"w\") as text_file:\n print(short_model_summary, file=text_file)", "def print_summary(self):\n self.model.summary()", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def summarize_model(\n model: keras.Model, fig_dir: Union[str, None] = None\n) -> None:\n\n submodels = []\n for layer in model.layers:\n if isinstance(layer, TimeDistributed):\n submodels.append(layer.layer)\n\n for submodel in submodels:\n submodel.summary()\n model.summary()\n\n if fig_dir is not None:\n for submodel in submodels:\n keras.utils.plot_model(\n submodel, os.path.join(fig_dir, f'model_{submodel.name}.png'),\n dpi=300\n )\n keras.utils.plot_model(\n model, os.path.join(fig_dir, 'model_full.png'), dpi=300\n )", "def model_stats(opt, epoch, model):\n log = rlog.getLogger(opt.experiment + \".model\")\n if hasattr(opt, \"log\") and opt.log.detailed:\n # log histogram also\n assert isinstance(\n model, SVIModel\n ), \"This stat only makes sense for SVI models.\"\n for mu, std in zip(model.mu(), model.std()):\n log.put(mu=mu, std=std)\n 
log.trace(step=epoch, **model.summarize())\n log.reset()", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)", "def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()", "def test_get_summary_with_model(self):\n\t\t\n\t\tdescription = self.watcher.describe(model=self.model)\n\t\tself.assertEqual(11, len(description))\n\t\t\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.second_layer])\n\t\treturned_summary = self.watcher.get_summary(details)\n\t\t\n\t\tprint(returned_summary)\n\t\t\n\t\tsaved_summary = self.watcher.get_summary()\n\t\tself.assertEqual(returned_summary, saved_summary)", "def summary(self):\r\n print(self.model.summary())", "def summary(self):\n print(self.model.summary())", "def output_summary(self, v, vhat, sk, logged_matrics, train_dataset_label, val_dataset_label, summary_folder_path):\n\n mse = np.sum((v-vhat)**2)/len(v)\n train_loss = logged_matrics[\"train_loss\"]\n\n k = np.sum(p.numel() for p in self.parameters())\n\n numOfSamples = len(sk)\n aic = 2*k + numOfSamples*np.log(mse) + numOfSamples*(1+np.log(2*np.pi))\n\n summary_file = os.path.join(summary_folder_path, \"model_summary.txt\")\n if not os.path.isfile(summary_file):\n print(\"Created file \"+summary_file)\n with open(summary_file, \"w\") as output:\n output.write(\n \"Model Train_dataset_label Val_dataset_label Train_loss Test_loss AIC\\n\")\n else:\n print(summary_file +\n \" exists, model summary will be attached to the end of this file.\")\n\n with open(summary_file, \"a\") as output:\n model_name = self.version\n output.write(model_name + \" \" + train_dataset_label + \" \" +\n val_dataset_label + \" %f %f %f\\n\" % (train_loss, mse, aic))\n\n\n plt.scatter(sk, v, c=\"blue\", s=2, label=\"true\")\n plt.scatter(sk, vhat, c=\"red\", s=2, label=\"predict\")\n plt.legend()\n plt.xlabel(\"sk\")\n plt.ylabel(\"v\")\n\n plt.savefig(os.path.join(summary_folder_path, \"plots\", model_name+\".png\"))\n plt.show()\n\n np.savetxt(os.path.join(summary_folder_path, \"model_prediction\", model_name+\"_prediction.txt\"), np.column_stack((sk, v, vhat)), header=\"sk v vhat\", fmt='%.8f')\n\n\n print(\"Plot saved as\", os.path.join(summary_folder_path, \"plots\", model_name+\".png\"))\n print(\"Model prediction saved as\", os.path.join(summary_folder_path, \"model_prediction\", model_name+\"_prediction.txt\"))", "def log_model_analysis(\n logger, image, segmentation_image, model, indices_to_colors_map, void_color, colors_to_ignore):\n\n ground_truth_overlay_image = net.utilities.get_segmentation_overlaid_image(\n image, segmentation_image, colors_to_ignore)\n\n predicted_segmentation_cube = model.predict(image)\n\n predicted_segmentation_image = net.data.get_segmentation_image(\n predicted_segmentation_cube, indices_to_colors_map, void_color)\n\n predicted_overlay_image = 
net.utilities.get_segmentation_overlaid_image(\n image, predicted_segmentation_image, colors_to_ignore)\n\n logger.info(vlogging.VisualRecord(\n \"Data\", [image, segmentation_image, predicted_segmentation_image,\n ground_truth_overlay_image, predicted_overlay_image]))", "def summary(self):\n self.model.summary()", "def review_model(model): \n \n diagnose_model(model)\n \n plot_param_coef(model)\n \n plot_p_values(model)\n \n return", "def lme_summary(output_dir: str, model: LMEModel, tree: TreeNode) -> None:\n warnings.warn(\"This visualization are deprecated.\", DeprecationWarning)\n # log likelihood\n loglike = pd.Series({r.model.endog_names: r.model.loglike(r.params)\n for r in model.results})\n w, h = 500, 300 # plot width and height\n # Summary object\n smry = model.summary()\n\n t = _decorate_tree(tree, -loglike)\n\n p1 = radialplot(t, figsize=(800, 800))\n p1.title.text = 'Loglikelihood of submodels'\n p1.title_location = 'above'\n p1.title.align = 'center'\n p1.title.text_font_size = '18pt'\n\n # 2D scatter plot for prediction on PB\n p2 = _projected_prediction(model, plot_width=w, plot_height=h)\n p3 = _projected_residuals(model, plot_width=w, plot_height=h)\n hm_p = _heatmap_summary(model.pvalues.T, model.coefficients().T,\n plot_width=900, plot_height=400)\n\n # combine the cross validation, explained sum of squares tree and\n # residual plots into a single plot\n p = row(column(p2, p3), p1)\n p = column(hm_p, p)\n\n # Deposit all regression results\n _deposit_results(model, output_dir)\n\n index_fp = os.path.join(output_dir, 'index.html')\n with open(index_fp, 'w') as index_f:\n index_f.write('<html><body>\\n')\n index_f.write('<h1>Simplicial Linear Mixed Effects Summary</h1>\\n')\n index_f.write(smry.as_html())\n index_f.write(\n ('<th>Coefficients</th>\\n'\n '<a href=\"coefficients.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Coefficient pvalues</th>\\n'\n '<a href=\"pvalues.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>FDR corrected coefficient pvalues</th>\\n'\n '<a href=\"fdr-corrected-pvalues.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Predicted Balances</th>\\n'\n '<a href=\"predicted.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Residuals</th>\\n'\n '<a href=\"residuals.csv\">'\n 'Download as CSV</a><br>\\n')\n )\n\n diag_html = file_html(p, CDN, 'Diagnostic plots')\n index_f.write(diag_html)\n index_f.write('</body></html>\\n')", "def print_summary(self, print_level = 0):\n\n print(\"==========================\")\n print(\"= FUNtoFEM model summary =\")\n print(\"==========================\")\n print(\"Model name:\", self.name)\n print(\"Number of bodies:\", len(self.bodies))\n print(\"Number of scenarios:\", len(self.scenarios))\n print(\" \")\n print(\"------------------\")\n print(\"| Bodies summary |\")\n print(\"------------------\")\n for body in self.bodies:\n print(\"Body:\", body.id, body.name)\n print(\" coupling group:\", body.group)\n print(\" transfer scheme:\", type(body.transfer))\n print(\" shape parameteration:\", type(body.shape))\n for vartype in body.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(body.variables[vartype]))\n if print_level >= 0:\n for var in body.variables[vartype]:\n print(' variable:', var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)\n\n print(\" \")\n print(\"--------------------\")\n print(\"| Scenario summary |\")\n print(\"--------------------\")\n for scenario in self.scenarios:\n 
print(\"scenario:\", scenario.id, scenario.name)\n print(\" coupling group:\", scenario.group)\n print(\" steps:\", scenario.steps)\n print(\" steady?:\", scenario.steady)\n for func in scenario.functions:\n print(' function:', func.name, ', analysis_type:', func.analysis_type)\n print(' adjoint?', func.adjoint)\n if not scenario.steady:\n print(' time range', func.start, ',', func.stop)\n print(' averaging', func.averaging)\n\n\n for vartype in scenario.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(scenario.variables[vartype]))\n if print_level >= 0:\n for var in scenario.variables[vartype]:\n print(' variable:', var.id, var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)", "def model_architecture(self, filename=None):\n list_summary = []\n self.model.summary(print_fn=lambda x: list_summary.append(x))\n summary = \"\\n\".join(list_summary)\n\n if filename:\n with open(filename + '.txt', 'w') as f:\n f.write(summary)\n\n from keras.utils import plot_model\n plot_model(self.model, filename + '.jpg')\n\n return summary", "def summary(self):\n from statsmodels.iolib.summary import Summary\n from statsmodels.iolib.table import SimpleTable\n model = self.model\n title = model.__class__.__name__ + ' Model Results'\n\n dep_variable = 'endog'\n if isinstance(self.model.endog, pd.DataFrame):\n dep_variable = self.model.endog.columns[0]\n elif isinstance(self.model.endog, pd.Series):\n dep_variable = self.model.endog.name\n seasonal_periods = None if self.model.seasonal is None else self.model.seasonal_periods\n lookup = {'add': 'Additive', 'additive': 'Additive',\n 'mul': 'Multiplicative', 'multiplicative': 'Multiplicative', None: 'None'}\n transform = self.params['use_boxcox']\n box_cox_transform = True if transform else False\n box_cox_coeff = transform if isinstance(transform, str) else self.params['lamda']\n if isinstance(box_cox_coeff, float):\n box_cox_coeff = '{:>10.5f}'.format(box_cox_coeff)\n top_left = [('Dep. Variable:', [dep_variable]),\n ('Model:', [model.__class__.__name__]),\n ('Optimized:', [str(np.any(self.optimized))]),\n ('Trend:', [lookup[self.model.trend]]),\n ('Seasonal:', [lookup[self.model.seasonal]]),\n ('Seasonal Periods:', [str(seasonal_periods)]),\n ('Box-Cox:', [str(box_cox_transform)]),\n ('Box-Cox Coeff.:', [str(box_cox_coeff)])]\n\n top_right = [\n ('No. 
Observations:', [str(len(self.model.endog))]),\n ('SSE', ['{:5.3f}'.format(self.sse)]),\n ('AIC', ['{:5.3f}'.format(self.aic)]),\n ('BIC', ['{:5.3f}'.format(self.bic)]),\n ('AICC', ['{:5.3f}'.format(self.aicc)]),\n ('Date:', None),\n ('Time:', None)]\n\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n formatted = self.params_formatted # type: pd.DataFrame\n\n def _fmt(x):\n abs_x = np.abs(x)\n scale = 1\n if abs_x != 0:\n scale = int(np.log10(abs_x))\n if scale > 4 or scale < -3:\n return '{:>20.5g}'.format(x)\n dec = min(7 - scale, 7)\n fmt = '{{:>20.{0}f}}'.format(dec)\n return fmt.format(x)\n\n tab = []\n for _, vals in formatted.iterrows():\n tab.append([_fmt(vals.iloc[1]),\n '{0:>20}'.format(vals.iloc[0]),\n '{0:>20}'.format(str(bool(vals.iloc[2])))])\n params_table = SimpleTable(tab, headers=['coeff', 'code', 'optimized'],\n title=\"\",\n stubs=list(formatted.index))\n\n smry.tables.append(params_table)\n\n return smry", "def create_summary_and_adjust_model_for_gpus(self) -> None:\n if self._model is None:\n raise ValueError(\"Model must be created before it can be adjusted.\")\n\n if self.config.is_segmentation_model:\n summary_for_segmentation_models(self.config, self._model)\n # Prepare for mixed precision training and data parallelization (no-op if already done).\n # This relies on the information generated in the model summary.\n self.adjust_model_for_gpus()", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def set_up_summary_writer(model_config,\n sess):\n\n paths_config = model_config.paths_config\n\n logdir = paths_config.logdir\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n\n logdir = logdir + '/eval'\n\n datetime_str = str(datetime.datetime.now())\n summary_writer = tf.summary.FileWriter(logdir + '/' + datetime_str,\n sess.graph)\n\n global_summaries = set([])\n summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))\n summary_merged = summary_utils.summaries_to_keep(summaries,\n global_summaries,\n histograms=False,\n input_imgs=False,\n input_bevs=False)\n\n return summary_writer, summary_merged", "def create_summary_writer(model, data_loader, log_dir):\n writer = SummaryWriter(log_dir=log_dir)\n data_loader_iter = iter(data_loader)\n x = next(data_loader_iter)\n try:\n writer.add_graph(model, x)\n except Exception as e:\n warnings.warn(\"Failed to save model graph: {}\".format(e))\n return writer", "def get_model_summary(self):\n\n summary = self._model[0].get_model_summary()\n lower_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[0])\n upper_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[1])\n summary_title = 'Segment model range: ' \\\n + lower_bound \\\n + ' <= ' + self._explanatory_variables[0] \\\n + ' < ' + upper_bound\n summary.tables[0].title = summary_title\n\n number_of_segments = self.get_number_of_segments()\n\n spacer_table = SimpleTable(data=['=' * 50])\n\n for i in range(1, number_of_segments):\n segment_model_summary = 
self._model[i].get_model_summary()\n lower_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[i])\n upper_bound = self._FLOAT_STRING_FORMAT.format(self._break_points[i + 1])\n summary_title = 'Segment model range: ' \\\n + lower_bound \\\n + ' <= ' + self._explanatory_variables[0] \\\n + ' < ' + upper_bound\n segment_model_summary.tables[0].title = summary_title\n summary.tables.extend([spacer_table] + segment_model_summary.tables)\n\n return summary", "def get_model_summary(self):\n\n summary = Summary()\n\n # add the model equation with estimated parameters\n model_equation = self._get_model_equation()\n summary.tables.append(model_equation)\n\n # add the parameter summary\n params_summary = self._get_params_summary()\n summary.tables.append(params_summary)\n\n res = self._model.fit()\n\n # add more summary statistics\n gleft = self._get_left_summary_table(res)\n gright = self._get_right_summary_table(res)\n summary.add_table_2cols(res, gleft=gleft, gright=gright)\n\n # add extreme influence and outlier table\n high_leverage = ('High leverage:', self._FLOAT_STRING_FORMAT.format(3 * res.params.shape[0] / res.nobs))\n extreme_outlier = ('Extreme outlier (Standardized residual):', self._FLOAT_STRING_FORMAT.format(3))\n dfn = res.params.shape[0] + 1\n dfd = res.nobs + res.params.shape[0]\n high_influence_cooksd = (\"High influence (Cook's D)\",\n self._FLOAT_STRING_FORMAT.format(stats.f.ppf(0.9, dfn=dfn, dfd=dfd)))\n high_influence_dffits = (\"High influence (DFFITS)\",\n self._FLOAT_STRING_FORMAT.format(2 * np.sqrt(res.params.shape[0] / res.nobs)))\n influence_and_outlier_table_data = [high_leverage,\n extreme_outlier,\n high_influence_cooksd,\n high_influence_dffits]\n influence_and_outlier_table = SimpleTable(data=influence_and_outlier_table_data)\n summary.tables.append(influence_and_outlier_table)\n\n return summary", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def summarize(self, *args, **kwargs) -> Optional[ModelSummary]:\n summary = super().summarize(*args, **kwargs)\n save_txt(summary, \"./network\")\n return summary", "def summary(self):\n\n print(\n \"\\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs.\"\n % (self.dataset_name, self.maxlen, self.charset, self.epochs)\n )\n\n print(\n \"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f.\"\n % (\n self.noise_std,\n self.lstm_dim,\n self.dec_layers,\n self.td_dense_dim,\n self.batch_size,\n self.codelayer_dim,\n self.lr,\n )\n )", "def logging_init(model, graph):\n # Add ops to record summaries for loss and accuracy...\n train_loss = tf.summary.scalar(\"train_loss\", model.loss)\n train_accuracy = tf.summary.scalar(\"train_accuracy\", model.accuracy)\n # ...then merge these ops into one single op so that they easily be run\n # together\n train_summary_ops = tf.summary.merge([train_loss, train_accuracy])\n # Same ops, but with different names, so that train/test results show up\n # separately in TensorBoard\n test_loss = tf.summary.scalar(\"test_loss\", model.loss)\n test_accuracy = tf.summary.scalar(\"test_accuracy\", model.accuracy)\n test_summary_ops = tf.summary.merge([test_loss, test_accuracy])\n\n timestamp = int(time.time())\n run_log_dir = os.path.join(LOGS_DIR, str(timestamp))\n os.makedirs(run_log_dir)\n # (this step 
also writes the graph to the events file so that\n # it shows up in TensorBoard)\n summary_writer = tf.summary.FileWriter(run_log_dir, graph)\n\n return train_summary_ops, test_summary_ops, summary_writer", "def summary(self):\n return self.model.summary()", "def visualize_model(self):\n if self.model is None:\n print(\"%s.visualize: implement me\" % (self.__class__.__name__))", "def save(model, save_name):\n dirs = configparser.ConfigParser()\n dirs.read(\"config/dir_config.ini\")\n\n save_name = os.path.splitext(save_name)[0]\n path = os.path.join(dirs[\"save_dirs\"][\"models\"], save_name + \".h5\")\n info = os.path.join(dirs[\"save_dirs\"][\"models\"], save_name + \"_info.txt\")\n\n with open(info, \"w\") as file:\n model.summary(print_fn=lambda x: file.write(f\"{x}\\n\"))\n model.save(path, overwrite=False)", "def print_model_status(model):\n\tstatus = model.status\n\tif status == 2:\n\t\tgrb_message = 'Model solved to optimality'\n\tif status == 8:\n\t\tgrb_message = 'Model hits node limit'\n\tif status == 9:\n\t\tgrb_message = 'Model hits time limit'\n\ttry:\n\t\tprint grb_message\n\texcept NameError:\n\t\tprint 'unknown model exist status code: {}'.format(status)", "def save_model(model, save_folder_path, model_name, model_ext):\n\n \n assert model_ext in [\".h5\", \".hdf5\"]\n assert os.path.isdir(save_folder_path)==True, \"model_folder_path is not a folder.\"\n\n print(\"[INFO] Saving model to {0}/\".format(save_folder_path))\n model_path = \"{}/{}\".format(save_folder_path, model_name + model_ext)\n model.save(model_path)\n\n with open('{}/{}'.format(save_folder_path, \"model_summary.txt\"), 'w') as f:\n with redirect_stdout(f):\n model.summary()\n \n # visualize model architecture\n plot_model(model, to_file=\"{}/{}.png\".format(save_folder_path, model_name),\n show_shapes=True)", "def summarize(self):\n # go recursively in the model architecture\n summary_str = self.recursive_summarize(self, 0, self.name)\n\n # Sum the model parameters.\n num_total_params = sum([np.prod(p.size()) for p in self.parameters()])\n mod_trainable_params = filter(lambda p: p.requires_grad, self.parameters())\n num_trainable_params = sum([np.prod(p.size()) for p in mod_trainable_params])\n\n summary_str += 'Total Trainable Params: {}\\n'.format(num_trainable_params)\n summary_str += 'Total Non-trainable Params: {}\\n'.format(num_total_params-num_trainable_params) \n summary_str += '='*80 + '\\n'\n\n return summary_str", "def print_model_generation(model):\n print('g1 = {} MW'.format(model.g[1].value))\n print('g2 = {} MW'.format(model.g[2].value))", "def torch_summarize(model, show_weights=True, show_parameters=True):\n tmpstr = model.__class__.__name__ + ' (\\n'\n for key, module in model._modules.items():\n # if it contains layers let call it recursively to get params and weights\n if type(module) in [\n torch.nn.modules.container.Container,\n torch.nn.modules.container.Sequential\n ]:\n modstr = torch_summarize(module)\n else:\n modstr = module.__repr__()\n modstr = _addindent(modstr, 2)\n\n params = sum([np.prod(p.size()) for p in module.parameters()])\n weights = tuple([tuple(p.size()) for p in module.parameters()])\n\n tmpstr += ' (' + key + '): ' + modstr \n if show_weights:\n tmpstr += ', weights={}'.format(weights)\n if show_parameters:\n tmpstr += ', parameters={}'.format(params)\n tmpstr += '\\n' \n\n tmpstr = tmpstr + ')'\n return tmpstr", "def get_summary(self):\n return self.model.summary()", "def test_get_summary_with_new_model(self):\n\t\t\n\t\tnew_model = 
models.vgg13(weights='VGG13_Weights.IMAGENET1K_V1').state_dict()\n\t\tdescription = self.watcher.describe(model=new_model)\n\t\tself.assertEqual(13, len(description))\n\t\t\n\t\tfc3_layer = description.layer_id.to_numpy()[-1]\n\t\tdetails = self.watcher.analyze(model=new_model, layers=fc3_layer)\n\t\treturned_summary = self.watcher.get_summary(details)\n\t\t\n\t\tprint(returned_summary)\n\t\t\n\t\tsaved_summary = self.watcher.get_summary()\n\t\tself.assertEqual(returned_summary, saved_summary)", "def torch_summarize(model, show_weights=True, show_parameters=True):\n\n tmpstr = model.__class__.__name__ + ' (\\n'\n for key, module in model._modules.items():\n # if it contains layers let call it recursively to get params and\n # weights\n if type(module) in [\n torch.nn.modules.container.Container,\n torch.nn.modules.container.Sequential\n ]:\n modstr = torch_summarize(module)\n else:\n modstr = module.__repr__()\n modstr = _addindent(modstr, 2)\n\n params = sum([np.prod(p.size()) for p in module.parameters()])\n weights = tuple([tuple(p.size()) for p in module.parameters()])\n\n tmpstr += ' (' + key + '): ' + modstr\n if show_weights:\n tmpstr += ', weights={}'.format(weights)\n if show_parameters:\n tmpstr += ', parameters={}'.format(params)\n tmpstr += '\\n'\n\n tmpstr = tmpstr + ')'\n return tmpstr", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n #print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n\n with open(os.path.join(self.train_dir,'model_arch.txt'), 'a') as fp:\n print(model, file=fp)\n print(name, file=fp)\n print(\"The number of parameters: {}\".format(num_params),file=fp)", "def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n 
pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )", "def LogProgress(model, writer, test_loader, epoch, device): \n\n model.eval() \n sequential = test_loader\n sample_batched = next(iter(sequential))\n \n image = torch.Tensor(sample_batched[\"image\"]).to(device)\n depth = torch.Tensor(sample_batched[\"depth\"]).to(device)\n \n if epoch == 0:\n writer.add_image(\"Train.1.Image\", vision_utils.make_grid(image.data, nrow=6, normalize=True), epoch)\n if epoch == 0:\n writer.add_image(\"Train.2.Image\", colorize(vision_utils.make_grid(depth.data, nrow=6, normalize=False)), epoch)\n \n output = DepthNorm(model(image))\n\n writer.add_image(\"Train.3.Ours\", colorize(vision_utils.make_grid(output.data, nrow=6, normalize=False)), epoch)\n writer.add_image(\"Train.4.Diff\", colorize(vision_utils.make_grid(torch.abs(output-depth).data, nrow=6, normalize=False)), epoch)\n \n del image\n del depth\n del output", "def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")", "def summary(self, test_type='t-test'):\n summary = f'Results for running {self.cv_method} evaluation for {self.method} '\n summary += f'on {self.n_model} models:\\n\\n'\n name_length = max([max(len(m.name) for m in self.models) + 1, 6])\n means = self.get_means()\n sems = self.get_sem()\n if means is None:\n means = np.nan * np.ones(self.n_model)\n if sems is 
None:\n sems = np.nan * np.ones(self.n_model)\n try:\n p_zero = self.test_zero(test_type=test_type)\n p_noise = self.test_noise(test_type=test_type)\n except ValueError:\n p_zero = np.nan * np.ones(self.n_model)\n p_noise = np.nan * np.ones(self.n_model)\n # header of the results table\n summary += 'Model' + (' ' * (name_length - 5))\n summary += '| Eval \\u00B1 SEM |'\n summary += ' p (against 0) |'\n summary += ' p (against NC) |\\n'\n summary += '-' * (name_length + 51)\n summary += '\\n'\n for i, m in enumerate(self.models):\n summary += m.name + (' ' * (name_length - len(m.name)))\n summary += f'| {means[i]: 5.3f} \\u00B1 {sems[i]:4.3f} |'\n if p_zero[i] < 0.001:\n summary += ' < 0.001 |'\n else:\n summary += f'{p_zero[i]:>13.3f} |'\n if p_noise[i] < 0.001:\n summary += ' < 0.001 |'\n else:\n summary += f'{p_noise[i]:>14.3f} |'\n summary += '\\n'\n summary += '\\n'\n if self.cv_method == 'crossvalidation':\n summary += 'No p-values available as crossvalidation provides no variance estimate'\n elif test_type == 't-test':\n summary += 'p-values are based on uncorrected t-tests'\n elif test_type == 'bootstrap':\n summary += 'p-values are based on percentiles of the bootstrap samples'\n elif test_type == 'ranksum':\n summary += 'p-values are based on ranksum tests'\n return summary", "def print_model_description(verbose: bool):\n\n desc = get_model_description()\n description = f\"\"\"\nModel ID: {desc['id']}\nRelease Date: {desc['releaseDate']}\nCavity Labels: {desc['cavityLabels']}\nFault Labels: {desc['faultLabels']}\nTraining Data: {desc['trainingData']}\nBrief: {desc['brief']}\n\"\"\"\n if verbose:\n description += os.linesep + f\"Details: {desc['details']}\"\n print(description)", "def torch_summarize(model, show_weights=True, show_parameters=True):\n tmpstr = model.__class__.__name__ + ' (\\n'\n for key, module in model._modules.items():\n # if it contains layers let call it recursively to get params and weights\n if type(module) in [\n torch.nn.modules.container.Container,\n torch.nn.modules.container.Sequential\n ]:\n modstr = torch_summarize(module)\n else:\n modstr = module.__repr__()\n modstr = _addindent(modstr, 2)\n\n params = sum([np.prod(p.size()) for p in module.parameters()])\n weights = tuple([tuple(p.size()) for p in module.parameters()])\n\n tmpstr += ' (' + key + '): ' + modstr\n if show_weights:\n tmpstr += ', weights={}'.format(weights)\n if show_parameters:\n tmpstr += ', parameters={}'.format(params)\n tmpstr += '\\n'\n\n tmpstr = tmpstr + ')'\n return tmpstr", "def log_model(self, model_name=\"fixmatch_model\"):\n \n assert hasattr(self, \"_mlflow\"), \"need to run track_with_mlflow() first\"\n from mlflow.keras import log_model\n log_model(self._models[\"full\"], model_name)", "def create_log(self):\n self.model.graph.get_stats()\n out = self.model.graph.summary\n out[\"training_error\"] = zip(self.train_it, self.train_err)\n out[\"validation_error\"] = zip(self.validation_it, self.validation_err)\n with open(self.log, \"w\") as f:\n f.write(json.dumps(out, default=defaultencode))", "def save_arch(model, save_folder):\n with open(save_folder + '/architecture.txt','w') as a_save:\n model.summary(print_fn=lambda x: a_save.write(x + '\\n'))", "def print_summary(self):\n self.network.print_summary()", "def Plot_model_diagram(model, save= True):\n if save:\n keras.utils.plot_model(model, to_file= \"images/CNN_SE_model.png\")\n else:\n keras.utils.plot_model(model)", "def summary_info(self):\n return [('model_architecture', self.model_architecture),\n ('input_size', 
self.input_size),\n ('output_size', self.output_size),\n ('hidden_layers', self.hidden_layers),\n ('learn_rate', self.learn_rate),\n ('drop_p', self.drop_p),\n ('current_epoch', self.model.current_epoch)]", "def visualize_model(model=None, filename=\"InceptionV3_visualization.png\"):\n if model is None:\n p_model = InceptionV3(weights='imagenet', include_top=False)\n else:\n p_model = model\n keras.utils.plot_model(p_model, to_file=filename)", "def add_summary(self):\n merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.FLAGS.model_dir, self.session.graph)", "def ols_summary(output_dir: str, model: OLSModel,\n tree: TreeNode) -> None:\n warnings.warn(\"This visualization are deprecated.\", DeprecationWarning)\n # Cross validation\n w, h = 500, 300 # plot width and height\n\n # Explained sum of squares\n ess = model.ess\n # Summary object\n _k, _l = model.kfold(), model.lovo()\n smry = model.summary(_k, _l)\n _deposit_results(model, output_dir)\n t = _decorate_tree(tree, ess)\n\n p1 = radialplot(t, figsize=(800, 800))\n p1.title.text = 'Explained Sum of Squares'\n p1.title_location = 'above'\n p1.title.align = 'center'\n p1.title.text_font_size = '18pt'\n\n # 2D scatter plot for prediction on PB\n p2 = _projected_prediction(model, plot_width=w, plot_height=h)\n p3 = _projected_residuals(model, plot_width=w, plot_height=h)\n hm_p = _heatmap_summary(model.pvalues.T, model.coefficients().T)\n\n # combine the cross validation, explained sum of squares tree and\n # residual plots into a single plot\n p = row(column(p2, p3), p1)\n p = column(hm_p, p)\n index_fp = os.path.join(output_dir, 'index.html')\n with open(index_fp, 'w') as index_f:\n index_f.write('<html><body>\\n')\n index_f.write('<h1>Simplicial Linear Regression Summary</h1>\\n')\n index_f.write(smry.as_html())\n index_f.write(\n ('<th>Coefficients</th>\\n'\n '<a href=\"coefficients.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Coefficient pvalues</th>\\n'\n '<a href=\"pvalues.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>FDR corrected coefficient pvalues</th>\\n'\n '<a href=\"fdr-corrected-pvalues.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Predicted Balances</th>\\n'\n '<a href=\"predicted.csv\">'\n 'Download as CSV</a><br>\\n'\n '<th>Residuals</th>\\n'\n '<a href=\"residuals.csv\">'\n 'Download as CSV</a><br>\\n')\n )\n\n plot_html = file_html(p, CDN, 'Diagnostics')\n index_f.write(plot_html)\n index_f.write('</body></html>\\n')", "def summary(self):\n if self.model_type == 2:\n if self.std is None:\n print(\n dedent(\n f\"\"\"\\\n Oaxaca-Blinder Two-fold Effects\n Unexplained Effect: {self.params[0]:.5f}\n Explained Effect: {self.params[1]:.5f}\n Gap: {self.params[2]:.5f}\"\"\"\n )\n )\n else:\n print(\n dedent(\n \"\"\"\\\n Oaxaca-Blinder Two-fold Effects\n Unexplained Effect: {:.5f}\n Unexplained Standard Error: {:.5f}\n Explained Effect: {:.5f}\n Explained Standard Error: {:.5f}\n Gap: {:.5f}\"\"\".format(\n self.params[0],\n self.std[0],\n self.params[1],\n self.std[1],\n self.params[2],\n )\n )\n )\n if self.model_type == 3:\n if self.std is None:\n print(\n dedent(\n f\"\"\"\\\n Oaxaca-Blinder Three-fold Effects\n Endowment Effect: {self.params[0]:.5f}\n Coefficient Effect: {self.params[1]:.5f}\n Interaction Effect: {self.params[2]:.5f}\n Gap: {self.params[3]:.5f}\"\"\"\n )\n )\n else:\n print(\n dedent(\n f\"\"\"\\\n Oaxaca-Blinder Three-fold Effects\n Endowment Effect: {self.params[0]:.5f}\n Endowment Standard Error: {self.std[0]:.5f}\n Coefficient Effect: {self.params[1]:.5f}\n Coefficient 
Standard Error: {self.std[1]:.5f}\n Interaction Effect: {self.params[2]:.5f}\n Interaction Standard Error: {self.std[2]:.5f}\n Gap: {self.params[3]:.5f}\"\"\"\n )\n )", "def printModel(self, model):\n print(\"[L DIAG] startLoop =\", model.evaluate(self.startLoop))\n print(\"[L DIAG] endLoop =\", model.evaluate(self.endLoop))\n print(\"[L DIAG] projStartStateFaulty =\", model.evaluate(self.projStartStateFaulty))\n print(\"[L DIAG] projEndStateFaulty =\", model.evaluate(self.projEndStateFaulty))\n print(\"[L DIAG] projStartStateNormal =\", model.evaluate(self.projStartStateNormal))\n print(\"[L DIAG] projEndStateNormal =\", model.evaluate(self.projEndStateNormal))\n\n print(\"[L DIAG] stateFaultyPath: \")\n self.printOneIntArray(model, self.stateFaultyPath)\n print(\"[L DIAG] stateNormalPath: \")\n self.printOneIntArray(model, self.stateNormalPath)\n\n print()\n super().printModel(model)", "def save_architecture(model, path_out):\n # Redirect the print output the a textfile\n orig_stdout = sys.stdout\n # and store the architecture\n f = file(os.path.join(path_out, \"architecture.txt\"), 'w')\n sys.stdout = f\n model.summary()\n # Reset the print output direction\n sys.stdout = orig_stdout\n f.close()\n\n open(os.path.join(path_out, \"config.json\"), 'w').write(model.to_json())", "def test_get_summary_no_model(self):\n\t\t\n\t\tdescription = self.watcher.describe(model=self.model)\n\t\tself.assertEqual(11, len(description))\n\t\t\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.fc2_layer])\n\t\treturned_summary = self.watcher.get_summary(details)\n\t\t\n\t\tprint(returned_summary)\n\t\t\n\t\tsaved_summary = self.watcher.get_summary()\n\t\tself.assertEqual(returned_summary, saved_summary)", "def info(model: str = None) -> dict:\n model_instance = get_model(model)\n log.debug(\"Get info for \" + str(model_instance))\n return model_instance.info()", "def save_model(self):\n\n print('Save model')\n self.feature_extractor.save_weights(\n self.path_save_model + self.name_model + '.h5')\n\n print('Mean and std')\n np.save(self.path_save_model + 'mean.npy', self.mean)\n np.save(self.path_save_model + 'std.npy', self.std)", "def save_model(model):\n # ***\n # Please remove the comment to enable model save.\n # However, it will overwrite the baseline model we provided.\n # ***\n model.save(\"model/model.h5\")\n print(\"Model Saved Successfully.\")", "def dispPowerSum(model):\n print(\"*** System Power Overview ***\")\n print(\"Pm:\\t%.3f\" % model.ss_Pm)\n print(\"Pe:\\t%.3f\" % model.ss_Pe)\n print(\"Pacc:\\t%.3f\" % model.ss_Pacc)\n print(\"Pload:\\t%.3f\" % model.ss_Pload)\n print(\"Ploss:\\t%.3f\" % model.PLosses)\n print(\"*_*\")\n #NOTE: Q values are meaningless until Shunts and SVDs are accounted for\n print(\"Qgen:\\t%.3f\" % model.ss_Qgen)\n print(\"Qload:\\t%.3f\" % model.ss_Qload)\n print(\"Qloss:\\t%.3f\" % model.QLosses)\n print(\"***_______________________***\")", "def model_info(model):\n return juju.CLIENT.Client(request=\"ModelInfo\",\n params={\"Name\": model})", "def execute_summary(self, step):\n with self.summary_writer.as_default():\n tf.summary.scalar('bias', self.core.fmlayer.b, step=step)\n tf.summary.scalar('regularization_penalty', self.regularization, step=step)\n tf.summary.scalar('loss', self.reduced_loss, step=step)\n tf.summary.scalar('target', self.target, step=step)", "def define_model(model):\n global log_data_likelihood, log_priors, num_params, file_labels, labels, prior_xs, prior_pdfs\n num_prior_pts = 1001\n pic50_lower = -4.\n pic50_upper = 
14.\n hill_lower = 0.\n hill_upper = 6.\n if model == 1:\n num_params = 2\n log_data_likelihood = log_data_likelihood_model_1_capped\n log_priors = log_priors_model_1\n labels = [r\"$pIC50$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0], loc=mu, scale=s),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower),[0,0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n st.gamma.pdf(prior_xs[1], sigma_shape, loc=sigma_loc, scale=sigma_scale)]\n elif model == 2:\n num_params = 3\n log_data_likelihood = log_data_likelihood_model_2_capped\n log_priors = log_priors_model_2\n labels = [r\"$pIC50$\", r\"$Hill$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','Hill','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(hill_lower, hill_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.concatenate(([hill_uniform_lower-2,hill_uniform_lower],\n np.linspace(hill_uniform_lower, hill_uniform_upper, num_prior_pts),\n [hill_uniform_upper,hill_uniform_upper+2])),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0],loc=mu,scale=s),\n # st.fisk.pdf(prior_xs[1],c=beta,scale=alpha),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n # np.concatenate(([0, 0], np.ones(num_prior_pts) / (1. * sigma_uniform_upper - sigma_uniform_lower), [0, 0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n np.concatenate(([0,0],np.ones(num_prior_pts) / (1. 
* hill_uniform_upper - hill_uniform_lower),[0,0])),\n st.gamma.pdf(prior_xs[2], sigma_shape, loc=sigma_loc, scale=sigma_scale)]", "def get_summary_model(processed_text, model_type, number_topics):\n\n if model_type == 'LDA':\n count_model = CountVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return count_model, LDA(n_components=number_topics, learning_method='batch').fit(count_model.fit_transform(processed_text))\n if model_type == 'LSA':\n tf_idf_model = TfidfVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return tf_idf_model, TruncatedSVD(n_components=number_topics, algorithm='randomized', n_iter=100, random_state=122).fit(tf_idf_model.transform(processed_text))\n else:\n tf_idf_model = TfidfVectorizer(ngram_range=(1, 1)).fit(processed_text)\n return tf_idf_model, NMF(n_components=number_topics, init='random', random_state=0).fit(tf_idf_model.transform(processed_text))", "def model_analysis(self, model_name: str, history) -> None:\n # probabilites\n y_pred_prob = self.recognizer.predict(self.X_test)\n # most likely class\n y_pred = np.argmax(y_pred_prob, axis=1)\n # compare true and predicted classes on test set\n\n # path handling for writing to file\n output_dir = Path(os.environ[\"MODEL_DATA\"]) / model_name\n out_name = \"classification_report.txt\"\n out_path = output_dir / out_name\n\n acc = history.history[\"accuracy\"]\n val_acc = history.history[\"val_accuracy\"]\n loss = history.history[\"loss\"]\n val_loss = history.history[\"val_loss\"]\n\n epochs = range(1, len(acc) + 1)\n\n # plot accuracies and losses with respect to epochs\n plt.plot(epochs, acc, \"r\", label=\"Train accuracy\")\n plt.plot(epochs, val_acc, \"b\", label=\"Val accuracy\")\n\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.legend()\n\n plt.savefig(output_dir / \"acc-plot\")\n\n plt.figure()\n plt.plot(epochs, loss, \"r\", label=\"Training loss\")\n plt.plot(epochs, val_loss, \"b\", label=\"Val loss\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend()\n\n plt.savefig(output_dir / \"loss-plot\")\n\n # create, print and write to file a sklearn classification report\n print(set(self.y_test) - set(y_pred))\n report = classification_report(self.y_test, y_pred)\n print(report)\n with open(out_path, \"w\") as f:\n f.write(report)\n\n self.make_heatmap(y_pred, output_dir)", "def get_summary_from_model(text, model_type, number_words, target_percentage, nlp):\n\n # get sentences from text\n sentences = [sentence for sentence in\n text.lower().replace('!', '.').replace('?', '.').split('.')]\n\n # this pre-processing removes junk characters, and converts all sentence-ending characters to full-stops, for the\n # purpose of splitting the text into sentences\n processed_text = [convert_to_string(\n lemmatize(convert_to_string(remove_non_words(remove_stop_words(tokenize_text(sentence, nlp)))), nlp)) for\n sentence in sentences]\n\n # begin generating topics from the text, beginning with 1 topic\n number_topics = 1\n nlp_model, summary_model = get_summary_model(processed_text, model_type, number_topics)\n topic_sentences = get_topics(summary_model, nlp_model, number_words)\n percentage = len('. 
'.join(full_summarizer_word_comparison(sentences, topic_sentences, number_topics))) / len(text)\n\n # check if the ratio of the summary to the text is below the user-defined threshold\n while percentage < target_percentage:\n number_topics += 1\n nlp_model, summary_model = get_summary_model(processed_text, model_type, number_topics)\n topic_sentences = get_topics(summary_model, nlp_model, number_words)\n percentage = len('. '.join(full_summarizer_word_comparison(sentences, topic_sentences, number_topics))) / len(text)\n\n return '. '.join(full_summarizer_word_comparison(sentences, topic_sentences, number_topics))", "def save_information(self, path: utils.URLPath):\n # Text summary of model\n with (path / \"model_summary.txt\").open(\"w\") as summary_file:\n def print_file(*args, **kwargs):\n print(*args, **kwargs, file=summary_file)\n self.model.summary(print_fn=print_file)\n\n # Image plotting structure of model\n keras.utils.plot_model(self.model, to_file=str(path / \"model_plot.png\"))\n\n # plot all training history\n for i, (meta, history) in enumerate(self.training_history):\n training_output = path / f\"train_{i}\"\n io_functions.save_json(meta, training_output / \"info.json\")\n plot_training_history(history, training_output / \"training.png\")", "def dump_model(self):", "def _setup_summaries(self, sess):\n # Output directory for models and summaries\n\n\n print(\"Writing to {}\\n\".format(os.path.abspath(self._log_dir)))\n\n train_summary_dir = os.path.join(self._log_dir, \"summaries\", \"train\")\n self._train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n val_summary_dir = os.path.join(self._log_dir, \"summaries\", \"validation\")\n self._val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)\n\n # Model checkpoints\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n self.checkpoint_dir = os.path.abspath(os.path.join(self._save_dir, \"checkpoints/\"))\n\n if not os.path.exists(self.checkpoint_dir):\n os.makedirs(self.checkpoint_dir)\n\n self._saver = tf.train.Saver(max_to_keep=10) # Save model after each epoch\n\n self.train_summary_op = tf.summary.merge(self._train_summaries)\n self.val_summary_op = tf.summary.merge(self._val_summaries)\n\n print(\"--------------------------------------------------\")\n print(\"\\ntensorboard --logdir {}\".format(os.path.abspath(self._log_dir)))\n print(\"\\ntensorboard --logdir {} --port 6007\".format(os.path.abspath(self.checkpoint_dir)))\n print(\"--------------------------------------------------\")", "def printModelAndTime(self):\n import time\n self._reporter.writeOutput(\"Model name = \" + self.modelName + '\\n' +\n \"Output directory = \" + self._outputDir_ + '\\n' +\n \"Time = \" + time.asctime() + '\\n')\n return", "def generate_im(self, model):\n kappa = kappy.KappaStd()\n model_str = export.export(model, 'kappa')\n kappa.add_model_string(model_str)\n kappa.project_parse()\n imap = kappa.analyses_influence_map(accuracy='medium')\n graph = im_json_to_graph(imap)\n return graph", "def _publish_model(self):\n # Check if already published\n if self.model_published:\n return\n\n # Trace CPO model if required\n ctx = self.context\n lout = ctx.get_log_output()\n if lout and ctx.solver.trace_cpo:\n stime = time.time()\n lout.write(\"Model '\" + str(self.model.get_name()) + \"' in CPO format:\\n\")\n lout.write(self.cpostr)\n lout.write(\"\\n\")\n self.model.write_information(lout)\n lout.write(\"\\n\")\n lout.flush()\n self.process_infos.incr(CpoProcessInfos.MODEL_DUMP_TIME, time.time() - stime)\n\n # Dump in dump directory if required\n if ctx.model.dump_directory:\n stime = time.time()\n make_directories(ctx.model.dump_directory)\n mname = self.model.get_name()\n if mname is None:\n mname = \"Anonymous\"\n else:\n # Remove special characters introduced by Jupyter\n mname = mname.replace('<', '').replace('>', '')\n file = ctx.model.dump_directory + \"/\" + mname + \".cpo\"\n with utils.open_utf8(file, 'w') as f:\n f.write(self.cpostr)\n self.process_infos.incr(CpoProcessInfos.MODEL_DUMP_TIME, time.time() - stime)\n\n # Set published indicator\n self.model_published = True", "def print_model(self, model):\n return \"null\"", "def _get_summary_struct(self):\n model_fields = [\n ('Number of classes', 'num_classes'),\n ('Number of feature columns', 'num_features'),\n ('Input image shape', 'input_image_shape'),\n ]\n training_fields = [\n ('Number of examples', 'num_examples'),\n (\"Training loss\", 'training_loss'),\n (\"Training time (sec)\", 'training_time'),\n ]\n\n section_titles = ['Schema', 'Training summary']\n return([model_fields, training_fields], section_titles)", "def summary_page() :\r\n logger.debug(\"\")\r\n model = session_info.get_user_model(session)\r\n return render_template( \"summary_page.html\" , model=model ,\r\n stat_types=param_stats.StatTypes )", "def summary(app):\n click.echo(get_summary(app))", "def summary(self):\n\n print(\"input label:\", self.__input_label)\n print(\"target label:\", self.__target_label)\n print(\"denoising label:\", self.denoising_label)\n print(\"contains a successful DE:\", self.is_successful())", "def create_summary_statistics(forward_accuracy, backward_accuracy, merged_accuracy):\n summary_statistics = open(f'summary_statistics.txt', 'a')\n summary_statistics.write(f'The forward model has 
an accuracy of: {forward_accuracy}\\n')\n summary_statistics.write(f'The backward model has an accuracy of: {backward_accuracy}\\n')\n summary_statistics.write(f'The merged model has an accuracy of: {merged_accuracy}\\n')\n summary_statistics.close()", "def printModel(self):\n print(self.model)", "def summary(self):\r\n self.base.summary()\r\n self.extra_layers.summary()\r\n self.detector.summary()", "def save_graph_summary(self):\n writer = tf.summary.FileWriter(LOG_PATH)\n writer.add_graph(self.graph)", "def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })", "def show_performance(model):\n val_image_ids_ = [i for i in val_image_ids]\n np.random.shuffle(val_image_ids_)\n\n df_val = area_filter(val_image_ids_, val_coco)\n image_id = df_val['image_id'].iloc[0]\n annotation_ids = df_val[df_val['image_id'] == image_id]['annotation_id'].tolist()\n\n image_json = val_coco.loadImgs([image_id])[0]\n raw_image = cv2.imread(os.path.join(\"{}/{}/{}\".format(data_dir, val_type, image_json['file_name'])))\n height, width, _ = raw_image.shape\n\n # decode the mask, using annotation id created at the group by above\n binary_mask = process_mask(val_coco, annotation_ids, width, height)\n\n # preprocess input and mask (resize to 128, scale to [0, 1))\n input_image, input_mask = preprocess(raw_image, binary_mask)\n\n input_mask = np.expand_dims(input_mask, axis=-1)\n predicted_mask = model.predict(np.array([input_image]))[0]\n\n plt.figure(figsize=(20, 20))\n\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n display_list = [input_image[:, :, ::-1], input_mask, predicted_mask]\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(array_to_img(display_list[i]))\n plt.axis('off')\n plt.show()", "def save_model(model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath):\n\n\t# Dump data\n\twith open(fpath + '.json', 'w') as structure_fpath:\n\t\tjson.dump(model.to_json(), structure_fpath)\n\tlogging.info('...saved structural information')\n\n\t# Dump weights\n\tmodel.save_weights(fpath + '.h5', overwrite = True)\n\tlogging.info('...saved weights')\n\n\t# Dump image\n\ttry:\n\t\tplot(model, to_file = fpath + '.png')\n\t\tlogging.info('...saved image')\n\texcept:\n\t\tpass\n\n\t# Dump history\n\tsave_model_history_manual(loss, inner_val_loss, fpath + '.hist')\n\n\tmean_loss = loss[-1]\n\tmean_inner_val_loss = inner_val_loss[-1]\n\twrite_loss_report(mean_loss, mean_inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath + '_loss_report.txt')\n\tlogging.info ('...saved history')\n\n\tlogging.info('...saved model to {}.[json, h5, png]'.format(fpath))", "def ribovision_model_info(filename, output):\n r2dt.write_ribovision(filename, output)", "def dump(self, model_name: str) -> None:\n # Dump each preprocessor\n for index, preprocessor in enumerate(self._preprocessors):\n model_filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(\n model_name, index)\n joblib.dump(preprocessor, model_filename)\n\n # Dump the scalar\n filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(\n model_name, Packages.Models.Training.SCALAR_MODEL_NAME)\n joblib.dump(self._last_scalar_model, filename)\n\n # Dump the dimensionality reduction model\n reduction_model_path = Files.MODEL_REDUCTION_MODEL_FMT.format(\n model_name)\n joblib.dump(self._reduction_model, reduction_model_path)", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", 
"def load_model(self):\n self.loaded_model = keras.models.load_model(self.path)\n return self.loaded_model.summary()", "def write(self, model):\n\n # Initialize json_dump\n json_dump = {\"model\": [], \"metadata\": {}}\n\n # Set timestamp in metadata\n json_dump[\"metadata\"][\"time\"] = str(datetime.now())\n\n # Set the size of the model in metadata\n json_dump[\"metadata\"][\"model_size\"] = len(model.models)\n\n for obj in model.models:\n _class = type(obj).__name__\n if _class in [\n Winding,\n PhaseWinding,\n Wire,\n PhaseCapacitor,\n Position,\n PhaseLoad,\n ]:\n continue\n json_dump[\"model\"].append({})\n json_dump[\"model\"][-1][\"class\"] = _class\n\n try:\n json_dump[\"model\"][-1][\"name\"] = {\"class\": \"str\", \"value\": obj.name}\n except:\n json_dump[\"model\"][-1][\"name\"] = {\"class\": \"str\", \"value\": None}\n pass\n\n for key, value in obj._trait_values.items():\n if key in [\"capacitance_matrix\", \"impedance_matrix\", \"reactances\"]:\n json_dump[\"model\"][-1][key] = {\"class\": \"list\", \"value\": []}\n for v in value:\n if isinstance(v, complex):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"complex\", \"value\": [v.real, v.imag]}\n )\n elif isinstance(v, list):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"list\", \"value\": []}\n )\n for vv in v:\n if isinstance(vv, complex):\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"value\"\n ].append(\n {\n \"class\": \"complex\",\n \"value\": [vv.real, vv.imag],\n }\n )\n else:\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"value\"\n ].append(\n {\n \"class\": str(type(vv)).split(\"'\")[1],\n \"value\": vv,\n }\n )\n else:\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": str(type(v)).split(\"'\")[1], \"value\": v}\n )\n continue\n if isinstance(value, list):\n json_dump[\"model\"][-1][key] = {\"class\": \"list\", \"value\": []}\n for v in value:\n\n if isinstance(v, complex):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"complex\", \"value\": [v.real, v.imag]}\n )\n\n elif isinstance(v, Position):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Position\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, Unicode):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Unicode\", \"value\": v.default_value}\n )\n\n elif isinstance(v, Wire):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Wire\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, PhaseCapacitor):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"PhaseCapacitor\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, Winding):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Winding\"}\n )\n for kkk, vvv in v._trait_values.items():\n if kkk != \"phase_windings\":\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ] = {\"class\": \"list\", \"value\": []}\n for phw in v.phase_windings:\n 
json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ][\"value\"].append({\"class\": \"PhaseWinding\"})\n for kkkk, vvvv in phw._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ][\"value\"][-1][kkkk] = {\n \"class\": str(type(vvvv)).split(\"'\")[1],\n \"value\": vvvv,\n }\n\n elif isinstance(v, PhaseLoad):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"PhaseLoad\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n continue\n\n if isinstance(value, complex):\n json_dump[\"model\"][-1][key] = {\n \"class\": \"complex\",\n \"value\": [value.real, value.imag],\n }\n continue\n\n json_dump[\"model\"][-1][key] = {\n \"class\": str(type(value)).split(\"'\")[1],\n \"value\": value,\n }\n\n with open(os.path.join(self.output_path, self.filename), \"w\") as f:\n f.write(\n json_tricks.dumps(json_dump, allow_nan=True, sort_keys=True, indent=4)\n )", "def log_model_info(log_file: str, full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n # Only write logs on host 0.\n if jax.process_index() != 0:\n return\n\n state_dict = full_train_state.state_dict()\n param_state_dict = state_dict['target']\n total_num_params = jax.tree_util.tree_reduce(\n np.add, jax.tree_map(np.size, param_state_dict))\n\n param_logical_axes = partitioner.get_logical_axes(\n full_train_state).state_dict()['target']\n\n param_mesh_axes = jax.tree_map(\n lambda x: tuple(x) if x is not None else None,\n partitioner.get_mesh_axes(full_train_state).state_dict()['target'])\n\n def _log_info_and_write_to_file(writer, format_str, *args):\n logging.info(format_str, *args)\n writer.write(format_str % args + '\\n')\n\n with gfile.GFile(log_file, 'w') as writer:\n\n # Log params\n def _log_param(name: str, arr: np.ndarray,\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n if logical_axes is None:\n shape_str = str(arr.shape)\n else:\n assert len(logical_axes) == len(arr.shape)\n shape_str = '({})'.format(', '.join(\n f'{name}={dimension}'\n for name, dimension in zip(logical_axes, arr.shape)))\n _log_info_and_write_to_file(\n writer, 'Variable %-80s size %-12s shape %-40s partition spec %s',\n name, arr.size, shape_str, mesh_axes)\n\n jax.tree_map(_log_param, state_utils.get_name_tree(param_state_dict),\n param_state_dict, param_logical_axes, param_mesh_axes)\n\n _log_info_and_write_to_file(writer, 'Total number of parameters: %d',\n total_num_params)\n\n # Add a blank line between params and states.\n _log_info_and_write_to_file(writer, '')\n\n # Log states\n def _log_state(name, arr):\n if arr is None:\n _log_info_and_write_to_file(writer, 'State %-80s None', name)\n else:\n _log_info_and_write_to_file(writer,\n 'State %-80s size %-12s shape %s', name,\n arr.size, arr.shape)\n\n jax.tree_map(_log_state, state_utils.get_name_tree(state_dict['state']),\n state_dict['state'])" ]
[ "0.73806274", "0.7147296", "0.6999165", "0.6930897", "0.68741435", "0.6732777", "0.67200786", "0.6702313", "0.6616318", "0.65155643", "0.6488972", "0.6480176", "0.6442557", "0.64185977", "0.6416413", "0.6397906", "0.6342395", "0.63167113", "0.63136315", "0.6211133", "0.620451", "0.6184558", "0.6125917", "0.60407835", "0.5997129", "0.5974154", "0.5956726", "0.59416246", "0.589099", "0.5879752", "0.5798037", "0.5798037", "0.5794074", "0.57489026", "0.57281375", "0.57113147", "0.57001674", "0.56893533", "0.56807566", "0.5659331", "0.563871", "0.562797", "0.5621189", "0.5601882", "0.559807", "0.5591748", "0.55900306", "0.5583122", "0.55820835", "0.5579821", "0.55592394", "0.55551916", "0.5550325", "0.5550103", "0.5540493", "0.5533982", "0.55329436", "0.5531642", "0.5518223", "0.5497497", "0.5494639", "0.5488766", "0.5483717", "0.5459991", "0.54580307", "0.54381514", "0.5437414", "0.54294264", "0.5418519", "0.54120076", "0.54089314", "0.53980154", "0.539377", "0.5379878", "0.53785247", "0.5341001", "0.53300434", "0.5324339", "0.529196", "0.52747303", "0.5268623", "0.52684486", "0.5266647", "0.5265179", "0.5254992", "0.52530396", "0.5250056", "0.52459973", "0.5244041", "0.5242759", "0.5230229", "0.52239066", "0.5212089", "0.5192411", "0.5191591", "0.5188423", "0.5185094", "0.5183167", "0.51803166", "0.51711833" ]
0.7225569
1
Writes a human readable summary of the present model to logging.info, and logs the number of trainable parameters to AzureML.
def generate_and_print_model_summary(config: ModelConfigBase, model: DeviceAwareModule) -> None:
    random_state = RandomStateSnapshot.snapshot_random_state()
    # There appears to be a bug in apex, where previous use (in training for example) causes problems
    # when another model is later built on the CPU (for example, before loading from a checkpoint)
    # https://github.com/NVIDIA/apex/issues/694
    # Hence, move the model to the GPU before doing model summary.
    if config.use_gpu:
        model = model.cuda()
    if isinstance(config, ScalarModelBase):
        # To generate the model summary, read the first item of the dataset. Then use the model's own
        # get_model_input function to convert the dataset item to input tensors, and feed them through the model.
        train_dataset = config.get_torch_dataset_for_inference(ModelExecutionMode.TRAIN)
        train_item_0 = next(iter(train_dataset.as_data_loader(shuffle=False, batch_size=1, num_dataload_workers=0)))
        model_inputs = get_scalar_model_inputs_and_labels(config, model, train_item_0).model_inputs
        # The model inputs may already be converted to float16, assuming that we would do mixed precision.
        # However, the model is not yet converted to float16 when this function is called, hence convert back to float32
        summary = ModelSummary(model)
        summary.generate_summary(input_tensors=model_inputs, log_summaries_to_files=config.log_summaries_to_files)
    elif config.is_segmentation_model:
        summary_for_segmentation_models(config, model)
        assert model.summarizer
        summary = model.summarizer  # type: ignore
    else:
        raise ValueError("Don't know how to generate a summary for this type of model?")
    RUN_CONTEXT.log(LoggingColumns.NumTrainableParameters, summary.n_trainable_params)
    random_state.restore_random_state()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary(self):\n\n self.model.summary(print_fn=lambda x: logging.info(x))", "def model_summary():\n print(\"\\n\")\n print(\"=\" * 30 + \"Model Structure\" + \"=\" * 30)\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n print(\"=\" * 60 + \"\\n\")", "def print_summary(self):\n self.model.summary()", "def log_model_info(log_file: str, full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n # Only write logs on host 0.\n if jax.process_index() != 0:\n return\n\n state_dict = full_train_state.state_dict()\n param_state_dict = state_dict['target']\n total_num_params = jax.tree_util.tree_reduce(\n np.add, jax.tree_map(np.size, param_state_dict))\n\n param_logical_axes = partitioner.get_logical_axes(\n full_train_state).state_dict()['target']\n\n param_mesh_axes = jax.tree_map(\n lambda x: tuple(x) if x is not None else None,\n partitioner.get_mesh_axes(full_train_state).state_dict()['target'])\n\n def _log_info_and_write_to_file(writer, format_str, *args):\n logging.info(format_str, *args)\n writer.write(format_str % args + '\\n')\n\n with gfile.GFile(log_file, 'w') as writer:\n\n # Log params\n def _log_param(name: str, arr: np.ndarray,\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n if logical_axes is None:\n shape_str = str(arr.shape)\n else:\n assert len(logical_axes) == len(arr.shape)\n shape_str = '({})'.format(', '.join(\n f'{name}={dimension}'\n for name, dimension in zip(logical_axes, arr.shape)))\n _log_info_and_write_to_file(\n writer, 'Variable %-80s size %-12s shape %-40s partition spec %s',\n name, arr.size, shape_str, mesh_axes)\n\n jax.tree_map(_log_param, state_utils.get_name_tree(param_state_dict),\n param_state_dict, param_logical_axes, param_mesh_axes)\n\n _log_info_and_write_to_file(writer, 'Total number of parameters: %d',\n total_num_params)\n\n # Add a blank line between params and states.\n _log_info_and_write_to_file(writer, '')\n\n # Log states\n def _log_state(name, arr):\n if arr is None:\n _log_info_and_write_to_file(writer, 'State %-80s None', name)\n else:\n _log_info_and_write_to_file(writer,\n 'State %-80s size %-12s shape %s', name,\n arr.size, arr.shape)\n\n jax.tree_map(_log_state, state_utils.get_name_tree(state_dict['state']),\n state_dict['state'])", "def summary(self):\n\n print(\n \"\\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs.\"\n % (self.dataset_name, self.maxlen, self.charset, self.epochs)\n )\n\n print(\n \"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f.\"\n % (\n self.noise_std,\n self.lstm_dim,\n self.dec_layers,\n self.td_dense_dim,\n self.batch_size,\n self.codelayer_dim,\n self.lr,\n )\n )", "def summary(self) -> None:\n print(\"Model manager summary:\")\n print(\"Preprocessor:\")\n print(self.preprocessor)\n print(\"Model summary:\")\n self.model.summary()\n print(\"Postprocessor:\")\n print(self.postprocessor)", "def describe_model():\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n msg = [\"\"]\n total = 0\n for v in train_vars:\n shape = v.get_shape()\n ele = shape.num_elements()\n total += ele\n msg.append(\"{}: shape={}, dim={}\".format(\n v.name, shape.as_list(), ele))\n size_mb = total * 4 / 1024.0**2\n msg.append(colored(\n \"Total param={} ({:01f} MB assuming all float32)\".format(total, size_mb), 'cyan'))\n logger.info(colored(\"Model Parameters: \", 'cyan') + 
'\\n'.join(msg))", "def print_summary(self):\n print(\"Word Level\")\n self.model_word.summary()\n \n print(\"Sent Level\")\n self.model_sent.summary()\n\n print(\"Doc Level\")\n self.model.summary()", "def summary(self):\r\n print(self.model.summary())", "def summary(self):\n print(self.model.summary())", "def logging_init(model, graph):\n # Add ops to record summaries for loss and accuracy...\n train_loss = tf.summary.scalar(\"train_loss\", model.loss)\n train_accuracy = tf.summary.scalar(\"train_accuracy\", model.accuracy)\n # ...then merge these ops into one single op so that they easily be run\n # together\n train_summary_ops = tf.summary.merge([train_loss, train_accuracy])\n # Same ops, but with different names, so that train/test results show up\n # separately in TensorBoard\n test_loss = tf.summary.scalar(\"test_loss\", model.loss)\n test_accuracy = tf.summary.scalar(\"test_accuracy\", model.accuracy)\n test_summary_ops = tf.summary.merge([test_loss, test_accuracy])\n\n timestamp = int(time.time())\n run_log_dir = os.path.join(LOGS_DIR, str(timestamp))\n os.makedirs(run_log_dir)\n # (this step also writes the graph to the events file so that\n # it shows up in TensorBoard)\n summary_writer = tf.summary.FileWriter(run_log_dir, graph)\n\n return train_summary_ops, test_summary_ops, summary_writer", "def create_log(self):\n self.model.graph.get_stats()\n out = self.model.graph.summary\n out[\"training_error\"] = zip(self.train_it, self.train_err)\n out[\"validation_error\"] = zip(self.validation_it, self.validation_err)\n with open(self.log, \"w\") as f:\n f.write(json.dumps(out, default=defaultencode))", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def summary(self):\n self.model.summary()", "def model_stats(opt, epoch, model):\n log = rlog.getLogger(opt.experiment + \".model\")\n if hasattr(opt, \"log\") and opt.log.detailed:\n # log histogram also\n assert isinstance(\n model, SVIModel\n ), \"This stat only makes sense for SVI models.\"\n for mu, std in zip(model.mu(), model.std()):\n log.put(mu=mu, std=std)\n log.trace(step=epoch, **model.summarize())\n log.reset()", "def summarize(self):\n # go recursively in the model architecture\n summary_str = self.recursive_summarize(self, 0, self.name)\n\n # Sum the model parameters.\n num_total_params = sum([np.prod(p.size()) for p in self.parameters()])\n mod_trainable_params = filter(lambda p: p.requires_grad, self.parameters())\n num_trainable_params = sum([np.prod(p.size()) for p in mod_trainable_params])\n\n summary_str += 'Total Trainable Params: {}\\n'.format(num_trainable_params)\n summary_str += 'Total Non-trainable Params: {}\\n'.format(num_total_params-num_trainable_params) \n summary_str += '='*80 + '\\n'\n\n return summary_str", "def print_info(self):\r\n self.system.print_to_log(\r\n f\"{self.__class__.__name__} model: Infection probability: {self.p}, Infectious period: {self.i}, Recovery period: {self.r}.\")", "def print_num_params(model: nn.Module):\n if type(model) == DistributedDataParallel:\n 
model = model.module\n\n # Count all parameteres\n sum_params = count_params(model)\n\n # Count SPN parameters\n spn_params = sum_params\n\n # Print\n logger.info(f\"Number of parameters:\")\n # logger.info(f\"- Total: {sum_params / 1e6: >8.3f}M\")\n logger.info(\n f\"- SPN: {spn_params / 1e6: >8.3f}M ({spn_params / sum_params * 100:.1f}%)\"\n )\n # logger.info(f\"- NN: {nn_params / 1e6: >8.3f}M ({nn_params / sum_params * 100:.1f}%)\")", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def log_best_performer(self) -> None:\n best = self.get_highest_accuracy()\n self.logger.info(f\"\\n\\nThe model with the highest accuracy {best[0]} has the following characteristics: \\n\")\n for k, v in best[1].items():\n if k != 'best_performer':\n self.logger.info(f\"{k} : {v}\")\n else:\n self.logger.info(f\"Best Accuracy: {v['Accuracy']}\")\n self.logger.info(\"Features used: \")\n for f in v['Labels']:\n self.logger.info(f)\n for nw, w in v['Vars'].items():\n self.logger.info(f\"{nw}: {w}\")", "def logging(self, function):\n avg_nms_time_per_step = sum(self.nms_times)/len(self.nms_times)\n avg_total_time_per_step = sum(self.total_times)/len(self.total_times)\n\n avg_min_latency = [x[0] for x in self.inference_times]\n avg_max_latency = [x[1] for x in self.inference_times]\n avg_latency = [x[2] for x in self.inference_times]\n\n function(\"Inference stats: image size {}x{}, batches per step {}, batch size {}, {} steps\".format(\n self.cfg.model.image_size, self.cfg.model.image_size, self.cfg.ipuopts.batches_per_step, self.cfg.model.micro_batch_size, len(self.total_times)\n ))\n function(\"--------------------------------------------------\")\n function(\"Inference\")\n function(\"Average Min Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_min_latency)/len(self.inference_times)))\n function(\"Average Max Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_max_latency)/len(self.inference_times)))\n function(\"Average Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_latency)/len(self.inference_times)))\n function(\"Average Inference Throughput: {:.3f} img/s\".format(sum(self.inference_throughputs)/len(self.inference_throughputs)))\n function(\"--------------------------------------------------\")\n # TODO remove the NMS and end-to-end time report once NMS is on device\n function(\"End-to-end\")\n function(\"Average NMS Latency per Batch: {:.3f} ms\".format(1000 * avg_nms_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"Average End-to-end Latency per Batch: {:.3f} ms\".format(1000 * avg_total_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"End-to-end Throughput: {:.3f} img/s\".format(sum(self.total_throughputs)/len(self.total_throughputs)))\n function(\"==================================================\")\n\n if self.cfg.eval.metrics:\n self.compute_and_print_eval_metrics()", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n #print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n\n with open(os.path.join(self.train_dir,'model_arch.txt'), 'a') as fp:\n print(model, file=fp)\n print(name, file=fp)\n print(\"The number of parameters: {}\".format(num_params),file=fp)", "def on_train_begin(self, logs=None):\n f = open(self.log_file_path, \"a\")\n f.write(f\"{'=' * 5}{self.model_name}({self.hp_log_title}){'=' * 5}\\n\")\n f.close()", "def save_summary(model, model_name, stage_no):\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x))\n 
short_model_summary = \"\\n\".join(stringlist)\n \n with open(eval_path+\"{}_model_summary_stage_{}.txt\".format(model_name, stage_no), \"w\") as text_file:\n print(short_model_summary, file=text_file)", "def initialize_summary(self):\n if self.need_logs:\n self.summary_writer = tf.summary.create_file_writer(self.log_dir)\n if self.verbose > 0:\n full_log_path = os.path.abspath(self.log_dir)\n print('Initialize logs, use: \\ntensorboard --logdir={}'.format(full_log_path))", "def output_summary(self, v, vhat, sk, logged_matrics, train_dataset_label, val_dataset_label, summary_folder_path):\n\n mse = np.sum((v-vhat)**2)/len(v)\n train_loss = logged_matrics[\"train_loss\"]\n\n k = np.sum(p.numel() for p in self.parameters())\n\n numOfSamples = len(sk)\n aic = 2*k + numOfSamples*np.log(mse) + numOfSamples*(1+np.log(2*np.pi))\n\n summary_file = os.path.join(summary_folder_path, \"model_summary.txt\")\n if not os.path.isfile(summary_file):\n print(\"Created file \"+summary_file)\n with open(summary_file, \"w\") as output:\n output.write(\n \"Model Train_dataset_label Val_dataset_label Train_loss Test_loss AIC\\n\")\n else:\n print(summary_file +\n \" exists, model summary will be attached to the end of this file.\")\n\n with open(summary_file, \"a\") as output:\n model_name = self.version\n output.write(model_name + \" \" + train_dataset_label + \" \" +\n val_dataset_label + \" %f %f %f\\n\" % (train_loss, mse, aic))\n\n\n plt.scatter(sk, v, c=\"blue\", s=2, label=\"true\")\n plt.scatter(sk, vhat, c=\"red\", s=2, label=\"predict\")\n plt.legend()\n plt.xlabel(\"sk\")\n plt.ylabel(\"v\")\n\n plt.savefig(os.path.join(summary_folder_path, \"plots\", model_name+\".png\"))\n plt.show()\n\n np.savetxt(os.path.join(summary_folder_path, \"model_prediction\", model_name+\"_prediction.txt\"), np.column_stack((sk, v, vhat)), header=\"sk v vhat\", fmt='%.8f')\n\n\n print(\"Plot saved as\", os.path.join(summary_folder_path, \"plots\", model_name+\".png\"))\n print(\"Model prediction saved as\", os.path.join(summary_folder_path, \"model_prediction\", model_name+\"_prediction.txt\"))", "def summary_info(self):\n return [('model_architecture', self.model_architecture),\n ('input_size', self.input_size),\n ('output_size', self.output_size),\n ('hidden_layers', self.hidden_layers),\n ('learn_rate', self.learn_rate),\n ('drop_p', self.drop_p),\n ('current_epoch', self.model.current_epoch)]", "def _report_model_parameters(self):\n\n all_params = sum(p.numel() for p in self.parameters())\n trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)\n size = all_params * (32 / 8) # Bytes\n logger.info(\"Model has %.1f M parameters (%.1f M trainable) with an estimated size of %.1f MB\", all_params / 1e6, trainable_params / 1.0e6, size / 1.0e6)", "def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def execute_summary(self, step):\n with self.summary_writer.as_default():\n tf.summary.scalar('bias', self.core.fmlayer.b, step=step)\n tf.summary.scalar('regularization_penalty', self.regularization, step=step)\n tf.summary.scalar('loss', 
self.reduced_loss, step=step)\n tf.summary.scalar('target', self.target, step=step)", "def log_trainable_variables(self):\n var_names = list(self.trainable_variables.keys())\n self.logger.log_trainable_variables(var_names)", "def on_train_begin(self, logs):\n self.train_start = timeit.default_timer()\n self.metrics_names = self.model.metrics_names\n print('Training for {} steps ...'.format(self.params['nb_steps']))", "def get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin, primers_origin,\n taxonomy_level, selected_primer, test_size, logger) -> int:\n\n # Global information on the model\n logger.log(title='Parameter information for {}'.format(model_name))\n # Data Origins\n logger.log(subtitle='Data Origins')\n logger.log(text='Sequence origin: {}'.format(sequence_origin))\n logger.log(text='Primers origin: {}'.format(primers_origin))\n # Chosen levels for classification\n logger.log(subtitle='Chosen HyperVariable Region and Taxonomy Rank')\n logger.log(text='HyperVariable Region: {}'.format(str(selected_primer)))\n logger.log(text='Taxonomy Rank: {}'.format(str(taxonomy_level)))\n # Applied Preprocessing\n logger.log(subtitle='Preprocessing')\n logger.log(text='Preprocessing description: ' + model_preprocessing)\n # Model parameters\n logger.log(subtitle='Model parameters')\n logger.log(text='Parameter dict: {}'.format(str(model_parameters)))\n logger.log(text='Size of test set: {}'.format(len(y_test)))\n logger.log(text='Part of test size compared to total: {}'.format(test_size))\n\n return len(y_test)", "def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')", "def _setup_summaries(self, sess):\n # Output directory for models and summaries\n\n\n print(\"Writing to {}\\n\".format(os.path.abspath(self._log_dir)))\n\n train_summary_dir = os.path.join(self._log_dir, \"summaries\", \"train\")\n self._train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n val_summary_dir = os.path.join(self._log_dir, \"summaries\", \"validation\")\n self._val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)\n\n # Model checkpoints\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n self.checkpoint_dir = os.path.abspath(os.path.join(self._save_dir, \"checkpoints/\"))\n\n if not os.path.exists(self.checkpoint_dir):\n os.makedirs(self.checkpoint_dir)\n\n self._saver = tf.train.Saver(max_to_keep=10) # Save model after each epoch\n\n self.train_summary_op = tf.summary.merge(self._train_summaries)\n self.val_summary_op = tf.summary.merge(self._val_summaries)\n\n print(\"--------------------------------------------------\")\n print(\"\\ntensorboard --logdir {}\".format(os.path.abspath(self._log_dir)))\n print(\"\\ntensorboard --logdir {} --port 6007\".format(os.path.abspath(self.checkpoint_dir)))\n print(\"--------------------------------------------------\")", "def print_network(self, model, name):\r\n num_params = 0\r\n for p in model.parameters():\r\n num_params += p.numel()\r\n print(model)\r\n print(name)\r\n print(\"The number of parameters: {}\".format(num_params))", "def print_network(self, model, name):\r\n num_params = 0\r\n for p in model.parameters():\r\n num_params += p.numel()\r\n print(model)\r\n print(name)\r\n print(\"The number of parameters: {}\".format(num_params))", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))", "def _summary(self):\n trainable_variable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n for var in trainable_variable:\n tf.summary.histogram(var.op.name, var)\n\n self.merged_summary_op = tf.summary.merge_all()", "def log_output_data(self):\r\n with tf.name_scope('model_output'):\r\n for i in range(self.action_handler.get_number_actions()):\r\n variable_name = str(self.action_handler.action_list_names[i])\r\n tf.summary.histogram(variable_name + '_output', self.actor_last_row_layer[i])", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))", "def summary(self):\n\n print(\"input label:\", self.__input_label)\n print(\"target label:\", self.__target_label)\n print(\"denoising label:\", self.denoising_label)\n print(\"contains a successful DE:\", self.is_successful())", "def print_info(self):\n print(\"Num samples (train/test/val): {} tot: {}\\n\"\n \"Samples per class: {}\\n\"\n \"Sample type {}\\n\"\n \"Sample shape: {}\\n\"\n \"Label type {}\\n\"\n \"Label shape: {}\\n\"\n \"Root dirs: {}\".format([int(np.floor(frac * len(self.__labels))) for frac in self.split_fraction],\n len(self.__labels),\n self.__samples_per_class,\n self.train.output_types[0], self.train.output_shapes[0][1:],\n self.train.output_types[1], self.train.output_shapes[1][1:],\n self.__root_directory_list))", "def print_total_params(print_function=tf.logging.info):\n\n total_parameters = 
0\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n shape = variable.get_shape()\n variable_parametes = 1\n for dim in shape:\n variable_parametes *= dim.value\n total_parameters += variable_parametes\n print_function(f'TOTAL NUMBER OF PARAMTERS:{total_parameters}')\n print_function(f'Memory Estimate:{total_parameters*32/8/1024/1024} MB')", "def print_summary(self):\n self.network.print_summary()", "def training_info(self):\n pass", "def on_train_begin(self, logs):\n print(f\"Testing for {self.params['nb_episodes']} episodes ...\")", "def _log_weights(self, epoch):\r\n writer = self._get_writer(self._train_run_name)\r\n with context.eager_mode(), \\\r\n writer.as_default(), \\\r\n summary_ops_v2.always_record_summaries():\r\n for layer in self.model.layers:\r\n for weight in layer.weights:\r\n weight_name = weight.name.replace(':', '_')\r\n with ops.init_scope():\r\n weight = K.get_value(weight)\r\n summary_ops_v2.histogram(weight_name, weight, step=epoch)\r\n if self.write_images:\r\n self._log_weight_as_image(weight, weight_name, epoch)\r\n writer.flush()", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(\"\\nModel Name: \\\"{}\\\"\".format(name))\n print(model)\n print(\"The number of parameters: {}\".format(num_params))", "def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(\"\\nModel Name: \\\"{}\\\"\".format(name))\n print(model)\n print(\"The number of parameters: {}\".format(num_params))", "def write_training_summaries(self):\n for metric, epochs in self._training_summaries.items():\n self._write_scalar_to_tensorboard(\n name=f\"{self._Sections.SUMMARY}/training_{metric}\",\n value=epochs[-1],\n step=self._epochs,\n )", "def summary(self):\n return self.model.summary()", "def print_summary(self, print_level = 0):\n\n print(\"==========================\")\n print(\"= FUNtoFEM model summary =\")\n print(\"==========================\")\n print(\"Model name:\", self.name)\n print(\"Number of bodies:\", len(self.bodies))\n print(\"Number of scenarios:\", len(self.scenarios))\n print(\" \")\n print(\"------------------\")\n print(\"| Bodies summary |\")\n print(\"------------------\")\n for body in self.bodies:\n print(\"Body:\", body.id, body.name)\n print(\" coupling group:\", body.group)\n print(\" transfer scheme:\", type(body.transfer))\n print(\" shape parameteration:\", type(body.shape))\n for vartype in body.variables:\n print(' variable type:', vartype)\n print(' number of ', vartype, ' variables:', len(body.variables[vartype]))\n if print_level >= 0:\n for var in body.variables[vartype]:\n print(' variable:', var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)\n\n print(\" \")\n print(\"--------------------\")\n print(\"| Scenario summary |\")\n print(\"--------------------\")\n for scenario in self.scenarios:\n print(\"scenario:\", scenario.id, scenario.name)\n print(\" coupling group:\", scenario.group)\n print(\" steps:\", scenario.steps)\n print(\" steady?:\", scenario.steady)\n for func in scenario.functions:\n print(' function:', func.name, ', analysis_type:', func.analysis_type)\n print(' adjoint?', func.adjoint)\n if not scenario.steady:\n print(' time range', func.start, ',', func.stop)\n print(' averaging', func.averaging)\n\n\n for vartype in scenario.variables:\n print(' variable type:', vartype)\n print(' number of ', 
vartype, ' variables:', len(scenario.variables[vartype]))\n if print_level >= 0:\n for var in scenario.variables[vartype]:\n print(' variable:', var.id, var.name, ', active?', var.active,', coupled?', var.coupled)\n print(' value and bounds:', var.value, var.lower, var.upper)", "def test_get_summary_with_model(self):\n\t\t\n\t\tdescription = self.watcher.describe(model=self.model)\n\t\tself.assertEqual(11, len(description))\n\t\t\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.second_layer])\n\t\treturned_summary = self.watcher.get_summary(details)\n\t\t\n\t\tprint(returned_summary)\n\t\t\n\t\tsaved_summary = self.watcher.get_summary()\n\t\tself.assertEqual(returned_summary, saved_summary)", "def log_info(self, logger, opt_loc=''):\n if len(np.unique(self._lr)) == 1:\n logger.info('Using %s %s optimizer with lr = %.5f.' % \\\n (self.name, opt_loc, self._lr[0]))\n else:\n logger.info('Using %s %s optimizer with:' % (self.name, opt_loc))\n for forward_opt in self._optimizer_list:\n assert len(forward_opt.param_groups) == 1\n lr = forward_opt.param_groups[0]['lr']\n shapes = str([list(pm.shape) for pm in \\\n forward_opt.param_groups[0]['params']])\n logger.info(' lr = %.3f for params with shape %s.' % \\\n (lr, shapes[1:-1]))", "def _compute_statistics(self):\n # log to file\n output_dir = self.params['output_dir']\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n output_dir = os.path.join(output_dir, 'statistics.log')\n log = logging.getLogger('tensorflow')\n handle = logging.FileHandler(output_dir)\n log.addHandler(handle)\n\n # FLOPS\n encoder_flops, decoder_flops = 0, 0\n encoder_count, decoder_count = 0, 0\n graph = tf.get_default_graph()\n for operation in graph.get_operations():\n flops = ops.get_stats_for_node_def(graph, operation.node_def,\n 'flops').value\n if flops is None:\n continue\n if operation.name.startswith('model/encoder'):\n # encoder\n encoder_flops += flops\n encoder_count += 1\n tf.logging.info('encoder operation %s : %d', operation.name, flops)\n elif operation.name.startswith('model/decoder'):\n # decoder\n decoder_flops += flops\n decoder_count += 1\n tf.logging.info('decoder operation %s : %d', operation.name, flops)\n else:\n # gradient\n pass\n tf.logging.info('flops of %d encoder tensor: %d',\n encoder_count, encoder_flops)\n tf.logging.info('flops of %d decoder tensor: %d',\n decoder_count, decoder_flops)\n tf.logging.info('flops of total %d tensor: %d',\n encoder_count + decoder_count,\n encoder_flops + decoder_flops)\n # parameters\n encoder_parameters, decoder_parameters = 0, 0\n encoder_count, decoder_count = 0, 0\n for var in tf.trainable_variables():\n parameters = np.prod(var.get_shape().as_list())\n if var.name.startswith('model/encoder'):\n # encoder\n encoder_parameters += parameters\n encoder_count += 1\n tf.logging.info('encoder variable %s : %d', var.name, parameters)\n elif var.name.startswith('model/decoder'):\n # decoder\n decoder_parameters += parameters\n decoder_count += 1\n tf.logging.info('decoder variable %s : %d', var.name, parameters)\n\n tf.logging.info('parameters of %d encoder tensor: %d',\n encoder_count, encoder_parameters)\n tf.logging.info('parameters of %d decoder tensor: %d',\n decoder_count, decoder_parameters)\n tf.logging.info('parameters of total %d tensor: %d',\n encoder_count + decoder_count,\n encoder_parameters + decoder_parameters)\n # disable log to file\n log.removeHandler(handle)", "def log_hyperparameters(\n cfg: DictConfig,\n model: pl.LightningModule,\n trainer: pl.Trainer,\n) 
-> None:\n hparams = OmegaConf.to_container(cfg, resolve=True)\n\n # save number of model parameters\n hparams[f\"{STATS_KEY}/params_total\"] = sum(p.numel() for p in model.parameters())\n hparams[f\"{STATS_KEY}/params_trainable\"] = sum(\n p.numel() for p in model.parameters() if p.requires_grad\n )\n hparams[f\"{STATS_KEY}/params_not_trainable\"] = sum(\n p.numel() for p in model.parameters() if not p.requires_grad\n )\n\n # send hparams to all loggers\n trainer.logger.log_hyperparams(hparams)\n\n # disable logging any more hyperparameters for all loggers\n # (this is just a trick to prevent trainer from logging hparams of model, since we already did that above)\n trainer.logger.log_hyperparams = lambda params: None", "def summary(self):\r\n self.base.summary()\r\n self.extra_layers.summary()\r\n self.detector.summary()", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", "def _log_metrics(\n self,\n train_writer: SummaryWriter,\n val_writer: SummaryWriter,\n timestamped_save_dir: Path,\n train_metrics: _Metrics,\n step: int,\n ) -> None:\n if len(self.val_loader) > 0:\n val_metrics, val_img, val_gt, val_pred = self._get_val_metrics()\n if val_metrics.accuracy > self.best_acc:\n self.best_acc = val_metrics.accuracy\n self.save_weights(timestamped_save_dir, True)\n\n for key in vars(train_metrics):\n if key == \"class_loss\":\n tag = \"losses/classification\"\n elif key in {\"shape_loss\", \"total_loss\"}:\n continue\n else:\n tag = f\"metrics/{key}\"\n\n train_writer.add_scalar(tag, getattr(train_metrics, key), step)\n if len(self.val_loader) > 0:\n val_writer.add_scalar(tag, getattr(val_metrics, key), step)\n\n reg_loss = self._get_l2_reg()\n train_writer.add_scalar(\"losses/regularization\", reg_loss, step)\n train_writer.add_scalar(\"losses/shape\", train_metrics.shape_loss, step)\n train_writer.add_scalar(\n \"losses/total\",\n train_metrics.total_loss + self.config.weight_decay * reg_loss,\n step,\n )\n\n # Log a histogram for each tensor parameter in the model, to\n # see if a parameter is training stably or not\n for name, value in self.model.state_dict().items():\n train_writer.add_histogram(name, value, step)\n\n # Log the validation images for easy visualization\n if len(self.val_loader) > 0:\n val_writer.add_images(\"input\", val_img, step)\n val_writer.add_images(\"ground_truth\", val_gt, step)\n val_writer.add_images(\"prediction\", val_pred, step)", "def _logging(self):\n msgs = []\n # patch to log stdout spawned processes of dataloader\n logger = init_logger()\n for ds_name, ds_count in self._counts.items():\n msgs.append(f\"\\t\\t\\t* {ds_name}: {ds_count}\")\n logger.info(\"Weighted corpora loaded so far:\\n\" + \"\\n\".join(msgs))", "def build_summary(self):\n assert self.mode==\"train\"\n\n for var in tf.trainable_variables():\n with tf.name_scope(var.name[:var.name.find(\":\")]):\n with tf.name_scope(\"values\"):\n self.variable_summary(var)\n\n for g, var in zip(self.gs, self.g_vars):\n with tf.name_scope(var.name[:var.name.find(\":\")]):\n with tf.name_scope(\"gradients\"):\n self.variable_summary(g)\n\n with tf.name_scope(\"cross_entropies\"):\n self.variable_summary(self.cross_entropies)\n\n with tf.name_scope(\"attention\"):\n self.variable_summary(self.sum_alpha) \n\n with tf.name_scope(\"scores\"):\n self.variable_summary(self.scores) \n\n tf.summary.scalar(\"num_correct_words\", self.num_correct_words)\n\n tf.summary.scalar(\"cross_entropy_loss\", 
self.cross_entropy_loss)\n tf.summary.scalar(\"attention_loss\", self.attention_loss)\n tf.summary.scalar(\"l2_loss\", self.l2_loss)\n tf.summary.scalar(\"loss\", self.loss)\n \n self.summary = tf.summary.merge_all()", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def print_network(self):\n #plot_model(self.model, to_file='model.png', show_shapes=True)\n logging.info(\"\")\n logging.info(self.network)\n logging.info(\"Network accuracy: %.2f%%\" % (self.accuracy * 100))\n logging.info(\"Network loss: %.2f%%\" % (self.loss))", "def logging_summaries(\n summary_writer: tf.contrib.summary.SummaryWriter, logged: Dict\n) -> None:\n\n with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.image(\"generated\", logged[\"generated_data\"])\n tf.contrib.summary.image(\"real\", logged[\"real_data\"])\n tf.contrib.summary.scalar(\"generator/loss\", logged[\"gen_loss\"])\n tf.contrib.summary.scalar(\"discriminator/loss\", logged[\"disc_loss\"])", "def summary(self):\n print('est0: %s (%s) shape: %s' % (str(self.est0.name),\\\n str(self.est0.type_name),str(self.shape0)))\n print('est1: %s (%s) shape: %s' % (str(self.est1.name),\\\n str(self.est1.type_name),str(self.shape1)))", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def autolog(\n every_n_iter=1,\n log_models=True,\n disable=False,\n exclusive=False,\n disable_for_unsupported_versions=False,\n silent=False,\n): # pylint: disable=unused-argument\n # pylint: disable=E0611\n import tensorflow\n\n global _LOG_EVERY_N_STEPS\n _LOG_EVERY_N_STEPS = every_n_iter\n\n atexit.register(_flush_queue)\n\n if Version(tensorflow.__version__) < Version(\"1.12\"):\n warnings.warn(\"Could not log to MLflow. TensorFlow versions below 1.12 are not supported.\")\n return\n\n try:\n from tensorflow.python.summary.writer.event_file_writer import EventFileWriter\n from tensorflow.python.summary.writer.event_file_writer_v2 import EventFileWriterV2\n from tensorflow.python.saved_model import tag_constants\n from tensorflow.python.summary.writer.writer import FileWriter\n except ImportError:\n warnings.warn(\"Could not log to MLflow. 
TensorFlow versions below 1.12 are not supported.\")\n return\n\n def train(original, self, *args, **kwargs):\n active_run = mlflow.active_run()\n global _AUTOLOG_RUN_ID\n _AUTOLOG_RUN_ID = active_run.info.run_id\n\n # Checking step and max_step parameters for logging\n if len(args) >= 3:\n mlflow.log_param(\"steps\", args[2])\n if len(args) >= 4:\n mlflow.log_param(\"max_steps\", args[3])\n if \"steps\" in kwargs:\n mlflow.log_param(\"steps\", kwargs[\"steps\"])\n if \"max_steps\" in kwargs:\n mlflow.log_param(\"max_steps\", kwargs[\"max_steps\"])\n\n result = original(self, *args, **kwargs)\n\n # Flush the metrics queue after training completes\n _flush_queue()\n\n # Log Tensorboard event files as artifacts\n if os.path.exists(self.model_dir):\n for file in os.listdir(self.model_dir):\n if \"tfevents\" not in file:\n continue\n mlflow.log_artifact(\n local_path=os.path.join(self.model_dir, file),\n artifact_path=\"tensorboard_logs\",\n )\n return result\n\n def export_saved_model(original, self, *args, **kwargs):\n global _AUTOLOG_RUN_ID\n if _AUTOLOG_RUN_ID:\n _logger.info(\n \"Logging TensorFlow Estimator as MLflow Model to run with ID '%s'\", _AUTOLOG_RUN_ID\n )\n\n serialized = original(self, *args, **kwargs)\n\n def log_model_without_starting_new_run():\n \"\"\"\n Performs the exact same operations as `log_model` without starting a new run\n \"\"\"\n with TempDir() as tmp:\n artifact_path = \"model\"\n local_path = tmp.path(\"model\")\n mlflow_model = Model(artifact_path=artifact_path, run_id=_AUTOLOG_RUN_ID)\n save_model_kwargs = dict(\n tf_saved_model_dir=serialized.decode(\"utf-8\"),\n tf_meta_graph_tags=[tag_constants.SERVING],\n tf_signature_def_key=\"predict\",\n )\n save_model(path=local_path, mlflow_model=mlflow_model, **save_model_kwargs)\n client = MlflowClient()\n client.log_artifacts(_AUTOLOG_RUN_ID, local_path, artifact_path)\n\n try:\n client._record_logged_model(_AUTOLOG_RUN_ID, mlflow_model)\n except MlflowException:\n # We need to swallow all mlflow exceptions to maintain backwards\n # compatibility with older tracking servers. 
Only print out a warning\n # for now.\n _logger.warning(\n _LOG_MODEL_METADATA_WARNING_TEMPLATE,\n get_artifact_uri(_AUTOLOG_RUN_ID),\n )\n\n log_model_without_starting_new_run()\n\n _AUTOLOG_RUN_ID = None\n\n return serialized\n\n @picklable_exception_safe_function\n def _get_early_stop_callback(callbacks):\n for callback in callbacks:\n if isinstance(callback, tensorflow.keras.callbacks.EarlyStopping):\n return callback\n return None\n\n def _log_early_stop_callback_params(callback):\n if callback:\n try:\n earlystopping_params = {\n \"monitor\": callback.monitor,\n \"min_delta\": callback.min_delta,\n \"patience\": callback.patience,\n \"baseline\": callback.baseline,\n \"restore_best_weights\": callback.restore_best_weights,\n }\n mlflow.log_params(earlystopping_params)\n except Exception: # pylint: disable=W0703\n return\n\n def _get_early_stop_callback_attrs(callback):\n try:\n return callback.stopped_epoch, callback.restore_best_weights, callback.patience\n except Exception: # pylint: disable=W0703\n return None\n\n def _log_early_stop_callback_metrics(callback, history, metrics_logger):\n if callback is None or not callback.model.stop_training:\n return\n\n callback_attrs = _get_early_stop_callback_attrs(callback)\n if callback_attrs is None:\n return\n\n stopped_epoch, restore_best_weights, _ = callback_attrs\n metrics_logger.record_metrics({\"stopped_epoch\": stopped_epoch})\n\n if not restore_best_weights or callback.best_weights is None:\n return\n\n monitored_metric = history.history.get(callback.monitor)\n if not monitored_metric:\n return\n\n initial_epoch = history.epoch[0]\n # If `monitored_metric` contains multiple best values (e.g. [0.1, 0.1, 0.2] where 0.1 is\n # the minimum loss), the epoch corresponding to the first occurrence of the best value is\n # the best epoch. In keras > 2.6.0, the best epoch can be obtained via the `best_epoch`\n # attribute of an `EarlyStopping` instance: https://github.com/keras-team/keras/pull/15197\n restored_epoch = initial_epoch + monitored_metric.index(callback.best)\n metrics_logger.record_metrics({\"restored_epoch\": restored_epoch})\n restored_index = history.epoch.index(restored_epoch)\n restored_metrics = {\n key: metrics[restored_index] for key, metrics in history.history.items()\n }\n # Checking that a metric history exists\n metric_key = next(iter(history.history), None)\n if metric_key is not None:\n metrics_logger.record_metrics(restored_metrics, stopped_epoch + 1)\n\n class FitPatch(PatchFunction):\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"x\", \"y\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n early_stop_callback = None\n\n run_id = mlflow.active_run().info.run_id\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 6:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[5])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[5] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n early_stop_callback = _get_early_stop_callback(callbacks)\n _log_early_stop_callback_params(early_stop_callback)\n\n history = original(inst, *args, **kwargs)\n\n _log_early_stop_callback_metrics(\n callback=early_stop_callback,\n history=history,\n metrics_logger=metrics_logger,\n )\n\n _flush_queue()\n mlflow.log_artifacts(\n local_dir=self.log_dir.location,\n artifact_path=\"tensorboard_logs\",\n )\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return history\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n class FitGeneratorPatch(PatchFunction):\n \"\"\"\n NOTE: `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n To avoid unintentional creation of nested MLflow runs caused by a patched\n `fit_generator()` method calling a patched `fit()` method, we only patch\n `fit_generator()` in TF < 2.1.0.\n \"\"\"\n\n def __init__(self):\n self.log_dir = None\n\n def _patch_implementation(\n self, original, inst, *args, **kwargs\n ): # pylint: disable=arguments-differ\n unlogged_params = [\"self\", \"generator\", \"callbacks\", \"validation_data\", \"verbose\"]\n\n log_fn_args_as_params(original, args, kwargs, unlogged_params)\n\n run_id = mlflow.active_run().info.run_id\n\n with batch_metrics_logger(run_id) as metrics_logger:\n # Check if the 'callback' argument of fit() is set positionally\n if len(args) >= 5:\n # Convert the positional training function arguments to a list in order to\n # mutate the contents\n args = list(args)\n # Make a shallow copy of the preexisting callbacks to avoid permanently\n # modifying their contents for future training invocations. 
Introduce\n # TensorBoard & tf.keras callbacks if necessary\n callbacks = list(args[4])\n callbacks, self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n # Replace the callbacks positional entry in the copied arguments and convert\n # the arguments back to tuple form for usage in the training function\n args[4] = callbacks\n args = tuple(args)\n else:\n # Make a shallow copy of the preexisting callbacks and introduce TensorBoard\n # & tf.keras callbacks if necessary\n callbacks = list(kwargs.get(\"callbacks\") or [])\n kwargs[\"callbacks\"], self.log_dir = _setup_callbacks(\n callbacks, log_models, metrics_logger\n )\n\n result = original(inst, *args, **kwargs)\n\n _flush_queue()\n mlflow.log_artifacts(local_dir=self.log_dir.location, artifact_path=\"tensorboard_logs\")\n if self.log_dir.is_temp:\n shutil.rmtree(self.log_dir.location)\n\n return result\n\n def _on_exception(self, exception):\n if (\n self.log_dir is not None\n and self.log_dir.is_temp\n and os.path.exists(self.log_dir.location)\n ):\n shutil.rmtree(self.log_dir.location)\n\n def add_event(original, self, event):\n _log_event(event)\n return original(self, event)\n\n def add_summary(original, self, *args, **kwargs):\n result = original(self, *args, **kwargs)\n _flush_queue()\n return result\n\n managed = [\n (tensorflow.estimator.Estimator, \"train\", train),\n (tensorflow.keras.Model, \"fit\", FitPatch),\n ]\n\n if Version(tensorflow.__version__) < Version(\"2.1.0\"):\n # `fit_generator()` is deprecated in TF >= 2.1.0 and simply wraps `fit()`.\n # To avoid unintentional creation of nested MLflow runs caused by a patched\n # `fit_generator()` method calling a patched `fit()` method, we only patch\n # `fit_generator()` in TF < 2.1.0\n managed.append((tensorflow.keras.Model, \"fit_generator\", FitGeneratorPatch))\n\n non_managed = [\n (EventFileWriter, \"add_event\", add_event),\n (EventFileWriterV2, \"add_event\", add_event),\n (FileWriter, \"add_summary\", add_summary),\n (tensorflow.estimator.Estimator, \"export_saved_model\", export_saved_model),\n (tensorflow.estimator.Estimator, \"export_savedmodel\", export_saved_model),\n ]\n\n # Add compat.v1 Estimator patching for versions of tensfor that are 2.0+.\n if Version(tensorflow.__version__) >= Version(\"2.0.0\"):\n old_estimator_class = tensorflow.compat.v1.estimator.Estimator\n v1_train = (old_estimator_class, \"train\", train)\n v1_export_saved_model = (old_estimator_class, \"export_saved_model\", export_saved_model)\n v1_export_savedmodel = (old_estimator_class, \"export_savedmodel\", export_saved_model)\n\n managed.append(v1_train)\n non_managed.append(v1_export_saved_model)\n non_managed.append(v1_export_savedmodel)\n\n for p in managed:\n safe_patch(FLAVOR_NAME, *p, manage_run=True)\n\n for p in non_managed:\n safe_patch(FLAVOR_NAME, *p)", "def print_network(model, name):\r\n num_params = 0\r\n for p in model.parameters():\r\n num_params += p.numel()\r\n print(model)\r\n print(name)\r\n print(\"The number of parameters: {}\".format(num_params))", "def log_results(best_model, model_name, max_features, train_score, test_score,\n score_fp):\n\n # ensure the directorys where metrics are stored are created\n if not os.path.exists(os.path.dirname(score_fp)):\n os.makedirs(os.path.dirname(score_fp), exist_ok=True)\n\n st = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n with open(score_fp, 'a+') as f:\n f.write(st + '\\n')\n f.write('-' * 100 + '\\n')\n f.write('Model Run: {}\\n\\n'.format(model_name))\n f.write('Params: 
{}\\n\\n'.format(best_model.get_params())) \n f.write('Max features: {}\\n\\n'.format(max_features))\n f.write('Train Score: {}\\n\\n'.format(train_score))\n f.write('Test Score: {}\\n\\n'.format(test_score))", "def train_logger(model_fn):\n\n @functools.wraps(model_fn)\n def wrapper(*args, **kwargs):\n timer_start = time.perf_counter()\n model = model_fn(*args, **kwargs)\n timer_end = time.perf_counter()\n\n time_stamp = time.localtime()\n model_version = MODEL_VERSION\n run_time = timer_end - timer_start\n\n log_entry = [time_stamp, MODEL_VERSION, run_time]\n\n header = \\\n ','.join(TRAIN_HEADER) \\\n if os.path.exists(CWD, LOG_PATH, LOG_FILE.format(LOG_TYPES['T'])) \\\n else False\n\n create_or_update_log(LOG_TYPES['T'], log_entry, header)\n\n print('Logging: train logger')\n\n return model\n return wrapper", "def LogProgress(model, writer, test_loader, epoch, device): \n\n model.eval() \n sequential = test_loader\n sample_batched = next(iter(sequential))\n \n image = torch.Tensor(sample_batched[\"image\"]).to(device)\n depth = torch.Tensor(sample_batched[\"depth\"]).to(device)\n \n if epoch == 0:\n writer.add_image(\"Train.1.Image\", vision_utils.make_grid(image.data, nrow=6, normalize=True), epoch)\n if epoch == 0:\n writer.add_image(\"Train.2.Image\", colorize(vision_utils.make_grid(depth.data, nrow=6, normalize=False)), epoch)\n \n output = DepthNorm(model(image))\n\n writer.add_image(\"Train.3.Ours\", colorize(vision_utils.make_grid(output.data, nrow=6, normalize=False)), epoch)\n writer.add_image(\"Train.4.Diff\", colorize(vision_utils.make_grid(torch.abs(output-depth).data, nrow=6, normalize=False)), epoch)\n \n del image\n del depth\n del output", "def log_model(self, model_name=\"fixmatch_model\"):\n \n assert hasattr(self, \"_mlflow\"), \"need to run track_with_mlflow() first\"\n from mlflow.keras import log_model\n log_model(self._models[\"full\"], model_name)", "def print_trainable_params(scope=None):\n n_params = 0\n print('name \\t| shape \\t| num parameters')\n\n for var in tf.trainable_variables(scope):\n # shape is an array of tf.Dimension\n shape = var.get_shape()\n n_elems = shape.num_elements()\n print(var.name, shape, n_elems)\n n_params += n_elems\n\n print('Total parameters:', n_params)", "def add_summary(self):\n merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.FLAGS.model_dir, self.session.graph)", "def send_log():\n log.info(f\"UUID={UUID}\")\n log.info(f\"SPLIT={SPLIT}\")\n log.info(f\"BATCH_SIZE={BATCH_SIZE}\")\n log.info(f\"EPOCHS={EPOCHS}\")\n log.info(f\"PATIENCE={PATIENCE}\")\n log.info(f\"X_FREQ={X_FREQ}\")\n log.info(f\"LOOK_BACK={LOOK_BACK}\")\n log.info(f\"LOOK_AHEAD={LOOK_AHEAD}\")\n log.info(f\"KERNEL_SIZE={KERNEL_SIZE}\")\n log.info(f\"FILTERS={FILTERS}\")\n log.info(f\"L1L2={L1L2}\")\n log.info(f\"D1={D1}\")\n log.info(f\"D2={D2}\")\n log.info(f\"DOUT={DOUT}\")\n log.info(f\"PLOT={PLOT}\")\n log.info(f\"SHUFFLE={SHUFFLE}\")", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). 
New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def summary(self):\n\t\tself.writer = tf.summary.FileWriter(\n\t\t\t\t'./graphs/AttNCF', tf.get_default_graph())\n\t\twith tf.name_scope(\"summaries\"):\n\t\t\ttf.summary.scalar('loss', self.loss)\n\t\t\tself.summary_op = tf.summary.merge_all()", "def log_train_summary(step: int,\n *,\n writer: metric_writers.MetricWriter,\n train_metrics: Sequence[Dict[str, Tuple[float, int]]],\n extra_training_logs: Optional[Sequence[Dict[str,\n Any]]] = None,\n metrics_normalizer_fn: Optional[\n Callable[[Dict[str, Tuple[float, int]], str],\n Dict[str, float]]] = None,\n prefix: str = 'train',\n key_separator: str = '_') -> Dict[str, float]:\n ##### Prepare metrics:\n # Get metrics from devices:\n train_metrics = stack_forest(train_metrics)\n # Compute the sum over all examples in all batches:\n train_metrics_summary = jax.tree_map(lambda x: x.sum(), train_metrics)\n # Normalize metrics by the total number of exampels:\n metrics_normalizer_fn = metrics_normalizer_fn or normalize_metrics_summary\n train_metrics_summary = metrics_normalizer_fn(train_metrics_summary, 'train')\n\n ##### Prepare additional training logs:\n # If None, set to an empty dictionary.\n extra_training_logs = extra_training_logs or {}\n train_logs = stack_forest(extra_training_logs)\n\n # Metrics:\n writer.write_scalars(\n step, {\n key_separator.join((prefix, key)): val\n for key, val in train_metrics_summary.items()\n })\n # Additional logs:\n writer.write_scalars(step,\n {key: val.mean() for key, val in train_logs.items()})\n\n writer.flush()\n return train_metrics_summary", "def model_summary_to_file(model, save_path):\n with open(save_path, 'w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + \"\\n\"))", "def test_01_train(self):\n today = date.today()\n log_file = os.path.join(LOG_DIR, \"{}-train-{}-{}.log\".format(LOG_PREFIX, today.year, today.month))\n if os.path.exists(log_file):\n os.remove(log_file)\n \n ## update the log\n country = 'india'\n date_range = ('2017-11-29', '2019-05-24')\n metric = {'rmse':0.5}\n runtime = \"00:00:01\"\n model_version = 0.1\n model_version_note = \"test model\"\n \n update_train_log(country, date_range, metric, runtime,\n model_version, model_version_note, test=True, prefix=LOG_PREFIX)\n\n self.assertTrue(os.path.exists(log_file))", "def create_summary_statistics(forward_accuracy, backward_accuracy, merged_accuracy):\n summary_statistics = open(f'summary_statistics.txt', 'a')\n summary_statistics.write(f'The forward model has an accuracy of: {forward_accuracy}\\n')\n summary_statistics.write(f'The backward model has an accuracy of: {backward_accuracy}\\n')\n summary_statistics.write(f'The merged model has an accuracy of: {merged_accuracy}\\n')\n summary_statistics.close()", "def print_statistics(self) -> None:\n e = self.current_epoch\n if len(self.loss_history[\"test_loss\"]) > 0:\n template = 'Epoch: {} Training loss: {:.4f}, Test loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1],\n self.loss_history[\"test_loss\"][-1]))\n else:\n template = 'Epoch: {} Training loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1]))", "def summary(self):\n if 
self.model_type == 2:\n if self.std is None:\n print(\n dedent(\n f\"\"\"\\\n Oaxaca-Blinder Two-fold Effects\n Unexplained Effect: {self.params[0]:.5f}\n Explained Effect: {self.params[1]:.5f}\n Gap: {self.params[2]:.5f}\"\"\"\n )\n )\n else:\n print(\n dedent(\n \"\"\"\\\n Oaxaca-Blinder Two-fold Effects\n Unexplained Effect: {:.5f}\n Unexplained Standard Error: {:.5f}\n Explained Effect: {:.5f}\n Explained Standard Error: {:.5f}\n Gap: {:.5f}\"\"\".format(\n self.params[0],\n self.std[0],\n self.params[1],\n self.std[1],\n self.params[2],\n )\n )\n )\n if self.model_type == 3:\n if self.std is None:\n print(\n dedent(\n f\"\"\"\\\n Oaxaca-Blinder Three-fold Effects\n Endowment Effect: {self.params[0]:.5f}\n Coefficient Effect: {self.params[1]:.5f}\n Interaction Effect: {self.params[2]:.5f}\n Gap: {self.params[3]:.5f}\"\"\"\n )\n )\n else:\n print(\n dedent(\n f\"\"\"\\\n Oaxaca-Blinder Three-fold Effects\n Endowment Effect: {self.params[0]:.5f}\n Endowment Standard Error: {self.std[0]:.5f}\n Coefficient Effect: {self.params[1]:.5f}\n Coefficient Standard Error: {self.std[1]:.5f}\n Interaction Effect: {self.params[2]:.5f}\n Interaction Standard Error: {self.std[2]:.5f}\n Gap: {self.params[3]:.5f}\"\"\"\n )\n )", "def inspect_state(self):\n for name in self._param_store.get_all_param_names():\n self._logger.info(\"Param [%s]: %r\", name,\n pyro.param(name).data.numpy())", "def summary(self, *args, parameters_to_show=4, **kwargs):\n string = super(MultiAnalysisRead, self).summary(\n show_parameters=False, show_nsamples=False\n )\n string += \"analyses: {}\\n\\n\".format(\", \".join(self.labels))\n for num, label in enumerate(self.labels):\n string += \"{}\\n\".format(label)\n string += \"-\" * len(label) + \"\\n\"\n string += \"description: {}\\n\".format(self.description[label])\n string += \"nsamples: {}\\n\".format(len(self.samples[num]))\n string += \"parameters: {}\\n\\n\".format(\n self._parameter_summary(\n self.parameters[num], parameters_to_show=parameters_to_show\n )\n )\n return string[:-2]", "def printModelAndTime(self):\n import time\n self._reporter.writeOutput(\"Model name = \" + self.modelName + '\\n' +\n \"Output directory = \" + self._outputDir_ + '\\n' +\n \"Time = \" + time.asctime() + '\\n')\n return", "def on_train_begin(self, logs=None):\n pass", "def print_layer_trainable(model_name):\n\n print('trainable : layer name')\n print('- '*30)\n for layer in model_name.layers:\n # if layer.trainable:\n print(\"{0}:\\t{1}\".format(layer.trainable, layer.name))\n \n return", "def write_info(model, dataset, batch_size, learning_rate, epochs):\n file_info = open(os.path.join(model['model_dir'], 'info.json'), 'w')\n dic_info = {\n 'cameras': dataset['num_cameras'],\n 'dataset': dataset['dataset_dir'],\n 'max_demos': dataset['max_demos'],\n 'resume': model['resume'],\n 'epochs': epochs,\n 'learning_rate': learning_rate,\n 'batch_size': batch_size\n }\n file_info.write(json.dumps(dic_info, cls=NumpyEncoder))\n file_info.close()", "def show_learning_stats(track, train_loss, train_acc, valid_acc, test_acc):\n\n if track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f} -- Test acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc, test_acc))\n\n if track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Val acc: {:.4f}\".format(\n train_loss, train_acc, valid_acc))\n\n if not track[\"valid\"] and track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} -- Test acc: 
{:.4f}\".format(\n train_loss, train_acc, test_acc))\n\n if not track[\"valid\"] and not track[\"test\"]:\n print(\"Train loss: {:.4f} -- Train acc: {:.4f} \".format(\n train_loss, train_acc))", "def stdout_logging_callback(epoch_n, batch_n, model, optimizer):\n if not batch_n % 100:\n logger.debug(f\"Epoch {epoch_n}, batch {batch_n}\")", "def log_step(\n metric_dict={},\n mode='train',\n writer=None,\n global_step=0,\n elapsed_eta=None,\n training_speed=None\n):\n log_msg = '[{mode}] step: {step}'\n log_msg = log_msg.format(\n mode=mode,\n step=global_step,\n )\n for key, value in metric_dict.items():\n log_msg += ' - {}: {}'.format(key, round(value, 4))\n\n # Write to tensorboard\n if writer is not None:\n for key, value in metric_dict.items():\n writer.add_scalar(key, value, global_step=global_step)\n\n if elapsed_eta is not None:\n log_msg += ' - elapsed: {} - eta: {}'.format(\n datetime.timedelta(seconds=int(elapsed_eta[0])),\n datetime.timedelta(seconds=int(elapsed_eta[1]))\n )\n if writer is not None:\n writer.add_scalar('eta', elapsed_eta[1], global_step=global_step)\n\n if training_speed is not None:\n log_msg += ' - step/sec: {:.4f}'.format(training_speed)\n if writer is not None:\n writer.add_scalar(\n 'step/sec', training_speed, global_step=global_step)\n\n logger.info(log_msg)", "def get_summary(self):\n return self.model.summary()" ]
[ "0.7465972", "0.73062223", "0.68315184", "0.6818728", "0.67563677", "0.67499065", "0.673236", "0.6722741", "0.66750515", "0.6649921", "0.6504973", "0.64990056", "0.64763284", "0.64610624", "0.64330405", "0.64009935", "0.63829356", "0.6330708", "0.62939197", "0.62824273", "0.62676394", "0.6233499", "0.62318116", "0.6189634", "0.617957", "0.61774755", "0.61599916", "0.6115774", "0.6064826", "0.60516363", "0.60356295", "0.60329044", "0.6015165", "0.5992067", "0.5985521", "0.5952695", "0.5949994", "0.5949994", "0.59498113", "0.5947", "0.5939939", "0.59383816", "0.59383816", "0.59383816", "0.59383816", "0.59301627", "0.5920307", "0.59168065", "0.59037703", "0.59016293", "0.5898406", "0.58826697", "0.5871419", "0.5871419", "0.5865958", "0.5865037", "0.58586055", "0.5851803", "0.5840603", "0.58366513", "0.5822442", "0.58201534", "0.5814321", "0.5809398", "0.5807674", "0.5806668", "0.57931215", "0.57931215", "0.57787657", "0.57775223", "0.5771644", "0.5771357", "0.5732636", "0.57266724", "0.5725727", "0.57127154", "0.5706001", "0.5698657", "0.56972075", "0.5669949", "0.56528795", "0.56440693", "0.5643741", "0.5640987", "0.5639169", "0.56337357", "0.5632479", "0.5624435", "0.5622453", "0.5605884", "0.5601703", "0.55937994", "0.5577046", "0.5557792", "0.554924", "0.5547681", "0.554533", "0.5534946", "0.5531204", "0.55307555" ]
0.60460424
30
Create a model with temperature scaling by wrapping the result of config.create_model with ModelWithTemperature if a temperature scaling config has been provided; otherwise, return the result of config.create_model
def create_model_with_temperature_scaling(config: ModelConfigBase) -> Any: # wrap the model around a temperature scaling model if required model = config.create_model() if isinstance(config, SequenceModelBase) and config.temperature_scaling_config: model = ModelWithTemperature(model, config.temperature_scaling_config) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)", "def do_create_model(**kwargs):\n model_params = {\n 'name': kwargs['dag_run'].conf.get('model_name'),\n 'description': 'A custom DNN regressor model',\n 'regions': [REGION]\n }\n\n ti = kwargs['ti']\n\n is_model = ti.xcom_pull(key='is_project', task_ids='check_model')\n if not is_model:\n mle = MLEngineHook()\n mle.create_model(PROJECT, model_params)", "def model(self, **config_kwargs):\n measurement = self.get_measurement(**config_kwargs)\n log.debug(\n 'model being created for measurement {0:s}'.format(measurement['name'])\n )\n\n patches = config_kwargs.get('patches', [])\n\n modelspec = {\n 'channels': self.spec['channels'],\n 'parameters': measurement['config']['parameters'],\n }\n for patch in patches:\n modelspec = jsonpatch.JsonPatch(patch).apply(modelspec)\n\n return Model(modelspec, poiname=measurement['config']['poi'], **config_kwargs)", "def create(model: TModel) -> ModelTransformer:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.graph.model_transformer import ONNXModelTransformer\n\n return ONNXModelTransformer(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.graph.model_transformer import OVModelTransformer\n\n return OVModelTransformer(model)\n if model_backend == BackendType.TORCH:\n from nncf.torch.model_transformer import PTModelTransformer\n\n return PTModelTransformer(model)\n raise RuntimeError(\n \"Cannot create backend-specific model transformer because {} is not supported!\".format(model_backend)\n )", "def create_scaling_model(params, experiments, reflections):\n autos = [None, Auto, \"auto\", \"Auto\"]\n use_auto_model = params.model in autos\n # Determine non-auto model to use outside the loop over datasets.\n if not use_auto_model:\n model_class = None\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == params.model:\n model_class = entry_point.load()\n break\n if not model_class:\n raise ValueError(f\"Unable to create scaling model of type {params.model}\")\n\n for expt, refl in zip(experiments, reflections):\n if not expt.scaling_model or params.overwrite_existing_models:\n # need to make a new model\n if use_auto_model:\n if not expt.scan:\n model = KBScalingModel\n else: # set model as physical unless scan < 1.0 degree\n osc_range = expt.scan.get_oscillation_range()\n abs_osc_range = abs(osc_range[1] - osc_range[0])\n if abs_osc_range < 1.0:\n model = KBScalingModel\n else:\n model = PhysicalScalingModel\n else:\n model = model_class\n expt.scaling_model = model.from_data(params, expt, refl)\n else:\n # allow for updating of an existing model.\n expt.scaling_model.update(params)\n return experiments", "def calibrate_temperature(task_id, data, mnet, hnet, hhnet, device, config,\n shared, logger, writer, cal_per_model=False,\n only_correctly_classified=False,\n cal_target_entropy=-1):\n logger.info('Temperature calibration for task %d ...' % (task_id+1))\n\n # FIXME We could also follow the code from\n # https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py\n # but they don't consider BNNs. Note, there code is much more efficient\n # since they compute the logits before entering the training loop (which\n # is possible when only having one model). 
Though, in general, we have\n # multiple models.\n\n set_train_mode(True, mnet, hnet, hhnet, None)\n\n gauss_main = False\n if isinstance(mnet, GaussianBNNWrapper):\n gauss_main = True\n\n # Whether the hypernet represents an implicit distribution (i.e., it's\n # input is a random variable), or whether it has task embeddings as input.\n det_hnet = False\n if hnet is not None:\n if hnet.num_known_conds > 0:\n assert hhnet is None\n\n det_hnet = True\n # Can currently only be the case if we train a BbB setup with option\n # `mean_only` enabled.\n if not gauss_main:\n assert hasattr(config, 'mean_only') and config.mean_only\n\n # The single parameter to be tuned by this method.\n temp_param = torch.nn.Parameter(shared.softmax_temp[task_id],\n requires_grad=True)\n assert temp_param == 1.\n\n # Which temperature transfer function to use during training. Note, this\n # can ensure that temperatures don't become negative.\n # ttf = temperature transfer function\n ttf_choice = 'softplus'\n if ttf_choice == 'linear':\n ttf = lambda x : x\n #torch.nn.init.ones_(temp_param.data)\n elif ttf_choice == 'exp':\n ttf = torch.exp\n torch.nn.init.zeros_(temp_param.data)\n else:\n ttf = F.softplus\n temp_param.data = torch.log(torch.exp(torch.ones(1)) - \\\n torch.ones(1)).to(device)\n\n allowed_outputs = pmutils.out_units_of_task(config, data, task_id,\n config.num_tasks)\n\n optimizer = tutils.get_optimizer([temp_param], config.lr,\n momentum=config.momentum, weight_decay=config.weight_decay,\n use_adam=config.use_adam, adam_beta1=config.adam_beta1,\n use_rmsprop=config.use_rmsprop, use_adadelta=config.use_adadelta,\n use_adagrad=config.use_adagrad)\n\n mnet_kwargs = pmutils.mnet_kwargs(config, task_id, mnet)\n\n num_w_samples = config.train_sample_size if config.cal_sample_size == -1 \\\n else config.cal_sample_size\n\n with torch.no_grad():\n # We don't change any network parameters, so these calls produce\n # constant outputs.\n theta_current = None\n if hhnet is not None:\n theta_current = hhnet.forward(cond_id=task_id)\n theta_current = [p.detach() for p in theta_current]\n\n if gauss_main:\n assert hhnet is None\n\n if hnet is not None:\n hnet_out = hnet.forward(cond_id=task_id)\n else:\n hnet_out = None\n w_mean, w_rho = mnet.extract_mean_and_rho(weights=hnet_out)\n w_std = putils.decode_diag_gauss(w_rho,\n logvar_enc=mnet.logvar_encoding)\n\n elif det_hnet:\n w_mean = hnet.forward(cond_id=task_id)\n\n ### We first compute the logit outputs over all samples for all models,\n ### since they don't change anymore.\n # FIXME Could lead to memory issues for large datasets and might not be\n # inefficient if ``config.cal_temp_iter`` is small, since we won't\n # iterate over the whole dataset.\n inputs = data.get_train_inputs()\n targets = data.get_train_outputs()\n\n T = data.output_to_torch_tensor(targets, device, mode='train')\n # Modify 1-hot encodings according to CL scenario.\n assert T.shape[1] == data.num_classes\n # In CL1, CL2 and CL3 (with seperate heads) we do not have to modify the\n # targets.\n if config.cl_scenario == 3 and not config.split_head_cl3:\n raise NotImplementedError('Temperature calibration not ' +\n 'implemented for CL3 without split-head.')\n\n _, labels = torch.max(T, 1) # Integer labels.\n #labels = labels.detach()\n\n num_samples = inputs.shape[0]\n\n logit_outputs = torch.empty((num_w_samples, num_samples, T.shape[1])). 
\\\n to(device)\n\n for j in range(num_w_samples):\n if gauss_main: # Gaussian weight posterior.\n # In case of the local-reparam trick, we anyway have a different\n # weight per sample. So, the demand of having the same model for\n # all samples in the dataset drops.\n if config.local_reparam_trick:\n # Note, the sampling will happen inside the forward method.\n weights = None\n emean = w_mean\n erho = w_rho\n else:\n weights = putils.sample_diag_gauss(w_mean, w_std,\n is_radial=config.radial_bnn)\n emean = None\n erho = None\n\n elif det_hnet:\n weights = w_mean\n\n else:\n if hnet is not None: # Implicit hypernetwork.\n z = torch.normal(torch.zeros(1, shared.noise_dim),\n config.latent_std).to(device)\n weights = hnet.forward(uncond_input=z,\n weights=theta_current)\n else: # Main network only training.\n weights = None\n\n # I use the validation batch size on purpose, since it is usually\n # bigger and we just want to quickly compute the logits.\n curr_bs = config.val_batch_size\n n_processed = 0\n\n while n_processed < num_samples:\n if n_processed + curr_bs > num_samples:\n curr_bs = num_samples - n_processed\n n_processed += curr_bs\n\n sind = n_processed - curr_bs\n eind = n_processed\n\n ### Compute negative log-likelihood (NLL).\n X = data.input_to_torch_tensor(inputs[sind:eind, :], device,\n mode='train')\n\n if gauss_main:\n Y = mnet.forward(X, weights=None, mean_only=False,\n extracted_mean=emean, extracted_rho=erho,\n sample=weights, **mnet_kwargs)\n else:\n Y = mnet.forward(X, weights=weights, **mnet_kwargs)\n\n if allowed_outputs is not None:\n Y = Y[:, allowed_outputs]\n\n logit_outputs[j, sind:eind, :] = Y\n\n # Since we computed all training logits, we might as well compute\n # the training accuracy on the predictive distributions at temperature 1\n # (note, temperature doesn't change predicted labels).\n pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)\n assert pred_dists.ndim == 2\n _, pred_labels = torch.max(pred_dists, 1)\n train_acc = 100. * torch.sum(pred_labels == labels) / num_samples\n logger.debug('Task %d -- training accuracy: %.2f%%.' % \\\n (task_id+1, train_acc))\n\n log_pred_dists = torch.log(torch.clamp(pred_dists, min=1e-5))\n in_entropies = -torch.sum(pred_dists * log_pred_dists, dim=1)\n\n # Normalize by maximum entropy.\n max_ent = - np.log(1.0 / data.num_classes)\n in_entropies /= max_ent\n\n in_entropies_mean = in_entropies.mean()\n in_entropies_std = in_entropies.std()\n logger.debug('Task %d -- training in-dist. entropy: %f.' 
% \\\n (task_id+1, in_entropies_mean))\n\n if not hasattr(shared, 'train_in_ent_mean'):\n shared.train_in_ent_mean = []\n shared.train_in_ent_std = []\n shared.train_in_ent_mean.append( \\\n in_entropies_mean.detach().cpu().numpy())\n shared.train_in_ent_std.append(in_entropies_std.detach().cpu().numpy())\n\n if only_correctly_classified:\n num_correct = torch.sum(pred_labels == labels)\n\n logger.info('Task %d -- only using %d/%d correctly classified ' \\\n % (task_id+1, num_correct, num_samples) + \\\n 'samples for calibration.')\n\n logit_outputs = logit_outputs[:, pred_labels == labels, :]\n num_samples = num_correct\n assert logit_outputs.shape[1] == num_correct\n\n labels = labels[pred_labels == labels]\n assert labels.shape[0] == num_correct\n\n # Sanity check!\n pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)\n _, pred_labels = torch.max(pred_dists, 1)\n assert torch.sum(pred_labels == labels) == num_correct\n\n logit_outputs = logit_outputs.detach()\n\n ### Calibrate temperature.\n for i in range(config.cal_temp_iter):\n optimizer.zero_grad()\n\n batch_inds = np.random.randint(0, num_samples, config.batch_size)\n\n batch_logits = logit_outputs[:, batch_inds, :]\n batch_labels = labels[batch_inds]\n assert batch_logits.ndim == 3\n\n # Note, this first option is more numerically stable when calibrating NLL.\n if cal_per_model or num_w_samples == 1:\n loss = 0\n for j in range(num_w_samples):\n if cal_target_entropy != -1:\n batch_sm = F.softmax(batch_logits[j, :, :] / \\\n ttf(temp_param), dim=1)\n # For numerical stability.\n batch_log_sm = torch.log(torch.clamp(batch_sm, min=1e-5))\n\n # Mean entropy within the batch.\n batch_entropy = -torch.sum(batch_sm * batch_log_sm,\n dim=1).mean()\n\n loss += (batch_entropy - cal_target_entropy)**2\n else: # Compute NLL loss\n # Note, softmax will be computed inside the `cross_entropy`.\n loss += F.cross_entropy( \\\n batch_logits[j, :, :] / ttf(temp_param), batch_labels,\n reduction='mean')\n loss /= num_w_samples\n\n else:\n batch_pred_dist = F.softmax(batch_logits / ttf(temp_param),\n dim=2).mean(dim=0)\n # FIXME nll_loss expects log_softmax as input. 
To compute the\n # predictive distribution, we have to first average softmax outputs\n # before we can apply the log, which might lead to numerical\n # instabilities.\n #batch_log_pd = batch_pred_dist\n #batch_log_pd[batch_pred_dist < 1e-5] = 1e-5\n batch_log_pd = torch.clamp(batch_pred_dist, min=1e-5)\n batch_log_pd = torch.log(batch_log_pd)\n if cal_target_entropy != -1:\n # Mean entropy within the batch.\n batch_entropy = -torch.sum(batch_pred_dist * batch_log_pd,\n dim=1).mean()\n\n loss += (batch_entropy - cal_target_entropy)**2\n else: # Compute NLL loss\n loss = F.nll_loss(batch_log_pd, batch_labels, reduction='mean')\n\n loss.backward()\n if config.clip_grad_value != -1:\n torch.nn.utils.clip_grad_value_(optimizer.param_groups[0]['params'],\n config.clip_grad_value)\n elif config.clip_grad_norm != -1:\n torch.nn.utils.clip_grad_norm_(optimizer.param_groups[0]['params'],\n config.clip_grad_norm)\n optimizer.step()\n\n if ttf_choice == 'linear':\n # NOTE In this case, nothing prevents the temperature from going\n # negative (e.g., when starting with a large learning rate).\n # Therefore, we have to actively capture this case.\n temp_param.data = torch.clamp(temp_param, min=1e-5)\n\n if i % 50 == 0:\n writer.add_scalar('cal/task_%d/loss' % task_id, loss, i)\n writer.add_scalar('cal/task_%d/temp' % task_id,\n ttf(temp_param), i)\n\n final_temp = ttf(temp_param).data\n shared.softmax_temp[task_id] = final_temp.data\n\n logger.info('Calibrated softmax temperature of task %d is: %f.' % \\\n (task_id+1, final_temp))\n\n logger.info('Temperature calibration for task %d ... Done' % (task_id+1))", "def create(self, req, body):\n context = req.environ['meteos.context']\n\n if not self.is_valid_body(body, 'model'):\n raise exc.HTTPUnprocessableEntity()\n\n model = body['model']\n\n LOG.debug(\"Create model with request: %s\", model)\n\n try:\n experiment = self.engine_api.get_experiment(\n context, model['experiment_id'])\n utils.is_valid_status(experiment.__class__.__name__,\n experiment.status,\n constants.STATUS_AVAILABLE)\n template = self.engine_api.get_template(\n context, experiment.template_id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n except exception.InvalidStatus:\n raise\n\n display_name = model.get('display_name')\n display_description = model.get('display_description')\n experiment_id = model.get('experiment_id')\n source_dataset_url = model.get('source_dataset_url')\n dataset_format = model.get('dataset_format', 'csv')\n model_type = model.get('model_type')\n model_params = model.get('model_params')\n swift_tenant = model.get('swift_tenant')\n swift_username = model.get('swift_username')\n swift_password = model.get('swift_password')\n\n new_model = self.engine_api.create_model(context,\n display_name,\n display_description,\n source_dataset_url,\n dataset_format,\n model_type,\n model_params,\n template.id,\n template.job_template_id,\n experiment_id,\n experiment.cluster_id,\n swift_tenant,\n swift_username,\n swift_password)\n\n return self._view_builder.detail(req, new_model)", "def create_shunt_model(self):\r\n\r\n print('\\nCreate shunt model')\r\n\r\n if not self.original_model:\r\n raise ValueError('Original model not yet initialized! Either call create_original_model or set it manually.')\r\n if not self.shunt_params:\r\n raise ValueError('No parameters found in config for shunt model! 
Create the field [SHUNT_MODEL]')\r\n\r\n logging.info('')\r\n logging.info('#######################################################################################################')\r\n logging.info('############################################ SHUNT MODEL ##############################################')\r\n logging.info('#######################################################################################################')\r\n logging.info('')\r\n\r\n dilation_rate_input, dilation_rate_output = find_input_output_dilation_rates(self.original_model, self.shunt_params['locations'])\r\n\r\n print('Used dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('Creating shunt with dilation rates: {}'.format(Architectures.get_dilation_rates(self.shunt_params['arch'], dilation_rate_input, dilation_rate_output)))\r\n logging.info('')\r\n\r\n with self.activate_distribution_scope():\r\n if self.shunt_params['from_file']:\r\n self.shunt_model = keras.models.load_model(self.shunt_params['filepath'])\r\n print('Shunt model loaded successfully!')\r\n else:\r\n input_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][0]).input_shape[1:]\r\n if isinstance(input_shape_shunt, list):\r\n input_shape_shunt = input_shape_shunt[0][1:]\r\n output_shape_shunt = self.original_model.get_layer(index=self.shunt_params['locations'][1]).output_shape[1:]\r\n if isinstance(output_shape_shunt, list):\r\n output_shape_shunt = output_shape_shunt[0][1:]\r\n\r\n self.shunt_model = Architectures.createShunt(input_shape_shunt,\r\n output_shape_shunt,\r\n arch=self.shunt_params['arch'],\r\n use_se=False,\r\n dilation_rate_input=dilation_rate_input,\r\n dilation_rate_output=dilation_rate_output,\r\n expansion_factor=1.0)\r\n\r\n if self.shunt_params['pretrained']:\r\n self.shunt_model.load_weights(self.shunt_params['weightspath'])\r\n print('Shunt weights loaded successfully!')\r\n\r\n self.shunt_model.summary(print_fn=self.logger.info, line_length=150)\r\n\r\n keras.models.save_model(self.shunt_model, Path(self.folder_name_logging, \"shunt_model.h5\"))\r\n logging.info('')\r\n logging.info('Shunt model saved to {}'.format(self.folder_name_logging))\r\n\r\n # calculate flops\r\n flops_shunt = calculate_flops_model(self.shunt_model)\r\n self.flops_dict['shunt'] = flops_shunt\r\n logging.info('')\r\n logging.info('FLOPs of shunt model: {}'.format(flops_shunt))", "def create_model(self, model_config):\n\n return self.conn.create_model(\n **model_config)", "def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model", "def create(self) -> tf.keras.models.Model:\n 
raise NotImplementedError()", "def create_model(model_name, random_state, epoch, device, log_path, **hparams):\n model = eval(f'{model_name}')(\n **hparams, epoch=int(epoch), random_state=random_state, device=device,\n log_path=log_path\n )\n\n return model", "def get_model(model_name, model_config, to_cuda,\n uniform_initialize_bn_weight=False, forward_is_infer=False):\n model = None\n if model_name == 'Tacotron2':\n if forward_is_infer:\n class Tacotron2__forward_is_infer(Tacotron2):\n def forward(self, inputs, input_lengths):\n return self.infer(inputs, input_lengths)\n model = Tacotron2__forward_is_infer(**model_config)\n else:\n model = Tacotron2(**model_config)\n elif model_name == 'WaveGlow':\n if forward_is_infer:\n class WaveGlow__forward_is_infer(WaveGlow):\n def forward(self, spect, sigma=1.0):\n return self.infer(spect, sigma)\n model = WaveGlow__forward_is_infer(**model_config)\n else:\n model = WaveGlow(**model_config)\n else:\n raise NotImplementedError(model_name)\n\n if uniform_initialize_bn_weight:\n init_bn(model)\n\n if to_cuda:\n model = model.cuda()\n return model", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def create_mean_teacher_model(self) -> None:\n self._mean_teacher_model = create_model_with_temperature_scaling(self.config)", "def config_task(self) -> None:\n weights = self.hyperparams[\"weights\"]\n\n if self.hyperparams[\"model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['model']}' is not valid. 
\"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n\n class_weights = None\n if isinstance(self.class_weights, torch.Tensor):\n class_weights = self.class_weights.to(dtype=torch.float32)\n elif hasattr(self.class_weights, \"__array__\") or self.class_weights:\n class_weights = torch.tensor(self.class_weights, dtype=torch.float32)\n\n self.loss = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=class_weights\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. \"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n if self.hyperparams[\"model\"] != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hyperparams.get(\"freeze_backbone\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hyperparams.get(\"freeze_decoder\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False", "def set_up_and_parameterise_model_for_experiment(self):\n self.experiment_unique_steps_to_model = {}\n for op_number, op in enumerate(self.experiment.unique_steps):\n new_model = self.model.new_copy()\n new_parameter_values = self.parameter_values.copy()\n\n if op.type != \"current\":\n # Voltage or power control\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # check which kind of external circuit model we need (differential\n # or algebraic)\n if op.type == \"voltage\":\n submodel_class = pybamm.external_circuit.VoltageFunctionControl\n elif op.type == \"power\":\n submodel_class = pybamm.external_circuit.PowerFunctionControl\n\n # Build the new submodel and update the model with it\n submodel = submodel_class(new_model.param, new_model.options)\n variables = new_model.variables\n submodel.variables = submodel.get_fundamental_variables()\n variables.update(submodel.variables)\n submodel.variables.update(submodel.get_coupled_variables(variables))\n variables.update(submodel.variables)\n submodel.set_rhs(variables)\n submodel.set_algebraic(variables)\n submodel.set_initial_conditions(variables)\n new_model.rhs.update(submodel.rhs)\n new_model.algebraic.update(submodel.algebraic)\n new_model.initial_conditions.update(submodel.initial_conditions)\n\n # Set the \"current function\" to be the variable defined in the submodel\n new_parameter_values[\"Current function [A]\"] = submodel.variables[\n \"Current [A]\"\n ]\n self.update_new_model_events(new_model, op)\n # Update parameter 
values\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n experiment_parameter_values = self.get_experiment_parameter_values(\n op, op_number\n )\n new_parameter_values.update(\n experiment_parameter_values, check_already_exists=False\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[repr(op)] = parameterised_model\n\n # Set up rest model if experiment has start times\n if self.experiment.initial_start_time:\n new_model = self.model.new_copy()\n # Update parameter values\n new_parameter_values = self.parameter_values.copy()\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n new_parameter_values.update(\n {\"Current function [A]\": 0, \"Ambient temperature [K]\": \"[input]\"},\n check_already_exists=False,\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[\n \"Rest for padding\"\n ] = parameterised_model", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def prepare_model_(model, *data, device='cpu'):\n _auto_name('', model)\n set_default_parent(model)\n def _prep_data(d):\n if isinstance(d, (np.ndarray, torch.Tensor)):\n return torch.as_tensor(d).to(device)\n elif isinstance(d, (list, tuple)):\n if all(isinstance(x, int) for x in d):\n return torch.randn(*d, device=device)\n return [_prep_data(x) for x in d]\n elif isinstance(d, dict):\n return {k:_prep_data(v) for k, v in d.items()}\n with torch.no_grad():\n is_training = model.training\n data = [_prep_data(d) for d in data]\n model.eval()\n model.to(device)\n model(*data)\n model.train(is_training)\n return model", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def _random_model(self, input_size, output_size, task, config: dict) -> AbstractModel:\n return create_random_model(input_size, output_size, config, task)", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = 
DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def create_model(model_class, model_params=None, model_name='model'):\n\n model_params = {} if model_params is None else model_params\n\n model = model_class(**model_params)\n\n if special_parameters.load_model: # recover from checkpoint\n _load_model(model, model_name)\n\n # configure usage on GPU\n if use_gpu():\n model.to(first_device())\n model = torch.nn.DataParallel(model, device_ids=all_devices())\n\n # print info about devices\n print_info('Device(s)): ' + str(device_description()))\n\n return model", "def _adjust_for_gpus(cls, model: DeviceAwareModule, config: ModelConfigBase,\n model_execution_mode: ModelExecutionMode) -> DeviceAwareModule:\n if config.use_gpu:\n model = model.cuda()\n logging.info(\"Adjusting the model to use mixed precision training.\")\n # If model parallel is set to True, then partition the network across all available gpus.\n if config.use_model_parallel:\n devices = config.get_cuda_devices()\n assert devices is not None # for mypy\n model.partition_model(devices=devices) # type: ignore\n else:\n logging.info(\"Making no adjustments to the model because no GPU was found.\")\n\n # Update model related config attributes (After Model Parallel Activated)\n config.adjust_after_mixed_precision_and_parallel(model)\n\n # DataParallel enables running the model with multiple gpus by splitting samples across GPUs\n # If the model is used in training mode, data parallel is activated by default.\n # Similarly, if model parallel is not activated, data parallel is used as a backup option\n use_data_parallel = (model_execution_mode == ModelExecutionMode.TRAIN) or (not config.use_model_parallel)\n if config.use_gpu and use_data_parallel:\n logging.info(\"Adjusting the model to use DataParallel\")\n # Move all layers to the default GPU before activating data parallel.\n # This needs to happen even though we put the model to the GPU at the beginning of the method,\n # but we may have spread it across multiple GPUs later.\n model = model.cuda()\n model = DataParallelModel(model, device_ids=config.get_cuda_devices())\n\n return model", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, 
default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def create_model(configuration):\n model = find_model_using_name(configuration['model_name'])\n instance = model(configuration)\n print(\"model [{0}] was created\".format(type(instance).__name__))\n return instance", "def build_model(cfg, model, gpu_id=None):\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. 
Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n # Construct the model\n # name = cfg.MODEL.MODEL_NAME\n # model = MODEL_REGISTRY.get(name)(cfg)\n \n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n #, find_unused_parameters=True\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n \n return model", "def _compute_single_model(self, **kwargs):\n _, shape_parameter_settings = self._kwargs_to_settings(**kwargs)\n config = combine_dicts(self.pdf_base_config, shape_parameter_settings, deep_copy=True)\n\n config['never_save_to_cache'] = True\n return Model(config, **shape_parameter_settings)", "def __call__(self,setup_options=True, instantiate_options=True, verbose=False):\n model = self.setup(setup_options)\n model(instantiate_options, verbose)\n return model", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def backend_specific_model(model: TModel, tmp_dir: str):", "def register_model(self, tags: Optional[Dict[str, str]] = None) -> Model:\n if self.run is None:\n raise ValueError(\n \"An experiment has to be run before a model can be registered\"\n )\n self.model = self.run.register_model(\n model_name=self.name,\n model_path=\"outputs/model.pkl\",\n properties=tags,\n )\n return self.model", "def __init__(self,\n names,\n data,\n embedding_fns,\n encoder_fns_1,\n encoder_fns_2,\n logits_fns,\n evaluation_fns,\n # MTL\n mixing_ratios,\n L2_coefficient=None,\n is_distill=False,\n distill_coefficient_loc=None,\n distill_coefficient_scale=None,\n distill_temperature=1.0,\n # optimization\n optimizer=\"Adam\",\n learning_rate=0.001,\n gradient_clipping_norm=2.0,\n # misc\n graph=None,\n logdir=None,\n main_model_index=0,\n debug_mode=False):\n \n super(MultitaskBaseModel, self).__init__(\n logdir=logdir, graph=graph,\n saver_max_to_keep=MAX_CHECKPOINTS_TO_KEEP)\n\n num_models = len(names)\n _check_list_compatability(data, num_models)\n _check_fn_list_compatability(embedding_fns, num_models, True)\n _check_fn_list_compatability(encoder_fns_1, num_models, True)\n _check_fn_list_compatability(encoder_fns_2, num_models, True)\n _check_fn_list_compatability(logits_fns, num_models, False)\n _check_fn_list_compatability(evaluation_fns, num_models, False)\n\n # check mixing ratios and MTL\n if len(names) == 1:\n raise ValueError(\"Not supported\")\n _mr_compatible(mixing_ratios, num_models, print_out=True)\n if main_model_index != 0:\n raise ValueError(\"`main_model_index` must be set to `0`\")\n\n self._names = names\n self._data = data\n self._embedding_fns = embedding_fns\n self._encoder_fns_1 = encoder_fns_1\n self._encoder_fns_2 = encoder_fns_2\n self._logits_fns = 
logits_fns\n self._evaluation_fns = evaluation_fns\n\n # MTL\n self._mixing_ratios = mixing_ratios\n self._L2_coefficient = L2_coefficient\n self._is_disill = is_distill\n self._distill_temperature = distill_temperature\n self._distill_coefficient_loc = distill_coefficient_loc\n self._distill_coefficient_scale = distill_coefficient_scale\n\n self._optimizer = optimizer\n self._learning_rate = learning_rate\n self._gradient_clipping_norm = gradient_clipping_norm\n\n self._main_model_index = main_model_index\n self._debug = collections.defaultdict(list)\n self._debug_mode = debug_mode", "def config_task(self) -> None:\n if self.hparams[\"model\"] == \"resnet18\":\n self.model = models.resnet18(pretrained=True)\n in_features = self.model.fc.in_features\n self.model.fc = nn.Linear( # type: ignore[attr-defined]\n in_features, out_features=1\n )\n else:\n raise ValueError(f\"Model type '{self.hparams['model']}' is not valid.\")", "def build_model(cfg, gpu_id=None):\n # Construct the model\n if MODEL_REGISTRY.get(cfg.MODEL.NAME) == None:\n # attempt to find standard models\n model = BaseVideoModel(cfg)\n else:\n # if the model is explicitly defined,\n # it is directly constructed from the model pool\n model = MODEL_REGISTRY.get(cfg.MODEL.NAME)(cfg)\n\n if torch.cuda.is_available():\n assert (\n cfg.NUM_GPUS <= torch.cuda.device_count()\n ), \"Cannot use more GPU devices than available\"\n else:\n assert (\n cfg.NUM_GPUS == 0\n ), \"Cuda is not available. Please set `NUM_GPUS: 0 for running on CPUs.\"\n\n if cfg.NUM_GPUS:\n if gpu_id is None:\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n else:\n cur_device = gpu_id\n model = model.cuda(device=cur_device)\n \n model_ema = None\n if cfg.MODEL.EMA.ENABLE:\n model_ema = ModelEmaV2(model, decay=cfg.MODEL.EMA.DECAY)\n\n try:\n # convert batchnorm to be synchronized across \n # different GPUs if needed\n sync_bn = cfg.BN.SYNC_BN\n if sync_bn == True and cfg.NUM_GPUS * cfg.NUM_SHARDS > 1:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n except:\n sync_bn = None\n\n # Use multi-process data parallel model in the multi-gpu setting\n if cfg.NUM_GPUS*cfg.NUM_SHARDS > 1:\n # Make model replica operate on the current device\n if cfg.PAI:\n # Support distributed training on the cluster\n model = torch.nn.parallel.DistributedDataParallel(\n module=model\n )\n else:\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device\n )\n\n return model, model_ema", "def create_model(ModelName=None, PrimaryContainer=None, Containers=None, ExecutionRoleArn=None, Tags=None, VpcConfig=None, EnableNetworkIsolation=None):\n pass", "def create_and_copy_model(model, create_model_func, **kwargs):\n new_model = create_model_func(**kwargs)\n\n update_model_weights( # copy trainable weights\n new_model, model.trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=True),\n trainable=True, force_update=True)\n\n update_model_weights( # copy non-trainable weights\n new_model, model.non_trainable_weights,\n weights_structure=get_model_weights_structure(new_model, trainable=False),\n trainable=False, force_update=True)\n\n # make sure that model is \"built\" and new variables are not created\n build_model(new_model, model.input_shape)\n\n return new_model", "def from_pretrained(model_name: str, aliases: Dict = None, device: str = None):\n if device == None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n model = None\n\n 
models = AutoModel.list_models(return_dict=True)\n model_config = models.get(model_name)\n\n # Try to find by alias\n if model_config == None and aliases:\n name_from_alias = aliases.get(model_name)\n\n if name_from_alias:\n model_config = models.get(name_from_alias)\n if model_config:\n model_name = name_from_alias\n\n # Try to load from local saved model\n if model_config == None:\n try:\n model = load(model_name)\n model.to(device)\n except ValueError:\n raise ValueError(f\"Model '{model_name}' not found\")\n \n if model == None:\n model_class = model_config[\"class\"]\n init_kwargs = model_config[\"init_kwargs\"]\n\n model = model_config[\"class\"](**init_kwargs,\n description=model_config[\"description\"],\n tasks=model_config[\"tasks\"],\n name=model_name,\n details=model_config.get(\"details\"),\n device=device)\n\n return model", "def prepare(cls, model, device, **kwargs):\n super(SingaBackend, cls).prepare(model, device, **kwargs)\n # when parsing graph, we use the shape of input gived by onnx to init a random value\n # HOWEVER, the random value may not be correct for some inputs, such as gather which needs indices\n # so if have operators, the user must give inputs\n init_inputs = kwargs.get(\"init_inputs\", None)\n # whether initializers are moved into inputs, due to https://github.com/onnx/onnx/issues/2417\n # sometimes, input contains all initializer's info, sometimes, may not\n cls.keep_initializers_as_inputs = kwargs.get(\n 'keep_initializers_as_inputs', True)\n # optimize and infer the shape of the model\n try:\n model = onnx.utils.polish_model(model)\n except IndexError as err:\n # due to https://github.com/onnx/onnx/issues/2417\n model = onnx.shape_inference.infer_shapes(model)\n\n # check the opset version and ir version\n opset_version = None\n for imp in model.opset_import:\n if not imp.HasField(\"domain\") or imp.domain == \"\":\n opset_version = imp.version\n if imp.version > cls._known_opset_version:\n warnings.warn(\n \"This version of singa targets ONNX operator set version {}, but the model we are trying to import uses version {}. 
We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.\"\n .format(cls._known_opset_version, imp.version))\n else:\n warnings.warn(\"Unrecognized operator set {}\".format(imp.domain))\n if opset_version is None:\n if model.ir_version >= 0x00000003:\n raise RuntimeError(\n \"Model with IR version >= 3 did not specify ONNX operator set version (singa requires it)\"\n )\n else:\n opset_version = 1\n weights, singa_ops = cls._onnx_model_to_singa_net(\n model, init_inputs, device, opset_version)\n return SingaRep(model, weights, singa_ops,\n cls.keep_initializers_as_inputs)", "def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)", "def from_config(cls, *args, **kwargs):\n _config = args\n\n if isinstance(args, tuple): # multiple non-keyword arguments were provided\n if len(args) > 0:\n _config = args[0]\n\n else:\n _config = kwargs['config_path']\n kwargs.pop('config_path')\n\n local = False\n if 'make_new_path' in kwargs:\n local = True\n elif isinstance(_config, str) and os.path.isfile(_config):\n local = True\n elif isinstance(_config, dict) and \"category\" in _config:\n local = True\n\n if local:\n config = None\n config_path = None\n\n # we need to build ai4water's Model class\n if isinstance(_config, dict):\n config = _config\n else:\n config_path = _config\n return BaseModel._get_config_and_path(\n cls,\n config=config,\n config_path=config_path,\n **kwargs\n )\n\n # tf1.15 has from_config so call it\n return super().from_config(*args, **kwargs)", "def initialize_multitask_model(\n *,\n model_def: nn.Module,\n input_spec: Dict[Tuple[Tuple[str, str], ...],\n Sequence[Union[Tuple[Tuple[int, ...], jnp.dtype],\n Tuple[int, ...]]]],\n config: ml_collections.ConfigDict,\n rngs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]],\n) -> Tuple[PyTree, PyTree, int, Optional[Dict[str, float]]]:\n\n def init_fn(model_def):\n for kwargs, in_spec in input_spec.items():\n\n if config.get('batch_sizes') is not None:\n batch_size = config.batch_sizes.get(dict(kwargs)['dataset'])\n else:\n batch_size = config.batch_size\n\n batch_size = (batch_size // jax.device_count()) if batch_size else None\n\n input_shapetype = [\n debug_utils.input_spec_to_jax_shape_dtype_struct(\n spec, batch_size=batch_size) for spec in in_spec\n ]\n dummy_input = []\n for in_st in input_shapetype:\n dummy_input.append(jnp.zeros(in_st.shape, in_st.dtype))\n model_def(\n *dummy_input, train=False, debug=False, **dict(kwargs))\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def _initialize_model(rngs):\n \"\"\"Initialization function to be jitted.\"\"\"\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = 
flax.core.freeze(init_params)\n return init_params, init_model_state\n\n if not isinstance(rngs, dict):\n rngs = {'params': rngs}\n init_params, init_model_state = _initialize_model(rngs)\n # Pop out params rng:\n rngs.pop('params')\n\n # Count number of trainable parameters:\n num_trainable_params = debug_utils.log_param_shapes(init_params)\n\n # Count gflops:\n count_flops = config.get('count_flops',\n ml_collections.ConfigDict({'count_flops': True}))\n if count_flops:\n variables = {'params': init_params, **init_model_state}\n gflops_dict = {}\n gflops_all = 0\n for kwargs, in_spec in input_spec.items():\n flops = debug_utils.compute_flops(\n flax_model_apply_fn=functools.partial(\n model_def.apply,\n variables,\n train=False,\n debug=False,\n rngs=rngs,\n **dict(kwargs)),\n input_spec=count_flops.get('input_spec', in_spec),\n fuse_multiply_add=count_flops.get('fuse_multiply_add', True))\n gflops = flops / (10**9)\n gflops_key = 'gflops/' + '/'.join(f'{x}={y}' for x, y in kwargs)\n gflops_dict[gflops_key] = gflops\n gflops_all += gflops\n gflops_dict['gflops'] = gflops_all\n else:\n gflops_dict = None\n\n return init_params, init_model_state, num_trainable_params, gflops_dict", "def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)", "def build_model(model_name: Text,\n model_config: archs.MobileNetConfig,\n dataset_config: Optional[dataset_factory.DatasetConfig] = None,\n ) -> tf.keras.models.Model:\n\n model_build_function = _get_model_builder().get(model_name)\n if model_build_function:\n if dataset_config:\n image_size = dataset_config.image_size\n channels = dataset_config.num_channels\n model_config.input_shape = (image_size, image_size, channels)\n model_config.num_classes = dataset_config.num_classes\n return model_build_function(config=model_config)\n else:\n raise ValueError('The model {} is not supported.'.format(model_name))", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def config_from_pytorch_model(\n model,\n granularity='model',\n backend=None,\n default_precision='ap_fixed<16,6>',\n default_reuse_factor=1,\n inputs_channel_last=False,\n transpose_outputs=True,\n):\n\n config = {}\n\n model_config = {}\n 
model_config['Precision'] = default_precision\n model_config['ReuseFactor'] = default_reuse_factor\n model_config['InputsChannelLast'] = inputs_channel_last\n model_config['TransposeOutputs'] = transpose_outputs\n model_config['Strategy'] = 'Latency'\n\n config['Model'] = model_config\n\n return config", "def train_model(self) -> Model:\n run = self.submit_experiment_run(wait_for_completion=self.wait_for_completion)\n model = run.register_model(\n model_name=self.model_name, model_path=self.model_path\n )\n return model", "def model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn", "def get_model_adapter(config):\n if config['task'] == 'joint':\n return JointModelAdapter()\n elif config['task'] == 'keypoints':\n return KeypointsModelAdapter()\n elif config['task'] == 'headsegmentation':\n return HeadSegmentationModelAdapter()\n elif config['task'] == 'detect':\n return DetectionModelAdapter(config['model'])\n return ClassificationModelAdapter()", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = 
os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n 
_, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def make_model(cls, model_id: str):\n try:\n make_model = getattr(importlib.import_module(\"kgcnn.literature.%s\" % model_id), \"make_model\")\n\n except ModuleNotFoundError:\n raise NotImplementedError(\"ERROR:kgcnn: Unknown model identifier %s\" % model_id)\n\n return make_model", "def createNewModel(self, modelName):\n try:\n storage = FileSystemStorage(join(settings.MEDIA_ROOT, 'models'))\n\n folderSufix = 1\n new_model_name = modelName\n while storage.exists(join(storage.base_location, new_model_name)):\n folderSufix += 1\n new_model_name = f'{modelName}_{folderSufix}'\n\n folder_path = join(storage.base_location, new_model_name)\n model_file = join(folder_path, f'{new_model_name}.ppl')\n\n if not storage.exists(folder_path):\n os.mkdir(folder_path)\n\n calcEngine = CalcEngine.factory(self.client_session)\n if calcEngine.createNewModel(model_file, new_model_name):\n self.closeModel()\n return self.openModel(join(storage.base_location, new_model_name, f'{new_model_name}.ppl'))\n except Exception as ex:\n raise ex", "def build_variational_keras_model(\n input_shape=[1],\n layer_units=[200, 100, 1],\n layer_activations=[\"relu\", \"relu\", \"linear\"],\n initial_unconstrained_scale=None,\n transform_unconstrained_scale_factor=0.05,\n normalization_layer=False,\n prior_scale_identity_multiplier=1,\n kl_weight=None,\n noise_scale_prior=None,\n n_train=None,\n mean_init_noise_scale=0.0,\n names=None,\n evaluate_ignore_prior_loss=True,\n):\n if names is None:\n names = [None] * len(layer_units)\n\n # model = tf.keras.Sequential()\n input = tf.keras.Input(shape=input_shape)\n if normalization_layer:\n x = NormalizationFix(input_shape=input_shape, name=\"normalization\")(input)\n\n prior = 
prior_fn_factory(prior_scale_identity_multiplier)\n posterior_mean_field = posterior_mean_field_generator(mean_init_noise_scale)\n for i, units, activation, name in zip(\n range(len(layer_units)), layer_units, layer_activations, names\n ):\n layer = tfp.layers.DenseVariational(\n units,\n activation=activation,\n name=name,\n make_posterior_fn=posterior_mean_field,\n make_prior_fn=prior,\n kl_weight=kl_weight,\n use_bias=True,\n )\n if i == 0 and not normalization_layer:\n x = layer(input)\n else:\n x = layer(x)\n if initial_unconstrained_scale is not None:\n x = AddSigmaLayer(initial_unconstrained_scale, name=\"sigma_layer\")(x)\n x = tfp.layers.DistributionLambda(\n lambda t: tfd.Normal(\n loc=t[..., :1],\n scale=transform_unconstrained_scale(\n t[..., 1:], transform_unconstrained_scale_factor\n ),\n )\n )(x)\n if evaluate_ignore_prior_loss:\n model = ValidationModel(inputs=input, outputs=x)\n else:\n model = tf.keras.Model(inputs=input, outputs=x)\n if initial_unconstrained_scale is not None:\n if noise_scale_prior is not None:\n model.add_loss(\n lambda: -tf.reduce_sum(\n noise_scale_prior.log_prob(\n transform_unconstrained_scale(\n model.get_layer(\"sigma_layer\").sigma,\n transform_unconstrained_scale_factor,\n )\n )\n )\n / n_train\n )\n return model", "def create_model(self, setting: SettingType) -> BaselineModel[SettingType]:\n # Create the model, passing the setting, hparams and config.\n return MyCustomModel(setting=setting, hparams=self.hparams, config=self.config)", "def create_model(feats2d, shapes, model_settings, model_architecture,\n is_training, runtime_settings=None):\n if model_architecture == 'single_fc':\n return create_single_fc_model(feats2d, shapes, model_settings, is_training)\n elif model_architecture == 'simple_conv2D':\n return create_simple_conv2D_model(feats2d, shapes, model_settings, is_training)\n elif model_architecture == 'simple_conv1D':\n return create_simple_conv1D_model(feats2d, shapes, model_settings, is_training)\n elif model_architecture == 'lstm':\n return create_bidirectionnal_dynamic_rnn_model(feats2d, shapes, model_settings, is_training)\n elif model_architecture == 'cnn_lstm':\n return create_CNN_LSTM_model(feats2d, shapes, model_settings, is_training)\n elif model_architecture == 'dcnn_lstm':\n return create_DCNN_LSTM_model(feats2d, shapes, model_settings, is_training)\n else:\n raise Exception('model_architecture argument \"' + model_architecture +\n '\" not recognized, should be one of \"single_fc\", \"simple_conv2D\",' +\n ' \"simple_conv1D\", or \"lstm\"')", "def build_model(\n model_purpose: str,\n name: str,\n init_w: str,\n input_shape: np.ndarray,\n classes: int,\n dropout_rate: np.float32,\n) -> keras.Model:\n\n if model_purpose.startswith(\"segmentation\"):\n seg_builder = sm.Seg_model_builder(name, input_shape, classes, dropout_rate)\n model = seg_builder.get_model()\n\n elif model_purpose == \"inversion\":\n reg_builder = rm.Reg_model_builder(name, input_shape, classes, init_w)\n model = reg_builder.get_model()\n\n elif model_purpose == \"pixel_concentration_retrieval\":\n model = pwrm.Unet_2(input_shape, classes)\n\n return model", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = 
self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def _InstantiateTaskModel(\n self, task_params\n ) -> Union[base_model.SingleTaskModel, base_model.MultiTaskModel]:\n if issubclass(task_params.cls, base_model.MultiTaskSubModel):\n return task_params.Instantiate(\n shared_model=self._shared_model, executor_ema=self._executor_ema)\n return task_params.Instantiate(executor_ema=self._executor_ema)", "def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")", "def create(self, task_model):\n raise NotImplementedError()", "def setup_model(self,\n model_weights_path: Optional[str] = None,\n model_def_path: Optional[str] = None) -> None:\n if self.model is not None:\n self.model.to(self.device)\n return\n\n self._onnx_mode = (model_weights_path is not None\n and model_weights_path.lower().endswith('.onnx'))\n if self._onnx_mode:\n model = self.load_onnx_model(model_weights_path)\n else:\n model = self.build_model(model_def_path)\n\n if self.cfg.model.external_def is not None:\n # this model will have 1 extra output classes that we will ignore\n self.model = TorchVisionODAdapter(model, ignored_output_inds=[0])\n else:\n # this model will have 2 extra output classes that we will ignore\n num_classes = self.cfg.data.num_classes\n self.model = TorchVisionODAdapter(\n model, ignored_output_inds=[0, num_classes + 1])\n\n if not self._onnx_mode:\n self.model.to(self.device)\n self.load_init_weights(model_weights_path)", "def __init__(self,\n model_fn=None,\n model_dir=None,\n config=None,\n params=None,\n use_tpu=True,\n train_batch_size=None,\n eval_batch_size=None,\n predict_batch_size=None,\n batch_axis=None):\n if config is None or not isinstance(config, tpu_config.RunConfig):\n raise ValueError(\n '`config` must be provided with type `tpu_config.RunConfig`')\n\n if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):\n raise ValueError('{} are reserved keys but existed in params {}.'.format(\n _RESERVED_PARAMS_KEYS, params))\n\n if use_tpu:\n # Perform some very basic validations. 
More validations will be found in\n # _TPUContext.\n if train_batch_size is None:\n raise ValueError('`train_batch_size` cannot be `None`')\n util_lib.check_positive_integer(train_batch_size, 'train_batch_size')\n\n if (config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.PER_SHARD_V1 and\n config.tpu_config.computation_shape):\n raise ValueError(\n 'Model parallelism only supports per host input for training. '\n 'Please adjust TPURunconfig.per_host_input_for_training.')\n\n if eval_batch_size is not None:\n util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')\n\n if predict_batch_size is not None:\n util_lib.check_positive_integer(predict_batch_size,\n 'predict_batch_size')\n\n # Verifies the model_fn signature according to Estimator framework.\n estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access\n # We cannot store config and params in this constructor as parent\n # constructor might change them, such as assigning a temp dir for\n # config.model_dir.\n model_function = self._augment_model_fn(model_fn, batch_axis)\n\n # Passing non-None params as wrapped model_fn has it.\n params = params or {}\n super(TPUEstimator, self).__init__(\n model_fn=model_function,\n model_dir=model_dir,\n config=config,\n params=params)\n self._iterations_per_training_loop = (\n self._config.tpu_config.iterations_per_loop)\n\n # All properties passed to _TPUContext are immutable.\n # pylint: disable=protected-access\n self._ctx = tpu_context._get_tpu_context(\n self._config, train_batch_size,\n eval_batch_size, predict_batch_size,\n use_tpu)\n\n self._is_input_fn_invoked = None", "def set_up_model(dt, model, update = False):\n \n start_scope()\n \n ##### Update model parameters (should be done, if original parameters have been changed)\n if update:\n ###### Temperature in Kelvin\n model.T_kelvin = model.zero_celsius + model.T_celsius*kelvin\n \n ##### Potentials\n # Resting potential (calculated with Goldman equation)\n model.V_res = (model.R*model.T_kelvin)/model.F * np.log((model.P_K*model.n_init**2*model.K_e + model.P_Na*model.h_init*model.m_init**3*model.Na_e)/\\\n (model.P_K*model.n_init**2*model.K_i + model.P_Na*model.h_init*model.m_init**3*model.Na_i))\n \n # Nerst potential for leakage current; leakage chanels were excluded but could be added by using: g_L*(E_L-(v-V_res)) \n model.E_L = (-1/model.g_L)*(model.P_Na*model.m_init**3*model.h_init*(model.V_res*model.F**2)/(model.R*model.T_kelvin) * \\\n (model.Na_e-model.Na_i*exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))) + \\\n model.P_K*model.n_init**2*(model.V_res*model.F**2)/(model.R*model.T_kelvin) *\\\n (model.K_e-model.K_i*np.exp(model.V_res*model.F/(model.R*model.T_kelvin)))/(1-np.exp(model.V_res*model.F/(model.R*model.T_kelvin))))\n \n \n ##### structure of ANF\n # terminal = 0\n # internode = 1\n # node = 2\n # presomatic region = 3\n # Soma = 4\n # postsomatic region = 5)\n model.structure = np.array(list(np.tile([2,1],model.nof_internodes)) + [2])\n model.nof_comps = len(model.structure)\n \n ##### Compartment lengths\n # initialize\n model.compartment_lengths = np.zeros_like(model.structure)*um\n # length internodes\n model.compartment_lengths[model.structure == 1] = model.length_internodes\n # length nodes\n model.compartment_lengths[model.structure == 2] = model.length_nodes\n # total length neuron\n model.length_neuron = sum(model.compartment_lengths)\n \n ##### Compartment diameters\n # initialize\n 
model.compartment_diameters = np.zeros(model.nof_comps+1)*um\n # dendrite\n model.fiber_inner_diameter = 0.7* model.fiber_outer_diameter\n model.compartment_diameters[:] = model.fiber_inner_diameter\n \n ##### Compartment middle point distances (needed for plots)\n model.distance_comps_middle = np.zeros_like(model.compartment_lengths)\n model.distance_comps_middle[0] = 0.5*model.compartment_lengths[0]\n for ii in range(0,model.nof_comps-1):\n model.distance_comps_middle[ii+1] = 0.5* model.compartment_lengths[ii] + 0.5* model.compartment_lengths[ii+1]\n \n ##### Capacitivites\n # initialize\n model.c_m = np.zeros_like(model.structure)*uF/cm**2\n # internodes\n model.c_m[np.where(model.structure == 1)] = 0*uF/cm**2\n # nodes\n model.c_m[np.where(model.structure == 2)] = model.c_m_layer\n \n ##### Axoplasmatic resistances\n model.compartment_center_diameters = np.zeros(model.nof_comps)*um\n model.compartment_center_diameters = (model.compartment_diameters[0:-1] + model.compartment_diameters[1:]) / 2 \n model.R_a = (model.compartment_lengths*model.rho_in) / ((model.compartment_center_diameters*0.5)**2*np.pi)\n \n ##### Surface arias\n # lateral surfaces\n m = [np.sqrt(abs(model.compartment_diameters[i+1] - model.compartment_diameters[i])**2 + model.compartment_lengths[i]**2)\n for i in range(0,model.nof_comps)]\n # total surfaces\n model.A_surface = [(model.compartment_diameters[i+1] + model.compartment_diameters[i])*np.pi*m[i]*0.5\n for i in range(0,model.nof_comps)]\n \n ##### Noise term\n model.P_Na_vector = np.zeros(model.nof_comps)*um/second\n model.P_Na_vector[model.structure == 2] = model.P_Na\n model.noise_term = np.sqrt(model.A_surface*model.P_Na_vector)\n \n ##### Compartments to plot\n model.comps_to_plot = range(1,model.nof_comps)\n \n ##### initialize defaultclock\n defaultclock.dt = dt\n\n ##### define morphology\n morpho = Section(n = model.nof_comps,\n length = model.compartment_lengths,\n diameter = model.compartment_diameters)\n \n ##### define neuron\n neuron = SpatialNeuron(morphology = morpho,\n model = model.eqs,\n Cm = model.c_m,\n Ri = model.rho_in,\n method=\"exponential_euler\")\n \n ##### initial values\n neuron.v = model.V_res\n neuron.m = model.m_init\n neuron.n = model.n_init\n neuron.h = model.h_init\n \n ##### Set parameter values of differential equations\n # conductances active compartments\n neuron.g_Na = model.g_Na\n neuron.g_K = model.g_K\n \n # conductances internodes\n neuron.g_Na[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n neuron.g_K[np.asarray(np.where(model.structure == 1))] = 0*msiemens/cm**2\n \n # other parameters\n neuron.V_res = model.V_res\n neuron.T_celsius = model.T_celsius\n neuron.E_Na = model.E_Na\n neuron.E_K = model.E_K\n neuron.E_L = model.E_L\n neuron.g_L = model.g_L \n \n return neuron, model", "def get_model(model_name: str = \"\", cfg={}) -> torch.nn.Module:\n if model_name == \"default\":\n model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)\n\n elif model_name == \"resnetish34\":\n model = resnetish34()\n\n elif model_name == \"clstm\":\n model = CLSTM()\n\n elif model_name == \"cvt\":\n s1_depth, s2_depth, s3_depth = cfg.depths\n s1_emb_dim, s2_emb_dim, s3_emb_dim = cfg.embed_dims\n s1_mlp_mult, s2_mlp_mult, s3_mlp_mult = cfg.mlp_mults\n\n model = CvT(\n s1_emb_dim=s1_emb_dim,\n s1_depth=s1_depth,\n s1_mlp_mult=s1_mlp_mult,\n s2_emb_dim=s2_emb_dim,\n s2_depth=s2_depth,\n s2_mlp_mult=s2_mlp_mult,\n s3_emb_dim=s3_emb_dim,\n s3_depth=s3_depth,\n s3_mlp_mult=s3_mlp_mult,\n pool=cfg.cvt_pool,\n )\n else:\n 
raise ValueError(\"Model not found.\")\n return model", "def parse_temp_model(self, default=None):\n\t\tcfg_temp_model = self.cfg_root.find('temp_model')\n\n\t\tif cfg_temp_model and cfg_temp_model.text in self.VALID_TEMP_MODEL:\n\t\t\tcfg_temp_model = cfg_temp_model.text\n\t\telse:\n\t\t\tcfg_temp_model = default\n\n\t\treturn cfg_temp_model", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def _init_model(\n self,\n cfg: ConfigType,\n weights: Optional[str],\n device: str = 'cpu',\n ) -> nn.Module:\n checkpoint: Optional[dict] = None\n if weights is not None:\n checkpoint = _load_checkpoint(weights, map_location='cpu')\n\n if not cfg:\n assert checkpoint is not None\n try:\n # Prefer to get config from `message_hub` since `message_hub`\n # is a more stable module to store all runtime information.\n # However, the early version of MMEngine will not save config\n # in `message_hub`, so we will try to load config from `meta`.\n cfg_string = checkpoint['message_hub']['runtime_info']['cfg']\n except KeyError:\n assert 'meta' in checkpoint, (\n 'If model(config) is not provided, the checkpoint must'\n 'contain the config string in `meta` or `message_hub`, '\n 'but both `meta` and `message_hub` are not found in the '\n 'checkpoint.')\n meta = checkpoint['meta']\n if 'cfg' in meta:\n cfg_string = meta['cfg']\n else:\n raise ValueError(\n 'Cannot find the config in the checkpoint.')\n cfg.update(\n Config.fromstring(cfg_string, file_format='.py')._cfg_dict)\n\n # Delete the `pretrained` field to prevent model from loading the\n # the pretrained weights unnecessarily.\n if cfg.model.get('pretrained') is not None:\n del cfg.model.pretrained\n\n model = MODELS.build(cfg.model)\n model.cfg = cfg\n self._load_weights_to_model(model, checkpoint, cfg)\n model.to(device)\n model.eval()\n return model", "def run_model(config_file):\n config_file = os.path.join(os.getcwd(), config_file)\n result = Tethys(config_file=config_file)\n result.run_model()\n return result", "def create_model(config_obj: Union[ModelConfig, dict], random_seed: int = default_random_seed) -> BaseModel:\n if isinstance(config_obj, dict):\n config_obj = ModelConfig.from_dict(config_obj)\n model_type = get_from_registry(config_obj.model_type, model_type_registry)\n return model_type(config_obj, random_seed=random_seed)", "def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res", "def make(model: Type[Model], **kwargs: Any) -> Model:\n return modelfactory_factory(model)(**kwargs)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, 
input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec", "def create(name, out_channel, pretrain):\n if out_channel == 10 or out_channel == 100 or out_channel == 200:\n # use custom models\n if name not in custom_factory:\n raise KeyError(\"Unknown model:\", name)\n return custom_factory[name](out_channel)\n elif out_channel == 1000:\n if name not in torchvision_factory:\n raise KeyError(\"Unknown model:\", name)\n return torchvision_factory[name](pretrain)\n else:\n raise Exception", "def make_model(self, options, generator):\n model_type = options.model_type\n input_shape = (options.target_size[0], options.target_size[1],\n len(options.active_input_inds))\n nb_labels = generator.dataset.nb_labels\n\n if model_type == CONV_LOGISTIC:\n model = make_conv_logistic(input_shape, nb_labels,\n options.kernel_size)\n elif model_type == FCN_RESNET:\n model = make_fcn_resnet(\n input_shape, nb_labels, options.use_pretraining,\n options.freeze_base)\n elif model_type == DUAL_FCN_RESNET:\n model = make_dual_fcn_resnet(\n input_shape, options.dual_active_input_inds,\n nb_labels, options.use_pretraining, options.freeze_base)\n elif model_type == 
UNET:\n model = make_unet(input_shape, nb_labels)\n elif model_type == FC_DENSENET:\n model = make_fc_densenet(\n input_shape, nb_labels, drop_prob=options.drop_prob,\n weight_decay=options.weight_decay,\n down_blocks=options.down_blocks,\n up_blocks=options.up_blocks)\n elif model_type in [CONCAT_ENSEMBLE, AVG_ENSEMBLE]:\n models, active_input_inds_list = self.load_ensemble_models(options)\n if model_type == CONCAT_ENSEMBLE:\n model = ConcatEnsemble(\n models, active_input_inds_list, input_shape, nb_labels)\n elif model_type == AVG_ENSEMBLE:\n model = AvgEnsemble(models, active_input_inds_list)\n else:\n raise ValueError('{} is not a valid model_type'.format(model_type))\n\n return model", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec", "def model_fn(features, labels, mode, params):\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec", "def transformerXLModel(*args, **kwargs):\n model = TransfoXLModel.from_pretrained(*args, **kwargs)\n return model", "def CreateModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def create_supervised_evaluator(\n model, 
prepare_batch, metrics=None, device=None, non_blocking=False, output_transform=val_transform,\n):\n metrics = metrics or {}\n\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n y_pred = _upscale_model_output(y_pred, x)\n return output_transform(x, y, y_pred, ids, patch_locations)\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine", "def createModel(config_path, checkpoint_path, graph_path):\n\n global build_graph, prev_classes\n\n trt_graph = None\n input_names = None\n \n if build_graph:\n frozen_graph, input_names, output_names = build_detection_graph(\n config=config_path,\n checkpoint=checkpoint_path\n )\n \n trt_graph = trt.create_inference_graph(\n input_graph_def=frozen_graph,\n outputs=output_names,\n max_batch_size=1,\n max_workspace_size_bytes=1 << 25,\n precision_mode='FP16',\n minimum_segment_size=50\n )\n\n with open(graph_path, 'wb') as f:\n f.write(trt_graph.SerializeToString())\n\n with open('config.txt', 'r+') as json_file: \n data = json.load(json_file)\n data['model'] = []\n data['model'] = [{'input_names': input_names}]\n json_file.seek(0)\n json_file.truncate()\n json.dump(data, json_file)\n\n else:\n with open(graph_path, 'rb') as f:\n trt_graph = tf.GraphDef()\n trt_graph.ParseFromString(f.read())\n with open('config.txt') as json_file: \n data = json.load(json_file)\n input_names = data['model'][0]['input_names']\n\n return Model(trt_graph, input_names)", "def recent_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n config.dense_type = model_args.dense_type\n config.act_type = model_args.act_type\n config.num_labels_per_head = [\n len(label_id) for label_id in task_infos.head_id_to_label_id\n ]\n config.head2label = task_infos.head_id_to_label_id\n model_cls = getattr(mod, model_args.architectures,\n RobertaForKlueRecent)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def initialize_model_from_cfg(args, gpu_id=0):\n model = eval(args.model).loot_model(args)\n model.eval()\n\n if args.cuda:\n model.cuda()\n\n if args.load_ckpt:\n load_name = args.load_ckpt\n logger.info(\"loading checkpoint %s\", load_name)\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt(model, checkpoint['model'])\n\n if args.load_detectron:\n logger.info(\"loading detectron weights %s\", args.load_detectron)\n load_detectron_weight(model, args.load_detectron)\n\n\n return model", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def optimize_model(input,\n model_type='bert',\n num_heads=0,\n hidden_size=0,\n optimization_options=None,\n opt_level=0,\n use_gpu=False,\n only_onnxruntime=False):\n (optimizer_class, producer, run_onnxruntime) = MODEL_CLASSES[model_type]\n\n temp_model_path = None\n if opt_level > 1: # Optimization specified for an execution provider.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=use_gpu, 
opt_level=opt_level)\n elif run_onnxruntime:\n # Use Onnxruntime to do optimizations (like constant folding and cast elimation) that is not specified to exection provider.\n # CPU provider is used here so that there is no extra node for GPU memory copy.\n temp_model_path = optimize_by_onnxruntime(input, use_gpu=False, opt_level=1)\n\n model = load_model(temp_model_path or input, format=None, load_external_data=True)\n\n if model.producer_name and producer != model.producer_name:\n logger.warning(\n f\"Model producer not matched: Expect {producer}, Got {model.producer_name} {model.producer_version}. Please specify correct --model_type parameter.\"\n )\n\n if optimization_options is None:\n optimization_options = BertOptimizationOptions(model_type)\n\n optimizer = optimizer_class(model, num_heads, hidden_size)\n\n if not only_onnxruntime:\n optimizer.optimize(optimization_options)\n\n # Remove the temporary model.\n if temp_model_path:\n os.remove(temp_model_path)\n logger.debug(\"Remove tempoary model: {}\".format(temp_model_path))\n\n optimizer.model.producer_name = \"onnxruntime.transformers\"\n from onnxruntime import __version__ as onnxruntime_version\n optimizer.model.producer_version = onnxruntime_version\n\n return optimizer", "def generate_model(**kwargs):\n model = ResNet3D(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)\n return model", "def basic_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n model_cls = getattr(mod, model_args.architectures,\n AutoModelForSequenceClassification)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def model_creator(config):\n return nn.Linear(1, 1)", "def set_temperature(logits, label):\n nll_criterion = nn.CrossEntropyLoss().to(device)\n\n class TemperatureModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.temperature = nn.Parameter(torch.ones(1) * 1.5)\n\n def forward(self, logits):\n temperature = self.temperature.unsqueeze(1).expand(logits.size(0), logits.size(1))\n logits = logits / temperature\n return logits, self.temperature\n\n model_T = TemperatureModel()\n optimizer = optim.Adam(\n model_T.parameters(), lr=0.01, weight_decay=1e-4\n )\n for i in range(40):\n TC_pred, _ = model_T(logits)\n optimizer.zero_grad()\n loss = nll_criterion(TC_pred, label)\n loss.backward()\n optimizer.step()\n\n _, T = model_T(logits)\n return T", "def create_model(self):\n pass", "def create_model(self):\n pass", "def _make_model_v1():\n tf.reset_default_graph()\n\n with tf.Session() as sess:\n # Make two simple graphs, both of which will be served by TF\n x = tf.placeholder('float', shape=(None, 3), name='Input')\n z = tf.placeholder('float', shape=(), name='ScalarMultiple')\n m = tf.Variable([1.0, 1.0, 1.0], name='Slopes')\n y = m * x + 1\n len_fun = tf.reduce_sum(y - x) # Returns the number of elements in the array\n scale_mult = tf.multiply(z, x, name='scale_mult')\n\n # Initialize the variables\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Create the tool for saving the model to disk\n builder = tf.saved_model.builder.SavedModelBuilder(tf_export_path)\n\n # Make descriptions for the 
inputs and outputs\n x_desc = tf.saved_model.utils.build_tensor_info(x)\n y_desc = tf.saved_model.utils.build_tensor_info(y)\n z_desc = tf.saved_model.utils.build_tensor_info(z)\n len_fun_desc = tf.saved_model.utils.build_tensor_info(len_fun)\n scale_mult_desc = tf.saved_model.utils.build_tensor_info(scale_mult)\n\n # Make a signature for the functions to be served\n func_sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': x_desc},\n outputs={'y': y_desc},\n method_name='run'\n )\n len_sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': x_desc},\n outputs={'len': len_fun_desc},\n method_name='length'\n )\n mult_sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'x': x_desc, 'z': z_desc},\n outputs={'scale_mult': scale_mult_desc},\n method_name='scalar_multiply'\n )\n\n # Add the functions and the state of the graph to the builder\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: func_sig,\n 'length': len_sig,\n 'scalar_multiply': mult_sig\n })\n\n # Save the function\n builder.save()", "def from_pretrained(cls,\n *args,\n **kwargs):\n pretrained_model_name_or_path = kwargs.get(\"pretrained_model_name_or_path\", None) \\\n if len(args) == 0 else args[0]\n config_dict, _ = PretrainedConfig.get_config_dict(pretrained_model_name_or_path)\n bigdl_transformers_low_bit = config_dict.pop(\"bigdl_transformers_low_bit\", False)\n invalidInputError(not bigdl_transformers_low_bit,\n f\"Detected model is a low-bit({bigdl_transformers_low_bit}) model, \"\n f\"Please use load_low_bit to load this model.\")\n\n # For huggingface transformers cls.HF_Model.from_pretrained could only restore the model\n # in the original format, which is not quantized,\n # we can convert the model to quantized later.\n load_in_4bit = kwargs.pop(\"load_in_4bit\", False)\n load_in_low_bit = kwargs.pop(\"load_in_low_bit\", None)\n optimize_model = kwargs.pop(\"optimize_model\", True)\n\n if load_in_4bit or load_in_low_bit:\n # load int x-bit\n kwargs[\"low_cpu_mem_usage\"] = True\n # set default torch_dtype='auto'\n kwargs[\"torch_dtype\"] = kwargs.get(\"torch_dtype\", 'auto')\n # Avoid tensor parallel F.Linear Operations\n if \"pretraining_tp\" in config_dict:\n kwargs[\"pretraining_tp\"] = 1\n q_k = load_in_low_bit if load_in_low_bit else \"sym_int4\"\n model = cls.load_convert(q_k, optimize_model, *args, **kwargs)\n else:\n # load default\n model = cls.HF_Model.from_pretrained(*args, **kwargs)\n\n return model", "def set_temperature2(logits, label):\n\n nll_criterion = nn.CrossEntropyLoss().to(device)\n\n class TemperatureModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.temperature = nn.Parameter(torch.ones(1) * 1.5)\n\n def forward(self, logits):\n temperature = self.temperature.unsqueeze(1).expand(logits.size(0), logits.size(1))\n logits = logits / temperature\n return logits, self.temperature\n\n model_T = TemperatureModel()\n optimizer = optim.Adam(\n model_T.parameters(), lr=0.01, weight_decay=1e-4\n )\n for i in range(40):\n TC_pred, _ = model_T(logits)\n optimizer.zero_grad()\n loss = nll_criterion(TC_pred, label)\n loss.backward()\n optimizer.step()\n\n _, T = model_T(logits)\n return T" ]
[ "0.73296446", "0.57077694", "0.570612", "0.57056636", "0.56610817", "0.55574673", "0.54785675", "0.5419638", "0.53965676", "0.53905576", "0.538604", "0.538604", "0.5313637", "0.52842414", "0.52818054", "0.5228253", "0.5218954", "0.5209238", "0.52031755", "0.5201383", "0.51910466", "0.51866466", "0.5161635", "0.515062", "0.5128692", "0.5108899", "0.5107936", "0.5107017", "0.5105261", "0.5101205", "0.50903654", "0.5081285", "0.504077", "0.50379235", "0.5030885", "0.5024988", "0.50151837", "0.49843875", "0.49791056", "0.49639997", "0.49568135", "0.49551764", "0.4940765", "0.4937491", "0.4931022", "0.49306655", "0.49292192", "0.49114683", "0.48930836", "0.48929816", "0.4892956", "0.48923966", "0.48773983", "0.48705", "0.48531246", "0.48494416", "0.4841089", "0.48410282", "0.48405635", "0.48390466", "0.48375046", "0.48321953", "0.48213097", "0.48201022", "0.48179272", "0.48125222", "0.48040283", "0.48035505", "0.48028344", "0.47996506", "0.47974712", "0.47951767", "0.47940215", "0.47814786", "0.47808978", "0.4777628", "0.4776726", "0.47673738", "0.476675", "0.4766639", "0.4757638", "0.47422272", "0.47372547", "0.47329733", "0.47325525", "0.47305018", "0.47280627", "0.4721701", "0.4719108", "0.471823", "0.47165692", "0.47144204", "0.47108465", "0.47106728", "0.4710142", "0.47099254", "0.47099254", "0.4697651", "0.46956053", "0.46901697" ]
0.8471851
0
Initialize your data structure here.
def __init__(self):
    self.buckets = collections.defaultdict(list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_empty(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self.data = []\n self.record = {}", "def initialize(self):\n self.data = None\n self.errors = []", "def initialize(self):\n\t\tpass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self):\n self.structure = {}", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self):\n self._data = [] # non-public underlying Python list as storage", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []\n self.idx = {}", "def __init__(self):\n self._dict = {}\n self._array = []", "def init(self) -> None:", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def _init(self):\n pass", "def initialize(self):\n return", "def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []", "def initialize(self) -> None:\n pass", "def initialize(self):\n pass # pragma: no cover", "def __init__(self):\n self.d = {}\n self.l = []", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialise(self):", "def __init__(self):\n self.l = {}\n self.s = {}", "def __init__(self):\n self._data=[]", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def init(self) -> None:\n ...", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = 
set([])\n\n self.db = db", "def __init__(self):\n self.keys = []\n self.values = []", "def __init__(self):\n self.d = {}\n self.h = []", "def memb_init(self):\n self.initialize()", "def __init__(self):\n self.dic={}\n self.data=[]", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def __init__(self, data={}):\n self._update_(data)", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def _init(self):\n raise NotImplementedError", "def __init__(self):\r\n self.indices = {}\r\n self.data = []\r\n self.len = 0", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, data):\n self.data = data\n return", "def __init__(self):\n self.root = [None, dict(), False] # val, sons, end-able", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def __init__(self):\n self.data = {}\n self.refresh()", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def init(self):", "def init(self):", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE", "def __init__(self, data: dict = {}):\n pass", "def initialize(self): \r\n pass", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def __init__(self):\n self.table = {}\n self.ls = []", "def initialize(self):\r\n self.bucket_array.initialize()", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data=None):\n self.data = data", "def __init__(self):\n self._data = set()", "def __init__(self):\n self.key_dict = {}\n self.value_dict = {}\n self.head, self.last = None, None" ]
[ "0.7765608", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7595176", "0.75853467", "0.7558298", "0.7530608", "0.7530608", "0.7530608", "0.7530608", "0.7530608", "0.74971247", "0.74971247", "0.7478105", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.744441", "0.7426435", "0.74157697", "0.74143684", "0.73898417", "0.73898417", "0.7389144", "0.7387738", "0.7383786", "0.7324126", "0.731669", "0.73065454", "0.729799", "0.7287291", "0.7271846", "0.725931", "0.72522944", "0.72522944", "0.72522944", "0.72494334", "0.72494334", "0.72494334", "0.7243696", "0.7239823", "0.72368526", "0.7208368", "0.72016877", "0.72016877", "0.72016877", "0.72016877", "0.71985286", "0.71985286", "0.7195241", "0.71885264", "0.71857035", "0.7176733", "0.7160906", "0.7159325", "0.7149614", "0.71474445", "0.7135992", "0.7128525", "0.7123646", "0.71142536", "0.71142536", "0.71142536", "0.71142536", "0.71109176", "0.71011794", "0.7099338", "0.708543", "0.70676583", "0.70648897", "0.70618606", "0.70606047", "0.7059818", "0.7039291", "0.7039291", "0.7035077", "0.70237756", "0.70142615", "0.6999669", "0.69952625", "0.6994778", "0.6987417", "0.6981039", "0.6976582", "0.6976582", "0.6976431", "0.6976431", "0.6976431", "0.6976431", "0.69684774", "0.69561034", "0.69411176" ]
0.0
-1
Build a dictionary through a list of words
def buildDict(self, words: List[str]) -> None:
    for word in words:
        self.buckets[len(word)].append(word)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_dict(word_list):\r\n\r\n # initialize a dictonary\r\n d = dict()\r\n\r\n # iterate through the word_list, mapping sorted letters to word\r\n for i in word_list:\r\n\r\n # key - sorted letters in the word\r\n # how to sort ? --> convert to list, then sort. Finally join the sorted list.\r\n key = ''.join(sorted(list(i)))\r\n\r\n # check if sorted letters avaialble in dict,\r\n # if yes - append the word to the value\r\n # else - put the word as the 0th element of the value list\r\n if key in d:\r\n d[key].append(i)\r\n else:\r\n d[key] = [i]\r\n\r\n return d", "def _gen_word_dict(words):\n\n # grab all of them with a single in statement.\n results = lookup_words_by_words(\n (set([self.normalize(w) for w in words] + words)),\n session, Word)\n\n def merge(word):\n if word in results:\n return\n # hopefully these are unique.\n # results[word] = unique_merge(session, Word, word=word)\n results[word] = session.merge(Word(word=word))\n\n for word in words:\n merge(word)\n merge(self.normalize(word))\n\n return results", "def buildDict(self, words):\n self.dict = collections.defaultdict(set)\n for word in words:\n for i in xrange(len(word)):\n self.dict[word[:i] + '*' + word[i+1:]].add(word[i])", "def buildDict(self, words):\r\n for word in words:\r\n self.trie.addWord(word)", "def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls", "def parallel_word_dict(w_list, st, end):\n import spacy\n w_list = w_list[st:end]\n nlp, out_dict, count = spacy.load('en_core_web_lg'), {}, 0\n for word in w_list:\n word_obj = nlp(word)\n if word_obj.has_vector:\n out_dict[word] = word_obj.vector\n count += 1\n return out_dict", "def prepareDictionary(words):\n wordsDictionary = {}\n for word in words:\n # Handle subsequent Occurences\n if (wordsDictionary.get(word.lower(), None) != None):\n # Search and add words by checking their lowercase version\n wordsDictionary[word.lower()] = wordsDictionary.get(word.lower()) + 1\n # Handle first Occurence\n else:\n wordsDictionary[word.lower()] = 1\n return wordsDictionary", "def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict", "def make_word_dict():\n d = dict()\n fin = open(\"words.txt\")\n for line in fin:\n word = line.strip().lower()\n d[word] = None\n #have to add single letter words to the word list;\n #also, the empty string is considered a word.\n for letter in ['a', 'i', '']:\n d[letter] = letter\n return d", "def word_list():\n\n d = {}\n with open('words.txt') as fin:\n for line in fin.readlines():\n word = line.strip().lower()\n d[word] = True\n return d", "def __init__(self, words):\n self.d = {}\n for i, w in enumerate(words):\n self.d[w] = self.d.get(w, []) + [i]", "def word_list_into_dict(word_list: list) -> dict:\n all_word_dict = {}\n longest_word_dict = {}\n \n for word_index in range(0, len(word_list), 2):\n word = word_list[word_index]\n word_class = word_list[word_index+1][0]\n try:\n all_word_dict[word_class].append(word)\n\n longest_word = longest_word_dict[word_class][0]\n\n if(len(longest_word) < len(word)):\n longest_word_dict[word_class][0] = word\n\n 
elif(len(longest_word) == len(word)):\n longest_word_dict[word_class][0] = sorted([longest_word, word])[0]\n except:\n all_word_dict[word_class] = [word]\n longest_word_dict[word_class] = [word]\n \n return all_word_dict, longest_word_dict", "def dictionary(word_list):\n word_list.append(\"\")\n for i in range(len(word_list)-2):\n prob_dict.setdefault(word_list[i], []).append(word_list[i+1])", "def buildDict(self, words):\n for word in words:\n self.word_set.add(word)\n for candidate in self.candidates(word):\n self.neighbors[candidate] += 1", "def word_dict():\n fin = open('words.txt')\n w_dict = {}\n for line in fin:\n word = line.strip()\n w_dict[word] = word\n return w_dict", "def make_word_dict():\n d = dict()\n for line in open('words.txt'):\n word = line.strip().lower()\n d[word] = None\n\n return d", "def dict_list(self):\n word_list = []\n for index in range(len(self.token)-1):\n if self.token[index] not in self.word_dict:\n word_list.append(self.token[index+1]) # Appends words that follow the types\n self.word_dict[self.token[index]] = word_list # Create a dictionary based on types and it's word list\n else:\n if self.token[index] in self.word_dict.keys():\n value = self.word_dict.get(self.token[index]) # gets the list if the type already exists\n value.append(self.token[index+1]) # and append the new word to the list\n word_list = []\n # print(\"self.word_dict\", self.word_dict)", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def make_trigrams_dict(word_list):\n tris = {}\n for w1, w2, w3 in zip(word_list[:-2], word_list[1:-1], word_list[2:]):\n pair = (w1, w2)\n if pair in tris:\n tris[pair].append(w3)\n else:\n tris[pair] = [w3]\n\n return tris", "def database(words):\n\n d={}\n if len(words) < 3:\n return\n \n for i,word in enumerate(words):\n try:\n first,second,third = (words[i], words[i+1], words[i+2])\n except IndexError:\n break\n key = (first,second)\n if key not in d:\n d[key] = []\n d[key].append(third)\n \n return d", "def parse_file(input_lst):\n word_dct = {}\n for line in input_lst:\n raw_output = line.split() # these are lists of strings\n for str_ in raw_output: # strings\n str_ = str_.lower()\n str_ = str_.replace(\"-\", \" \")\n str_ = str_.replace(\"?\", \"\")\n str_ = str_.replace(\"!\", \"\")\n str_ = str_.replace(\",\", \"\")\n str_ = str_.replace(\"\\'\", \"\")\n str_ = str_.replace('\\\"', \"\")\n str_ = str_.replace(\".\", \"\")\n if str_ not in word_dct:\n word_dct[str_] = 1\n else:\n word_dct[str_] += 1\n return word_dct", "def get_definitions(wlist):\n ddict = {}\n for word in wlist:\n text = get_def_page(word)\n defs = extract_defs(text)\n ddict[word] = defs\n return ddict", "def words(phrase):\n\twordlist = phrase.split()\n\tunique_wordlist = []\n\tword_freq = []\n\n \n\twhile wordlist:\n\t\tword_freq.append(wordlist.count(wordlist[0])) #count the instances of a word and add it to the frequencies list\n\t\tunique_wordlist.append(wordlist[0]) #add the word into a unique words list\n\t\twordlist = list(filter((wordlist[0]).__ne__, wordlist)) #remove all other similar words from the wordlist\n\n\n\tn = len(word_freq)\n\toutput = {}\n\n\tfor i in range(n):\n\t\tif unique_wordlist[i].isdigit(): #convert sting digits into int\n\t\t\tunique_wordlist[i] = int(unique_wordlist[i])\n\t\toutput[unique_wordlist[i]] = word_freq[i] #add the unique words with their corresponding frequencies into the output dict\n\t\n\treturn output", "def wordListToFreqDict(word_list: list) 
-> dict:\n word_freq = [word_list.count(p) for p in word_list]\n return dict(list(zip(word_list, word_freq)))", "def build_word_dict(self, examples):\n word_dict = TokenDictionary()\n for w in self.load_words(examples):\n word_dict.add(w)\n return word_dict", "def analyze_words(self):\n\t\t\n\t\tword_analysis = {}\n\t\tfor word in self.word_list:\n\t\t\tif word not in word_analysis:\n\t\t\t\tacademic = (word in LEMMA_DICT)\n\t\t\t\tlength = len(word)\n\t\t\t\tfrequency = len(self.word_list[word])\n\t\t\t\tstem = word\t\n\t\t\t\tword_location_index = len(self.sentence_index)-1 #first set it as the last index\n\t\t\t\t\n\t\t\t\tfor index, sentence in self.sentence_index.items():\n\t\t\t\t\tif word in sentence.split():#need to be individual words, not parts of a word\n\t\t\t\t\t\tword_location_index = index \n\t\t\t\t\t\tbreak\n\t\t\t\t\tif self.word_list[word][0] in sentence.split():#accounts for words with upper cases\n\t\t\t\t\t\tword_location_index = index\n\t\t\t\t\t\n\t\t\t\t#selection critera\n\t\t\t\tif academic:\n\t\t\t\t\tselection_criteria = 'academic word'\n\t\t\t\telif frequency > 1: \n\t\t\t\t\tselection_criteria = 'high frequency'\n\t\t\t\telse:\n\t\t\t\t\tselection_criteria = 'word length'\n\n\t\t\t\tword_analysis[word] = (academic, length, frequency, stem, word_location_index, selection_criteria)\n\t\t\n\t\tself.word_analysis = word_analysis\n\t\t\n\t\treturn self.word_analysis", "def train(s):\n # Creates a new dictionary.\n newDict = {}\n # Separates the string into a list based on the spaces between words.\n wordList = s.split(' ')\n # Loops through the entire list of words. If the word is not in the dictionary, add the current word as a key and add the next word to its list. If the word is in the dictionary, add the next word to the list of the current word. 
Accounts for the last word in the list by performing the same operation on the first word in the list so that it connects.\n for num in range(len(wordList)):\n if num == len(wordList) - 1:\n if wordList[num] not in newDict:\n newDict[wordList[num]] = []\n newDict[wordList[num]].append(wordList[0])\n else:\n newDict[wordList[num]].append(wordList[0])\n else:\n if wordList[num] not in newDict:\n newDict[wordList[num]] = []\n newDict[wordList[num]].append(wordList[num + 1])\n else:\n newDict[wordList[num]].append(wordList[num + 1])\n\n return newDict", "def anagrams(word_lst):\n words_dict = {}\n for word in word_lst:\n characters = ''.join(sorted(list(word)))\n if characters in words_dict:\n words_dict[characters].append(word)\n else:\n words_dict[characters] = [word]\n return words_dict", "def update_dictionary_entries(word_list, the_dict):\n\tfor word in word_list:\n\t\tthe_dict[word] = True\n\treturn the_dict", "def anagrams(word_list):\n output = dict()\n\n for word in word_list:\n word = word.strip()\n letters = word_to_tuple(word)\n # add letters as key to output dict\n # if not present already\n output[letters] = output.get(letters, [])\n # append word to list at key\n output[letters].append(word)\n\n return output", "def dictify(words):\n word_freq = {}\n for word in words:\n if word:\n key = word.lower()\n if key in word_freq:\n word_freq[key] += 1\n else:\n word_freq[key] = 1\n else:\n pass\n return word_freq", "def list_to_word_count_dict(self,l):\n to_return = {}\n for i,word in enumerate(l):\n to_return[(word,i)] = 0\n return to_return", "def iterate_words(counter, li, all_dict, emotion_dict):\n\n counter += 1\n # iterate through the words in the list\n for word in li:\n # if word not in the dict of all words add it with frequency 1, else increase its frequency by 1\n if word not in all_dict:\n all_dict[word] = 1\n else:\n all_dict[word] += 1\n # if word not in the dict of words with certain emotion add it with frequency 1, else increase its frequency by 1\n if word not in emotion_dict:\n emotion_dict[word] = 1\n else:\n emotion_dict[word] += 1\n\n return counter", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def _create_dictionary(self, document_set):\n words = self._normalize_words(document_set.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def generate_dictionary(location):\n f = open('../data/wordlist.txt', 'rb')\n words = Counter(re.findall('[a-z]+', f.read().lower().decode()))\n joblib.dump(words, location)", "def buildDict(self, dict):\n for word in dict:\n self.add(word)", "def wordbag( text, ignore_words = Ignore_words ) :\n iter = (stripword(s) for s in text.lower().split() if stripword(s) not in ignore_words)\n result = {}\n for x in iter :\n if result.has_key(x) :\n result[x] += 1\n else :\n result[x] = 1\n return result", "def fill_in_dict():\n # assign a 'data' list from the txt file\n data = open('words.txt')\n # assign an empty 'my_dict' dictionary\n my_dict = dict()\n\n for word in data:\n # fill in dictionarys wit a keys and empty values\n my_dict[word] = ''\n return(my_dict)", "def 
buildDict(self, dict):\n self.all_words = set(dict)\n self.wc_dict = collections.defaultdict(int)\n for w in dict:\n for wc in self.get_wildcards(w):\n self.wc_dict[wc] += 1", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist", "def word_map(text):\n\n # Replace puncation with words\n s = text.replace('.', \" :period:\")\n s = s.replace('\\n', \"\")\n s = s.replace('\"', \" :quote:\")\n s = s.replace(',', \" :comma:\")\n s = s.replace('?', \" :quest:\")\n\n words = sorted(set(s.split(\" \")))\n\n n_to_word = {}\n word_to_n = {}\n\n num = 0\n for word in words:\n n_to_word[num] = word\n word_to_n[word] = num\n num += 1\n\n return words, n_to_word, word_to_n", "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n file.close()\n words = text.split()\n d = {}\n current_word = '$'\n \n for next_word in words:\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n if next_word[-1] == '.' or next_word[-1] == '!' or next_word[-1] == '?':\n current_word = '$'\n else:\n current_word = next_word\n return d", "def perform_indexing(self, words_list):\n\n indexer_table = {}\n\n for word in words_list:\n hash_value = self.calculate_weighted_hash(word)\n freq_table = calculate_frequency_table(word)\n\n if hash_value not in indexer_table:\n indexer_table[hash_value] = {}\n indexer_table[hash_value][as_set(freq_table)] = [word]\n else:\n if as_set(freq_table) not in indexer_table[hash_value]:\n indexer_table[hash_value][as_set(freq_table)] = [word]\n else:\n indexer_table[hash_value][as_set(freq_table)].append(word)\n\n return indexer_table", "def words(self):\n\t\treturn {c: sorted(l) for (c,l) in sorted(self.dictData.items())}", "def makeWords(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if x not in self.words: \r\n self.words[x] = 1\r\n else: \r\n self.words[x] += 1\r\n return self.words", "def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]", "def make_mimic_dict(filename):\r\n with open(filename, 'r') as file:\r\n text = file.read().lower().replace(\"'\",'').split()\r\n mimic_dict = {}\r\n prev = ''\r\n for word in text:\r\n if not prev in mimic_dict:\r\n mimic_dict[prev] = [word]\r\n else:\r\n mimic_dict[prev].append(word)\r\n prev = word\r\n return mimic_dict", "def _create_dictionary(self, document):\n words = self._normalize_words(document.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def create_dict(text):\n #On/Off case sensitivity\n text = text.lower() \n\n #handy one liner that splits words apart via whitespace, and \n #removes punctuation. 
Results in list of words.\n word_list = [s.strip(string.punctuation) for s in text.split()]\n \n d = dict()\n for word in word_list:\n d[word] = d.get(word, 0) +1\n return d", "def histogram(word_list):\n assert type(word_list) == list\n\n histogram = {}\n total_words = len(word_list)\n word_frac = 1.0/total_words\n\n for word in word_list:\n if word in histogram: histogram[word] += word_frac\n else: histogram[word] = word_frac\n\n return histogram", "def __init__(self, dictionary):\n self.dict = {}\n for word in dictionary:\n abbr = self.gen_abbr(word)\n if abbr not in self.dict:\n word_set = set([word])\n self.dict[abbr] = word_set\n else:\n self.dict[abbr].add(word)", "def build_trigram_dict(word_list):\n\n trigram = {}\n for index in range(len(word_list) - 2):\n word1 = word_list[index]\n word2 = word_list[index + 1]\n word3 = word_list[index + 2]\n pair = (word1, word2)\n if pair not in trigram:\n trigram[pair] = [word3]\n else:\n trigram[pair].append(word3)\n return trigram", "def create_words_and_values(file_in):\n # Get all key words\n words_in_order = get_lemitized_words_in_order(file_in)\n\n words_and_values = get_keywords_and_values(words_in_order)\n\n return words_and_values", "def get_keywords_and_values(words):\n d={}\n triple_keyword_value = 5\n double_keyword_value= 3\n single_keyword_occurance_value = 1\n\n stop_words = set(stopwords.words(\"english\"))\n\n for i in range(0, len(words)-2):\n if words[i] not in stop_words and words[i].isalnum():\n d[words[i]] = d.get(words[i],0.0)+ single_keyword_occurance_value\n if words[i+1] not in stop_words and words[i+1].isalnum():\n d[words[i]+\" \"+words[i+1]] = d.get(words[i]+\" \"+words[i+1],0.0)+double_keyword_value\n if words[i + 2] not in stop_words and words[i + 2].isalnum():\n d[words[i]+\" \"+words[i+1]+\" \"+words[i+2]] = d.get(words[i]+\" \"+words[i+1]+\" \"+words[i+2],0.0)+triple_keyword_value\n\n print(i, len(words))\n\n if words[i+1] not in stop_words and words[i+1].isalnum():\n d[words[i+1]] = d.get(words[i+1],0.0)+ single_keyword_occurance_value\n if words[i+2] not in stop_words and words[i+2].isalnum():\n d[words[i+1]+\" \"+words[i+2]] = d.get(words[i+1]+\" \"+words[i+2],0.0)+double_keyword_value\n if words[i+2] not in stop_words and words[+2].isalnum():\n d[words[i+2]] = d.get(words[i+2],0.0)+ single_keyword_occurance_value\n return d", "def build_trigrams(words):\n trigrams = {}\n for i in range(len(words) - 2):\n pair = words[i:i + 2]\n follower = words[i + 2]\n # add tuple type to make it immutable / use append to add more options to the follower selections\n trigrams.setdefault(tuple(pair), []).append(follower)\n #print(trigrams) - for testing\n return trigrams", "def get_words(s):\n d = {}\n s = s.lower()\n for word in s.split():\n d[word] = d.get(word,0) + 1\n return d", "def frequency(lst):\n\n count = dict()\n for word in lst:\n if word in count:\n count[word] += 1\n else:\n count[word] = 1\n return count", "def make_worddict(self):\n\t\tprint(\"Making word dictionary\")\n\t\tword_to_freq = self.make_word_to_freq()\n\t\twords = list(word_to_freq.keys())\n\t\twords.sort() # sort alphabetically first to avoid non-deterministic ordering of words with the same frequency\n\t\twords.sort(key = lambda x:word_to_freq[x], reverse = True)\n\n\t\tfor word in words[:self.FREQCAP-len(self.worddict)]:\n\t\t\tself.worddict[word] = len(self.worddict)\n\t\t\n\t\tprint(\"Word dictionary size:\", len(self.worddict))", "def build_word_dict(self, file_type):\n file_name = self.config.parser[file_type + '_dir']\n with 
open(file_name, 'r') as f:\n for line in f.readlines():\n contents = line.strip().split(' ')\n # an empty line, means seperator for two batch\n # doc id, means a new batch whose `docid` is doc id\n # a word and its tag sepaerated by a blank\n if len(contents) >= 2:\n word, _ = contents[0], contents[1]\n self.word_dict.add_word(word)\n Print(f'word dict from {file_name} is added', 'success')", "def lookup_words_by_words(words, session, Word):\n\n return {w.word: w for w in session.query(Word).filter(\n Word.word.in_(words)).all()}", "def search_multiple_words(words):\n all_sites = {}\n for word in words:\n for site, freq in search_single_word(word):\n if site not in all_sites: # case 1: haven't included this site\n all_sites[site] = freq # make a new entry for site, freq\n else: # case 2: have included this site\n all_sites[site] += freq # add the frequencies\n L = all_sites.items()\n L.sort(key = lambda pair: pair[1], reverse = True)\n return L", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = {}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def _make_word_dictionary(self,annos):\n # get training annos\n train_annos = self.annos[\"train\"]\n # read tokens\n tokens_list = []\n for ann in train_annos:\n tokens_list += [tk for tk in ann[\"tokens\"]]\n # print results: count tokens and show top-n\n print(\"Top-{} tokens list:\".format(self.cfg.DATASET.SHOW_TOP_VOCAB))\n tokens_count = sorted(Counter(tokens_list).items(), key=lambda x:x[1])\n for tk in tokens_count[-self.cfg.DATASET.SHOW_TOP_VOCAB:]:\n print(\"\\t- {}: {}\".format(tk[0],tk[1]))\n # make wtoi, itow\n wtoi = {}\n wtoi[\"<PAD>\"], wtoi[\"<UNK>\"] = 0, 1\n wtoi[\"<S>\"], wtoi[\"<E>\"] = 2, 3\n for i,(tk,cnt) in enumerate(tokens_count):\n idx = i+4 # idx start at 4\n wtoi[tk] = idx\n itow = {v:k for k,v in wtoi.items()}\n self.cfg.MODEL.QUERY.EMB_IDIM = len(wtoi)\n return wtoi, itow", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def make_word_num_map(words):\n\tword_num_map = dict()\n\tfor word in words:\n\t\tword_num_map[word] = word_num_map.get(word, 0) + 1\n\treturn word_num_map", "def __init__(self):\n self.word_dict = collections.defaultdict(list)", "def extractWordFeatures(x):\n # BEGIN_YOUR_CODE (around 5 lines of code expected)\n a = Counter(x.split())\n return dict(a)\n # END_YOUR_CODE", "def word_frequencies(word_list: TextIO) -> dict:\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies", "def get_word_list_features(word_list, word_features):\n document = ' '.join(word_list)\n words = word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def getEmbeddings(model, words):\n\tembeddings = {}\n\tfor word in words:\n\t\tembeddings[word] = model[word]\n\treturn embeddings", "def 
count(words):\n word_count = {}\n num_words = 0\n unique_words = 0\n for word in words:\n num_words += 1\n if word_count.has_key(word):\n word_count[word] += 1\n else:\n word_count[word] = 1\n unique_words += 1\n word_count[\"total\"] = num_words\n word_count[\"unique\"] = unique_words\n return word_count", "def word_indexer(word_lst):\n unique_words = list(set(word_lst))\n word_index = {}\n for i in range(len(unique_words)):\n word_index[unique_words[i].lower()] = i + 4\n word_index['<PAD>'] = 0\n word_index['<START>'] = 1\n word_index['<UNK>'] = 2\n word_index['<UNUSED>'] = 3\n return word_index", "def create_dictionary(file_dir):\r\n\tword_list = []\r\n\tfile_list = read_files(file_dir, \"lab\") # step 7\r\n\tfor file in file_list:\r\n\t\twith open(file, 'r') as f:\r\n\t\t\ttext = f.read()\r\n\t\tword_list = store_to_dictionary(text, word_list) # step 8cii\r\n\tmake_dictionary_file(file_dir, word_list) # step 9\r", "def generate_vocab_dict(vocab):\n v_dict = {}\n for word in vocab:\n if len(word) in v_dict:\n v_dict[len(word)].append(word)\n else:\n v_dict[len(word)] = [word]\n return v_dict", "def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))", "def create_model_owc(text: str) -> Dict[str, Set[str]]:\n dict_so_far = {}\n list_of_words = str.split(text)\n\n\n for x in range(0, len(list_of_words)):\n \"\"\"\n check if the word is followed by a period and add it to the follow list if it is, then remove the period to \n check if the word is followed by something else\n \"\"\"\n if list_of_words[x][-1] == '.':\n list_of_words[x] = list_of_words[x][0:-1]\n update_follow_set(dict_so_far, list_of_words[x], '.')\n\n else:\n update_follow_set(dict_so_far, list_of_words[x], list_of_words[x + 1].rstrip('.'))\n return dict_so_far", "def count_words_in_sentence_list(sentence_list):\n\ttotal_dict = dict()\n\tfor _ in sentence_list:\n\t\tcount = count_word_in_each_sentence(_)\n\t\tfor key, value in count.items():\n\t\t\tif total_dict.get(key):\n\t\t\t\ttotal_dict[key] += value\n\t\t\telse:\n\t\t\t\ttotal_dict[key] = value\n\treturn total_dict", "def word_frequency_dict(tokens):\n\n\tfdist = FreqDist(tokens) \t\t\t\t\t\t# fdist.keys() fdist.values()\n\treturn dict(fdist)", "def read_dictionary():\n with open(FILE, 'r') as f:\n for line in f:\n words_lst = line.split()\n for word in words_lst:\n dict_list.append(word)", "def add_words_from_dict(self, kind: str, fn: str, words: Any) -> None:\n for word in words or []:\n self.words.add(word)\n self.words.add(word.lower())", "def anagrams(words):\n\n anagrams = {}\n for word in words:\n wordAsList = \"\".join(sorted(list(word))).strip()\n if anagrams.has_key(wordAsList):\n anagrams[wordAsList].append(word)\n else:\n anagrams[wordAsList] = [word]\n\n return anagrams", "def buildDict(self, dict):\n for word in dict:\n self.s.add(word)\n self.length_set = set([len(word) for word in dict])", "def dictionary(cleaned_data,threshold):\n news = []\n for date in cleaned_data:\n for headlines in cleaned_data[date]:\n news.append(headlines)\n\n word_freq = nltk.FreqDist(itertools.chain(*news))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id", "def word_frequency(words):\r\n frequency = {}\r\n for w in words:\r\n frequency[w] = frequency.get(w, 0) + 1\r\n return frequency", "def to_words(content,words):\n return ''.join(words[x] for x in 
content)", "def test_dictionary_create_repeat():\n tokens = [['testing'], ['testing', 'testing', 'testing'], ['make', 'tokens']]\n dictionary = edurate_gensim.dictionary_create(tokens)\n corp = [dictionary.doc2bow(token) for token in tokens]\n assert corp == [[(0, 1)], [(0, 3)], [(1, 1), (2, 1)]]", "def _vector_mapping(self) -> dict:\n words = set()\n for file in os.listdir(self.processed_path):\n doc_path = f\"{self.processed_path}/{file}\"\n with open(doc_path, 'r') as f:\n text_words = f.readline().split()\n words = words.union(set(text_words))\n words = list(words)\n words.sort()\n\n return dict(zip(words, range(len(words))))", "def build_dict(min_word_freq=0, train_dir=\"\", test_dir=\"\"):\n word_freq = collections.defaultdict(int)\n files = os.listdir(train_dir)\n for fi in files:\n with open(os.path.join(train_dir, fi), \"r\") as f:\n word_freq = word_count(f, word_freq)\n files = os.listdir(test_dir)\n for fi in files:\n with open(os.path.join(test_dir, fi), \"r\") as f:\n word_freq = word_count(f, word_freq)\n\n word_freq = [x for x in six.iteritems(word_freq) if x[1] > min_word_freq]\n word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*word_freq_sorted))\n word_idx = dict(list(zip(words, six.moves.range(len(words)))))\n return word_idx", "def make_bag_words(document_tokenized):\n bag_words = dict()\n for token in document_tokenized:\n if token in bag_words.keys():\n bag_words[token] += 1\n else:\n bag_words[token] = 1\n return bag_words", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def bow(tokens):\n return dict(collections.Counter(re.findall(r'\\w+', \" \".join(tokens))))", "def prep_dict(word):\n counts = {}\n for l in word.lower():\n if l!=\" \":\n counts[l] = counts.get(l,0) + 1\n return counts", "def get_count_words(novel, words):\n dic_word_counts = {}\n for word in words:\n dic_word_counts[word] = novel.get_count_of_word(word)\n return dic_word_counts", "def get_features(words):\n features = {}\n for word in [i for i in words.split() if i not in stopwords.words('english')]:\n features['contains_%s' % word.lower()] = True\n return features", "def count(words):\n\n values = []\n \n # dictionary whose keys are words and values number of occurrences\n D = {}\n\n for word in words:\n # if word is already in dict add 1 to the count\n try : D[word] +=1\n # otherwise add entrye to dict\n except : D[word] = 1\n\n values += [D[word]]\n\n return values", "def build_trigrams(words):\n trigrams = {}\n for i in range(len(words) - 2): # why -2 ?\n pair = ()\n pair = words[i:i + 2]\n key = \" \"\n key = key.join(pair)\n follower = []\n follower.append(words[i + 2])\n follower = words[i + 2]\n if key in trigrams.keys():\n existing_value = trigrams[key]\n list(existing_value)\n existing_value.append(follower)\n 
trigrams[key] = existing_value\n else:\n trigrams.update({key: list(follower.split(\" \"))})\n return trigrams", "def word_counter(list_) -> list:\n from operator import itemgetter\n _dict = dict()\n for word in list_:\n if word in _dict:\n _dict[word] = _dict[word] + 1\n else:\n _dict[word] = 1\n return sorted(_dict.items(), key=itemgetter(1), reverse=True)", "def clsWordCounts(wd_dict, wd_list, cl):\n\n # set up counter object for words in passed word list\n word_counts = Counter(wd_list)\n\n # for each term in counter object\n for term in word_counts:\n\n # if already in dictionary add to the count\n if term in wd_dict[cl]:\n\n wd_dict[cl][term] += word_counts[term]\n\n # else add to the dictionary\n else:\n\n wd_dict[cl][term] = word_counts[term]\n\n return wd_dict", "def analyze_word(s):\n\n a = {}\n a['word'] = s\n a['n_letters'] = len(s)\n a['n_vowels'] = count_vowels(s)\n \n return a" ]
[ "0.7771935", "0.7714796", "0.76650697", "0.75896126", "0.7402118", "0.7371921", "0.7257023", "0.724838", "0.72190464", "0.71511894", "0.7121465", "0.70906883", "0.70329714", "0.69910663", "0.6939837", "0.6926235", "0.69013107", "0.68620795", "0.68157786", "0.6809816", "0.6763535", "0.6733835", "0.66926974", "0.6672779", "0.6632391", "0.6610456", "0.65891856", "0.6588288", "0.6573257", "0.6570725", "0.65617216", "0.6555818", "0.65151054", "0.6510993", "0.6505765", "0.6496117", "0.6495923", "0.64873344", "0.64711845", "0.6463316", "0.646033", "0.644894", "0.64170086", "0.6415676", "0.64130116", "0.6403004", "0.63776106", "0.63559026", "0.6348519", "0.6340014", "0.63326", "0.63074356", "0.62916356", "0.6282301", "0.6274789", "0.6257068", "0.62545043", "0.6248074", "0.6244352", "0.62415487", "0.62377536", "0.6234071", "0.6231643", "0.6228746", "0.6220464", "0.61950105", "0.61757195", "0.6175457", "0.6174988", "0.61698747", "0.6156159", "0.6156113", "0.61491454", "0.6146453", "0.61455715", "0.61360997", "0.6124478", "0.61233276", "0.6106081", "0.61056834", "0.60981476", "0.6092355", "0.6091201", "0.6087671", "0.6076661", "0.607472", "0.60695285", "0.6059188", "0.6058311", "0.6057396", "0.6047712", "0.60415244", "0.6040443", "0.60358363", "0.6035677", "0.6022717", "0.6018232", "0.6008099", "0.60052556", "0.5999378" ]
0.75077
4
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
def search(self, word: str) -> bool: # # for candidate in self.buckets[len(word)]: # # for a, b in zip(word, candidate): # # result = any(sum(a!=b)) return any(sum(a!=b for a, b in zip(word, candidate)) == 1 for candidate in self.buckets[len(word)]) # # for candidate in self.buckets[len(word)]: # sum = 0 # for a, b in zip(word, candidate): # sum += (a!=b) # if sum == 0: # return True # return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_word(trie, string: str) -> bool:\n return any(w == string for w in trie)", "def search(self, word):\n curr = self.trie\n for i, ch in enumerate(word):\n curr = curr.get(ch, {})\n if curr:\n continue\n else:\n break\n \n if i==len(word)-1 and '\\0' in curr:\n ret = True\n else:\n ret = False\n\n return ret", "def has_word(self, word)->bool:\n if len(word) == 1:\n chars = word + GNode.CHAR_EOW\n else:\n chars = word[0] + GNode.CHAR_REV + word[1:] + GNode.CHAR_EOW\n cursor = self.root\n for c in chars.lower():\n if c not in cursor.children:\n return False\n else:\n cursor = cursor.children[c]\n return True", "def search(self, word):\n length = len(word)\n if length == 1:\n for letter in string.ascii_lowercase:\n key = \"{}/{}\".format(1, letter)\n if key in self.origin and letter != word:\n return True\n return False\n\n key = \"{}/{}\".format(len(word), word[0])\n ls = self.origin.get(key, [])\n if len(ls) == 0:\n return False\n\n for origin in ls:\n if self.only_modify_one_char(word, origin):\n return True\n return False", "def in_trie(self, trie, word):\n current_dict = trie\n for letter in word:\n if letter in current_dict:\n current_dict = current_dict[letter]\n else:\n return False\n else:\n if '_end_' in current_dict:\n return current_dict['_end_']\n else:\n return False\n return False", "def search(self, word):\n level = self.trie\n for c in word:\n if c in level:\n level = level[c]\n else:\n return False\n return self.end in level", "def search_full_word(self, word):\n node = self.root\n for i in range(len(word)):\n current_letter = word[i]\n current_index = self.to_index(current_letter)\n if node.children[current_index] and node.is_word:\n node = node.children[current_index]\n else:\n return False\n # save last node after a search\n self.saved_node = node\n return True", "def search(self, word):\n if not word:\n return False\n if word[0] not in self.trie:\n return False\n cur = self.trie[word[0]]\n for char in word[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return (cur and cur.isTerm) == True", "def search(self, word: str) -> bool:\n tries = [self.trie]\n for c in word:\n if c != '.':\n tries = [\n trie[c] for trie in tries if c in trie\n ]\n else:\n tries = [\n v for trie in tries for v in trie.values() if v\n ]\n\n if not tries:\n return False\n\n return any(None in trie for trie in tries)", "def search(self, word):\r\n t = self.trie\r\n for w in word: \r\n if w not in t: \r\n return False\r\n t = t[w]\r\n if '#' in t:\r\n return True\r\n return False", "def search(self, word: str) -> bool:\n node = self.root\n for char in word:\n if char not in node:\n return False\n node = node[char]\n return self.end_of_word in node", "def search(self, word: str) -> bool:\r\n nroot=self.root\r\n for j in word:\r\n \r\n # index=ord(j)-ord('a')\r\n if j not in nroot.children:\r\n return False\r\n nroot=nroot.children[j] \r\n return nroot.endofword", "def search(self, word):\n for i in xrange(len(word)):\n w = word[:i] + '*' + word[i+1:]\n if w in self.dict and (len(self.dict[w]) > 1 or word[i] not in self.dict[w]): return True \n return False", "def search(self, word: str) -> bool:\n cur = self.root\n for letter in word:\n if letter not in cur:\n return False\n cur = cur[letter]\n if \"isWord\" not in cur:\n return False\n return True", "def search(self, word: str) -> bool:\n temp=self.root\n \n for char in word:\n if(not temp.children[ord(char)-ord('a')]):\n return False\n temp=temp.children[ord(char)-ord('a')]\n \n if(temp and temp.endOfWord==True):\n 
return True\n \n return False", "def check_present_and_add(self, word):\n\n current_node = self.root_node\n is_new_word = False\n\n # iterate through trie adding missing notes\n for char in word:\n if char not in current_node:\n is_new_word = True\n current_node[char] = {}\n current_node = current_node[char]\n \n # mark end of word so that words that are prefixes of present words are not\n # returned - i.e. each word must have an explicit \"End of Word\" marker\n if \"End of Word\" not in current_node:\n is_new_word = True\n current_node[\"End on Word\"] = {}\n\n return is_new_word", "def check_word(self, word):\n\n if not self.words:\n return None\n word = ''.join(word)\n return next((True for w in self.words if w == word), False)", "def search(self, word: str) -> bool:\n current = self.root\n for letter in word: \n current = current.children.get(letter)\n if not current:\n return False\n return current.is_word", "def checkWord(self, word):\n\t\treturn self.root.checkString(u' ' + word);", "def search(self, word: str) -> bool:\n return self.trie.search(word + '#', self.trie.trie)", "def search(self, word):\n node = self.root\n for char in word:\n if char in node.dict:\n node = node.dict[char]\n else:\n return False\n if node.end:\n return True\n return False", "def search(self, word: str, root: dict = None) -> bool:\n word += '0'\n return self._search(word, self.trie)", "def search(self, word: str) -> bool:\n node = self.root\n for c in word:\n if c not in node:\n return False\n node = node[c]\n return self.end in node", "def checkWord(word):\r\n check = word in cachedWordList\r\n if check:\r\n print(word + \" spelt correctly\")\r\n else:\r\n print(word + \" not found in dictionary\")\r\n return check", "def contains_word(root, input_word):\n\n cNode = root\n\n for char in list(input_word):\n found_match = False\n for node in cNode.nodes:\n if node.char == char:\n found_match = True\n cNode = node\n break\n\n if not found_match:\n print(\"Exited in for loop\")\n return False\n\n return cNode.is_word", "def search(self, word):\n current = self.root\n for i in word:\n if current.hash_map.get(i) is None:\n return False\n current = current.hash_map.get(i)\n if current.num != 0:\n return True\n return False", "def search(self, word: str) -> bool:\r\n node=self.root\r\n for c in word:\r\n if c not in node:\r\n return False\r\n else:\r\n node = node[c]\r\n if self.end_of_words in node:\r\n return True\r\n else:\r\n return False", "def search(self, word: str) -> bool:\n #start from the root\n node = self.root\n for char in word:\n if char in node.child:\n node = node.child.get(char)\n else:\n return False\n return node.isWord", "def search(self, word):\n #edge case\n if word == \"\": \n return True if self._dict.children[26] != None else False\n\n cur = self._dict\n for c in word:\n ind = ord(c) - 97\n if cur.children[ind] == None:\n return False\n cur = cur.children[ind]\n\n return True if cur.isleaf == True else False", "def search(self, word):\n current = self.root\n for letter in word:\n current = current.children.get(letter)\n \n if current is None:\n return False\n return current.is_word", "def search(self, word: str) -> bool:\n node = self\n for c in word:\n node = node.d.get(c)\n if not node:\n return False\n return node.end", "def search(self, word: str) -> bool:\n n = self.root\n for l in word[0:-1]:\n cn = n.get_child_with_val(l)\n if cn == None or cn.eow:\n return False\n n = cn\n\n last_node = n.get_eow_child_with_val(word[-1])\n if last_node == None:\n return False\n return True", 
"def search(self, word):\n if len(word) not in self.length_set:\n return False\n for i in self.mutate(word):\n if i in self.s:\n return True\n return False", "def search(self, word: str) -> bool:\n curr = self.root\n for c in word:\n if not c in curr.adj:\n return False\n curr = curr.adj[c]\n return curr.isWord", "def search(self, word: str) -> bool:\n node = self.root\n for c in word:\n if c not in node.children:\n return False\n node = node.children[c]\n return True", "def search(self, word: str) -> bool:\n def DFS(i, word, trie):\n if i == len(word):\n return '#' in trie\n # c == '.'\n # c in trie\n c = word[i]\n res = False\n if c == '.':\n for cc in trie.keys():\n if cc != '#':\n res = res or DFS(i + 1, word, trie[cc])\n elif word[i] in trie:\n res = res or DFS(i + 1, word, trie[c])\n return res\n\n return DFS(0, word, self.trie)", "def search(self, word: str) -> bool:\n node = self.root\n for w in word:\n node = node.children.get(w)\n if not node:\n return False\n return node.end", "def search(self, word: str) -> bool:\n curr = self.root\n for ch in word:\n curr = curr.children.get(ch)\n if curr is None:\n return False\n return curr.is_word", "def search(self, word: str) -> bool:\n def recur(word, trie):\n if not word:\n if '$' in trie:\n return True\n return False\n if word[0] in trie:\n return recur(word[1:], trie[word[0]])\n elif word[0] == '.':\n to_return = False\n for char in trie.keys():\n to_return = to_return or recur(word[1:], trie[char])\n return to_return\n return False\n return recur(word, self.trie)", "def search(self, word):\n curNode = self.root\n for c in word:\n if not c in curNode:\n return False\n curNode = curNode[c]\n \n # Doesn't end here\n if not self.end in curNode:\n return False\n \n return True", "def search(self, word: str) -> bool:\n if len(word) == 0:\n return True\n idx = ord(word[0]) - ord('a')\n if not self.children[idx]:\n return False\n if len(word) == 1:\n return self.children[idx].tail # switch for only once\n\n return self.children[idx].search(word[1:])", "def search(self, word):\n pointer = self.tries\n for i in range(len(word)):\n ascii = ord(word[i]) - ord('a')\n if pointer[ascii] == None:\n return False\n pointer = pointer[ascii]\n if word in pointer[26:]:\n return True\n else:\n return False", "def search(self, word: str) -> bool:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n return False\n curr_chars = curr_chars[c]\n return self.end_of_word in curr_chars", "def search(self, word):\n node = self.root\n for letter in word:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return node.endOfWord", "def search(self, word: str) -> bool:\n parent = self.root\n for char in word:\n if char not in parent.children:\n return False\n parent = parent.children[char]\n return parent.endhere", "def search(self, word):\n now = self.tree\n for i in word:\n if i in now:\n now = now[i]\n else:\n return False\n return True if 'end' in now else False", "def search(self, word: str) -> bool:\n node = self.root\n for char in word:\n if char not in node:\n return False\n node = node[char]\n if \"#\" not in node:\n return False\n return True", "def search_word(self, word):\n current = self.root\n for letter in word:\n \n current = current.get_child(letter) # node is not next letter in word\n\n if not current: # if node is null, return False\n return False\n \n if not current.get_end():\n return False\n return True", "def search(self, word: str) -> bool:\n\n temp = self.start\n\n for i in 
range(len(word)):\n \n if temp.children[ord(word[i]) - ord('a')] is None:\n return False\n temp = temp.children[ord(word[i])-ord('a')]\n if i+1 == len(word) and temp.end == True:\n return True\n\n return False", "def search(self, word):\n node = self.root\n for c in word:\n if c in node.children:\n node = node.children[c]\n else:\n return False\n return node.word_end", "def search(self, word: str) -> bool:\n current = self.root\n for letter in word:\n if letter not in current.children:\n return False\n current = current.children[letter]\n return current.has_end", "def search(self, word):\n node = self.root\n for letter in word:\n if letter not in node.children:\n return False\n node = node.children[letter]\n return node.word", "def search(self, word):\n curr = [self.trie]\n\n for c in word:\n next_curr = []\n if c != '.':\n for n in curr:\n if c in n.children:\n next_curr.append(n.children[c])\n else:\n for n in curr:\n next_curr.extend(n.children.values())\n curr = next_curr\n if not curr:\n return False\n\n return any([n.is_term for n in curr])", "def search(self, word: str) -> bool:\n currnode=self.root\n\n for ch in word:\n node=currnode.children.get(ch)\n if node is None:\n return False\n\n return currNode.isWordEnd", "def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False", "def has_word(self, word):\n return word in self.word_set", "def search(self, word):\n if len(word) == 0:\n return False\n todo = [self]\n for idx, letter in enumerate(word):\n if len(todo) == 0:\n break\n new_todo = []\n if letter == '.':\n for node in todo:\n new_todo += list(node.kids.values())\n else:\n for node in todo:\n if letter in node.kids:\n new_todo.append(node.kids[letter])\n if idx == len(word) - 1 and node.kids[letter].isWord:\n return True\n todo = new_todo\n if len(todo) > 0 and word[-1] == '.':\n for ele in todo:\n if ele.isWord:\n return True\n\n return False", "def search(self, word):\n node = self.search_prefix(word)\n return node is not None and '#' in node and node['#'] == 1", "def search(self, word):\n currNode = self.root\n\n for c in word:\n if c not in currNode.children:\n return False\n currNode = currNode.children[c]\n return currNode.isEnd", "def search(self, word: str) -> bool:\n node = self.head\n for c in word:\n if c not in node.next:\n return False\n node = node.next[c]\n return node.valid", "def search(self, word):\n lenw = len(word)\n if lenw not in self.bag: return False\n return any([self.equal_to(word, item) for item in self.bag[lenw]])", "def search(self, word) -> bool:\n return self.match(self.root, 0, word)", "def search(self, word: str):\n node = self.root\n for letter in word:\n if letter in node.child:\n node = node.child[letter]\n else:\n return False\n return node.is_leaf", "def check_match(self, word_found,word): \r\n self.count = 0\r\n for char in self.word_found:\r\n if char != self.word[self.count]:\r\n return False\r\n self.count +=1\r\n #print(self.count)\r\n \r\n return True", "def search(self, word: str) -> bool:\n node = self._traverse(word)\n return node.word if node else False", "def search(self, word: 'str') -> 'bool':\n p = self.root\n for ch in word:\n if ch in p:\n p = p[ch]\n else:\n return False\n\n if '#' in p:\n return True\n else:\n return False", "def check_word(self, word):\n first_letter, rest = word[0], word[1:]\n\n for possible_start in self._find_letter(first_letter):\n if self._check_word(possible_start, rest):\n return True\n\n return False", "def search(self, word):\n def r_search(word,i,d):\n if 
len(word) <= i:\n return True\n \n if d == 0:\n return False\n \n return (word[i] in d) and r_search(word,i+1,d[word[i]])\n \n tri = self.root.d\n if len(word) == 0: \n return True\n \n if len(tri) == 0:\n return False\n \n return r_search(word + '$',0,tri)", "def check_word(word):\r\n if word in word_master:\r\n valid = True\r\n else:\r\n valid = False\r\n return valid", "def word_check(word):\n word1 = word[1:]\n if word1 not in word_dict: return False\n if not homophones (word, word1): return False\n \n \n word2 = word[0] + word[2:]\n if word2 not in word_dict: return False\n if not homophones(word, word2): return False\n\n return True", "def search(self, word: str) -> bool:\n node = self.find(word)\n return node and node.is_word", "def search(self, word: str) -> bool:\n return self.search_recursive(self.root, word, 0)", "def search(self, word: str) -> bool:\n tmp = self.root\n for i in range(len(word)): \n if word[i] == \".\":\n valid = False\n for nxt in tmp.seq.keys():\n valid = valid or self.search(word[:i] + nxt + word[i+1:])\n \n if valid:\n return True\n return False\n \n if word[i] not in tmp.seq:\n return False \n tmp = tmp.seq[word[i]]\n \n return tmp.value == word", "def search(self, word):\n node = self.root\n for i in word:\n if i not in node.children:\n return False\n node = node.children[i]\n return node.word", "def search(self, word):\n length = len(word)\n if length not in self.dic:\n return False\n else:\n candidateList = self.dic[length]\n for candidate in candidateList:\n for i in xrange(length):\n if candidate[i] != word[i]:\n if candidate[i+1:] == word[i+1:]:\n return True\n else:\n break\n return False", "def chars_match (found, word):\n index = 0\n for i in found:\n if (i != word[index]):\n return False\n index += 1\n return True", "def check_word(self, word):\n word = word.lower().strip()\n return not word or word in self.dictionary", "def valid_word(self, word):\n\n word = ''.join(word)\n return next((w for w in self.words if w == word), None)", "def contains(self, word: Iterable[Terminal]) -> bool:\n return self._get_final_state(word) is not None", "def test_contains_returns_false_when_word_mismatches(full_trie):\n assert full_trie.contains(\"hello\") is False", "def search(self, word):\n for wc in self.get_wildcards(word):\n # Don't forget word not in self.all_words\n if wc in self.wc_dict and (self.wc_dict[wc] > 1 or word not in self.all_words) :\n return True\n return False", "def test_contains_returns_true_when_word_in_trie(full_trie):\n assert full_trie.contains(\"hey\") is True", "def search(self, word):\n end_node = self.__find_node(word)\n if end_node and end_node.is_word:\n return True\n return False", "def check(self,word):\n if self.pre:\n def sub_word(chars):\n if re.match('^'+chars+'.*',word):\n return word[len(chars):]\n else:\n return None\n else:\n def sub_word(chars):\n if re.match('^.*'+chars+'$',word):\n return word[:-len(chars)]\n else:\n return None\n\n if word == '':\n return self\n for chars in self.branches.keys():\n res = sub_word(chars)\n if res:\n return self.branches[chars].check(res)\n elif res == '':\n return self.branches[chars]\n return None", "def search(self, word):\n for candidate in self.candidates(word):\n if self.neighbors[candidate] > 1:\n return True\n elif self.neighbors[candidate] == 1 and word not in self.word_set:\n return True\n return False", "def trifeca(word):\n if not word:\n return False\n\n for i in range(len(word)-1):\n if word[i]==word[i+1]:\n if len(word[i:])>=6:\n if word[i+2:i+6:2]==word[i+3:i+7:2]:\n 
return True \n return False", "def existRecu(self, board, word, row, col) -> bool:\n if len(word) == 0:\n return True\n elif row < 0 or row == len(board): # Edge breaking\n return False\n elif col < 0 or col == len(board[0]): # Edge breaking\n return False\n elif board[row][col] is None:\n return False\n elif board[row][col] != word[0]: # miss-matching\n return False\n else:\n board[row][col] = None\n return self.existRecu(deepcopy(board), word[1:], row - 1, col) or \\\n self.existRecu(deepcopy(board), word[1:], row + 1, col) or \\\n self.existRecu(deepcopy(board), word[1:], row, col - 1) or \\\n self.existRecu(deepcopy(board), word[1:], row, col + 1)", "def is_word(self, word):\r\n\r\n return self.data(word) is not None", "def contains(self, word: str) -> bool:\n if not isinstance(word, str):\n raise TypeError('word argument must be str')\n\n current = self._base\n for character in word + '$':\n try:\n current = current[character]\n except KeyError:\n return False\n else:\n return True", "def test_contains_returns_true_for_partial_word_in_multi_word_trie(multi_trie):\n assert multi_trie.contains(\"hell\") is True", "def search(self, word):\n if not word:\n return False\n if '.' not in word:\n return word in self.word_dict[len(word)]\n for v in self.word_dict[len(word)]:\n for i, ch in enumerate(word):\n if ch != v[i] and ch != '.':\n break\n else:\n return True\n return False", "def __contains__(self, word):\n if word in self.vocab:\n return True\n else:\n char_ngrams = compute_ngrams(word, self.min_n, self.max_n)\n return any(ng in self.ngrams for ng in char_ngrams)", "def search(self, word: str) -> bool:\n m = len(word)\n\n for dict_word in self.dict[m]:\n i = 0\n while i < m:\n if (word[i] == dict_word[i]) or (word[i] == '.'):\n i += 1\n else:\n break\n\n if i == m:\n return True\n\n return False", "def basic_check(word):\n if word[-1] == \"b\" or word[-1] == \"g\":\n return False\n consonant_counter = 0\n for char in word:\n if char in VOWELS:\n consonant_counter = 0\n else:\n consonant_counter += 1\n if consonant_counter >= 3:\n return False\n return True", "def search(self, word: str, current_node=None) -> bool:\n\t\tif current_node == None:\n\t\t\tcurrent_node = self.root\n\t\tfor ch in word:\n\t\t\tfound_in_child = False\n\t\t\tfor node in current_node.children:\n\t\t\t\tif node.char == ch:\n\t\t\t\t\tfound_in_child = True\n\t\t\t\t\tcurrent_node = node\n\t\t\tif found_in_child == False: # some char not found anywhere\n\t\t\t\treturn False\n\t\tif current_node.completeWord:\n\t\t\treturn True\n\t\treturn False", "def word_dict_contains (self,\r\n word):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('WORDDICT CONTAINS')\r\n\r\n value_tuple = (notebookname, word,)\r\n db_cursor.execute(\"SELECT rowid\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(word) in self.word_dict", "def isWord(word, dictionary):\n return word in dictionary", "def test_contains_returns_false_when_word_not_in_trie(empty_trie, full_trie):\n assert empty_trie.contains(\"goodbye\") is False\n assert full_trie.contains(\"goodbye\") is False", "def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in 
dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False", "def search(self, word: 'str') -> 'bool':\n \n def dfs(word,dictword):\n if not word: \n if '#' in dictword:\n return True\n else:\n return False\n for k in range(len(word)):\n if word[k]!='.':\n if word[k] not in dictword:\n return False\n else:\n return dfs(word[k+1:],dictword[word[k]])\n else:\n for ss in 'qwertyuiopasdfghjklzxcvbnm':\n if ss in dictword and dfs(word[k+1:],dictword[ss]):\n return True\n return False\n return dfs(word,self.dictword)" ]
[ "0.7898829", "0.77123356", "0.76817137", "0.7651471", "0.74973875", "0.7494274", "0.7490946", "0.7471417", "0.7440383", "0.7440078", "0.74153924", "0.74125475", "0.73968333", "0.7382937", "0.73649126", "0.7333139", "0.73310703", "0.7293905", "0.729297", "0.72762936", "0.7275739", "0.7211364", "0.72047067", "0.72003835", "0.7198947", "0.7198598", "0.71788365", "0.71755683", "0.7160483", "0.7130068", "0.7127907", "0.7124436", "0.71228695", "0.70993555", "0.709855", "0.7091737", "0.7072758", "0.7069442", "0.70634663", "0.7062562", "0.7016676", "0.7000654", "0.6995837", "0.6994039", "0.6986388", "0.6974976", "0.6960983", "0.6959844", "0.6952699", "0.6942338", "0.69327486", "0.6931396", "0.693002", "0.69217825", "0.68994135", "0.6897771", "0.688772", "0.6882916", "0.68795663", "0.6873254", "0.6862361", "0.68558645", "0.6835378", "0.6818395", "0.68171716", "0.6799035", "0.6796226", "0.6790387", "0.67855227", "0.6781026", "0.67724997", "0.6772053", "0.67699677", "0.67619", "0.6748994", "0.6747389", "0.6742173", "0.6734318", "0.67328864", "0.6721488", "0.66796464", "0.6677644", "0.6670951", "0.6667653", "0.66529036", "0.6651965", "0.6614682", "0.66124547", "0.6597147", "0.6595557", "0.65923715", "0.65825653", "0.6559314", "0.65043974", "0.65025234", "0.6498732", "0.64883184", "0.6487576", "0.64751035", "0.6463183" ]
0.664082
86
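
For context on the row above: the query asks whether any stored word matches the given word after changing exactly one character, and the search method in the document field answers it by scanning self.buckets[len(word)], a mapping from word length to the stored words of that length. A minimal, self-contained sketch of that length-bucket approach follows; the MagicDictionary class name and the constructor are illustrative assumptions, since the row only shows the search method.

from collections import defaultdict


class MagicDictionary:
    """Is there a stored word that differs from the query word in exactly one character?"""

    def __init__(self, words=()):
        # Group stored words by length so search only compares same-length candidates.
        self.buckets = defaultdict(list)
        for w in words:
            self.buckets[len(w)].append(w)

    def search(self, word: str) -> bool:
        # A candidate matches when exactly one aligned character differs.
        return any(
            sum(a != b for a, b in zip(word, candidate)) == 1
            for candidate in self.buckets[len(word)]
        )


d = MagicDictionary(["hello", "leetcode"])
print(d.search("hhllo"))  # True: one character changed
print(d.search("hello"))  # False: zero characters changed
print(d.search("hell"))   # False: no same-length candidate

Bucketing by length restricts each query to same-length candidates, which is also what makes the zip-based comparison in the row's document safe: every candidate is guaranteed to align character-for-character with the query word.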
Load twine from a .json filename, filelike or a json string and validates twine contents.
def _load_twine(self, source=None): if source is None: # If loading an unspecified twine, return an empty one rather than raising error (like in _load_data()) raw_twine = {} logger.warning("No twine source specified. Loading empty twine.") else: raw_twine = self._load_json("twine", source, allowed_kinds=("file-like", "filename", "string", "object")) self._validate_against_schema("twine", raw_twine) self._validate_twine_version(twine_file_twined_version=raw_twine.get("twined_version", None)) return raw_twine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_tweets(filename):\n\n try:\n with open(filename, 'r') as f:\n data = json.loads(f.read())\n except:\n print('ERROR in load_tweets.')\n\n return data", "def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def load(filename):\n\n try:\n with open(filename) as data:\n return json.load(data)\n except:\n return None", "def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))", "def sniff( self, filename ):\r\n try:\r\n json.load( open(filename) )\r\n return True\r\n except Exception:\r\n return False", "def load_json(path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'r') as json_file:\n return json.load(json_file)", "def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')", "def load_tweets(file):\n with open(file) as f:\n data = json.load(f)\n return data", "def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data", "def test_loader_loads_from_str():\n base_json = '{\"foo\": \"bar\"}'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json, from_file=False) == json_test", "def test_verifies_token_file_contains_json(self):\n\n with open(self.sample_token_file, 'w',\n encoding=\"utf8\", errors=\"surrogateescape\") as stf_h:\n stf_h.write(\"Bad JSON\")\n\n with self.assertRaises(json.decoder.JSONDecodeError):\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def load_from_json_file(filename):\n if type(filename) is not str:\n return\n\n with open(filename, mode=\"r\") as file:\n return json.loads(file.read())", "def load_json(filepath: str):\n with open(filepath, \"r\", encoding=\"utf8\") as f:\n return json.loads(f.read())", "def from_file(filename):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n f = open(filename, 'r')\n string = f.read()\n return json.loads(string)", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def read(self,filename):\n with open(str(filename),\"r\") as f:\n data = f.read()\n #check if the loaded file is json\n try:\n datajson = json.loads(data)\n except Exception as e:\n if mer == True:\n merrors.error('could not load '+str(filename)+', add a basic entry to the config like {\"name\":\"Example\"}. Python error: '+str(e))\n quit()\n else:\n print(\"could not load \"+str(filename)+\". 
Python error: \"+str(e))\n quit()\n self.datajson = datajson\n self.filename = filename\n f.close()", "def load_json(filepath: str):\n with open(filepath, encoding=\"utf-8\") as f:\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, encoding=\"utf-8\") as round:\n return json.load(round)", "def load_life(name):\n\tif not '.json' in name:\n\t\tname += '.json'\n\t\n\twith open(os.path.join(LIFE_DIR, name), 'r') as e:\n\t\treturn json.loads(''.join(e.readlines()))", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def __load_json(self, path):\n try:\n with Path(path).open('r') as f:\n return json.load(f)\n except ValueError as ve:\n six.raise_from(ValueError(\"error while loading the fixture %s\" % path), ve)", "def load(self):\n filename = self._filename\n if not os.path.exists(filename):\n self.service.log.store('Cannot load %s, does not exist' % filename)\n return False\n \n # Read from file\n self.service.log.store('Loading %s' % filename)\n f = open(filename, 'r')\n raw = f.read()\n f.close()\n \n self.from_json(raw)\n return True", "def load_tweets(fname):\n tweets = []\n for line in open(fname):\n tweets.append(json.loads(line))\n return tweets", "def _loadf(fname):\n with open(fname, encoding=\"ISO-8859-1\") as f:\n return json.load(f)", "def load_from_file(self, filepath, validate=True, results=None):\n with open(filepath) as fd:\n try:\n data = json.load(fd)\n except ValueError as ex:\n ex = JSONEncodingError(ex)\n if not results:\n raise ex\n results.add(filepath, ex)\n return results\n\n return self.load(data, validate=validate, results=results, id=filepath)", "def test_load_unsupported_type(self):\n expected = {\n \"name\": \"Kevin\",\n \"age\": 21,\n \"pet\": {\n \"name\": \"Trippy Jack\",\n \"age\": 20762,\n \"__type__\": \"hyperdimensional.hamster\"\n }\n }\n with open('tests/unsupported_type.json', 'r') as json_file:\n self.assertEqual(expected, morejson.load(json_file))", "def loadJSON(jsonData):\n\n if hasattr(jsonData, 'read'):\n loadedjson = json.load(jsonData)\n elif isinstance(jsonData, str):\n if os.path.exists(jsonData):\n with open(jsonData) as jsonFile:\n loadedjson = json.load(jsonFile)\n else:\n try:\n loadedjson = json.loads(jsonData)\n except JSONDecodeError as e:\n raise ValueError(f\" {str(e)}: Got {jsonData}, either bad format of file does not exist\")\n\n elif isinstance(jsonData, dict):\n loadedjson = jsonData\n else:\n err = f\"workflow type: {type(jsonData)} is unknonw. Must be str, file-like or dict. 
\"\n raise ValueError(err)\n\n\n return loadedjson", "def read_object_from_file(file_name):\n if os.path.exists(file_name) is False:\n print (\"Error read path: [%s]\" % file_name)\n return None\n with open(file_name, 'r') as f:\n try:\n obj = json.load(f)\n except Exception:\n print (\"Error json: [%s]\" % f.read()[0:10])\n return None\n return obj", "def test_load_an_object_json_file(self):\n from test.resources import object_json\n self.assertEqual(object_json._data, {'answer': 42})\n self.assertEqual(len(object_json), 1)\n self.assertEqual(object_json['answer'], 42)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def from_JSON(cls, filename):\n with open(os.path.expanduser(filename), encoding='utf-8') as f:\n return json.load(f, object_hook=class_hook)", "def process_jsonld_file(fname):\n with open(fname, 'r', encoding='utf-8') as fh:\n json_dict = json.load(fh)\n return process_jsonld(json_dict)", "def test_load_an_object_json_file(self):\n from test.resources import malaga\n self.assertEqual(len(malaga.data), 5018112)\n self.assertEqual(malaga.Model, 'iPhone 4')", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)", "def load_from_json_file(filename):\n with open(filename, \"r\") as my_file:\n return json.loads(my_file.read())", "def loadJson (self, path):\n\n # get all lines in json, concatenate then into a big string then parse it\n with open(path, \"r\") as file_content:\n all_lines = file_content.readlines()\n all_content_str = \"\".join(all_lines)\n json_dict = json.loads(all_content_str)\n self.tile_reprs = list(json_dict['tiles']['structural-tiles'].keys())\n\n # remove this empty char\n self.tile_reprs.remove(\"-\")", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def test_valid_json():\n invalid_json = False\n for filename in os.listdir(\"../networking\"):\n if filename.endswith(\".cfn.json\"):\n print(\"Validating json file: %s\" % filename)\n with open(f\"../networking/{filename}\", encoding=\"utf-8\") as f:\n try:\n json.load(f)\n print(\"SUCCESS: Valid json.\")\n except ValueError as e:\n print(\"ERROR: Invalid json: %s\" % e)\n invalid_json = True\n\n assert not invalid_json", "def load_rentals_file(filename):\n logging.debug('Loading rental file %s', filename)\n try:\n with open(filename) as file:\n try:\n data = json.load(file)\n except ValueError:\n logging.error('File %s cannot be read as JSON', filename)\n exit(0)\n except IOError:\n logging.error('File %s cannot be read (does not exist?)', filename)\n exit(0)\n logging.debug('Successfully loaded rental file %s', filename)\n return data", "def load():\n try:\n with open('learn.json', 'r') as file:\n return json.load(file)\n except IOError:\n return []", "def load_from_json_file(filename):\n with open(filename, mode=\"r\", encoding=\"utf-8\") as a_file:\n return json.loads(a_file.read())", "def json_load(file_path):\n\n with open(file_path) as f:\n return json_loads(f.read())", "def loadJSONFile(filename):\n\twith open(filename, 'r') as f:\n\t\treturn json.loads(f.read())", "def load(filename):\n if str(filename).endswith('.yml') or str(filename).endswith('.yaml'):\n return load_yaml(filename)\n elif str(filename).endswith('.json'):\n return load_json(filename)", "def load_hyp(filepath):\n with open(filepath, 'r') as f:\n return json.load(f)", "def load_json(file_name):\n return json.load(open(file_name))", "def 
load_from_json(self, json_fp: str):\n # TODO:\n pass", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def from_file(cls, file_name):\n\n with open(file_name, 'r') as fi:\n the_dict = json.load(fi)\n return cls.from_dict(the_dict)", "def load_from_json_file(filename):\n with open(filename, 'r', encoding='utf8') as f:\n return json.load(f)", "def load(self, s):\n self._filename = s\n # self._isLoaded = True\n with open(s, 'r') as f:\n self._dict = json.load(f)", "def load(file, **kwargs):\n extension = os.path.splitext(file)\n if extension not in {'.json', '.axjson'}:\n raise RuntimeError('Given extension ({}) not supported'.format(extension))\n with open(file) as f:\n data = json.load(f)\n if extension == '.json':\n return data\n else:\n json_str = json.dumps(data)\n return loads(json_str, **kwargs)", "def load_json_document(f):\n return json.load(f)", "def test_validate_file_extension_json(self):\n data_locations = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n data_locations_false = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n a = validate_file_extension_json(data_locations)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_json(data_locations_false)\n data_locations.close()\n data_locations_false.close()\n self.assertTrue(\"Kein gültiges JSON-File\" or \"No valid JSON file\" in\n str(context.exception))", "def test_load_json_fobj():\n\n file_name = 'test_fooof_all'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'r') as f_obj:\n data = load_json(f_obj, '')\n\n assert data", "def _load(self, json_str, filepath):\n # pylint: disable=protected-access\n return self.json_o._load(json_str, filepath)", "def load_from_json_file(filename):\n with open(filename) as f:\n return json.load(f)", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def loadTweets(filename):\n tweets = open(filename, 'r').read().splitlines()\n print \"Loading %d tweets from %s ...\" % (len(tweets), filename)\n tweetObjects = []\n for tweet in tweets:\n try:\n js = json.loads(tweet)\n if (not ('place' in js)) or js['place'] == None:\n continue\n elif (not ('full_name' in js['place'])):\n continue\n elif (not ('geo' in js)) or js['geo'] == None:\n continue\n elif (not ('coordinates' in js['geo'])):\n continue\n coords = js['geo']['coordinates']\n place = js['place']\n tweetObject = Tweet(js['text'], place['full_name'], coords[0], coords[1], place['country'], js['created_at'])\n tweetObjects.append(tweetObject)\n except ValueError:\n pass\n print \"Loaded %d tweets\" % len(tweetObjects)\n return tweetObjects", "def load_from_json_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n return(json.loads(f.read()))", "def load(filename):\n if filename.endswith('.yml') or filename.endswith('.yaml'):\n return load_yml(filename)\n elif filename.endswith('.json'):\n return load_json(filename)", "def read_json_breakdown(cls, fname):\n if not os.path.exists(fname):\n raise RuntimeError\n\n with open(fname, 'r') as data_file:\n return cls.fixup_from_json(data_file.read())", "def readf(self, fileName):\n\t\tif os.path.exists(fileName):\n\t\t\tf = open(fileName)\n\t\t\ttry:\n\t\t\t\td = json.load(f)\n\t\t\texcept Exception as 
e:\n\t\t\t\tlog.error(e)\n\t\t\t\tf.close()\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\treturn self.readd(d)\n\t\t\t\tf.close()", "def load_stat(input):\n with open(input['json'], 'r', encoding=input['encoding']) as f:\n return json.load(f)", "def load_text(txt_path):\n with open(txt_path, 'r') as json_file:\n data = json_file.read()\n content = json.loads(data)\n \n return content", "def load_from_json_file(filename):\n import json\n with open(filename, 'r') as s:\n return json.load(s)", "def load_from_utterance_file(filename, utterance_start_index, utterance_end_index):\n with open(filename, \"r\") as f:\n try:\n ext = filename.split(\".\")[-1]\n if ext == \"json\":\n utterances = json.load(f)\n elif ext == \"jsonl\":\n utterances = []\n if utterance_start_index is None: utterance_start_index = 0\n if utterance_end_index is None: utterance_end_index = float('inf')\n idx = 0\n for line in f:\n if utterance_start_index <= idx <= utterance_end_index:\n utterances.append(json.loads(line))\n idx += 1\n except Exception as e:\n raise Exception(\"Could not load corpus. Expected json file, encountered error: \\n\" + str(e))\n return utterances", "def from_json_file(filename, check_format=True):\n filename = os.path.abspath(filename)\n directory = os.path.dirname(filename)\n with open(filename, \"r\") as infile:\n return ExperimentListFactory.from_json(\n infile.read(), check_format=check_format, directory=directory\n )", "def load_json(filename):\n with open(filename) as file:\n obj = json.load(file)\n return obj", "def load_json(filepath: str):\n if not filepath:\n return None\n\n abs_path = _resolve_relative_path(filepath)\n with open(abs_path) as f:\n raw_json = f.read()\n\n return json.loads(raw_json)", "def file_jsoncheck(filename):\n with open(filename, 'r') as jsontable:\n try:\n json_object = json.load(jsontable)\n except ValueError, e:\n return False\n\n # DQLL.json number of lines should be 35\n # Will change with table version\n nlines = 35\n \n with open(filename, 'r') as f:\n l = [x for x in f.readlines()]\n # Default number of lines should be 35\n if len(l) != nlines:\n print \"Number of lines in DQLL.json is not default {} but {}\".format(nlines, len(l))\n return False\n\n return True", "def load_data_file(path):\n with open(path, encoding='utf-8') as f:\n return json.load(f)", "def from_file(cls, file_name: str):\n\n with open(file_name, 'r') as fi:\n input_dict = json.load(fi)\n return cls.from_dict(input_dict)", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n print \"\\nValid JSON\"\n return True\n except ValueError:\n print \"\\nInvalid JSON\"\n exit(-1)\n return False", "def __init__(self, filename):\n #Opening the file and storing its contents in a list\n with open(filename) as fp:\n self.data = json.load(fp)", "def test_load_file_contents():\n\n file_name = 'test_fooof_all'\n loaded_data = load_json(file_name, TEST_DATA_PATH)\n\n # Check settings\n for setting in OBJ_DESC['settings']:\n assert setting in loaded_data.keys()\n\n # Check results\n for result in OBJ_DESC['results']:\n assert result in loaded_data.keys()\n\n # Check results\n for datum in OBJ_DESC['data']:\n assert datum in loaded_data.keys()", "def load_json(fpath):\n with open(fpath, \"r\") as f:\n return json.load(f)", "def import_sample(infile):\n deserialized = None\n with open(infile, 'r') as file_handle:\n deserialized = json.load(file_handle, object_hook=decode_sample)\n return deserialized", "def 
loadJsonFromFile(filename):\n with open(filename) as f:\n return json.loads(f.read())", "def load_from_json(filename):\n\n with open(filename, 'r') as file:\n return json.load(file)", "def test_load_json_file_not_found_error() -> None:\n fname = \"invalid_file.json\"\n\n assert load_json(fname) == {}\n assert load_json(fname, default=\"\") == \"\"\n assert load_json_object(fname) == {}\n assert load_json_object(fname, default={\"Hi\": \"Peter\"}) == {\"Hi\": \"Peter\"}\n assert load_json_array(fname) == []\n assert load_json_array(fname, default=[\"Hi\"]) == [\"Hi\"]", "def load_data(filename):\n # Load JSON lines\n with open(filename, encoding='utf-8') as f:\n examples = [json.loads(line) for line in f]\n\n return examples", "def from_json(path: str):\n with open(path) as f:\n return json.load(f)", "def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)", "def load(self, filepath):\n yaml_load = lambda fp: yaml.load(fp, Loader=yaml.SafeLoader)\n reader = json.load if Config.isjson(filepath) else yaml_load\n with open(filepath, 'r') as f:\n self.__init__(reader(f))\n return self", "def load_json(filename_or_dict):\n\tif isinstance(filename_or_dict, str):\n\t\tinput_file = open(filename_or_dict, encoding='utf-8')\n\t\tjson_dict = json.loads(input_file.read())\n\t\tinput_file.close()\n\t\treturn json_dict\n\treturn filename_or_dict", "def validate_input(update_file):\n try:\n json.load(open(update_file))\n #print \"Valid JSON\"\n return True\n except ValueError:\n print \"Invalid JSON. Exiting.\"\n exit(-1)\n return False", "def load_from_json(self, file_name: str) -> bool:\n try:\n with open(file_name, 'r') as f:\n data = json.loads(f.read())\n self.__g = DiGraph.from_dict(data)\n return True\n except:\n traceback.print_exc()\n return False", "def load_json(filename):\n\n with open(filename, encoding=constants.load_encoding) as file:\n return loads(file.read())", "def util_load_json(path):\n with io.open(path, mode=\"r\", encoding=\"utf-8\") as f:\n return json.loads(f.read())", "def load(config_file: typing.TextIO) -> \"TrainingConfig\":\n return TrainingConfig.from_json(config_file.read())", "def read_twitter_json(f):\n tweets = list()\n with open(f) as json_file:\n for line in json_file:\n tweets.append(json.loads(line))\n return tweets", "def load_json_file(i):\n\n import json\n\n fn = i['json_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening json file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading json file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n if sys.version_info[0] > 2:\n d = json.loads(s)\n else:\n d = json.loads(s, encoding='utf8')\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing json from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance", "def ler_json() -> dict:\n\n try:\n data = None\n with open('/home/kurt/GitHub/geobit_test/data.json') as json_file: \n data = json.load(json_file)\n \n return data\n \n except:\n return print(\n \"There was a problem executing the'ler_son' function from the leitura file\")", "def load(self, file=\"setup\", path=\"settings\"):\n\n # check if 
filename already contains file extension, if not, add it\n if file[-5:] != '.json':\n file += '.json'\n # load mappings from file\n with open(os.path.join(path, file), 'r') as file:\n self.data = json.load(file)", "def load():\n\n #: the file passed by the user in the post request\n file = request.files[\"file\"]\n\n # ensure that file exists\n if file == None:\n return BadRequest(\"No file given\")\n\n # ensure that file is readable\n try:\n file = json.loads(file.read())\n except UnicodeDecodeError:\n return BadRequest(\"Invalid file\")\n \n # ensure that the file can be indexed\n try:\n points = file[\"points\"]\n reg_json = file[\"reg\"]\n except TypeError:\n return BadRequest(\"Invalid file\")\n\n global no_dimensions\n #: number of dimensions\n no_dimensions = file[\"no_dimensions\"]\n\n\n # give each point an annotation weight if it does not already have one\n for i in range(0, len(points)):\n if points[i].get(\"annot_weight\") == None:\n points[i][\"annot_weight\"] = random.uniform(0, 1)\n\n global reg \n # regression model loaded from file\n if not reg_json:\n reg = jsonpickle.loads(reg_json)\n\n global tsne \n tsne = points\n \n return {\"points\": points, \"reg\": reg != None, \"no_dimensions\": no_dimensions}" ]
[ "0.64222986", "0.61474323", "0.6144654", "0.61129427", "0.6093864", "0.6074711", "0.6030403", "0.59984696", "0.59946185", "0.59418505", "0.59320986", "0.58631575", "0.5842694", "0.5824927", "0.57905143", "0.5786703", "0.5777964", "0.57755256", "0.5758631", "0.57461494", "0.57401806", "0.57319224", "0.5727668", "0.57230777", "0.5722163", "0.5718931", "0.5711493", "0.5708973", "0.5705908", "0.56931615", "0.56890583", "0.5677451", "0.56762826", "0.5675732", "0.56750745", "0.56600446", "0.5654667", "0.56540126", "0.5641753", "0.56302345", "0.5608574", "0.5607227", "0.5591759", "0.5584992", "0.5580432", "0.5579113", "0.5574573", "0.5572174", "0.5566599", "0.5565561", "0.5559077", "0.55588394", "0.5553719", "0.55506176", "0.55473906", "0.5544015", "0.5540335", "0.5531413", "0.55294496", "0.55255836", "0.55255365", "0.5513949", "0.5513776", "0.55044395", "0.5503186", "0.5495519", "0.5487166", "0.54840565", "0.5483339", "0.54705507", "0.5470207", "0.5470051", "0.5468411", "0.54663163", "0.54633933", "0.5459222", "0.54573476", "0.5456068", "0.5455679", "0.54555684", "0.54538906", "0.5453326", "0.54516625", "0.5441221", "0.5440878", "0.54227215", "0.54217064", "0.54147315", "0.54137725", "0.5408543", "0.5402521", "0.53991574", "0.53806037", "0.5372282", "0.5371886", "0.5367621", "0.5364385", "0.536119", "0.53594613", "0.53572905" ]
0.7133785
0
Load data from either a .json file, an open file pointer or a json string. Directly returns any other data.
def _load_json(self, kind, source, **kwargs):
    if source is None:
        raise exceptions.invalid_json_map[kind](f"Cannot load {kind} - no data source specified.")

    # Decode the json string and deserialize to objects.
    try:
        data = load_json(source, **kwargs)
    except FileNotFoundError as e:
        raise exceptions.file_not_found_map[kind](e)
    except jsonlib.decoder.JSONDecodeError as e:
        raise exceptions.invalid_json_map[kind](e)

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_data_from_file(self, input_file_path):\n with FileOrBufferHandler(input_file_path, 'r', \n encoding=self.file_encoding) as input_file:\n try:\n data = json.load(input_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n input_file.seek(0)\n data = data_utils.read_json(\n data_generator=input_file,\n selected_columns=self.selected_keys,\n read_in_string=False\n )\n return data", "def load(filename):\n\n try:\n with open(filename) as data:\n return json.load(data)\n except:\n return None", "def _load_json_data(filename):\n\n relative_path = join(\"data\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as data_file:\n return json.loads(data_file.read())", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def __load_data(data_loc):\n if not data_loc.endswith('.json'):\n raise ValueError('data_loc must be a json file location.')\n with open(data_loc, 'rb') as f:\n return json.load(f)", "def load(file, **kwargs):\n extension = os.path.splitext(file)\n if extension not in {'.json', '.axjson'}:\n raise RuntimeError('Given extension ({}) not supported'.format(extension))\n with open(file) as f:\n data = json.load(f)\n if extension == '.json':\n return data\n else:\n json_str = json.dumps(data)\n return loads(json_str, **kwargs)", "def load_json(filename_or_dict):\n\tif isinstance(filename_or_dict, str):\n\t\tinput_file = open(filename_or_dict, encoding='utf-8')\n\t\tjson_dict = json.loads(input_file.read())\n\t\tinput_file.close()\n\t\treturn json_dict\n\treturn filename_or_dict", "def read_json(file_or_path):\n try:\n with (open(file_or_path, 'r') if isinstance(file_or_path, (str, bytes)) else file_or_path) as f:\n obj = json.load(f)\n except IOError:\n obj = json.loads(file_or_path)\n return obj", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)", "def json_load(file_path):\n\n with open(file_path) as f:\n return json_loads(f.read())", "def open_json(path):\n with open(path, \"r\") as json_data_file:\n data = json.load(json_data_file)\n return data", "def load_json(file_name):\n return json.load(open(file_name))", "def load_data(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n return data", "def load_json(path):\n import json\n\n def _load_json(*args, **kwargs):\n with open(path) as data_file:\n return json.load(data_file)\n\n return _load_json()", "def load(self):\n data = None\n try:\n with open(self.__filepath, 'r') as file:\n text = file.read()\n data = jsonpickle.decode(text)\n except FileNotFoundError:\n data = None\n except IOError as e:\n print(e)\n return data", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def loadJSON(jsonData):\n\n if hasattr(jsonData, 'read'):\n loadedjson = json.load(jsonData)\n elif isinstance(jsonData, str):\n if os.path.exists(jsonData):\n with open(jsonData) as jsonFile:\n loadedjson = json.load(jsonFile)\n else:\n try:\n loadedjson = json.loads(jsonData)\n except JSONDecodeError as e:\n raise ValueError(f\" {str(e)}: Got {jsonData}, either bad format of file does not exist\")\n\n elif isinstance(jsonData, dict):\n loadedjson = jsonData\n else:\n err = f\"workflow type: {type(jsonData)} is unknonw. Must be str, file-like or dict. 
\"\n raise ValueError(err)\n\n\n return loadedjson", "def load_json(file_path):\n try:\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n except json.JSONDecodeError as e:\n raise ValueError(f\"Invalid JSON format in file {file_path}\") from e\n except FileNotFoundError as e:\n raise ValueError(f\"File not found: {file_path}\") from e\n return data", "def read_json(self, *args, **kwargs):\n with self.open('rb') as f:\n return json.load(f, *args, **kwargs)", "def json_load(fp):\n with _iotools.open_file(fp, \"r\") as f:\n return json.load(f, cls=DataDecoder)", "def load_json_or_yaml(file_path):\n # handle json doc\n if isinstance(file_path, dict):\n return file_path\n # handle url\n elif file_path.startswith(\"http\"):\n with requests.get(file_path) as url:\n # check if http requests returns a success status code\n if url.status_code != 200:\n raise ValueError(f\"Invalid URL [{url.status_code}]: {file_path} !\")\n else:\n _data = url.content\n # handle file path\n else:\n try:\n with open(file_path) as f:\n _data = f.read()\n except FileNotFoundError:\n raise ValueError(\"Invalid File Path!\")\n try:\n if isinstance(_data, bytes):\n _data = _data.decode(\"utf-8\")\n data = json.loads(_data)\n # except ValueError: # for py<3.5\n except json.JSONDecodeError: # for py>=3.5\n try:\n data = yaml.load(_data, Loader=yaml.SafeLoader)\n except (yaml.scanner.ScannerError, yaml.parser.ParserError):\n raise ValueError(\"Not a valid JSON or YAML format.\")\n return data", "def load_json_or_yaml(file_path):\n # handle json doc\n if isinstance(file_path, dict):\n return file_path\n # handle url\n if isinstance(file_path, str) and file_path.startswith(\"http\"):\n with requests.get(file_path) as url:\n # check if http requests returns a success status code\n if url.status_code != 200:\n raise ValueError(\"Invalid URL!\")\n _data = url.content\n # handle file path\n else:\n try:\n with open(str(file_path)) as f:\n _data = f.read()\n except FileNotFoundError:\n raise ValueError(\"Invalid File Path!\")\n try:\n if isinstance(_data, bytes):\n _data = _data.decode('utf-8')\n data = json.loads(_data)\n except json.JSONDecodeError: # for py>=3.5\n try:\n data = yaml.load(_data, Loader=yaml.SafeLoader)\n except (yaml.scanner.ScannerError,\n yaml.parser.ParserError):\n raise ValueError(\"Not a valid JSON or YAML format.\")\n return data", "def loadOrRun(filename,function,*args):\r\n def loadJSON(filename):\r\n \"saves the data object as a JSON string\"\r\n with open(filename,\"r\") as openFile:\r\n data = json.loads(openFile.read())\r\n return data\r\n\r\n def saveJSON(filename,data):\r\n \"saves the data object as a JSON string\"\r\n with open(filename,\"w\") as openFile:\r\n openFile.write(json.dumps(data))\r\n try:\r\n data = loadJSON(filename)\r\n except IOError:\r\n data = function(*args)\r\n saveJSON(filename,data)\r\n\r\n return data", "def load_json(filepath: str):\n if not filepath:\n return None\n\n abs_path = _resolve_relative_path(filepath)\n with open(abs_path) as f:\n raw_json = f.read()\n\n return json.loads(raw_json)", "def load_data_file(path):\n with open(path, encoding='utf-8') as f:\n return json.load(f)", "def load_json(path: Path) -> Any:\n with path.open() as f:\n return json.load(f)", "def load_json(file_name):\n with open(file_name) as f:\n data = json.load(f)\n\n return data", "def _json_read(filename):\n with open(filename) as file:\n return json.load(file)", "def load(self, path):\n with open(path, \"rt\") as open_file:\n data = json.load(open_file)\n return 
data", "def load_json(filename: str):\n filepath = get_file_path(filename)\n\n if filepath.exists():\n with open(filepath, mode=\"r\", encoding=\"UTF-8\") as f:\n data = json.load(f)\n return data\n else:\n save_json(filename, {})\n return {}", "def get_data():\n try:\n with open('./data.json') as data_file:\n return json.load(data_file)\n except:\n data_json = json.loads('{\"message\": \"Error with file data.json\", \"success\": false}')\n return data_json", "def load_from_json_file(filename):\n if type(filename) is not str:\n return\n\n with open(filename, mode=\"r\") as file:\n return json.loads(file.read())", "def load_data(filepath=None):\r\n if filepath is None:\r\n filepath = LATEST_DATA_SET_PATH\r\n\r\n with open(filepath) as file:\r\n return json.load(file)", "def load_json(fpath):\n with open(fpath, \"r\") as f:\n return json.load(f)", "def openJson(self):\n json_file = open(self.file, 'r')\n json_data = json_file.read()\n result = json.loads(json_data)\n return result", "def read_json(filepath):\n if (filepath in _json_cache):\n return _json_cache[filepath]\n with open(filepath, 'r', encoding='utf-8') as fileinfo:\n data = json.load(fileinfo)\n _json_cache[filepath] = data\n return data", "def load_json(path):\n with open(path) as data_file:\n return json.load(data_file)", "def data_from_file(filename):\n with open(data_full_filename(filename)) as f:\n return json.loads(f.read())", "def _loadJson(self, file):\n # TODO : Is it paranoid checking?\n if os.path.isfile(file):\n try:\n with open(file, 'r') as f:\n data = json.load(f)\n return data\n except ValueError:\n msg = \"Corrupted JSON file => %s\" % file\n # logger.error(msg)\n self._exception(200, msg)\n # return -2 # code for corrupted json file\n else:\n msg = \"File cannot be found => %s\" % file\n self._exception(201, msg)", "def loadJsonFromFile(filename):\n with open(filename) as f:\n return json.loads(f.read())", "def load_json(filepath: str):\n with open(filepath, \"r\", encoding=\"utf8\") as f:\n return json.loads(f.read())", "def _get_data_file(self, data_path):\n\n return json.load(open(data_path))", "def load_json(load_path):\n # read from path\n with open(load_path) as json_file:\n data = json.load(json_file)\n return data", "def load_json_file(self, file, default_content=None):\n if os.path.isfile(file) and os.path.getsize(file):\n with open(file, \"r\", encoding=\"utf-8\") as f:\n return json.load(f)\n return default_content", "def load_data(fname):\n # load the json in gzip format\n with gzip.open(fname, 'r') as fin:\n data = json.loads(fin.read().decode('utf-8'))\n return data", "def load_file(file_path, graceful=False):\n try:\n with open(file_path) as data_file:\n json_obj = json.load(data_file)\n logging.debug(\"Loaded JSON file from '{0}'\".format(file_path))\n except IOError:\n if graceful:\n json_obj = []\n else:\n logging.error(\"¯\\_(ツ)_/¯ There is no such file: '{0}'\".format(file_path))\n sys.exit(1)\n except ValueError:\n logging.error(\"¯\\_(ツ)_/¯ That's not JSON at all! 
'{0}'\".format(file_path))\n sys.exit(2)\n return json_obj", "def open_data(filepath):\n\tdata = []\n\twith open(filepath) as f:\n\t data = f.readlines()\n\t data = list(map(json.loads, data)) \n\treturn data", "def get_file(file):\n with open(file) as data_file:\n return json.load(data_file)", "def _load(self, json_str, filepath):\n # pylint: disable=protected-access\n return self.json_o._load(json_str, filepath)", "def load_from_json_file(filename):\n with open(filename, \"r\") as my_file:\n return json.loads(my_file.read())", "def load_json_file(i):\n\n import json\n\n fn = i['json_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening json file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading json file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n if sys.version_info[0] > 2:\n d = json.loads(s)\n else:\n d = json.loads(s, encoding='utf8')\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing json from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def load_json(self, json_path=None):\n if json_path is None:\n json_path = self.json_path\n with open(json_path, encoding='utf-8', mode='r') as f:\n data = json.load(f)\n return data", "def _json_from_file(file: IO[AnyStr]) -> Json:\n if os.path.getsize(file.name) > 0:\n return typing.cast(Json, json.load(file))\n return {}", "def read_json(file_name):\n try:\n with open(file_name, \"rt\") as input_file:\n return json.loads(input_file.read())\n except TypeError:\n print(\"No valid JSON data!\")\n raise\n except IOError:\n print(\"Could not read file from disk!\")\n raise", "def openFile(self, path):\n with open(path) as f:\n return json.load(f)", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def read_json(jsonfp):\n with JsonFile(jsonfp) as jsondata:\n return jsondata", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def read_json(fname, check_extension=False):\n if fname is None:\n return None\n if check_extension:\n IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))\n with OpenFile(fname, 'r') as fobj:\n return json.load(fobj)", "def load_json(file_name_template, record_id):\n with open(file_name_template % (record_id)) as f:\n json_data = json.load(f)\n return json_data", "def loadJSONFile(filename):\n\twith open(filename, 'r') as f:\n\t\treturn json.loads(f.read())", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file, 'rb') as f:\n return json.load(f)", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def load_json(filename):\n with open(filename) as file:\n obj = json.load(file)\n return obj", "def load_json(filepath: str):\n with open(filepath, encoding=\"utf-8\") as f:\n return json.load(f)", "def read(self, filepath, dirpath=None):\n try:\n #filepath = os.path.normpath(filepath)\n with open(filepath) as f_p:\n try:\n self.json_dict = json.load(f_p)\n self.filepath = filepath\n return self.json_dict\n except ValueError as err:\n print('JSON content error in \"%s\"' % filepath)\n print(err)\n except (IOError, FileNotFoundError):\n print(\n 'Failed to open JSON file \"%s\" 
\"%s\"' %\n (os.path.abspath(''), filepath))\n raise NoSuchFileError(filepath)\n raise JsonContentError", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def read_file(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n return data", "def load_data(path):\r\n\r\n _, ftype = os.path.splitext(path) #get fname and extension\r\n\r\n if os.path.isfile(path):\r\n with open(path) as f:\r\n\r\n if ftype == \".json\" or ftype == \".geojson\": #handle json\r\n data = json.load(f)\r\n # print(data)\r\n return data\r\n\r\n elif ftype == \".csv\": #handle csv with csv reader\r\n with open(path, newline ='') as csvfile:\r\n data = csv.DictReader(csvfile)\r\n return list(data)\r\n\r\n else:\r\n print(\"neither json or csv\")\r\n return None", "def read_json(filename: Union[pathlib.Path, str]):\n if isinstance(filename, str):\n filename = pathlib.Path(filename)\n with open(filename) as fh:\n return json.loads(fh.read())", "def load_from_json_file(filename):\n with open(filename, encoding=\"utf-8\") as round:\n return json.load(round)", "def readf(self, fileName):\n\t\tif os.path.exists(fileName):\n\t\t\tf = open(fileName)\n\t\t\ttry:\n\t\t\t\td = json.load(f)\n\t\t\texcept Exception as e:\n\t\t\t\tlog.error(e)\n\t\t\t\tf.close()\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\treturn self.readd(d)\n\t\t\t\tf.close()", "def util_load_json(path):\n with io.open(path, mode=\"r\", encoding=\"utf-8\") as f:\n return json.loads(f.read())", "def read_json(fname):\n with open(fname) as f:\n d = json.load(f)\n return d", "def load_jsondata_from_file(path, ftype=None):\n print(\"loading %s\" % path)\n t0 = time.time()\n data = []\n with open(path, 'r') as f:\n if ftype == None:\n for line in f:\n item = json.loads(line)\n data.append(item)\n elif ftype == 'user':\n for line in f:\n item = json.loads(line)\n data.append({'user_id': item['user_id'], 'friends': item['friends']})\n elif ftype == 'business':\n for line in f:\n item = json.loads(line)\n data.append({'business_id': item['business_id'], 'categories': item['categories'], 'city': item['city']})\n elif ftype == 'review':\n for line in f:\n item = json.loads(line)\n data.append({'user_id': item['user_id'], 'business_id': item['business_id'], 'stars': item['stars']})\n print(\"loading %s done, time cost %.2f\" % (path, time.time()-t0))\n return data", "def read_object_from_file(file_name):\n if os.path.exists(file_name) is False:\n print (\"Error read path: [%s]\" % file_name)\n return None\n with open(file_name, 'r') as f:\n try:\n obj = json.load(f)\n except Exception:\n print (\"Error json: [%s]\" % f.read()[0:10])\n return None\n return obj", "def load_from_json_file(filename):\n with open(filename, 'r') as jFile:\n fString = jFile.read()\n fObj = json.loads(fString)\n return fObj", "def read_json_file(filename):\n with open(filename) as f:\n try:\n data = json.loads(f.read())\n except:\n data = {}\n return data", "def load_from_json_file(filename):\n with open(filename, mode=\"r\", encoding=\"utf-8\") as a_file:\n return json.loads(a_file.read())", "def read_file(path):\n with open(path) as json_file:\n data = json.load(json_file)\n return data", "def 
load_from_json_file(filename):\n with open(filename) as f:\n return json.load(f)", "def read_or_new_json(filename, value, *args, **kwargs):\n data = None\n filename = \"{}.json\".format(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n if os.path.isfile(filename):\n # If file had been created, but is empty return None since another process\n # could be writing to it.\n if os.path.getsize(filename) > 0:\n with open(filename, \"r\") as f:\n try:\n data = json.load(f, preserve_order=False)\n except Exception as e:\n print(e)\n raise e\n else:\n if callable(value):\n data = value(*args, **kwargs)\n else:\n data = value\n with open(filename, \"w\") as f:\n json.dump(data, f, indent=4, separators=(',', ': '), sort_keys=True, allow_nan=True)\n return data", "def read_json_data(data_path: str):\n f = open(data_path, \"r\")\n return json.load(f)", "def _read_json(filename):\n with open(filename) as f:\n import json\n return json.load(f)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def load_json(jsonfile):\n with open(jsonfile) as f:\n return json.load(f)", "def read(fname):\n # Read string from JSON file.\n with open(fname, 'r') as fi:\n serial = fi.read()\n\n # Decode.\n decoder = json.JSONDecoder(object_hook=numpy_hook)\n data = decoder.decode(serial)\n\n return data", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def _read_json(cls, input_file):\n with open(input_file) as f:\n return json.load(f)", "def read_json(filepath):\n\n with open(filepath, 'r', encoding='utf-8') as file_obj:\n data = json.load(file_obj)\n\n return data", "def _remoteloadjson(path: str) -> JSONType:\n return json.loads(request.urlopen(path).read())", "def load_from_json_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n return(json.loads(f.read()))", "def from_json(fname):\n with open(fname, 'r') as fh:\n d = json.load(fh)\n return d", "def from_file(filename):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n f = open(filename, 'r')\n string = f.read()\n return json.loads(string)", "def __init__(self, json_str: object = None, json_file_path: object = None) -> None:\n self.data = None\n if json_str is None and json_file_path is None:\n # raise Exception(\"Invalid file path or json string. 
Please provide valid file path for json data or provide json string\")\n print(\"No valid json file has been loaded\")\n if json_str is None:\n with open(json_file_path) as file:\n self.data = json.load(file)\n else:\n self.data = json.loads(json_str)\n # if self.data is not None:", "def read_json(fn):\n with open(fn) as f:\n return json.load(f, object_hook=_operator_object_hook)", "def loader(data: Union[str, dict], _: FileInfo) -> Optional[dict]:\n if isinstance(data, str):\n if fmt != 'json-ld':\n g = Graph()\n g.parse(data=data, format=fmt)\n data = pyld_jsonld_from_rdflib_graph(g)\n\n if not isinstance(data, dict):\n # TODO: Add a context processor to the source w/ CONTEXTS_PARAM_TYPE\n # TODO: figure out what to do base options below\n # TODO: determine whether jsonld.frame can handle something other than string input\n data_as_dict = jsonld.frame(data, contexts)\n else:\n data_as_dict = data\n typ = data_as_dict.pop('@type', None)\n # TODO: remove this when we get the Biolinkml issue fixed\n if not typ:\n typ = data_as_dict.pop('type', None)\n if typ and typ != target_class.class_name:\n # TODO: connect this up with the logging facility or warning?\n print(f\"Warning: input type mismatch. Expected: {target_class.__name__}, Actual: {typ}\")\n return json_clean(data_as_dict)", "def load_from_json_file(filename):\n import json\n with open(filename, 'r') as s:\n return json.load(s)" ]
[ "0.74808633", "0.7463258", "0.7429995", "0.7426092", "0.73366517", "0.72446245", "0.7234326", "0.7181051", "0.7129091", "0.71012217", "0.7058075", "0.70502263", "0.7007759", "0.6973326", "0.69674516", "0.6960993", "0.69258857", "0.69136333", "0.6907644", "0.6902104", "0.6878441", "0.6876933", "0.6873137", "0.6860294", "0.6859", "0.6850668", "0.6839429", "0.6829113", "0.6825724", "0.68229425", "0.68141276", "0.679877", "0.67983663", "0.67732304", "0.6765584", "0.67576617", "0.67250216", "0.6724055", "0.6722727", "0.6722316", "0.6717097", "0.67163146", "0.6710104", "0.6700374", "0.669439", "0.6690857", "0.6675834", "0.66725093", "0.66709954", "0.6670464", "0.6668725", "0.66638774", "0.6652164", "0.66473323", "0.6644527", "0.6641803", "0.66414905", "0.6634641", "0.6632855", "0.6620538", "0.661893", "0.66173154", "0.6615846", "0.6615846", "0.66036123", "0.6602922", "0.659314", "0.6591999", "0.65877664", "0.6585453", "0.65722984", "0.6570041", "0.65650356", "0.65617096", "0.6556709", "0.6556427", "0.65559596", "0.6546881", "0.65331835", "0.6519233", "0.65154487", "0.6514032", "0.6512435", "0.6506935", "0.65040886", "0.65037715", "0.6498708", "0.6498708", "0.64934176", "0.64930975", "0.64930975", "0.64864707", "0.64762086", "0.64756167", "0.6475604", "0.6475167", "0.6473566", "0.6465799", "0.64584994", "0.6441749" ]
0.7064134
10
Get the schema for the given strand.
def _get_schema(self, strand):
    if strand == "twine":
        # The data is a twine. A twine *contains* schema, but we also need to verify that it matches a certain
        # schema itself. The twine schema is distributed with this packaged to ensure version consistency...
        schema_path = "schema/twine_schema.json"

    elif strand in CHILDREN_STRANDS:
        # The data is a list of children. The "children" strand of the twine describes matching criteria for
        # the children, not the schema of the "children" data, which is distributed with this package to ensure
        # version consistency...
        schema_path = "schema/children_schema.json"

    elif strand in MANIFEST_STRANDS:
        # The data is a manifest of files. The "*_manifest" strands of the twine describe matching criteria used to
        # filter files appropriate for consumption by the digital twin, not the schema of the manifest data, which
        # is distributed with this package to ensure version consistency...
        schema_path = "schema/manifest_schema.json"

    else:
        if strand not in SCHEMA_STRANDS:
            raise exceptions.UnknownStrand(f"Unknown strand {strand}. Try one of {ALL_STRANDS}.")

        # Get schema from twine.json file.
        schema_key = strand + "_schema"

        try:
            return getattr(self, schema_key)
        except AttributeError:
            raise exceptions.StrandNotFound(f"Cannot validate - no {schema_key} strand in the twine")

    return jsonlib.loads(pkg_resources.resource_string("twined", schema_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_schema(self):\n self._pick()\n return Schema()", "def _get_schema_using_query(self, query: str) -> sch.Schema:\n return sch.Schema.from_tuples(self._metadata(query))", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def get_schema(cls):\n return cls.schema()", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def get_schema(self):\r\n return self.__schema", "def sample_schema(self):\n if 'sample' not in self._schemas:\n logging.debug(f\"{self.id} - no schema? {self._schemas}\")\n return None\n return self._schemas['sample']", "def getSchema(cls):\n pass", "def schema(cls):\n return Schema.get_instance(cls)", "def get_schema(schema): # noqa: E501\n return 'do some magic!'", "def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()", "def getSchema( sourceDirectory ):\r\n if( sourceDirectory == settings.LEXISNEXIS_FILETAG ): return LexisNexisSchema()\r\n raise Exception( \"Filer for source <%s> is not registered in getSchema( source ).\" % ( sourceDirectory ) )", "def get_schema():\n if not os.path.isfile(_schema_file):\n create_schema()\n with open(_schema_file, 'r') as fd:\n out = decode_json(fd)\n return out", "def get_schema(self) -> dict:", "async def get_schema(request: Request, namespace: str, project: str):\n # endpoint to schema.databio.org/...\n # like pipelines/ProseqPEP.yaml\n\n try:\n schema = eido.read_schema(\n f\"https://schema.databio.org/{namespace}/{project}.yaml\"\n )[0]\n except IndexError:\n raise HTTPException(status_code=404, detail=\"Schema not found\")\n\n return schema", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def get_schema(self, name):\n return Schema(self, name)", "def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)", "def schema(self, name):\n return model.Schema(self, name)", "def _get_schema(name):\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def schema(self):\n return self._schema", "def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])", "def getDBSchema(self, desired=None):\n role = self.getRole(desired)\n schema = role[\"roleName\"]\n return schema", "def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10", "def schema(self):\n # type: () -> object\n return self._schema", "def get_default_schema(self):\n schema = self._connection.settings.get(\"schema\")\n if schema:\n res = (\n self.sql(_SELECT_SCHEMA_NAME_QUERY.format(escape(schema)))\n .execute()\n .fetch_all()\n )\n try:\n if res[0][0] == schema:\n return Schema(self, schema)\n except IndexError:\n raise ProgrammingError(\n f\"Default schema '{schema}' does not exists\"\n ) from None\n return None", "def get_schema(sid, did, scid):\n\n driver = get_driver(PG_DEFAULT_DRIVER)\n manager = driver.connection_manager(sid)\n conn = 
manager.connection(did=did)\n\n ver = manager.version\n server_type = manager.server_type\n\n # Fetch schema name\n status, schema_name = conn.execute_scalar(\n render_template(\n \"/\".join(['schemas',\n '{0}/#{1}#'.format(server_type, ver),\n 'sql/get_name.sql']),\n conn=conn, scid=scid\n )\n )\n\n return status, schema_name", "def get_schema(db, sourcename):\n try:\n schema = db[\"tables\"][sourcename]\n schema[\"type\"] = constants.TABLE\n except KeyError:\n try:\n schema = db[\"views\"][sourcename]\n schema[\"type\"] = constants.VIEW\n except KeyError:\n raise ValueError(\"no such table/view\")\n return schema", "def get_bundled_schema_path():\n return str(data.load_resource(\"schema\"))", "def _schema_type(self) -> Optional[type]:\n return SeriesSchema", "def schema_for_config(self) -> Dict[str, Any]:\n return self.rsimulator.schema_for_config()", "def get_schema_defs():\n return SCHEMA_DEFS", "def __getitem__(self, name) -> 'StarSchema':\n return self.schemas[name]", "def load_dde_schemas(self, schema):\n if self.verbose:\n print(f'Loading registered DDE schema \"{schema}\"')\n schema_source = schemas.get(schema)\n schema_source.pop(\"_id\")\n return schema_source", "def schema(self) -> Schema:\n return next(schema for schema in self.metadata.schemas if schema.schema_id == self.metadata.current_schema_id)", "def _schema_type(self) -> Optional[type]:\n return IndexSchema", "def schema() -> None:\n pass", "def sbi_schema():\n schema_path = os.path.join(os.path.dirname(__file__), '..', '..',\n 'scheduling', 'schema',\n 'configure_sbi.json')\n with open(schema_path, 'r') as file:\n schema_data = file.read()\n return json.loads(schema_data)", "async def get_schema(self) -> AvroSchemaT:\n schema = None\n try:\n schema = await self._client.schema_by_topic(self._subject)\n except Exception:\n msg = f\"Could not retrieve schema for subject {self._subject}.\"\n raise SchemaException(msg)\n\n return schema", "def schema_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schema_name\")", "def get_docname_from_schema(doctype,config):\n\tfor coll in config[\"schema\"].keys():\n\t\tfor doc in config[\"schema\"][coll].keys():\n\t\t\tif doctype == doc: return config[\"schema\"][coll][doc]", "def get_schema() -> dict:\n raise NotImplementedError()", "def create_schema(self, schema: str):\n return", "def namespace_schema(self, namespace):\n try:\n return self._namespace_schemas[namespace]\n except KeyError:\n raise Error(\"undefined namespace: \\\"%s\\\"; defined namespaces: %s\" % (namespace, util.quoted_list(self._namespace_schemas.keys())))", "def schema(self):\n return self.snowflake_options.schema", "def get_schema_cls() -> t.Any:\n return SignupRequestSchema", "def _get_stored_schema(self, table: str) -> Optional[TableSchema]:\n try:\n with open(self.schemas / (table + '.json'), 'r') as f:\n return json.load(f)\n except FileNotFoundError:\n return None", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def schema(self) -> Dict[str, Dict]:\n return self._schema", "def _schema_type(self) -> Optional[type]:\n return SeasonSchema", "def output_schema(self) -> Optional[str]:\n return pulumi.get(self, \"output_schema\")", "async def get_schema(\n self, refresh: bool = False, headers: Optional[Dict[str, str]] = None\n ) -> graphql.GraphQLSchema:\n # TODO: consider adding ttl logic for expiring schemas for long running services\n if self._schema is None or refresh:\n self._schema = await self.introspect(headers=headers)\n return self._schema", "def resolver(schema):\n 
name = schema.__name__\n if name.endswith(\"Schema\"):\n return name[:-6] or name\n return name", "def get_schema(self, get_stats=False):\n query = \"schema {}\"\n\n results = self.run_dgraph_query_raw(query)\n\n schema = {}\n\n for row in results[\"schema\"]:\n table_name = row[\"predicate\"]\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n return list(schema.values())", "def full_schema_list(self, schema: str) -> List[str]:\n # Generate the information_schema identifier for that database\n # in order to be able to filter it out\n name_parts = schema.split(\".\")\n\n info_schema = f\"{name_parts[0]}.information_schema\"\n\n fetched_schemas = []\n\n # All Schemas\n if name_parts[1] == \"*\":\n db_schemas = self.show_schemas(name_parts[0])\n for db_schema in db_schemas:\n if db_schema != info_schema:\n fetched_schemas.append(db_schema)\n\n # Prefix schema match\n elif \"*\" in name_parts[1]:\n db_schemas = self.show_schemas(name_parts[0])\n for db_schema in db_schemas:\n schema_name = db_schema.split(\".\", 1)[1].lower()\n if schema_name.startswith(name_parts[1].split(\"*\", 1)[0]):\n fetched_schemas.append(db_schema)\n\n # TODO Handle more complicated matches\n\n else:\n # If no * in name, then return provided schema name\n fetched_schemas = [schema]\n\n return fetched_schemas", "def load_dde_schemas(self, schema):\n url = DDE_SCHEMA_BASE_URL + schema\n if self.verbose:\n print(f'Loading registered DDE schema from \"{url}\"')\n return load_json_or_yaml(url)[\"source\"]", "def create_schema():\n schema = Schema(idx=ID(stored=True),\n data=STORED,\n body=TEXT(analyzer=StemmingAnalyzer()),\n )\n print(\"schema creation successful\")\n return schema", "def schema(self):\n pass", "def schema(self) -> graphql.GraphQLSchema:\n return self._schema", "def schema(self):\n raise NotImplementedError", "def get_schema(path):\n with open(path, 'r') as f:\n return json.load(f)", "def schemas(self):\n return model.Schemas(self)", "def schema_for_state(self) -> Dict[str, Any]:\n return self.rsimulator.schema_for_state()", "def get_schemas(self):\n query = mssqlqueries.get_schemas()\n logger.info(u'Schemas query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def get_schema(self, repo, table):\n return self.user_con.get_schema(repo=repo, table=table)", "def _schema_type(self) -> Optional[type]:\n return PanelSchema", "def schemas(self):\n return self.get_schemas()", "def getSchema(self):\n\n schema = [\n \"title\",\n \"body\",\n \"created_at\",\n \"id\",\n \"summary\",\n \"abstract\",\n \"keywords\",\n ]\n\n return schema", "def schemas(self):\n if not self._schemas:\n self._schemas = get_schema(self.attributes.workspace.namespace, self.attributes.workspace.name)\n return self._schemas", "def schema(self) -> Optional[TensorDatasetSchema]:\n try:\n features_schema = _infer_schema(self._features)\n targets_schema = None\n if self._targets is not None:\n targets_schema = _infer_schema(self._targets)\n return TensorDatasetSchema(features=features_schema, targets=targets_schema)\n except Exception as e:\n _logger.warning(\"Failed to infer schema for NumPy dataset. 
Exception: %s\", e)\n return None", "def get_schema_cls() -> t.Any:\n return None", "def _schema_type(self) -> Optional[type]:\n return SeriesPanelMetaSchema", "def get_schema_url(self):\n return self.NAME_TYPE_SCHEMAS.get(self.name_type, None)", "def _schema_type(self) -> Optional[type]:\n return AdBreakSchema", "def schema_helper(self, name, _, schema=None, **kwargs):\n if schema is None:\n return None\n\n schema_instance = resolve_schema_instance(schema)\n\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n\n return json_schema", "def schema(self):\n schema_el = self.root.xpath(\n '/wsdl:definitions/wsdl:types/xsd:schema', namespaces=NS_MAP,\n )[0]\n return element_as_tree(schema_el)", "def reference_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationReferenceDataSourceReferenceSchemaArgs']:\n return pulumi.get(self, \"reference_schema\")", "def _schema(self):\n\n self._check_compiled()\n return self._compiled._schema", "def schema(self):\n return self.table_info.schema", "def get_local_schema(self, descriptor):\n return self._schemas[descriptor]['local']", "def schema(self):", "def serializeSchema(schema):\n\n # determine portal_type\n try:\n prefix, portal_type, schemaName = splitSchemaName(schema.__name__)\n except ValueError:\n # not a dexterity schema\n return\n\n # find the FTI and model\n fti = queryUtility(IDexterityFTI, name=portal_type)\n if fti.model_source:\n model = fti.lookupModel()\n\n # synchronize changes to the model\n syncSchema(schema, model.schemata[schemaName], overwrite=True)\n fti.model_source = serializeModel(model)\n else:\n raise TypeError(\"Changes to non-dynamic schemata not yet supported.\")", "def parse_schema_from_string(schema_str):\n return schema.Parse(schema_str)", "def get_meta_schema(self):\n return self._tc_meta_schema", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def readjamschema(schema):\n raise NotImplementedError(msg)", "def discover_schema(node):\n xmlns = node.get('__xmlns__', None)\n\n if xmlns:\n node['Schema'] = 'Unknown'\n if xmlns.startswith('smpte_stereo'):\n node['Schema'] = 'SMPTE Stereoscopic'\n elif xmlns.startswith('smpte'):\n node['Schema'] = 'SMPTE'\n elif xmlns.startswith('interop'):\n node['Schema'] = 'Interop'\n elif xmlns.startswith('atmos'):\n node['Schema'] = 'Atmos'", "def _get_schema_from_object(self, data):\n if \"items\" in data:\n return self._get_schema_from_object(data[\"items\"])\n\n url_key = None\n\n if '$id' in data:\n url_key = '$id'\n\n if 'id' in data:\n url_key = 'id'\n\n if url_key:\n url = data[url_key]\n schema = Schema().build()\n schema.domain_entity = self.get_domain_entity_from_url(url)\n schema.high_level_entity = self.get_high_level_entity_from_url(url)\n schema.module = self.get_module_from_url(url)\n schema.url = url\n return schema\n\n return None", "def getSchema(self, fp_id: int) -> Dict:\n # Identify the format\n formatquery = (\n \"\"\"select file_format from file_pattern_detail where fp_id = {0}\"\"\".format(\n fp_id\n )\n )\n formattype = self.getDataAsDict(formatquery)[0]\n if formattype[\"file_format\"] == \"DELIMITED\":\n schemaQuery = \"\"\"\n select col_name,\n col_datatype,\n col_ordinal,\n primary_key_pos,\n is_defining_col,\n is_audit_col\n from delimited_col_detail where fp_detail_id = {}\n \"\"\".format(\n fp_id\n )\n data = self.getDataAsDict(schemaQuery)\n return 
data", "def schema_xml(self):\n return self.properties.get('SchemaXml', None)", "def load_schema(schema_path):\n with open(schema_path) as schema_file:\n return Utils.parse(schema_file.read())", "def _schema_type(self) -> Optional[type]:\n return SigningPolicySchema", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def get_schemas(self, conn):\n return conn.get_schemas()['table_schema']", "def _get_schema(self):\n self.to_dask()\n return Schema(dtype=self._df.dtypes,\n shape=(None, len(self._df.columns)),\n npartitions=self._df.npartitions,\n metadata=self.metadata)", "def get_schema_structure(self) -> SchemaStructure:\n constructors: List[CombinatorData] = list(\n self._combinator_map.values()\n )\n methods: List[FunctionData] = list(\n self._function_map.values()\n )\n\n return SchemaStructure(constructors=constructors, methods=methods)", "def JSONSchema(self, default=None):\n return self.data.get('metadata', {}).get('$schema', default)" ]
[ "0.66698045", "0.6518725", "0.6390867", "0.6313399", "0.6210594", "0.62096435", "0.61262435", "0.61096203", "0.6025287", "0.5981262", "0.5939648", "0.5933518", "0.5902661", "0.58778864", "0.5872866", "0.58726424", "0.58520615", "0.58372164", "0.58244634", "0.5824207", "0.581969", "0.5783459", "0.5776963", "0.57689315", "0.57574755", "0.5726965", "0.57250506", "0.5714461", "0.5697706", "0.56923294", "0.56688327", "0.5665392", "0.56356925", "0.56322044", "0.5609556", "0.5597766", "0.5548235", "0.5541266", "0.55236185", "0.55118716", "0.5503674", "0.54893935", "0.546701", "0.5457018", "0.5451556", "0.54461867", "0.5429848", "0.53786594", "0.53594714", "0.53580964", "0.5341359", "0.5320134", "0.5313377", "0.53090924", "0.53016293", "0.52810794", "0.5273553", "0.52470654", "0.5203085", "0.5201358", "0.51940435", "0.5167343", "0.51617956", "0.5161381", "0.51533", "0.5152831", "0.51415414", "0.51373535", "0.5134352", "0.51117676", "0.5091775", "0.50913453", "0.5090095", "0.50797313", "0.5066588", "0.50664735", "0.5064499", "0.50513864", "0.50385195", "0.50379115", "0.5031788", "0.5031475", "0.50293505", "0.50254554", "0.5022628", "0.5012679", "0.50097924", "0.5006238", "0.50053716", "0.5003605", "0.5000007", "0.49880132", "0.49860084", "0.49856916", "0.4972932", "0.49688518", "0.4964152", "0.49598068", "0.4958572", "0.49577603" ]
0.8233626
0
Validate data against a schema, raises exceptions of type InvalidJson if not compliant.
def _validate_against_schema(self, strand, data):
    schema = self._get_schema(strand)

    try:
        jsonschema_validate(instance=data, schema=schema)
        logger.debug("Validated %s against schema", strand)

    except ValidationError as e:
        raise exceptions.invalid_contents_map[strand](str(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self, json_data):\n self._errors = None\n success = True\n for item in self._schema:\n if not item.validate(json_data):\n success = False\n\n return success", "def validate_schema(*,\n jsonschema: dict,\n data: Any\n ) -> None:\n # from typing import TYPE_CHECKING\n # if not TYPE_CHECKING:\n # otherwise mypy raises error\n # return\n\n _errors: defaultdict = defaultdict(list)\n\n def set_nested_item(data_dict, path, key, val): # type: ignore\n for _key in path:\n data_dict.setdefault(_key, {})\n data_dict = data_dict[_key]\n\n data_dict.setdefault(key, list())\n data_dict[key].append(val)\n\n for err in Draft7Validator(schema=jsonschema).iter_errors(instance=data):\n path = err.schema_path\n\n if \"properties\" in path:\n path.remove(\"properties\")\n key = path.popleft()\n\n if \"required\" in path or key == \"required\":\n key = err.message.split(\"'\")[1]\n elif err.relative_path:\n key = err.relative_path.pop()\n\n set_nested_item(_errors, err.relative_path, key, err.message)\n\n if _errors:\n raise app_exceptions.ValidateDataError(dict(_errors))", "def validate_json_schema(self, json_schema):\n cls = validators.validator_for(json_schema)\n cls.check_schema(json_schema)", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def validate_data(schema_cls: Serializer, data: dict) -> dict:\n schema = schema_cls(data=data)\n schema.is_valid(raise_exception=True)\n return schema.validated_data", "def validate_against_schema(request, schema, data):\n try:\n data_pure = schema.deserialize(data)\n data_clean = post_serialize(data_pure)\n # Attach data_clean to request: see usage in views.\n request.data_clean = data_clean\n except Invalid as e:\n # here we transform the errors we got from colander into cornice\n # errors\n for field, error in e.asdict().items():\n request.errors.add('body', field, error)", "def validate(self, json_data):\n try:\n self.process_json(json_data)\n except ValueError as e:\n # self.process_errors.append(e.args[0])\n self.process_errors = [e.args[0]]\n\n self.errors = list(self.process_errors)\n\n # Run validators\n if not self.errors:\n chain = itertools.chain(self.validators)\n self._run_validation_chain(chain)\n\n return len(self.errors) == 0", "def validator(data_json):\n fields = spec[\"fields\"]\n data = json.loads(data_json, object_pairs_hook=collections.OrderedDict)\n for k, v in fields.items():\n if v.get(\"required\"):\n found = False\n if k in data:\n found = True\n elif \".\" in k:\n # Dotted keys could be nested, like 
ecs.version\n subkeys = k.split(\".\")\n subval = data\n for subkey in subkeys:\n subval = subval.get(subkey, {})\n if subval:\n found = True\n if not found:\n raise ValidationError(\"Missing required key {}\".format(k))\n if k in data:\n if v[\"type\"] == \"string\" and not (\n isinstance(data[k], str) or isinstance(data[k], basestring)\n ):\n raise ValidationError(\n \"Value {0} for key {1} should be string, is {2}\".format(\n data[k], k, type(data[k])\n )\n )\n if v[\"type\"] == \"datetime\":\n try:\n datetime.datetime.strptime(data[k], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n raise ValidationError(\n \"Value {0} for key {1} doesn't parse as an ISO datetime\".format(\n data[k], k\n )\n )\n if v.get(\"index\") and list(data.keys())[v.get(\"index\")] != k:\n raise ValidationError(\"Key {0} is not at index {1}\".format(k, index))\n\n return data_json", "def validate_against_schema(self, json_doc):\n if self.uri not in self.se.validation:\n raise RuntimeError(\"$validation is not defined for {} field; thus the json document could not be validated\".format(self.name))\n else:\n validate(json_doc, self.se.validation[self.uri])\n print('The JSON document is valid')", "def validate_json(self):\n pass", "def validate(cls, data, schema=None):\n if data is None:\n raise APIError(\n \"Data of the apartment to be created must be passed a json body\",\n status=400,\n )\n\n try:\n if schema is None:\n return data\n\n return schema.load(data)\n except ValidationError as error:\n raise APIError(\n \"Error validating input data\", causes=error.messages\n ) from error", "def validate_json(self, data, schema):\n validator = jsonschema.Draft7Validator(schema, format_checker=jsonschema.draft7_format_checker)\n errors = validator.iter_errors(data)\n error_list = [(error.message, str(error.path), error) for error in errors]\n return error_list", "def is_json_valid(json_data: dict, json_schema: dict) -> bool:\r\n try:\r\n validate(instance=json_data, schema=json_schema)\r\n except jsonschema.exceptions.ValidationError as err:\r\n return False\r\n return True", "def assert_valid_schema(data, schema_file):\n\n schema = _load_json_schema(schema_file)\n return validate(data, schema)", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def validate_json_schema(path, name, data, schema, full_schema=not is_extension):\n errors = 0\n\n # The standard repository has an example extension.\n if 'docs/examples/organizations/organizational_units/ocds_divisionCode_extension' in path:\n full_schema = False\n\n # Kingfisher Collect uses JSON Schema files to validate Scrapy items.\n code_repo = repo_name == 'kingfisher-collect'\n\n # Non-OCDS schema don't:\n # * pair \"enum\" and \"codelist\"\n # * disallow \"null\" in \"type\" of \"items\"\n # * UpperCamelCase definitions and lowerCamelCase properties\n # * allow \"null\" in the \"type\" of optional fields\n # * include \"id\" fields in objects within arrays\n # * require \"title\", \"description\" and \"type\" properties\n json_schema_exceptions = {\n 'json-schema-draft-4.json',\n 'meta-schema.json',\n 'meta-schema-patch.json',\n }\n ocds_schema_exceptions = {\n 
'dereferenced-release-schema.json',\n # standard-maintenance-scripts\n 'codelist-schema.json',\n 'extension-schema.json',\n # extension_registry\n 'extensions-schema.json',\n 'extension_versions-schema.json',\n # spoonbill\n 'ocds-simplified-schema.json',\n }\n schema_exceptions = json_schema_exceptions | ocds_schema_exceptions\n\n validate_items_type_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n '/definitions/AmendmentUnversioned/properties/changes/items', # deprecated\n '/definitions/record/properties/releases/oneOf/0/items', # 1.1\n },\n }\n\n def validate_codelist_enum_allow_missing(codelist):\n return is_extension and codelist in external_codelists\n\n validate_codelist_enum_kwargs = {\n 'fallback': {\n '/definitions/Metric/properties/id': ['string'],\n '/definitions/Milestone/properties/code': ['string', 'null'],\n },\n 'allow_missing': validate_codelist_enum_allow_missing,\n }\n\n validate_letter_case_kwargs = {\n 'property_exceptions': {'former_value'}, # deprecated\n 'definition_exceptions': {'record'}, # 1.1\n }\n\n def validate_metadata_presence_allow_missing(pointer):\n return 'links' in pointer.split('/') or code_repo # ocds_pagination_extension\n\n validate_metadata_presence_kwargs = {\n 'allow_missing': validate_metadata_presence_allow_missing,\n }\n\n def validate_object_id_allow_missing(pointer):\n parts = pointer.split('/')\n return 'versionedRelease' in parts or parts[-1] in {\n 'changes', # deprecated\n 'records', # uses `ocid` not `id`\n '0', # linked releases\n }\n\n validate_object_id_kwargs = {\n 'allow_missing': validate_object_id_allow_missing,\n 'allow_optional': {\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Amendment',\n '/definitions/Organization',\n '/definitions/OrganizationReference',\n '/definitions/RelatedProcess',\n },\n }\n if repo_name == 'infrastructure':\n validate_object_id_kwargs['allow_optional'].add('/definitions/Classification')\n\n validate_null_type_kwargs = {\n # OCDS allows null. 
OC4IDS disallows null.\n 'no_null': repo_name == 'infrastructure' or code_repo,\n 'allow_object_null': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n # See https://github.com/open-contracting/standard/pull/738#issuecomment-440727233\n '/definitions/Organization/properties/details',\n },\n 'allow_no_null': {\n '/definitions/Amendment/properties/changes/items/properties/property', # deprecated\n\n # Children of fields with omitWhenMerged.\n '/definitions/Link/properties/rel',\n '/definitions/Link/properties/href',\n\n # 2.0 fixes.\n # See https://github.com/open-contracting/standard/issues/650\n '/definitions/Organization/properties/id',\n '/definitions/OrganizationReference/properties/id',\n '/definitions/RelatedProcess/properties/id',\n },\n }\n\n validate_array_items_kwargs = {\n 'allow_invalid': {\n '/definitions/Amendment/properties/changes/items/properties/former_value', # deprecated\n '/definitions/Location/properties/geometry/properties/coordinates/items', # recursion\n },\n }\n\n validate_deep_properties_kwargs = {\n 'allow_deep': {\n '/definitions/Amendment/properties/changes/items', # deprecated\n },\n }\n if is_extension: # avoid repetition in extensions\n validate_deep_properties_kwargs['allow_deep'].add('/definitions/Item/properties/unit')\n\n validator = Draft4Validator(schema, format_checker=FormatChecker())\n\n errors += validate_schema(path, data, validator)\n if errors:\n warnings.warn(f'{path} is not valid JSON Schema ({errors} errors)')\n\n if name not in schema_exceptions:\n if 'versioned-release-validation-schema.json' in path:\n validate_items_type_kwargs['additional_valid_types'] = ['object']\n errors += validate_array_items(path, data, **validate_array_items_kwargs)\n errors += validate_items_type(path, data, **validate_items_type_kwargs)\n if not code_repo:\n errors += validate_codelist_enum(path, data, **validate_codelist_enum_kwargs)\n errors += validate_letter_case(path, data, **validate_letter_case_kwargs)\n errors += validate_merge_properties(path, data)\n\n # `full_schema` is set to not expect extensions to repeat information from core.\n if full_schema:\n exceptions_plus_versioned = schema_exceptions | {\n 'versioned-release-validation-schema.json',\n }\n\n exceptions_plus_versioned_and_packages = exceptions_plus_versioned | {\n 'project-package-schema.json',\n 'record-package-schema.json',\n 'release-package-schema.json',\n }\n\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_ref(path, data)\n\n if name not in exceptions_plus_versioned:\n # Extensions aren't expected to repeat `title`, `description`, `type`.\n errors += validate_metadata_presence(path, data, **validate_metadata_presence_kwargs)\n if not code_repo:\n # Extensions aren't expected to repeat referenced `definitions`.\n errors += validate_object_id(path, jsonref.replace_refs(data), **validate_object_id_kwargs)\n\n if name not in exceptions_plus_versioned_and_packages:\n # Extensions aren't expected to repeat `required`. Packages don't have merge rules.\n errors += validate_null_type(path, data, **validate_null_type_kwargs)\n # Extensions aren't expected to repeat referenced codelist CSV files\n # TODO: This code assumes each schema uses all codelists. 
So, for now, skip package schema.\n errors += validate_schema_codelists_match(path, data, cwd, is_extension, is_profile, external_codelists)\n\n else:\n # Don't count these as errors.\n validate_deep_properties(path, data, **validate_deep_properties_kwargs)\n\n assert not errors, 'One or more JSON Schema files are invalid. See warnings below.'", "def validate(schema):\n def decorator(func):\n def wrapper(self, req, resp, *args, **kwargs):\n try:\n raw_json = req.stream.read()\n obj = json.loads(raw_json.decode('utf-8'))\n obj['req_id'] = req.context.get('request_id')\n except Exception:\n raise falcon.HTTPBadRequest(\n title='Invalid data',\n description='Could not properly parse the provided data as JSON',\n code='001'\n )\n\n try:\n jsonschema.validate(obj, schema)\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Failed data validation',\n description=e.message,\n code='002'\n )\n\n return func(self, req, resp, *args, parsed=obj, **kwargs)\n return wrapper\n return decorator", "def validateJSON(jsonData):\n try:\n json.loads(jsonData)\n validate(instance=json.loads(jsonData), schema=read_json_schema(schema_file_path))\n except Exception as err:\n logging.error(err)\n logging.info(\" Message received is not correct \")\n logging.info(\" Message sent to Pulsar Rejection Topic for reprocessing\")\n # IF a message is not correct, I prefer to stop the consumer and fix the problem. Another way will be to\n # Send message to another to topic if the message is not valid and change raise below by pass.\n raise\n return False\n\n return True", "def validator(request, schema):\n try:\n body = request.body.decode('utf-8')\n dictbody = json.loads(body) if body else {}\n validate_against_schema(request, schema, dictbody)\n except ValueError as e:\n request.errors.add('body', 'body', six.text_type(e))", "def validate_json(schema, doc):\n is_invalid = set(doc).difference(set(schema))\n if is_invalid:\n return False\n return True", "def validate(obj, schema=PROCESSING_SERVER_CONFIG_SCHEMA):\n return JsonValidator.validate(obj, schema)", "def validate(self) -> None:\n schema = type(self).schema\n if schema:\n if self.data is None:\n raise PresenterException(\"No data given.\")\n try:\n schema(self.data)\n except JsonSchemaException as exception:\n raise PresenterException(exception.message)\n else:\n if self.data is not None:\n raise PresenterException(\"This presenter does not take data.\")", "def assert_valid_schema(data, schemafile):\n\n schema = _load_json_schema(schemafile)\n return validate(data, schema)", "def validate(cls, data, errors):", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def validate(data):\n try:\n return Schema(Validator.SCHEMA).validate(data)\n except SchemaError as exception:\n logging.getLogger(__name__).error(exception)\n return None", "def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def validate(self, data):\n age = data.get(\"age\", None)\n age = age.split(\",\")\n size = data.get(\"size\", None)\n size = size.split(\",\")\n gender = data.get(\"gender\", None)\n gender = gender.split(\",\")\n for i in age:\n if i not in ['b', 'y', 'a', 's']:\n raise serializers.ValidationError(\n \"Age must be either 'b' for baby, 'y' for young,\"\n \" 'a' for adult, or 's' for 
senior. Can do multiple with\"\n \" commas, ex: a,y,e\")\n for i in size:\n if i not in ['s', 'm', 'l', 'xl']:\n raise serializers.ValidationError(\n \"Size must be either 's' for small, 'm' for medium, 'l' \"\n \"for large, or 'xl' for extra large. Can do multiple with\"\n \" commas, ex: s,l,xl\")\n for i in gender:\n if i not in ['m', 'f']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, or 'f' for female. Can\"\n \" have both using commas, ex: m,f\")\n return data", "def validate(json_resp, schema, validictory_path, schema_base=None):\n # assumes /extern/validictory exists (see /cm for instructions)\n if not validictory_path in sys.path:\n sys.path.append(validictory_path)\n import validictory\n\n try:\n if schema_base and not json_resp[\"$schema\"].startswith(schema_base):\n print \"Warning: JSON schema is \", json_resp[\"$schema\"], \"instead of \", schema_base\n validictory.validate(json_resp, schema, required_by_default=False)\n return True\n except Exception as e:\n print \"Received exception %s while trying to validate: %s\" % (\n str(e), json_resp)\n return False", "def validate_schema(schema):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n try:\n validate(request.json, schema)\n except:\n return bad_request()\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator", "def test_invalid_schema():\n # setup\n uid = uuid.uuid4()\n schema_dir = f'/tmp/{uid}'\n schema_file = f'{schema_dir}/bad_schema.json'\n os.makedirs(schema_dir)\n text_file = open(schema_file, 'w')\n text_file.write('this will fail[];fail()')\n text_file.close()\n\n data = {}\n\n # test\n is_valid, errors = validate(data, 'bad_schema', validate_schema=True)\n\n # teardown\n os.remove(schema_file)\n os.removedirs(schema_dir)\n\n assert not is_valid\n assert errors", "def validate(data, schema, resolver):\n try:\n # if valid returns None\n return Draft7Validator(schema=schema, resolver=resolver).validate(data)\n except ValidationError:\n raise ValidationError(\"data couldn't be validated.\")\n except RefResolutionError:\n raise RefResolutionError(\"schema references couldn't be solved.\")", "def validate_insert_json(request_json):\n try:\n jsonschema.validate(request_json, schema_input)\n except (jsonschema.exceptions.ValidationError, jsonschema.exceptions.SchemaError, JSONDecodeError) as e:\n current_app.logger.info(\"Invalid json:{}\".format(str(e)))\n raise (InvalidJSONError(\"Invalid json:{}\".format(str(e))))", "def validate_property_schema(self, schema):\n json_schema_path = os.path.join(_ROOT,\n 'data',\n 'property_json_schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def validate(json_data: json,\n schema_id: str,\n schema_store: dict = None,\n validate_schema: bool = False,\n schema_search_path: str = None\n ) -> Tuple[bool, iter]:\n try:\n if not schema_search_path:\n schema_search_path = path.join(path.dirname(__file__), 'schemas')\n\n if not schema_store:\n schema_store = get_schema_store(validate_schema, schema_search_path)\n\n schema = schema_store.get(f'{BASE_URI}/{schema_id}')\n if validate_schema:\n Draft7Validator.check_schema(schema)\n\n schema_file_path = path.join(schema_search_path, schema_id)\n resolver = RefResolver(f'file://{schema_file_path}.json', schema, schema_store)\n\n if Draft7Validator(schema,\n format_checker=Draft7Validator.FORMAT_CHECKER,\n resolver=resolver\n ) \\\n .is_valid(json_data):\n return True, None\n\n errors = Draft7Validator(schema,\n 
format_checker=Draft7Validator.FORMAT_CHECKER,\n resolver=resolver\n ) \\\n .iter_errors(json_data)\n return False, errors\n\n except SchemaError as error:\n # handle schema error\n return False, error", "def validate(document, schema=\"ctsa::bts:CTSADataset\"):\n try:\n document = ensure_document(document)\n schema = ensure_schema(schema)\n\n except (TypeError, ValueError) as err:\n raise DatasetValidationError(err)\n\n try:\n validator = jsonschema.Draft7Validator(schema)\n if not validator.is_valid(document):\n errors = []\n for error in sorted(validator.iter_errors(document), key=str):\n errors.append(error.message)\n raise DatasetJsonSchemaValidationError(errors)\n except jsonschema.exceptions.SchemaError as err: #Invalid Schema\n raise DatasetJsonSchemaValidationError(err)\n except jsonschema.exceptions.ValidationError as err: #Invalid Doc\n raise DatasetJsonSchemaValidationError(err)\n except Exception as exc: # unexpected errors\n raise DatasetValidationError(str(exc))\n\n return ValidatedDict(document)", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def schema_validation(schema):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n data = {}\n if request.method in ['POST', 'PATCH', 'PUT']:\n data = request.get_json(force=True)\n elif request.method in ['GET', 'DELETE']:\n data = request.args.to_dict()\n\n v = Validator(schema)\n v.allow_unknown = True\n if v.validate(data):\n return function(*args, **kwargs)\n else:\n return jsonify({'errors': v.errors}), 400\n\n return wrapper\n return decorator", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def isValidForSchema(schema):\n\n return True", "def validate_with(schema):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n if not (request.json or request.data or request.form):\n flask_restful.abort(400, message='Validation error.',\n errors=['No data provided.'])\n try:\n data = schema(prepare_request_data(request))\n except voluptuous.MultipleInvalid as err:\n flask_restful.abort(400,\n message='Validation error.',\n errors=[str(e) for e in err.errors])\n setattr(request, 'validated_body', data)\n return f(*args, **kwargs)\n return wrapper\n return decorator", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False", "def validate_payload(cls, event):\n # TODO: Use invenio-jsonschemas/jsonresolver instead of this\n # Validate against Event JSONSchema\n # NOTE: raises `jsonschemas.ValidationError`\n cls._jsonschema_validator.validate(event)\n\n # Validate using marshmallow loader\n for payload in event:\n errors = RelationshipSchema(check_existing=True).validate(payload)\n if errors:\n raise MarshmallowValidationError(str(errors) + \"payload\" + str(payload))", "def assertValidJSON(self, data):\r\n # Just try the load. 
If it throws an exception, the test case will fail.\r\n self.serializer.from_json(data)", "def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def validate_class_schema(self, schema):\n json_schema_path = os.path.join(_ROOT,\n 'data',\n 'class_json_schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def validator(data):\n\n request_validator = cerberus.Validator(SCHEMA)\n if request_validator.validate(data):\n return True\n else:\n return request_validator.errors", "def validate(self, schema=None, callback=None):\n return hxl.schema(schema, callback).validate(self)", "def validate(self, config_json):\n pass", "def testCheck(self):\r\n from pydsl.Grammar.Definition import JsonSchema\r\n from pydsl.Check import JsonSchemaChecker\r\n schema = {\r\n \"type\" : \"string\",\r\n \"items\" : {\r\n \"type\" : [\"string\", \"object\"],\r\n \"properties\" : {\r\n \"foo\" : {\"enum\" : [1, 3]},\r\n #\"bar\" : { #See https://github.com/Julian/jsonschema/issues/89\r\n # \"type\" : \"array\",\r\n # \"properties\" : {\r\n # \"bar\" : {\"required\" : True},\r\n # \"baz\" : {\"minItems\" : 2},\r\n # }\r\n #}\r\n }\r\n }\r\n }\r\n grammardef = JsonSchema(schema)\r\n checker = JsonSchemaChecker(grammardef)\r\n self.assertTrue(checker.check(\"a\"))\r\n self.assertFalse(checker.check([1, {\"foo\" : 2, \"bar\" : {\"baz\" : [1]}}, \"quux\"]))", "def validate(validator, document):\n try:\n validator.validate(document)\n except jsonschema.ValidationError as ex:\n raise wsgi_errors.HTTPBadRequestBody(\n '{0}: {1}'.format(ex.args, ex.message)\n )", "def validate(self) -> bool:\n\n # Start by reading in the blueprint schema json\n schema = json.loads(pkgutil.get_data(\"FactorioTools\", \"blueprintSchema.json\"))\n\n # Validate the object's schema against the blueprintSchema JSON\n try:\n jsonschema.validate(self.data, schema)\n return True\n except jsonschema.ValidationError:\n pass\n\n return False", "def check_schema(body, schema):\n validator = jsonschema.Draft4Validator(\n schema, format_checker=jsonschema.FormatChecker())\n try:\n validator.validate(body)\n except jsonschema.ValidationError as exc:\n raise exception.InvalidParameterValue(_('Invalid create body: %s') %\n exc)", "def validate(\n instance: typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any]],\n schema: typing.Dict[str, typing.Any],\n path: typing.Optional[typing.List[str]] = None,\n allow_disabled_languages: bool = False,\n strict: bool = False\n) -> None:\n if path is None:\n path = []\n if not isinstance(schema, dict):\n raise ValidationError('invalid schema (must be dict)', path)\n if 'type' not in schema:\n raise ValidationError('invalid schema (must contain type)', path)\n if schema['type'] == 'array' and isinstance(instance, list):\n return _validate_array(instance, schema, path, allow_disabled_languages=allow_disabled_languages, strict=strict)\n elif schema['type'] == 'object' and isinstance(instance, dict):\n return _validate_object(instance, schema, path, allow_disabled_languages=allow_disabled_languages, strict=strict)\n elif schema['type'] == 'text' and isinstance(instance, dict):\n return _validate_text(instance, schema, path, 
allow_disabled_languages=allow_disabled_languages)\n elif schema['type'] == 'datetime' and isinstance(instance, dict):\n return _validate_datetime(instance, schema, path)\n elif schema['type'] == 'bool' and isinstance(instance, dict):\n return _validate_bool(instance, schema, path)\n elif schema['type'] == 'quantity' and isinstance(instance, dict):\n return _validate_quantity(instance, schema, path)\n elif schema['type'] == 'sample' and isinstance(instance, dict):\n return _validate_sample(instance, schema, path)\n elif schema['type'] == 'measurement' and isinstance(instance, dict):\n return _validate_measurement(instance, schema, path)\n elif schema['type'] == 'object_reference' and isinstance(instance, dict):\n return _validate_object_reference(instance, schema, path)\n elif schema['type'] == 'tags' and isinstance(instance, dict):\n return _validate_tags(instance, schema, path, strict=strict)\n elif schema['type'] == 'hazards' and isinstance(instance, dict):\n return _validate_hazards(instance, schema, path)\n elif schema['type'] == 'user' and isinstance(instance, dict):\n return _validate_user(instance, schema, path)\n elif schema['type'] == 'plotly_chart' and isinstance(instance, dict):\n return _validate_plotly_chart(instance, schema, path)\n else:\n raise ValidationError('invalid type', path)", "def validate(self, data):\n raise NotImplementedError(\"Inherit this class and override this method.\")", "def validate(self, data):\n gender = data.get(\"gender\", None)\n size = data.get(\"size\", None)\n if gender not in ['m', 'f', 'u']:\n raise serializers.ValidationError(\n \"Gender must be either 'm' for male, 'f' \"\n \"for female, or 'u' for unknown.\")\n elif size not in ['s', 'm', 'l', 'xl', 'u']:\n raise serializers.ValidationError(\n \"Size must be 's' for small, 'm' for medium, 'l' for large,\"\n \" 'xl' for extra large, or 'u' for unknown.\")\n return data", "def validate(schema, record):\n if six.PY3:\n return Utils._py3_validate(schema, record)\n else:\n return Utils._py2_validate(schema, record)", "def validate_datasets(row):\n data_validator = DataJSONDataset(row)\n valid = data_validator.validate(validator_schema=row['validator_schema'])\n errors = data_validator.errors\n row['validation_errors'] = errors\n if not valid:\n logger.error(f'Error validating {row}: {errors}')", "def validate_schema(payload, schema):\n errors = []\n validator = jsonschema.Draft4Validator(schema,\n format_checker=jsonschema.FormatChecker())\n for error in sorted(validator.iter_errors(payload), key=str):\n errors.append(error.message)\n\n return errors", "def test_jsonschema_validation_with_schema_object(self):\n check_value = {\"a\": 1}\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\n \"type\": \"integer\",\n \"const\": 1\n }\n }\n }\n jsonschema_validation(check_value, schema)", "def validate_schema(self, data, **kwargs):\n if \"role\" not in data and \"visible\" not in data:\n raise ValidationError(_(\"Missing fields 'role' and/or 'visible'.\"))", "def handle_marshmallow_validaton(err): # except ValidationError as err\n return jsonify(err.messages), 400 # bad request", "def _verify_schema(schema):\n assert type(schema) in [dict, tuple], f'Expected a dict or a tuple but got {type(schema)}'\n if isinstance(schema, tuple):\n assert len(schema) == 2, f'Expected a tuple with length 2 but got length {len(schema)}'\n if schema[1] is not None:\n assert isinstance(schema[1], schema[0]), f'{str(schema[1])} does not have expected type {str(schema)}'\n elif isinstance(schema, dict):\n 
for sub_schema in schema.values():\n _verify_schema(sub_schema)", "def validate(self, value):\n if isinstance(value, dict):\n if set(value.keys()) == {\"type\", \"coordinates\"}:\n if value[\"type\"] != self._type:\n self.error(f'{self._name} type must be \"{self._type}\"')\n return self.validate(value[\"coordinates\"])\n else:\n self.error(\n \"%s can only accept a valid GeoJson dictionary\"\n \" or lists of (x, y)\" % self._name\n )\n return\n elif not isinstance(value, (list, tuple)):\n self.error(\"%s can only accept lists of [x, y]\" % self._name)\n return\n\n validate = getattr(self, \"_validate_%s\" % self._type.lower())\n error = validate(value)\n if error:\n self.error(error)", "def valid(schema=None):\n def dec(fun):\n @wraps(fun)\n def d_func(self, ctx, data, *a, **kw):\n try:\n validate(data['params'], schema)\n except ValidationError as err:\n raise InvalidParams(err)\n except SchemaError as err:\n raise InternalError(err)\n return fun(self, ctx, data['params'], *a, **kw)\n return d_func\n return dec", "def validate(data):\n if 'value' not in data or \\\n 'category' not in data or \\\n 'classification' not in data or \\\n 'account' not in data:\n raise Exception('Missing required field.')\n classifications = ['Personal', 'Essential', 'Savings', 'Income']\n if data['classification'] not in classifications:\n raise Exception('Invalid classification.')", "def validate(self, descriptor, schema_id):\n try:\n jsonschema.validate(descriptor, self.load_schema(schema_id))\n return True\n\n except ValidationError as e:\n log.error(\"Failed to validate Descriptor against schema '{}'\"\n .format(schema_id))\n self.error_msg = e.message\n log.error(e.message)\n return\n\n except SchemaError as e:\n log.error(\"Invalid Schema '{}'\".format(schema_id))\n self.error_msg = e.message\n log.debug(e)\n return", "def validate(mapping: Mapping[str, Any], ref: str) -> List[SchemaError]:\n # pylint: disable=too-many-branches\n\n valid_err = None # type: Optional[jsonschema.ValidationError]\n try:\n jsonschema.validate(instance=mapping, schema=mapry.schemas.GRAPH)\n except jsonschema.ValidationError as err:\n valid_err = err\n\n if valid_err is not None:\n return [\n SchemaError(\n message=\"Does not follow json schema: {}\".format(\n valid_err.message),\n ref='/'.join([ref] + [str(part) for part in valid_err.path]))\n ]\n\n errors = [] # type: List[SchemaError]\n\n # Enforce name and description to be at the top of the schema\n if isinstance(mapping, collections.OrderedDict):\n mapping_keys = list(mapping.keys())\n\n if mapping_keys[0] != 'name':\n errors.append(\n SchemaError(\n message=(\n \"Expected name to be the first property of the schema, \"\n \"but got {}\").format(mapping_keys[0]),\n ref='{}/name'.format(ref)))\n\n if mapping_keys[1] != 'description':\n errors.append(\n SchemaError(\n message=(\n \"Expected description to be the second property \"\n \"of the schema, but got {}\").format(mapping_keys[1]),\n ref='{}/description'.format(ref)))\n\n # Register classes and embeds and\n # check that there are no duplicate class or embed names.\n name_set, name_errs = _validate_names(mapping=mapping, ref=ref)\n errors.extend(name_errs)\n\n # Validate class fields except properties\n if 'classes' in mapping:\n for i, cls_mapping in enumerate(mapping['classes']):\n errors.extend(\n _validate_class(\n mapping=cls_mapping, ref='{}/classes/{}'.format(ref, i)))\n\n # Validate properties of the classes and embeds\n types = _NONCOMPOSITE_TYPE_SET.copy()\n types.update(name_set)\n\n # Check that the graph 
properties are valid\n errors.extend(_validate_properties(mapping=mapping, ref=ref, types=types))\n\n # Check that the properties of the classes are valid\n if 'classes' in mapping:\n for i, cls_mapping in enumerate(mapping['classes']):\n errors.extend(\n _validate_properties(\n mapping=cls_mapping,\n ref='{}/classes/{}'.format(ref, i),\n types=types))\n\n # Check that the properties of the embeds are valid\n if 'embeds' in mapping:\n for i, embed_mapping in enumerate(mapping['embeds']):\n errors.extend(\n _validate_properties(\n mapping=embed_mapping,\n ref='{}/embeds/{}'.format(ref, i),\n types=types))\n\n # Check that all the plurals of the classes\n # do not conflict any of the graph properties\n errors.extend(_validate_plurals(mapping=mapping, ref=ref))\n\n return errors", "def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))", "def _handle_with_schema_validation(success_handler, schema_dict):\n try:\n schema = Schema(schema_dict)\n args = schema(request.args.to_dict())\n except MultipleInvalid as e:\n return abort(400, 'value at %s failed validation: %s' % (e.path, e.msg))\n result = success_handler(args)\n return jsonify(result)", "def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)", "def get_valid_json_or_abort(schema):\n\n json_request = flask.request.get_json(force=True)\n\n try:\n jsonschema.validate(json_request, schema)\n except jsonschema.ValidationError as e:\n flask_restful.abort(400, message=e.message)\n else:\n return json_request", "def test_schema_completeness_validation_valid_input(self):\n for complete_schema in list_of_schema_inputs:\n validate_json_schema_completeness(complete_schema)\n\n assert True", "def json_attribs_check(func):\n @wraps(func)\n def inner_func(jsonStr):\n gslvtsSchema = {\"type\":\"object\",\n \"properties\":{\n \"tagID\": {\"type\":\"number\"}, \n \"UTC\": {\"type\":\"string\",\n \"format\":\"date-time\"}\n\t\t\t},\n\t\t\t\"required\":[\"tagID\",\"UTC\"]\n }\n try:\n jsonGslvts=json.loads(jsonStr)\n for elem in jsonGslvts:\n try: \n validate(elem, gslvtsSchema, format_checker=FormatChecker())\n except ValidationError, e:\n print \"[-] Invalid json post data. 
Check it, brah.\"\n print e\n raise AttributeError \n except (AttributeError, ValueError):\n print \"[-] IDk what that was, but it wasn't JSON.\"\n raise AttributeError\n\n return(func(jsonStr)) \n return inner_func", "def test_input_schema(self, data, errors):\n resp = self.client.post(self.url, json=data)\n\n if not errors:\n assert resp.status_code == 200\n assert resp.get_json() == {\n 'status': 'OK',\n 'message': 'Data published via Upload service',\n }\n else:\n assert resp.status_code == 400\n assert resp.get_json() == {\n 'status': 'Error',\n 'message': 'Input payload validation failed',\n 'errors': {\n k: ['Missing data for required field.'] for k in errors\n },\n }", "def validate_input(self, deposition, draft_id=None):\n v = APIValidator()\n draft_id = draft_id or deposition.get_default_draft_id()\n metadata_schema = deposition.type.api_metadata_schema(draft_id)\n\n if metadata_schema:\n schema = self.input_schema.copy()\n schema['metadata'] = metadata_schema\n else:\n schema = self.input_schema\n\n # Either conform to dictionary schema or dictionary is empty\n if not v.validate(request.json, schema) and \\\n request.json:\n abort(\n 400,\n message=\"Bad request\",\n status=400,\n errors=filter_validation_errors(v.errors),\n )", "def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)", "def validate(scheme, data):\n return validate_common(scheme, data)", "def validate_index(self):\n schema_path = pkg_resources.resource_filename(\n \"FLIR.conservator\", \"index_schema.json\"\n )\n with open(schema_path) as o:\n schema = json.load(o)\n\n try:\n with open(self.index_path) as index:\n index_data = json.load(index)\n jsonschema.validate(index_data, schema)\n return True\n except jsonschema.exceptions.ValidationError as e:\n logger.error(e.message)\n logger.debug(e)\n return False", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def validate(request, schema, variable_decode=True,\n variable_decode_dict_char=\".\", variable_decode_list_char=\"-\",\n context=None, data=None):\n\n if not data:\n try:\n data = request.json_body\n except ValueError:\n data = request.params\n\n if variable_decode is True:\n data = formencode.variabledecode.variable_decode(data,\n dict_char=variable_decode_dict_char,\n list_char=variable_decode_list_char)\n\n state = Dummyobj()\n state.request = request\n state.context = context\n\n schema = inspect.isclass(schema) and schema() or schema\n\n return schema.to_python(data, state=state)", "def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)", "def validate(self, data):\n if data[\"start_time_period\"] > data[\"end_time_period\"]:\n raise serializers.ValidationError(\"End time can not be before start time\")\n elif (timezone.now() - data[\"end_time_period\"]) > timedelta(days=1):\n raise serializers.ValidationError(\"Weather 
can not be over 24 hours old\")\n elif data[\"end_time_period\"] > timezone.now():\n raise serializers.ValidationError(\"End time can not be in the future\")\n elif (data[\"end_time_period\"] - data[\"start_time_period\"]) >= timedelta(days=1):\n raise serializers.ValidationError(\"Only supports 1 day time frame for weather\")\n return data", "def test_validate_business_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'lastPreBobFilingTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'legalName': 'legal name - CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def validate_json(func):\n\n # get the function name because we're going to\n # see if we need SCHEMA_POST, SCHEMA_GET, etc.\n function_name = func.__name__.upper()\n\n def wrapped_func(self, *args, **kwargs):\n respective_schema = getattr(self, 'SCHEMA_' + function_name)\n json_request = get_valid_json_or_abort(respective_schema)\n return func(self, json_request, *args, **kwargs)\n\n return wrapped_func", "def validate(self):\n self.valid = True\n\n if self._value is None and self._strict:\n self.valid = False\n raise self.MissingFieldData\n\n elif self._value is not None:\n self._type_specific_validation()", "def test_encode_errors(self):\n if self._cls == 'MetaschemaType':\n if self._invalid_validate:\n self.assert_raises(NotImplementedError, self.import_cls.encode,\n self._invalid_validate[0], self.typedef)\n else:\n if self._invalid_validate:\n self.assert_raises((ValueError,\n jsonschema.exceptions.ValidationError),\n self.import_cls.encode,\n self._invalid_validate[0], self.typedef)", "def parse_json_or_fail(message, schema):\n try:\n body = tornado.escape.json_decode(message)\n except ValueError as e:\n raise tornado.web.HTTPError(400, reason=str(e))\n\n try:\n jsonschema.validate(body, schema)\n except jsonschema.exceptions.ValidationError as e:\n raise tornado.web.HTTPError(400, reason=e.message)\n\n return body", "def validate(self, fqn, data, errors):\n\t\terrors.append(\"{}: validate() must be implemented for SchemaBase derived classes.\".format(self.__class__.__name__))\n\t\treturn False", "def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))", "def _validate_data(self, vms, fw_rules):\n self._validate_vms(vms)\n self._validate_fw_rules(fw_rules)\n self._validated = True", "def validate_json(sub_domain, end_point_pattern, method, json_payload):\n if sub_domain not in REGISTER_INFO:\n return {\"errors\": ['invalid sub-domain']}\n # Get RAML resource\n raml = REGISTER_INFO[sub_domain][\"raml\"]\n raml_end_point = end_point_pattern.replace('<', '{').replace('>', '}')\n raml_resource = None\n for resource in raml.resources:\n if resource.path == raml_end_point and resource.method.lower() == method.lower():\n raml_resource = resource\n break\n if not raml_resource:\n return {\"errors\": ['cannot find RAML resource definition']}\n raml_schema = None\n # Get schema name from RAML resource\n if raml_resource.body:\n for body in 
raml_resource.body:\n if body.mime_type == \"application/json\":\n schema_name = body.schema\n for schema in raml.schemas:\n if schema_name in schema:\n raml_schema = schema[schema_name]\n break\n break\n if not raml_schema:\n return {\"errors\": ['cannot find schema in RAML resource definition']}\n # Load schema and validate against it\n try:\n validator = jsonschema.Draft4Validator(raml_schema, resolver=RELATIVE_RESOLVER)\n except jsonschema.SchemaError:\n return {\"errors\": ['invalid json schema']}\n errors = sorted(validator.iter_errors(json_payload), key=lambda e: e.path)\n error_return = []\n for error in errors:\n error_return.append(\"{}, {}\".format(str(list(error.schema_path)), error.message))\n for suberror in sorted(error.context, key=lambda e: e.schema_path):\n error_return.append(\"{}, {}\".format(str(list(suberror.schema_path)), suberror.message))\n return {\"errors\": error_return}", "def validate_config(\n json_schema: JsonDict, config: Any, config_path: StrSequence\n) -> None:\n try:\n jsonschema.validate(config, json_schema)\n except jsonschema.ValidationError as e:\n raise json_error_to_config_error(e, config_path)", "def validate(instance, schema, cls=None, *args, **kwargs):\r\n if cls is None:\r\n cls = validator_for(schema)\r\n cls.check_schema(schema)\r\n cls(schema, *args, **kwargs).validate(instance)", "def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. 
%s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True", "def validate(self, data):\n try:\n employee_rank = data['rank']\n except KeyError:\n employee_rank = 0\n\n try:\n supervisor_rank = data['supervisor'].rank\n except (AttributeError, KeyError):\n if employee_rank >= 0:\n return data\n else:\n raise serializers.ValidationError('Supervisor must have a rank')\n\n if employee_rank > supervisor_rank:\n raise serializers.ValidationError('Supervisor must have a higher rank than the Employee')\n return data" ]
[ "0.7925582", "0.78330433", "0.7702376", "0.7666036", "0.7600036", "0.75039744", "0.75000393", "0.74321485", "0.73822933", "0.73632777", "0.7302417", "0.72996646", "0.7248956", "0.71885204", "0.718743", "0.7182984", "0.69556284", "0.6946508", "0.69202065", "0.6914744", "0.6913997", "0.6910613", "0.6892817", "0.6866488", "0.6859395", "0.68378145", "0.6835319", "0.6810065", "0.6810065", "0.6770942", "0.6770414", "0.6756532", "0.6689595", "0.6601698", "0.65896475", "0.65882295", "0.6578094", "0.6568812", "0.6565888", "0.653349", "0.65034866", "0.64950615", "0.647298", "0.64423203", "0.6410456", "0.6392368", "0.6388402", "0.63870996", "0.63785976", "0.63761175", "0.6375627", "0.6369548", "0.63601154", "0.63517123", "0.6348413", "0.63454986", "0.6325446", "0.6324929", "0.6319054", "0.63138837", "0.6313755", "0.6310969", "0.62572026", "0.6255753", "0.6234075", "0.6225905", "0.6211293", "0.6203912", "0.62035215", "0.6201354", "0.6196464", "0.6189285", "0.61674726", "0.6167134", "0.6142796", "0.61309826", "0.6115044", "0.6099664", "0.6088083", "0.6081709", "0.6074435", "0.607352", "0.6071154", "0.6068628", "0.6067409", "0.60669315", "0.60635287", "0.6051935", "0.6051119", "0.60222846", "0.60154015", "0.60152125", "0.5985942", "0.59824795", "0.5982028", "0.5970247", "0.5967884", "0.5966777", "0.5964504", "0.59560305" ]
0.76444364
4
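Editorial aside (not part of the dataset rows): the negatives in the row above mostly revolve around validating a JSON payload against a JSON Schema, typically with the jsonschema library. A minimal, self-contained sketch of that common pattern, assuming Draft 7 semantics; the SCHEMA contents, the validate_payload helper, and the example payloads are illustrative, not taken from the dataset:

import jsonschema

# A toy schema in the spirit of the python-jsonschema docs example quoted in the row above.
SCHEMA = {
    "type": "object",
    "properties": {
        "price": {"type": "number"},
        "name": {"type": "string"},
    },
    "required": ["name", "price"],
}

def validate_payload(payload):
    """Return a list of human-readable error messages; an empty list means the payload is valid."""
    validator = jsonschema.Draft7Validator(SCHEMA, format_checker=jsonschema.FormatChecker())
    return [error.message for error in validator.iter_errors(payload)]

# Example usage (hypothetical data):
# validate_payload({"name": "Eggs", "price": 34.99})   -> []
# validate_payload({"name": "Eggs", "price": "oops"})  -> ["'oops' is not of type 'number'"]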
Validate that the installed version is consistent with an optional version specification in the twine file.
def _validate_twine_version(self, twine_file_twined_version):
    installed_twined_version = pkg_resources.get_distribution("twined").version
    logger.debug(
        "Twine versions... %s installed, %s specified in twine", installed_twined_version, twine_file_twined_version
    )
    if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):
        raise exceptions.TwineVersionConflict(
            f"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed"
        )
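Editorial aside (not part of the dataset row): the document above compares the installed twined package against an optional version pinned in the twine file and raises a conflict error when they differ. A standalone sketch of the same consistency check, assuming the package exposes its version via importlib.metadata (an alternative to the pkg_resources call in the snippet) and that a None pin means "any version is acceptable"; check_installed_version, VersionConflict, and the "0.5.1" pin are hypothetical names used only for illustration:

from importlib.metadata import PackageNotFoundError, version


class VersionConflict(Exception):
    """Raised when the installed package version differs from the pinned one."""


def check_installed_version(package_name, pinned_version=None):
    """Return the installed version, raising VersionConflict if it differs from pinned_version."""
    try:
        installed = version(package_name)
    except PackageNotFoundError as error:
        raise VersionConflict(f"{package_name} is not installed") from error

    if pinned_version is not None and installed != pinned_version:
        raise VersionConflict(
            f"{package_name} version conflict: the file pins {pinned_version}, "
            f"but {installed} is installed."
        )
    return installed


# Example usage (hypothetical pin):
# check_installed_version("twined", pinned_version="0.5.1")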
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_version(self):\n pass", "def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))", "def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()", "def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. \"\n \"Validation cannot be performed.\"\n ],\n )", "def validate_project_version(config: Dict[str, Any]) -> None:\n spacy_version = config.get(\"spacy_version\", None)\n if spacy_version and not is_compatible_version(about.__version__, spacy_version):\n err = (\n f\"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) \"\n f\"that's not compatible with the version of spaCy you're running \"\n f\"({about.__version__}). You can edit version requirement in the \"\n f\"{PROJECT_FILE} to load it, but the project may not run as expected.\"\n )\n msg.fail(err, exits=1)", "def test_valid_hh_version():\n # TODO: Basically only enforcing correct main segment, since not using `re.fullmatch`\n # TODO: Probably want `re.fullmatch` here - Currently ignoring any potentially invalid suffix\n version_pattern = r\"^[0-9]+\\.[0-9]+\\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])\"\n res = re.match(version_pattern, hh.__version__)\n assert res is not None", "def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)", "def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion", "def validate_required_python_version_running(minimal_required_version: str) -> None:\n try:\n parts = minimal_required_version.split(\".\")\n min_py_version = 1000000*int(parts[0]) + 1000*(int(parts[1]) if len(parts) > 1 else 0) + (int(parts[2]) if len(parts) > 2 else 0)\n running_py_version = 1000000*sys.version_info.major + 1000*sys.version_info.minor + sys.version_info.micro\n if running_py_version < min_py_version:\n raise RuntimeError(\"\")\n except:\n raise RuntimeError(f\"Kqlmagic requires python >= {Constants.MINIMAL_PYTHON_VERSION_REQUIRED}, you use python {sys.version}\")", "def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks", "def validate_backend_version(self):\n pass", "def validate_version(version):\n matched = VERSION_REGEX.match(version)\n if matched is None:\n print_stderr(\"Version '{0}' does not match version regex\".format(version))\n 
return\n\n # Format is syntactically valid\n version_dict = matched.groupdict()\n\n # Check dependents\n dependencies = [(\"special_sep\", \"special\"), (\"special\", \"special_sep\"), (\"index_sep\", \"index\"), (\"index\", \"index_sep\"), (\"index\", \"special\"), (\"special\", \"index\")]\n for dependent, dependency in dependencies:\n if version_dict[dependent] and not version_dict[dependency]:\n print_stderr(\"Version '{0}' is invalid: '{1}' is defined but not '{2}'\".format(version, dependent, dependency))\n return None\n\n # Remove noise\n for noise in [\"special_sep\", \"index_sep\"]:\n del version_dict[noise]\n\n return version_dict", "def test_pynast_suported_version(self):\r\n min_acceptable_version = (1, 2)\r\n max_acceptable_version = (1, 2, 2)\r\n try:\r\n from pynast import __version__ as pynast_lib_version\r\n version = pynast_lib_version.split('.')\r\n if version[-1][-4:] == '-dev':\r\n version[-1] = version[-1][:-4]\r\n version = tuple(map(int, version))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(pynast_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n\r\n min_version_str = '.'.join(map(str, min_acceptable_version))\r\n max_version_str = '.'.join(map(str, max_acceptable_version))\r\n error_msg = (\"Unsupported pynast version. Must be >= %s and <= %s, \"\r\n \"but running %s.\" % (min_version_str, max_version_str,\r\n version_string))\r\n self.assertTrue(pass_test, error_msg)", "def package_version_check(args, parser):\n if (args.build or args.check) and args.package_version:\n parser.error('--package-version works only with --create')", "def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))", "def check_version(ctx, builder, version_function, *,\n requires_version=None,\n requires_at_least_version=None,\n requires_at_most_version=None):\n if any(v is not None for v in (\n requires_version,\n requires_at_least_version,\n requires_at_most_version)):\n ctx.logger.check('checking %s version' % builder)\n\n version_str = version_function()\n\n # Convert the version into a tuple\n version = []\n for i in version_str.split('.'):\n try:\n version.append(int(i))\n except ValueError:\n # The subversion isn't a number, so just convert it to a\n # string.\n version.append(i)\n version = tuple(version)\n\n if requires_version is not None and requires_version != version:\n msg = 'version %s required; found %s' % (\n '.'.join(str(i) for i in requires_version), version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n if requires_at_least_version is not None and \\\n requires_at_least_version > version:\n msg = 'at least version %s required; found %s' % (\n '.'.join(str(i) for i in requires_at_least_version),\n version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n if requires_at_most_version is not None and \\\n requires_at_most_version < version:\n msg = 'at most version %s required; found %s' % (\n '.'.join(str(i) for i in requires_at_most_version),\n version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n ctx.logger.passed(version_str)", "def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)", "def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions", "def test_version(self) -> None:\n with 
open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)", "def check_from_version(version: str) -> str:\n version_int = [int(v) for v in version.split(\".\")]\n if version_int[0] not in PipetteModelMajorVersion:\n raise ValueError(f\"Major version {version_int[0]} is not supported.\")\n if version_int[1] not in PipetteModelMinorVersion:\n raise ValueError(f\"Minor version {version_int[1]} is not supported.\")\n return version", "def is_version_valid(version):\n return _compiled_version_regex.match(version) is not None", "def _checkUpdateNeeded(self):\n try:\n currentVersionLine = str(subprocess.run(['pacman', '-Q', '-i', self._name],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True).stdout)\n currentVersion = re.sub(r'.*Version\\s*: ([\\d|\\.]*)-.*', r'\\1', currentVersionLine).split('.')\n newVersion = self._version.split('.')\n for i in range(0, min(len(currentVersion), len(newVersion))):\n if currentVersion[i].isdigit():\n # TODO: test if new version is only digits too, two of them should be the same anyway\n if int(newVersion[i]) > int(currentVersion[i]):\n return True\n if int(newVersion[i]) < int(currentVersion[i]):\n return False\n return len(newVersion) > len(currentVersion)\n except subprocess.CalledProcessError:\n # Package not found: to be installed then\n return True", "def check_version_is_supported(name, version, min_version, help=''):\n if (pkg_resources.parse_version(version) <\n pkg_resources.parse_version(min_version)):\n # Version is too old.\n print('ERROR: Unsupported %s version: %s (minimum %s).%s' %\n (name, version, min_version, (' %s' % help) if help else ''),\n file=sys.stderr)\n exit(1)", "def check_version_2(dataset):\n\n if float(dataset.get('version')) >= 2.0 \\\n if dataset.get('version') else False:\n return True\n else:\n return False", "def check_version():\n reset_flag = False\n try:\n data = du.read_yml(du.DEFAULT)\n if (\n data[\"version\"].split(\".\")[0] != __version__.split(\".\")[0]\n ): # If Version if different from \"1.x.y\" remove data:\n reset_flag = True\n except (KeyError, FileNotFoundError, TypeError):\n reset_flag = True\n\n if reset_flag:\n print(\"Your configuration file version is older than 1.0.0\")\n print(\n \"Your .Experiment file will be removed, please run daf.init to generate an up-to-date file\"\n )\n if os.path.isfile(du.DEFAULT):\n os.remove(du.DEFAULT)\n sys.exit(0)", "def is_valid_version(self) -> bool:\n return self._is_valid_version()", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def check_version_part(self, node, ecosystem, package, version):\n version_node = node[\"version\"]\n # check the ecosystem, version, and name attributes that are required for a version\n self.check_pecosystem(version_node)\n self.check_pname(version_node)\n self.check_version(version_node)\n\n # compare with expected values\n e = version_node[\"pecosystem\"][0]\n p = version_node[\"pname\"][0]\n v = version_node[\"version\"][0]\n self.compare_ecosystems(e, ecosystem)\n self.compare_packages(p, package)\n self.compare_versions(v, version)", "def check_py_version(self, cur_version):\n\n # convert cur_version to string, in case of 
erroneous type being passed\n cur_version = str(cur_version)\n\n acceptable_python_versions_regex = r\"(^(2\\.[6-9])(\\.?\\d{1,2})?$)|(^(3\\.[3-9])(\\.?\\d{1,2})?$)\"\n pyversions_regex_compiled = re.compile(acceptable_python_versions_regex)\n pyversions_match = pyversions_regex_compiled.match(cur_version)\n\n # If match is found, return True. If no match, return False\n if pyversions_match:\n return True\n else:\n return False", "def test_versioning_unknown_version(workflow_runner):\n with pytest.raises(WDL.Error.SyntaxError):\n workflow_runner(\"test_versioning_unknown_version.wdl\")", "def check_openhpiver(reqver_text):\n\treturn check_pkgcfg_ver(reqver_text, 'openhpi')", "def check_cal_format_version(version: Optional[Version] = None, current_version: Version = _CAL_FORMAT_VERSION):\n # No version means, the old 1.0 format is used that does not provide a version string\n if not version:\n version = Version(\"1.0.0\")\n if isinstance(version, str):\n version = Version(version)\n\n if version == current_version:\n return\n if version > current_version:\n raise ValueError(\"The provided version, is larger than the currently supported version.\")\n if version < current_version:\n raise ValueError(\n \"The provided calibration format is no longer supported. \"\n \"Check `imucal.legacy` if conversion helper exist.\"\n )", "def checkVersion(self, clientName, edamVersionMajor, edamVersionMinor):\r\n pass", "def test_patch_hyperflex_software_version_policy(self):\n pass", "def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))", "def python_version_check():\n min_version_list = PYTHON_MIN_VERSION.split(\".\")\n # Truncate if the list is more the 4 items\n if len(min_version_list) > 4:\n min_version_list = min_version_list[:4]\n # Fill if the list is less then 4 items\n if len(min_version_list) == 1:\n min_version_list.append(\"0\")\n if len(min_version_list) == 2:\n min_version_list.append(\"0\")\n if len(min_version_list) == 3:\n min_version_list.append(\"f0\")\n # Calculate the minimum version and an integer, which, when displayed as\n # hex, is easily recognised as the version. E.g. 0x30502f0 is 3.5.2\n min_version_value = 0\n for index, item in enumerate(min_version_list[::-1]):\n min_version_value = min_version_value + int(item, 16) * 2**(index * 8)\n if debug: print(\"Python Version Minimum:{}, Decimal:{}, Hex:{}\"\n .format(PYTHON_MIN_VERSION, min_version_value,\n hex(min_version_value)))\n # test value and exit if below minimum revision\n if sys.hexversion < min_version_value:\n print(\"Python Version: {}. Required minimum version is: {}. Exiting...\"\n .format(sys.version.split(\" \")[0], PYTHON_MIN_VERSION))\n sys.exit()", "def _verify_patchelf() -> None:\n if not find_executable(\"patchelf\"):\n raise ValueError(\"Cannot find required utility `patchelf` in PATH\")\n try:\n version = check_output([\"patchelf\", \"--version\"]).decode(\"utf-8\")\n except CalledProcessError:\n raise ValueError(\"Could not call `patchelf` binary\")\n\n m = re.match(r\"patchelf\\s+(\\d+(.\\d+)?)\", version)\n if m and tuple(int(x) for x in m.group(1).split(\".\")) >= (0, 14):\n return\n raise ValueError(\n f\"patchelf {version} found. 
auditwheel repair requires \" \"patchelf >= 0.14.\"\n )", "def check_versioning(ctx, stmt):\n\n # Don't perform this check for modules that are not OpenConfig\n # or are OpenConfig infrastructure (e.g., extensions)\n if (OCLintFunctions.is_openconfig_validatable_module(stmt.arg) in\n [ModuleType.NONOC, ModuleType.OCINFRA]):\n return\n\n version = None\n for substmt in stmt.substmts:\n # pyang uses a keyword tuple when the element is from\n # an external extension rather than a built-in, check for\n # this before checking the argument. Assumption is made\n # that openconfig-version is unique across all extension\n # modules.\n if (isinstance(substmt.keyword, tuple) and\n substmt.keyword[1] == \"openconfig-version\"):\n version = substmt\n\n if version is None:\n err_add(ctx.errors, stmt.pos, \"OC_MODULE_MISSING_VERSION\",\n stmt.arg)\n return\n\n if not re.match(r\"^[0-9]+\\.[0-9]+\\.[0-9]+$\", version.arg):\n err_add(ctx.errors, stmt.pos, \"OC_INVALID_SEMVER\",\n version.arg)\n\n # Check that there\n match_revision = False\n for revision_stmt in stmt.search(\"revision\"):\n reference_stmt = revision_stmt.search_one(\"reference\")\n if reference_stmt is not None and reference_stmt.arg == version.arg:\n match_revision = True\n\n if match_revision is False:\n err_add(ctx.errors, stmt.pos, \"OC_MISSING_SEMVER_REVISION\",\n version.arg)", "def version_match(required, candidate):\n return _discover.version_match(required, candidate)", "def solr_version_check(core):\n expected_version = SCHEMA[core].version\n solr_uri = config.CFG.get(\"solr\", \"uri\")\n u = urllib2.urlopen(\"%s/%s/schema/version\" % (solr_uri, core))\n content = loads(u.read())\n seen_version = content[\"version\"]\n if not seen_version == expected_version:\n raise VersionMismatchException(core, expected_version, seen_version)\n logger.debug(\"%s: version %1.1f matches %1.1f\", core, expected_version,\n seen_version)", "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "def test_required_fields_schema_version(self):\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. 
\"\n \"Validation cannot be performed.\"\n ],\n )", "def is_stable_version(version):\n if not isinstance(version, tuple):\n version = version.split('.')\n last_part = version[-1]\n\n if not re.search('[a-zA-Z]', last_part):\n return True\n else:\n return False", "def _check_version () -> None:\n py_version_info: typing.Tuple = sys.version_info[:2]\n\n if py_version_info < MIN_PY_VERSION:\n error_msg = \"This version of pytextrank requires Python {} or later ({} detected)\\n\"\n raise RuntimeError(error_msg.format(_versify(MIN_PY_VERSION), _versify(py_version_info)))", "def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM, blacklist=VERSION_BLACKLIST):\n\t# Convert version strings to integer tuples\n\tversion = list(map(int, version.split('-', 1)[0].split('.')))\n\tminimum = list(map(int, minimum.split('-', 1)[0].split('.')))\n\tmaximum = list(map(int, maximum.split('-', 1)[0].split('.')))\n\n\tif minimum > version or version >= maximum:\n\t\traise exceptions.VersionMismatch(version, minimum, maximum)\n\t\n\tfor blacklisted in blacklist:\n\t\tblacklisted = list(map(int, blacklisted.split('-', 1)[0].split('.')))\n\t\tif version == blacklisted:\n\t\t\traise exceptions.VersionMismatch(version, minimum, maximum)", "def _check_python_version(self):\n python_exe = tools.which(\"python\")\n if not python_exe:\n msg = (\"Python must be available in PATH \"\n \"in order to build v8\")\n raise ConanInvalidConfiguration(msg)\n # In any case, check its actual version for compatibility\n from six import StringIO # Python 2 and 3 compatible\n version_buf = StringIO()\n cmd_v = \"{} --version\".format(python_exe)\n self.run(cmd_v, output=version_buf)\n p = re.compile(r'Python (\\d+\\.\\d+\\.\\d+)')\n verstr = p.match(version_buf.getvalue().strip()).group(1)\n if verstr.endswith('+'):\n verstr = verstr[:-1]\n version = tools.Version(verstr)\n # >= 2.7.5 & < 3\n py2_min = \"2.7.5\"\n py2_max = \"3.0.0\"\n py3_min = \"3.8.0\"\n if (version >= py2_min) and (version < py2_max):\n msg = (\"Found valid Python 2 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n elif version >= py3_min:\n msg = (\"Found valid Python 3 required for v8:\"\n \" version={}, path={}\".format(version_buf.getvalue().strip(), python_exe))\n self.output.success(msg)\n else:\n msg = (\"Found Python in path, but with invalid version {}\"\n \" (v8 requires >= {} and < \"\n \"{} or >= {})\".format(verstr, py2_min, py2_max, py3_min))\n raise ConanInvalidConfiguration(msg)", "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def checkIfVersionIsExact(testConfig):\n assert \"name\" in testConfig\n assert \"binary\" in testConfig\n assert \"version\" in testConfig\n \n #Set default version command as \"testConfig[\"name\"] --version\"\n #Otherwise, use testConfig[\"version_command\"]\n if \"version_command\" in testConfig:\n versionCommand = testConfig[\"version_command\"]\n else:\n versionCommand = testConfig[\"binary\"]+r\" --version\"\n \n #Run the version command, grab stdout and stderr\n p = subprocess.Popen(versionCommand.split(), stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n versionOut,versionError = p.communicate()\n 
versionOut = str(versionOut)+str(versionError)\n \n #Find all instances of something that could be the version number in the output\n installedVersion = re.findall(r\"([0-9.]*[0-9]+)\", versionOut)\n \n #Go through all the matches, if anything starts with our expected version,\n #Set test as pass\n testPass=False\n for version in installedVersion:\n if re.match(testConfig[\"version\"],str(version)) :\n testPass=True\n break\n \n \n assert testPass,\"\\nVersion output was :\"+versionOut+\\\n \"\\nExpected version: \"+testConfig[\"version\"]+\\\n \"\\n Test failed.\"", "def _check_version(version):\n # Update cache if needed.\n if _check_version._versions_cache is None:\n log.debug(\"Loading versions cache ...\")\n _check_version._versions_cache = __salt__[\"netbeans.list_versions\"]()\n\n # Convert latest.\n if version is None or version == \"latest\":\n return __salt__[\"netbeans.pick_latest_version\"](\n _check_version._versions_cache\n )\n\n # Check if version is available.\n if version not in _check_version._versions_cache:\n return None\n return version", "def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)", "def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def test_arg_version(run_nait) -> None: # type: ignore\n expected = nanaimo.version.__version__\n assert run_nait(['--version']).stdout.decode('utf-8').startswith(expected)", "def valid_version(self, new_version):\n if not re.match(r\"\\d+\\.\\d+\\.\\d+\", new_version):\n return False\n\n x1, y1, z1 = [int(i) for i in self.current_version().split(\".\")]\n x2, y2, z2 = [int(i) for i in new_version.split(\".\")]\n\n if x2 < x1:\n return False\n\n if x2 == x1 and y2 < y1:\n return False\n\n if x2 == x1 and y2 == y1 and z2 <= z1:\n return False\n\n return True", "def _check_required(self):\n if self.data['history_file'] is None:\n return\n required = self.data.get('required_changelog_text')\n if not required:\n return\n if isinstance(required, six.string_types):\n required = [required]\n history_last_release = self.data['history_last_release']\n for text in required:\n if text in history_last_release:\n # Found it, all is fine.\n return\n pretty_required = '\"{}\"'.format('\", \"'.join(required))\n if not utils.ask(\n \"WARNING: Changelog should contain at least one of \"\n \"these required strings: {}. Are you sure you \"\n \"want to release?\".format(pretty_required),\n default=False):\n sys.exit(1)", "def test_above_24_latest_version(self):\n self.data['version'] = ''\n self.data['appVersion'] = '28.0'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "def require_version(version):\n def check_require_version(f):\n version_elements = version.split('.')\n try:\n compare = tuple([int(v) for v in version_elements])\n except ValueError:\n raise ValueError('%s is not a correct version : should be X.Y[.Z].' % version)\n current = sys.version_info[:3]\n if current < compare:\n def new_f(self, *args, **kwargs):\n self.skipTest('Need at least %s version of python. Current version is %s.' 
% (version, '.'.join([str(element) for element in current])))\n new_f.__name__ = f.__name__\n return new_f\n else:\n return f\n return check_require_version", "def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]", "def test_undefined_semver(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = None\n\n self.assertEqual(v1.build, expected)", "def test_version(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.version == \"1.0\"\n\n bb = parse_input(\"name testname\\nversion 1.12\")\n assert bb.version == \"1.12\"", "def test_parse_invalid_version(self):\n version = VersionNumberScaleMeasurement.parse_version(\"This is not a version number\")\n self.assertEqual(Version(\"0\"), version)", "def check_schema_version(context, version):\n data = context.response.json()\n check_and_get_attribute(data, version)", "def _check_server_version(self, server_version: str) -> None:\n cur_version = parse_version(server_version)\n min_version = parse_version(MIN_SERVER_VERSION)\n if cur_version < min_version:\n raise InvalidServerVersion\n if cur_version != min_version:\n self._logger.warning(\n \"Connected to a Zwave JS Server with an untested version, \\\n you may run into compatibility issues!\"\n )", "def checkIfMinimumVersionIsMet(testConfig):\n assert \"name\" in testConfig\n assert \"binary\" in testConfig\n assert \"minimum_version\" in testConfig\n \n #Set default version command as \"testConfig[\"name\"] --version\"\n #Otherwise, use testConfig[\"version_command\"]\n if \"version_command\" in testConfig:\n versionCommand = testConfig[\"version_command\"]\n else:\n versionCommand = testConfig[\"binary\"]+r\" --version\"\n \n #Run the version command, grab stdout and stderr\n p = subprocess.Popen(versionCommand.split(), stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n versionOut,versionError = p.communicate()\n versionOut = str(versionOut)+str(versionError)\n \n #Find all instances of something that could be the version number in the output\n installedVersion = re.findall(r\"([0-9]+\\.)+[0-9]+\", versionOut)\n \n #Go through all the matches, if anything starts with our expected version,\n #Set test as pass\n testPass=False\n for version in installedVersion:\n if LooseVersion(str(version)) >= LooseVersion(testConfig[\"minimum_version\"]):\n testPass=True\n break\n \n assert testPass,\"\\nVersion output was :\"+versionOut+\\\n \"\\nExpected minimum version: \"+testConfig[\"minimum_version\"]+\\\n \"\\n Test failed.\"", "def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "def test_version_exists():\n assert ztm.__version__", "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def check_version(min_version: str,\n warning_only: bool = False,\n library: Optional[ModuleType] = None):\n # pylint: disable=import-outside-toplevel\n from .. 
import __version__\n if library is None:\n version = __version__\n name = 'GluonNLP'\n else:\n version = library.__version__\n name = library.__name__\n from packaging.version import parse\n bad_version = parse(version.replace('.dev', '')) < parse(min_version)\n if bad_version:\n msg = 'Installed {} version {} does not satisfy the ' \\\n 'minimum required version {}'.format(name, version, min_version)\n if warning_only:\n warnings.warn(msg)\n else:\n raise AssertionError(msg)", "def check_pythonver(reqver_text):\n\treqver = map(int, reqver_text.split('.'))\n\tpythonver = sys.version_info[:3]\n\treturn check_ver(pythonver, reqver)", "def test_versioned_release_schema():\n path = 'versioned-release-validation-schema.json'\n if os.path.exists(path):\n warn_and_assert([path], '{0} is present, run: rm {0}',\n 'Versioned release schema files are present. See warnings below.')", "def check_version():\n err = \"PaddlePaddle version 1.6 or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. \\n\" \\\n \"Please make sure the version is good with your code.\" \\\n\n try:\n fluid.require_version('1.6.0')\n except Exception as e:\n logger.error(err)\n sys.exit(1)", "def check_python_version(match, current=None):\n if current is None:\n current = list(sys.version_info[:3])\n if not isinstance(match, list):\n match = [match]\n for m in match:\n minimal = False\n if isinstance(m, float):\n m = str(m)\n if m.endswith(\"+\"):\n minimal = True\n m = m[:-1]\n # assert m[0].isdigit()\n # assert m[-1].isdigit()\n m = [int(x) for x in m.split(\".\")]\n current_len = current[: len(m)]\n # print(m, current, current_len)\n if minimal:\n if current_len >= m:\n return True\n else:\n if current_len == m:\n return True\n return False", "def test_local_version(self):\n self.assertIsInstance(__version__, float)", "def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6", "def compatible_version(self):\n note_version = self.py_version\n py_version = sys.version_info\n if note_version[0] != py_version[0]:\n return False\n if len(note_version) > 1 and note_version[1] > py_version[1]:\n return False\n return True", "def test_versionInfo(self):\n self.assertEqual(\n nevow.__version_info__,\n (nevow.version.major, nevow.version.minor, nevow.version.micro))", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. 
(3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def test_invalid_version_fields(self):\n self.assertRaises(ValueError, versions.Version, version='1234', name='foo')", "def model_is_valid(self, model: OscalBaseModel) -> bool:\n oscal_version = model.metadata.oscal_version.__root__\n p = re.compile(OSCAL_VERSION_REGEX)\n matched = p.match(oscal_version)\n return matched is not None", "def check_package_version(self, node, ecosystem, package, version):\n assert \"package\" in node, \"'package' node is expected\"\n assert \"version\" in node, \"'version' node is expected\"\n self.check_package_part(node, ecosystem, package)\n self.check_version_part(node, ecosystem, package, version)\n # TODO: add more thorough checks", "def test_invalid_version_ints(self):\n self.assertRaises(ValueError, versions.Version, version='1a.2', name='foo')", "def checkPatchValidity(val):\n\n tag_list = val.split('-')\n if len(tag_list) < 5:\n return False\n\n if tag_list[0] not in os.environ.get('environment'):\n return False\n\n if tag_list[1] not in os.environ.get('platform'):\n return False\n\n if tag_list[2] not in os.environ.get('role'):\n return False \n\n if tag_list[3] not in os.environ.get('urgency'):\n return False \n\n if tag_list[4] not in os.environ.get('order'):\n return False\n\n return True", "def _check_package_version(package, min_version):\n # Re-raise with a more informative message when the package is not\n # installed\n try:\n module = import_module(package)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install {0} with a version >= \"\n \"{1} in order to install scikit-lr.\"\n .format(package, min_version))\n\n if LooseVersion(module.__version__) < min_version:\n raise ValueError(\"The current version of {0} is {1} installed in {2}.\"\n .format(package, module.__version__, module.__path__))", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_Validator_is_valid_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n result = validator.is_valid(\"foo\", {\"type\": \"number\"})\n\n self.assertFalse(result)\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.is_valid is deprecated \",\n ),\n )", "def VerifyVersion(root):\r\n global VERSION\r\n global REQUIRED_VERSION\r\n try:\r\n if root.get(\"version\") < REQUIRED_VERSION:\r\n raise XmlVersionError(\"XML Version must be %s or above, we found %s!\" \\\r\n % (REQUIRED_VERSION, root.get(\"version\")))\r\n except KeyError:\r\n raise MalformedXmlError()", "def test_VersionOptionalFields():\n # onlyRequiredVersion is a version message that only contains the\n # required versions and all other values set to their default values.\n onlyRequiredVersion = minimumMsgVersion()\n\n onlyRequiredVersionEncoded = 
baseVersionEncoded()[:-55]\n\n # addrMeVersion is a version message that contains all fields through\n # the AddrMe field.\n addrMe = netaddress.NetAddress(\n ip=\"127.0.0.1\", port=8333, services=wire.SFNodeNetwork, stamp=0,\n )\n addrMeVersion = minimumMsgVersion()\n addrMeVersion.addrMe = addrMe\n\n addrMeVersionEncoded = baseVersionEncoded()[:-29]\n\n # nonceVersion is a version message that contains all fields through\n # the Nonce field.\n nonceVersion = minimumMsgVersion()\n nonceVersion.addrMe = addrMe\n nonceVersion.nonce = 123123 # 0x1e0f3\n nonceVersionEncoded = baseVersionEncoded()[:-21]\n\n # uaVersion is a version message that contains all fields through\n # the UserAgent field.\n uaVersion = minimumMsgVersion()\n uaVersion.addrMe = addrMe\n uaVersion.nonce = 123123\n uaVersion.userAgent = \"/dcrdtest:0.0.1/\"\n uaVersionEncoded = baseVersionEncoded()[:-4]\n\n # lastBlockVersion is a version message that contains all fields\n # through the LastBlock field.\n lastBlockVersion = minimumMsgVersion()\n lastBlockVersion.addrMe = addrMe\n lastBlockVersion.nonce = 123123\n lastBlockVersion.userAgent = \"/dcrdtest:0.0.1/\"\n lastBlockVersion.lastBlock = 234234 # 0x392fa\n lastBlockVersionEncoded = baseVersionEncoded()\n\n tests = [\n (onlyRequiredVersion, onlyRequiredVersionEncoded),\n (addrMeVersion, addrMeVersionEncoded),\n (nonceVersion, nonceVersionEncoded),\n (uaVersion, uaVersionEncoded),\n (lastBlockVersion, lastBlockVersionEncoded),\n ]\n\n for expMsg, buf in tests:\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(buf, wire.ProtocolVersion)\n assert sameMsgVersion(msg, expMsg)", "def test_version():\n assert pywren.__version__ is not None", "def is_version_sufficient(self, min_version):\n \n try:\n current_version = self.get_software_version()\n return LooseVersion(current_version) >= LooseVersion(min_version)\n except:\n raise", "def test_version():\n assert(hasattr(tekel, '__version__'))", "def _verify_firmware_version(self):\n firmware_version = self.device.firmware_version\n self.assertTrue(firmware_version)\n self.assertIsInstance(firmware_version, str)", "def py_versiontest(c):\n pass", "def require_python(version_spec, warn=False, extra_msg=None, prereleases=None):\n valid_specifiers = ('===', '==', '<=', '>=', '!=', '~=', '<', '>')\n for spec in valid_specifiers:\n if version_spec.startswith(spec):\n break\n else:\n if version_spec[0].isdigit():\n version_spec = f'~={version_spec}'\n else:\n raise InvalidSpecifier(\n f\"Invalid version specifier: '{version_spec}'\"\n )\n\n version_constraint = SpecifierSet(version_spec, prereleases=prereleases)\n python_version = sys.version.split()[0]\n if python_version not in version_constraint:\n msg = (\n \"The Python version installed in the environment \"\n f\"(v{python_version}) does not satisfy the constraint \"\n f\"'{version_constraint}'\"\n )\n if extra_msg is not None:\n msg = f\"{msg}.\\n{extra_msg}\"\n\n if warn:\n warnings.warn(msg, category=RuntimeWarning)\n else:\n raise DavosError(msg)", "def test_version(self):\n pass", "def test_get_version(self):\n pass", "def is_valid_version(version):\n return bool(\n is_valid_instance_id(version) or\n is_valid_tag(version) or\n REF_RE.match(version)\n )", "def test_patch(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[2] = int(new_version_parts[2]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def check_python_version():\n version = sys.version.split()[0]\n if 
version < \"2.6\" or version >= \"3\":\n raise CuckooStartupError(\"You are running an incompatible version of Python, please use 2.6 or 2.7\")", "def test_get_short_version(self):\n pass" ]
[ "0.6902856", "0.6780541", "0.67345285", "0.6424998", "0.6407822", "0.64057225", "0.6383413", "0.6376578", "0.6337082", "0.6273863", "0.6264896", "0.62600714", "0.6233149", "0.62270665", "0.6219592", "0.61668104", "0.61236084", "0.6105264", "0.60952014", "0.60824794", "0.6016871", "0.6010673", "0.60073346", "0.59973854", "0.59782064", "0.593414", "0.59320503", "0.5928235", "0.59116226", "0.59097207", "0.5903058", "0.5896106", "0.58958584", "0.5892842", "0.588549", "0.5882546", "0.58753383", "0.5873691", "0.5867869", "0.5862441", "0.5861926", "0.5859026", "0.58417505", "0.58380896", "0.5835368", "0.583349", "0.5828468", "0.58273476", "0.58237714", "0.58151007", "0.5803481", "0.5785615", "0.5785017", "0.57826674", "0.5779917", "0.57797605", "0.5771377", "0.57655334", "0.575898", "0.5757928", "0.5754432", "0.5753647", "0.5732347", "0.5726836", "0.5724179", "0.5722315", "0.57212114", "0.5711632", "0.57081604", "0.57079303", "0.5706682", "0.57035804", "0.57007813", "0.5692864", "0.56843376", "0.56804377", "0.5679623", "0.5675915", "0.5675263", "0.56658393", "0.56657445", "0.5661447", "0.56581104", "0.56541777", "0.5650541", "0.5647996", "0.56440455", "0.56199306", "0.56167954", "0.5616622", "0.56149817", "0.56137586", "0.5610844", "0.5607602", "0.5606704", "0.56056166", "0.56049657", "0.56045663", "0.5603459", "0.56000835" ]
0.7964266
0
Validate values against the twine schema.
def _validate_values(self, kind, source, cls=None, **kwargs):
    data = self._load_json(kind, source, **kwargs)
    self._validate_against_schema(kind, data)

    if cls:
        return cls(**data)

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate():", "def validate(self, name, values):\r\n \r\n pass", "def is_schema_valid(self, schema):\n for k, v in schema.items():\n if v[0] == \"var_len\":\n assert len(v) == 2\n assert v[1] in TF_VALUE\n\n if v[0] == \"fixed_len\":\n assert len(v) == 3\n assert v[1] in TF_VALUE\n assert isinstance(v[2], list)", "def validate(cls, data, errors):", "def test_validation_function(self):\n\n for data in ('tbldata', 'dihedraldata', 'rdcdata', 'danidata'):\n v = self.web.query_nodes(key=data)\n\n if not v.empty():\n self.assertTrue(validate_tbl(v.value, pcs=False))", "def test_match_valid_data_val(self):\n f = lws.valid_data_val\n schema_val = ('some text', str, 'text')\n assert f(schema_val, 'text') is True\n schema_val = ('some number', float, 7.00)\n assert f(schema_val, 7) is False\n assert f(schema_val, 7.00) is True\n schema_val = ('True', bool, True)\n assert f(schema_val, True) is True\n assert f(schema_val, False) is False\n schema_val = ('even', int, lambda x: x % 2 == 0)\n assert f(schema_val, 2) is True\n assert f(schema_val, 257) is False", "def _validate_data(self, vms, fw_rules):\n self._validate_vms(vms)\n self._validate_fw_rules(fw_rules)\n self._validated = True", "def validate(self, instance, value):", "def validate(self, instance, value):", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def check_validity(self):", "def check_validity(self):\n try:\n if self.type == ConstraintTypes.EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.NOT_EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.WITHIN:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], type(self.value[1])), \"Invalid types.\"\n )\n enforce(\n isinstance(self.value[1], type(self.value[0])), \"Invalid types.\"\n )\n elif self.type == ConstraintTypes.IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.NOT_IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in 
self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.DISTANCE:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], Location),\n \"Invalid type, expected Location.\",\n )\n enforce(\n isinstance(self.value[1], float), \"Invalid type, expected Location.\"\n )\n else: # pragma: nocover\n raise ValueError(\"Type not recognized.\")\n except ValueError:\n return False # pragma: nocover\n\n return True", "def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def validate(self):", "def validate(self):", "def _validate(self, instance, value):", "def validate(self, value, clean=True):\n pass", "def validate(self, value, clean=True):\n pass", "def validate(self):\n self.valid = True\n\n if self._value is None and self._strict:\n self.valid = False\n raise self.MissingFieldData\n\n elif self._value is not None:\n self._type_specific_validation()", "def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # raise ValueError('wrong @type value found: {}'.format(record))", "def validate(self, value):\n return True", "def validate(self, value):\n\n return True", "def test_validate_pincode(self):\n schema = vol.Schema(valid_pin)\n\n for value in ('', '123-456-78', 'a23-45-678', '12345678', 1234):\n with self.assertRaises(vol.MultipleInvalid):\n schema(value)\n\n for value in ('123-45-678', '234-56-789'):\n self.assertTrue(schema(value))", "def _validate(self, value):\n return True", "def validate_against_schema(request, schema, data):\n try:\n data_pure = schema.deserialize(data)\n data_clean = post_serialize(data_pure)\n # Attach data_clean to request: see usage in views.\n request.data_clean = data_clean\n except Invalid as e:\n # here we transform the errors we got from colander into cornice\n # errors\n for field, error in e.asdict().items():\n request.errors.add('body', field, error)", "def __validate():\n # TODO: implement", "def _validate_input(self):\n self.data.validate()\n self.meta_hybridizer.validate_input()", "def validate_data(self, row, col, value):\n\n return True", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def validate(self, value):\n if value is None:\n msg = message_factory.get_message('vapi.data.validate.mismatch',\n self.type,\n 'None')\n return [msg]\n return None", "def test_multiple_if_pass(self):\n schema = 
yaml.load(self.yaml_ifif, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n document = {'age': '21', 'lifestage': 'adult'} # True\n self.assertTrue(val.validate(document))\n\n document = {'age': '2', 'lifestage': 'juvenile'} # True\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n self.assertTrue(val.validate(document))", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def validate(self):\n\n raise NotImplementedError('Ando validation is not implemented yet.')", "def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1", "def validate(self, doc):\n return self.schema.validate(doc)", "def validate(self, value):\n if value is None:\n msg = message_factory.get_message('vapi.data.validate.mismatch',\n self.type,\n 'None')\n return [msg]\n\n if value.type != Type.ERROR:\n msg = message_factory.get_message('vapi.data.validate.mismatch',\n self.type,\n value.type)\n return [msg]\n\n return None", "def validate(self) -> t.NoReturn:\n try:\n if \"label_columns\" in self.config:\n assert isinstance(self.config.label_columns, list), (\n \"label_columns\",\n list,\n type(self.config.label_columns),\n )\n for elem in self.config.label_columns:\n assert isinstance(elem, int), (\"label_columns element\", int, type(elem))\n assert elem >= 0, \"label_columns element negative\"\n if \"options\" in self.config:\n assert isinstance(self.config.options, (dict, AttributeDict)), (\n \"Options\",\n (dict, AttributeDict),\n type(self.config.options),\n )\n except AssertionError as e:\n raise MisconfiguredError(\"timevalue: {} expected {}, got: {}\".format(*e.args[0]))", "def validate(self):\n self._validate_time_index()\n self._validate_num_profiles()\n self._validate_merge_col_exists()\n self._validate_unique_merge_col()\n self._validate_merge_col_overlaps()", "def test_validate_business_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'lastPreBobFilingTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'legalName': 'legal name - CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def test_multiple_if_error(self):\n schema = yaml.load(self.yaml_ifif, Loader=yaml.FullLoader)\n document = {'age': '21', 'lifestage': 'juvenile'}\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n val.validate(document)\n self.assertEqual(val.errors,\n {'lifestage': [{'if_0': ['unallowed value juvenile',\n 'max length is 6'],\n 'if_2': ['max length is 5']}]})", "def _validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self):\n pass", "def validate(self) -> None:\n\n if self.field not in self.model.table_fields:\n raise 
ValueError(f\"Value field {self.field} not present in {self.model.table}\")\n\n if self.pivot:\n if self.pivot not in self.model.table_fields:\n raise ValueError(\n f\"Pivot field {self.pivot} not present in {self.model.table}\"\n )\n\n if self.connector:\n if self.connector not in self.model.table_fields:\n raise ValueError(\n f\"Connector field {self.connector} not present in {self.model.table}\"\n )\n\n for field in self.selectors:\n if field not in self.model.table_fields:\n raise ValueError(f\"Selector field {field} not present in {self.model.table}\")", "def validate_fields(self, window, values):\n \n #Check if record id is new\n is_valid = True\n problem_field_name = \"\"\n experiment_names = GUI.api.get_experiment_names()\n if values['record_id'] in experiment_names:\n is_valid = False\n problem_field_name = \"Record ID\"\n return is_valid, problem_field_name \n \n metadata = GUI.api.get_metadata()\n enbaled_fields = filter(lambda elem: (elem['form_name']=='material_information' or elem['form_name']=='printer_information') \n and not (isinstance(window[elem['field_name']], sg.Text) or window[elem['field_name']].Disabled), metadata)#only validate enbaled fields\n for field in enbaled_fields:\n validation = field['text_validation_type_or_show_slider_number']\n value = values[field['field_name']]\n if (validation == \"number\" and value.isdigit()):\n #check if correct ranges\n if field['text_validation_max'] != \"\":\n if value > field['text_validation_max']:\n is_valid = False \n problem_field_name = field['field_label']\n return is_valid, problem_field_name \n if field['text_validation_min'] != \"\":\n if value < field['text_validation_min']:\n is_valid = False \n problem_field_name = field['field_label']\n return is_valid, problem_field_name \n elif (validation == \"number\" and not value.isdigit()):\n is_valid = False\n problem_field_name = field['field_label']\n return is_valid, problem_field_name\n return is_valid, problem_field_name", "def test_required_term(self):\n schema = yaml.load(self.yaml_multiple_term, Loader=yaml.FullLoader)\n\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n val.validate(document)\n self.assertEqual(val.errors, {'eventDate': ['required field']})", "def isValidForSchema(schema):\n\n return True", "def test_validation_class(self):\n\n for data in ('tbldata', 'dihedraldata', 'rdcdata', 'danidata', 'tensordata', 'pcsdata'):\n v = self.web.query_nodes(key=data)\n\n if not v.empty():\n self.assertTrue(v.validate())", "def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()", "def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()", "def validate(self, schema=None, callback=None):\n return hxl.schema(schema, callback).validate(self)", "def validate(self):\n self._check_type()", "def test_is_valid_invalid_resume(self):\n self.assertFalse(resumeschema.is_valid(self.invalid_resume))", "def test__validate_topic__2():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_topic(input_value)", "def validate(self,value):\r\n 
return type(value) is self.datatype", "def test_conversion_schema():\n legal_filing = {'conversion': BEN_CONVERSION}\n is_valid, errors = validate(legal_filing, 'conversion')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid", "def validate(self):\n ...", "def test_validate_ok(self, args, value):\n sch = scheme.Scheme(*args)\n sch.validate(value)", "def validate(self, value, obj=None):\n return True", "def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid", "def check_data(self):\n\n for i in range(len(self.full_ed_lines)):\n if self.full_ed_lines[i].text() != \"\":\n if self.full_ed_lines[i].hasAcceptableInput():\n continue\n else:\n if i == 1:\n self.msg2Statusbar.emit('Неправильный формат версии! Исправьте и повторите действие!')\n elif i == 5:\n self.msg2Statusbar.emit('Неправильная почта! Исправьте и повторите действие!')\n return False\n else:\n self.msg2Statusbar.emit('Не все поля заполнены! Исправьте и повторите действие!')\n return False\n return True", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def test_not_blank_validator_valid_value_should_return_true(self):\n for item in self.stdtype_fixtures:\n self.assertTrue(NotBlankValidator(TypeHint(item.get('type')), item.get('valid')))", "def _validate_input(self):\n\n try:\n expected_type(str, self.venue_id, \"venue_id\")\n expected_type(datetime.datetime, self.timestamp_utc, \"timestamp_utc\")\n\n expected_type(VenueStreamType, self.measurement_type, \"measurement_type\")\n\n expected_type(int, self.number_of_people, \"number_of_people\")\n\n if self.measurement_type is VenueStreamType.ABSOLUTE:\n if self.operator:\n raise ValueError(\"The stream type for the venue doesn't allow passing an Event operator\")\n\n elif self.measurement_type is VenueStreamType.EVENT:\n expected_type(EventStreamOperator, self.operator, \"operator\")\n else:\n raise ValueError(\"Unsupported member of the VenueStreamType enum\")\n\n if self.metadata:\n expected_type(dict, self.metadata, \"metadata\")\n\n except Exception as ex:\n raise ValueError(\"Validation of input failed. Reason: %s\" % str(ex))", "def validate_data(self, deployment='ops'):\n self.validator.set_example(self.example)\n\n # Don't just use the built in validate_data method as this needs to be future proofed against C100 firmware\n # upgrades. 
This upgrade will result in a new mode SELAP (R...CNTL2MODE == 64).\n self.validator.validate_capture_file_counts()\n self.validator.validate_capture_file_waveforms()\n\n # Many of these examples will have some amount of rounding error.\n self.validator.validate_waveform_times(min_end=10.0, max_start=-1534.0, step_size=0.2)\n self.validator.validate_cavity_modes(mode=(4, 64), deployment=deployment)\n self.validator.validate_zones()", "def validate(self):\n try:\n self.values.clear()\n self.values.append(int(self.e1.get()))\n except ValueError:\n messagebox.showwarning(\n \"Bad input\",\n \"Illegal values, please try again.\")\n return False\n\n return True", "def test__validate_owner__1():\n for field_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_owner(field_value)", "def validate_data(values):\n try:\n [int(value) for value in values]\n if len(values) != 6:\n raise ValueError(\n f'Exactly 6 values are required - you provided {len(values)}'\n )\n except ValueError as e:\n print(f'Invalid data entered: {e}, please try again!\\n')\n return False\n\n return True", "def check_value(self, value):", "def is_valid(self, value):\r\n pass", "def validate(self, value: Any, low: int, high: int) -> bool:\n pass", "def _further_validate_and_setup(self) -> None:\n\n # Make sure parameters make sense/are valid\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The lists of of learners and parameter '\n 'grids must be the same size.')\n if (self.validated['hashed_features'] is not None\n and self.validated['hashed_features'] == 0):\n self.validated['hashed_features'] = self._n_features_feature_hashing\n if self.validated['lognormal'] and self.validated['power_transform']:\n raise SchemaError(autos=None,\n errors='Both \"lognormal\" and \"power_transform\" '\n 'were set simultaneously.')\n if len(self.validated['learners']) != len(self.validated['param_grids']):\n raise SchemaError(autos=None,\n errors='The \"learners\" and \"param_grids\" '\n 'parameters were both set and the '\n 'lengths of the lists are unequal.')", "def test_admin_freeze_schema():\n legal_filing = {'adminFreeze': ADMIN_FREEZE}\n\n is_valid, errors = validate(legal_filing, 'admin_freeze')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid", "def validate(cls, value):\n return cls.properties.validate(value)", "def validate(entry: _LexiconEntry) -> None:\n _entry_has_required_fields(entry)\n _entry_field_values_are_not_empty(entry)\n _entry_field_values_does_not_contain_infix_whitespace(entry)\n _entry_tag_is_valid(entry)\n _entry_compound_annotation_is_valid(entry)\n _entry_morphophonemics_annotation_is_valid(entry)\n _entry_features_annotation_is_valid(entry)\n _entry_has_required_features(entry)\n _entry_required_features_are_valid(entry)\n _entry_optional_features_are_valid(entry)\n _entry_features_are_not_redundant(entry)", "def testValid(self):\n validate = timing_util.ValidateMeasurementsFlag\n self.assertIs(validate([]), True)\n self.assertIs(validate(['none']), True)\n self.assertIs(validate(['end_to_end_runtime']), True)\n self.assertIs(validate(['runtimes']), True)\n self.assertIs(validate(['timestamps']), True)\n self.assertIs(validate(['end_to_end_runtime', 'runtimes']), True)\n self.assertIs(validate(['end_to_end_runtime', 'timestamps']), True)\n self.assertIs(validate(['runtimes', 'timestamps']), True)\n self.assertIs(\n validate(['end_to_end_runtime', 'runtimes', 'timestamps']), 
True)", "def validate(self, list_value):\n errors = DataDefinition.validate(self, list_value)\n if errors:\n return errors\n\n for index, value in enumerate(list_value):\n errors = self.element_type.validate(value)\n if errors:\n msg = message_factory.get_message(\n 'vapi.data.list.invalid.entry',\n str(value), index)\n return [msg] + errors\n\n return None", "def validate(self):\r\n for name in self._columns.keys():\r\n func_name = 'validate_{}'.format(name)\r\n val = getattr(self, name)\r\n if hasattr(self, func_name):\r\n val = getattr(self, func_name)(val)\r\n else:\r\n val = self.validate_field(name, val)\r\n setattr(self, name, val)", "def test_oneOf_and_anyOf_are_weak_matches(self):\r\n\r\n validator = Draft4Validator(\r\n {\r\n \"minProperties\" : 2,\r\n \"anyOf\" : [{\"type\" : \"string\"}, {\"type\" : \"number\"}],\r\n \"oneOf\" : [{\"type\" : \"string\"}, {\"type\" : \"number\"}],\r\n }\r\n )\r\n best = self.best_match(validator.iter_errors({}))\r\n self.assertEqual(best.validator, \"minProperties\")", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def validate(self, converted_value, context):\n pass", "def test_firms_conversion_schema():\n legal_filing = {'conversion': FIRMS_CONVERSION}\n\n is_valid, errors = validate(legal_filing, 'conversion')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid", "def test_Validator_is_valid_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n result = validator.is_valid(\"foo\", {\"type\": \"number\"})\n\n self.assertFalse(result)\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.is_valid is deprecated \",\n ),\n )", "def validate(self, value):\n # Use the parent's handling of required fields, etc.\n super().validate(value)\n for email in value:\n validate_email(email)", "def verify_post_data ( ):\n # check every field is present\n try:\n request.json[ 'source_lang' ]\n request.json[ 'target_lang' ]\n request.json[ 'text' ]\n\n TranslatorApp.verify_rpc_value ( request.json )\n\n except KeyError: # All the values are not present\n # 400 Bad Request\n abort ( 400, \"All mandatory fields are not provided\" )\n except ValueError as err:\n # 422 Unprocessable Entity\n abort ( 422, \"Unprocessable value: {0}\".format ( err.args ) )\n except BadRequest:\n # 400 Bad Request\n abort ( 400, \"Provided values are having malformed syntax\" )", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def validate_values(self):\n if self.avp_def.has_defined_values():\n defined_values = dict(self.avp_def.attr_defined_values)\n if self.avp_value not in defined_values.values():\n raise ValueError(\n f\"{self.avp_def.attr_name} - value {self.avp_value} \"\n \"is not allowed\")\n\n return True", "def test__validate_status__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_status(input_value)", "def _check_validity(self):\n pass", "def 
validate(self, data):\n l = len(data[\"start_times\"])\n for i in range(l):\n if data[\"start_times\"][i]>=data['end_times'][i]:\n raise serializers.ValidationError(\"Start times should come before end times\") \n return data", "def check_consistency(self) -> 'Schema':\n errors = []\n fields = self.__fields__\n for k, v in fields.items():\n _, err = v.validate(getattr(self, k), fields, loc=k)\n if err:\n errors.append(err)\n if errors:\n raise ValidationError(errors, self.__class__)\n return self", "def test_minmax(self):\n val = DwcaValidator(yaml.load(self.yaml_value, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'percentage': 9.}\n self.assertFalse(val.validate(document))\n document = {'percentage': 2.1}\n self.assertFalse(val.validate(document))\n\n val = DwcaValidator(yaml.load(self.yaml_value, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'individualCount': 9}\n self.assertFalse(val.validate(document))\n document = {'individualCount': 2}\n self.assertFalse(val.validate(document))", "def test_special_case(self):\n cases = [\n ('3467875434578764345789654', False),\n ('AAAAAAAAAAA', False),\n ('', False),\n ]\n for titulo_eleitoral, is_valid in cases:\n self.assertEqual(self.titulo_eleitoral.validate(titulo_eleitoral), is_valid)", "def validate(self, test_data):\n if not isinstance(test_data, np.number):\n raise ValidationError('Invalid type/value.', 'numpy.number',\n type(test_data))\n if self.max_value is not None and test_data > self.max_value:\n raise ValidationError('Maximum value exceeded.',\n self.max_value, test_data)\n if self.min_value is not None and test_data < self.min_value:\n raise ValidationError('Minimum value undercut.',\n self.min_value, test_data)\n if test_data.dtype != self.dtype:\n raise ValidationError('Invalid dtype.', self.dtype,\n test_data.dtype)", "def validate_subset_of_schema(self, schema):\n super(BooleanAttributeSchema, self).validate_subset_of_schema(schema)\n\n if not self.values.issubset(schema.values):\n raise AttributeSchemaError(\n \"Values %s are not a subset of %s\"\n % (self.values, schema.values)\n )" ]
[ "0.6888409", "0.6783357", "0.6623987", "0.6546313", "0.6526652", "0.6525666", "0.64644414", "0.6449077", "0.6449077", "0.64253944", "0.6407268", "0.63882005", "0.6381051", "0.63460135", "0.63460135", "0.6308076", "0.6282616", "0.6282616", "0.6281018", "0.62414503", "0.6232009", "0.62216306", "0.62011117", "0.61829644", "0.6167385", "0.61612105", "0.61578304", "0.6143167", "0.6140295", "0.6139734", "0.6124934", "0.6124913", "0.6119326", "0.61015457", "0.6089719", "0.6080682", "0.60776293", "0.6052787", "0.60509247", "0.60407007", "0.6039464", "0.60380715", "0.60380715", "0.60380715", "0.60380715", "0.60380715", "0.60380715", "0.60380715", "0.60380715", "0.6029609", "0.6018973", "0.6004776", "0.60047483", "0.59884036", "0.5941646", "0.5941646", "0.59353673", "0.59353447", "0.59286267", "0.59270537", "0.59208256", "0.591786", "0.5902043", "0.58912665", "0.5888456", "0.5885104", "0.5882367", "0.58812314", "0.58796537", "0.5877526", "0.58771634", "0.58743876", "0.58735776", "0.58677626", "0.5864473", "0.58637786", "0.58616245", "0.58590823", "0.5856415", "0.5853955", "0.58535445", "0.5852994", "0.5849558", "0.58392787", "0.58340293", "0.58264273", "0.5824494", "0.58237123", "0.582223", "0.5814511", "0.5813703", "0.5813143", "0.5809729", "0.5809421", "0.5809065", "0.58073723", "0.5802463", "0.5793866", "0.57930636", "0.579266", "0.57925934" ]
0.0
-1
Validate manifest against the twine schema.
def _validate_manifest(self, kind, source, cls=None, **kwargs):
    data = self._load_json(kind, source, **kwargs)

    # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive
    inbound = True

    if hasattr(data, "to_primitive"):
        inbound = False
        data = data.to_primitive()

    self._validate_against_schema(kind, data)
    self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)

    if cls and inbound:
        return cls(**data)

    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in 
manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def validate(self):\n import os\n\n if self.kind == KDM.INTEROP:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'interop.xsd'), 'r') as f:\n schema = f.read()\n elif self.kind == KDM.SMPTE:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'smpte.xsd'), 'r') as f:\n schema = f.read()\n\n base_dir = os.getcwd()\n os.chdir(os.path.join(os.path.dirname(__file__), 'xsd'))\n try:\n schema = ET.XMLSchema(ET.XML(schema))\n xmlparser = ET.XMLParser(schema=schema)\n ET.fromstring(self.raw, xmlparser)\n finally:\n os.chdir(base_dir)", "def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def _validate(self):\n if not 
self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def _validate_against_schema(self, strand, data):\n schema = self._get_schema(strand)\n\n try:\n jsonschema_validate(instance=data, schema=schema)\n logger.debug(\"Validated %s against schema\", strand)\n\n except ValidationError as e:\n raise exceptions.invalid_contents_map[strand](str(e))", "def supports_manifest(manifest):\n pass", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def schema_check(self):\n\n try:\n self.schema.assertValid(self.get_content())\n except lxml.etree.DocumentInvalid:\n logger.error(\"PDU failed schema check\")\n for line in self.pretty_print_content().splitlines():\n logger.warning(line)\n raise", "def validate_full_schema(self):\n #self.check_duplicate_labels()\n for record in self.extension_schema['schema']['@graph']:\n #self.check_whether_atid_and_label_match(record)\n if record['@type'] == \"rdfs:Class\":\n self.validate_class_schema(record)\n #self.validate_class_label(record[\"@id\"])\n self.validate_validation_field(record)\n elif record['@type'] == \"rdf:Property\":\n self.validate_property_schema(record)\n #self.validate_property_label(record[\"@id\"])\n #self.validate_domainIncludes_field(record[\"http://schema.org/domainIncludes\"])\n #self.validate_rangeIncludes_field(record[\"http://schema.org/rangeIncludes\"])\n #else:\n # 
raise ValueError('wrong @type value found: {}'.format(record))", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def validate(self):\n print(\"Validating \")\n sha256_test = 
_get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")", "def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def validate_configuration_manifest(self, source, **kwargs):\n return self._validate_manifest(\"configuration_manifest\", source, **kwargs)", "def schema_valid(arch, **kwargs):\n validator = relaxng(arch.tag)\n if validator and not validator.validate(arch):\n result = True\n for error in validator.error_log:\n _logger.error(tools.ustr(error))\n result = False\n return result\n return True", "def validate(self, schema=os.path.join(os.path.dirname(__file__), 'am.xsd')):\n return validate_xml(schema, self.path, from_path=True)", "def test_check_presence_only(self):\n schema = yaml.load(self.yaml_presence_check, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, allow_unknown=True,\n error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': ''}\n self.assertTrue(val.validate(document))\n document = {'eventDate': ''}\n val.validate(document)\n self.assertEqual(val.errors, {})", "def _validate_yaml(self):\n\n # verify the format is correct\n if self.validater == 'yamale':\n\n import yamale\n\n print('Validating yaml file with yamale.')\n cwd = Path(os.path.dirname(__file__))\n schema_path = str(cwd.parent / 'schema') + '/generic_schema.yaml'\n schema = yamale.make_schema(schema_path)\n data = yamale.make_data(self.yaml_path)\n try:\n yamale.validate(schema, data, strict=False)\n print('Validation success! 👍')\n return True\n except ValueError as e:\n print(\n 'Yamale found that your file, '\n + self.yaml_path\n + ' is not formatted correctly.'\n )\n print(e)\n return False\n else:\n print('Did not validate yaml.')\n print('If unexpected results occur, try installing yamale and rerun.')\n return True", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def test_is_valid_valid_resume(self):\n self.assertTrue(resumeschema.is_valid(self.valid_resume))", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def validate_schema(self, schema):\n json_schema_path = os.path.join(_ROOT, 'data', 'schema.json')\n json_schema = load_json_or_yaml(json_schema_path)\n return validate(schema, json_schema)", "def _validate_twine_version(self, twine_file_twined_version):\n installed_twined_version = pkg_resources.get_distribution(\"twined\").version\n logger.debug(\n \"Twine versions... 
%s installed, %s specified in twine\", installed_twined_version, twine_file_twined_version\n )\n if (twine_file_twined_version is not None) and (installed_twined_version != twine_file_twined_version):\n raise exceptions.TwineVersionConflict(\n f\"Twined library version conflict. Twine file requires {twine_file_twined_version} but you have {installed_twined_version} installed\"\n )", "def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)", "def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")", "def validate(args):\n args = {k.lstrip('-').lower().replace('-', '_'): v\n for k, v in args.items()}\n schema = Schema({\n 'ptvsd': Or(None, And(Use(int), lambda port: 1 <= port <= 65535)),\n 'root_dir': os.path.exists,\n 'resume': bool,\n })\n args = schema.validate(args)\n return args", "def validate(self, namespace):\n pass", "def test_is_valid_manifest_with_wide_row(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_wide_row.tsv\",\n )\n wide_warning = f\"line 3, number of fields (6) in row is unequal to number of column names in manifest (5)\"\n assert wide_warning in caplog.text\n assert result == True", "def test_is_valid_invalid_resume(self):\n self.assertFalse(resumeschema.is_valid(self.invalid_resume))", "def app_validate(data):\n\n schema = json.load(open('schemas/app_description_schema.json', 'r'))\n try:\n jsonschema.validate(data, schema)\n except jsonschema.ValidationError as e:\n raise InvalidApplicationDescription(str(e))\n except jsonschema.SchemaError:\n log.exception('BUG: invalid schema for application descriptions')\n raise ZoeLibException('BUG: invalid schema for application descriptions')\n\n # Start non-schema, semantic checks\n if data['version'] != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription('Application description version mismatch (expected: {}, found: {}'.format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION, data['version']))\n\n found_monitor = False\n for service in data['services']:\n if service['monitor']:\n found_monitor = True\n\n service['resources']['memory']['max'] = zoe_lib.config.get_conf().max_memory_limit * (1024 ** 3)\n if service['resources']['memory']['min'] is not None and service['resources']['memory']['min'] > service['resources']['memory']['max']:\n raise InvalidApplicationDescription(msg='service {} tries to reserve more memory than the administrative limit'.format(service['name']))\n\n if service['resources']['cores']['min'] is None:\n service['resources']['cores']['min'] = 0.1\n\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have the monitor property set to true\")", 
"def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)", "def isValidForSchema(schema):\n\n return True", "def validate_payload(cls, event):\n # TODO: Use invenio-jsonschemas/jsonresolver instead of this\n # Validate against Event JSONSchema\n # NOTE: raises `jsonschemas.ValidationError`\n cls._jsonschema_validator.validate(event)\n\n # Validate using marshmallow loader\n for payload in event:\n errors = RelationshipSchema(check_existing=True).validate(payload)\n if errors:\n raise MarshmallowValidationError(str(errors) + \"payload\" + str(payload))", "def validate(entry: _LexiconEntry) -> None:\n _entry_has_required_fields(entry)\n _entry_field_values_are_not_empty(entry)\n _entry_field_values_does_not_contain_infix_whitespace(entry)\n _entry_tag_is_valid(entry)\n _entry_compound_annotation_is_valid(entry)\n _entry_morphophonemics_annotation_is_valid(entry)\n _entry_features_annotation_is_valid(entry)\n _entry_has_required_features(entry)\n _entry_required_features_are_valid(entry)\n _entry_optional_features_are_valid(entry)\n _entry_features_are_not_redundant(entry)", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def test_theme_manifest(err, xpi_package=None):\n\n # Don't even both with the test(s) if there's no chrome.manifest.\n chrome = err.get_resource('chrome.manifest')\n if not chrome:\n return\n\n for triple in chrome.triples:\n subject = triple['subject']\n # Test to make sure that the triple's subject is valid\n if subject not in ('skin', 'style'):\n err.warning(\n err_id=('themes', 'test_theme_manifest',\n 'invalid_chrome_manifest_subject'),\n warning='Invalid chrome.manifest subject',\n description=('chrome.manifest files for full themes are only '\n \"allowed to have 'skin' and 'style' items. 
\"\n 'Other types of items are disallowed for '\n 'security reasons.',\n 'Invalid subject: %s' % subject),\n filename=triple['filename'],\n line=triple['line'],\n context=triple['context'])", "def test_admin_freeze_schema():\n legal_filing = {'adminFreeze': ADMIN_FREEZE}\n\n is_valid, errors = validate(legal_filing, 'admin_freeze')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid", "def validate():", "def test_is_valid_manifest_format_using_line_limit(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\",\n line_limit=3,\n )\n error_log = caplog.text\n assert \"line 2\" in error_log\n assert \"line 3\" in error_log\n assert \"line 4\" not in error_log\n assert \"line 5\" not in error_log\n assert result == False", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def _validate_yaml(schema, config):\n check = pykwalify_core.Core(\n source_data=config, schema_files=[\"{}/{}.yml\".format(conf_schema_path, schema)])\n try:\n check.validate(raise_exception=True)\n except pykwalify_errors.SchemaError as e:\n _logger.error(\"Schema validation failed\")\n raise Exception(\"File does not conform to {} schema: {}\".format(schema, e))", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def is_manifest_list(self):\n return False", "def validate():\n description = f\"Validate XML metadata.\"\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n help = \"XML file or URL\"\n parser.add_argument('infile', help=help)\n\n help = (\n \"Format ID for metadata standard. If this argument is supplied, \"\n \"only that format ID will be checked. 
If not, all format IDs will be \"\n \"checked.\"\n )\n parser.add_argument('--format-id',\n help=help,\n choices=d1_scimeta.util.get_supported_format_id_list())\n\n help = \"Verbosity of log messages.\"\n choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n parser.add_argument('-v', '--verbosity', help=help, choices=choices,\n default='INFO')\n\n args = parser.parse_args()\n\n validator = XMLValidator(verbosity=args.verbosity)\n validator.validate(args.infile, format_id=args.format_id)", "def read_manifest(self): # -> None:\n ...", "def _validate_against_schema(config):\n logging.info(\"Validating config file against the schema\")\n try:\n c = Core(source_data=config, schema_files=[CONFIG_SCHEMA])\n c.validate(raise_exception=True)\n except Exception as e:\n logging.error(\"Failed when validating schema: %s\", e)\n logging.info(\"Dumping rendered template:\\n%s\", dump_rendered_config_file(config))\n raise", "def validateBoot (self):\n self.mountBootPartition()\n stateDictionary = self._createBootInstallationDictionary()\n self._writeDictionaryAsJson(stateDictionary, self._getBootInstallationFilePath())\n self._log(\"validate-boot\").notice(\"boot partition is validated\")", "def test_required_fields_schema_version(self):\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. \"\n \"Validation cannot be performed.\"\n ],\n )", "def validate_data(self, deployment='ops'):\n self.validator.set_example(self.example)\n\n # Don't just use the built in validate_data method as this needs to be future proofed against C100 firmware\n # upgrades. This upgrade will result in a new mode SELAP (R...CNTL2MODE == 64).\n self.validator.validate_capture_file_counts()\n self.validator.validate_capture_file_waveforms()\n\n # Many of these examples will have some amount of rounding error.\n self.validator.validate_waveform_times(min_end=10.0, max_start=-1534.0, step_size=0.2)\n self.validator.validate_cavity_modes(mode=(4, 64), deployment=deployment)\n self.validator.validate_zones()", "def validate_schema(self):\n for _, certificate in self.certificates_to_issue.items():\n with open(certificate.signed_cert_file_name) as cert:\n cert_json = json.load(cert)\n validate_unsigned_v1_2(cert_json)", "def validate(self):\n with open(os.path.join(settings.MEDIA_ROOT, self.file.name)) as file:\n lines = file.readlines()\n validators = ['os.', 'from os', 'io.', 'from io', 'open(', 'system(']\n for line in lines:\n for validator in validators:\n if validator in line:\n return False\n return True", "def test_validate_business_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'lastPreBobFilingTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'legalName': 'legal name - CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def validate(self) -> bool:\n\n # Start by reading in the blueprint schema json\n schema = json.loads(pkgutil.get_data(\"FactorioTools\", \"blueprintSchema.json\"))\n\n # Validate the object's schema against the blueprintSchema JSON\n try:\n jsonschema.validate(self.data, 
schema)\n return True\n except jsonschema.ValidationError:\n pass\n\n return False", "def semantic_validate(instance):\n unknown_templates = {}\n for name, requires in instance[\"application\"][\"requires\"].items():\n if name in instance[\"application\"][\"services\"]:\n raise ValidationError(errors=[\n \"/application/requires/{}: the name {} conflicts with service\"\n \" /application/services/{}\".format(name,\n repr(name),\n name),\n ])\n if requires[\"template\"] not in instance[\"local\"][\"templates\"]:\n unknown_templates[\"/application/requires/{}/template\".format(\n name)] = requires[\"template\"]\n for service_name, service in instance[\"application\"][\"services\"].items():\n for name, requires in service[\"requires\"].items():\n if name in instance[\"application\"][\"requires\"]:\n raise ValidationError(errors=[\n \"/application/services/{}/requires/{}: the name {}\"\n \" conflicts with /application/requires/{}\".format(\n service_name, name, repr(name), name)\n ])\n if requires[\"template\"] not in instance[\"local\"][\"templates\"]:\n unknown_templates[\n \"/application/services/{}/requires/{}/template\".\n format(service_name, name)] = requires[\"template\"]\n if unknown_templates:\n raise ValidationError(errors=[\n \"{}: the template {} does not exist \"\n \"in /local/templates\".format(path, repr(name))\n for (path, name) in unknown_templates.items()\n ])", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def validate(self):\n return _libsbml.SBMLExternalValidator_validate(self)", "def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False", "def validate(self):\n allowed_backbones = ['resnet50', 'resnet101', 'resnet152']\n backbone = self.backbone.split('_')[0]\n\n if backbone not in allowed_backbones:\n raise ValueError('Backbone (\\'{}\\') not in allowed backbones ({}).'.format(backbone, allowed_backbones))", "def test_manifest(self):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.iteritems():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n print \"Tested %s, %s names\" % (self.nuke_node.name(), len(manifest))\n print \" \", len(errors), \"non-matching IDs between python and c++.\"\n print \" \", len(collisions), \"hash collisions in manifest.\"\n\n return errors, collisions", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert 
'\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def __validate():\n # TODO: implement", "def test_invalid_schema():\n # setup\n uid = uuid.uuid4()\n schema_dir = f'/tmp/{uid}'\n schema_file = f'{schema_dir}/bad_schema.json'\n os.makedirs(schema_dir)\n text_file = open(schema_file, 'w')\n text_file.write('this will fail[];fail()')\n text_file.close()\n\n data = {}\n\n # test\n is_valid, errors = validate(data, 'bad_schema', validate_schema=True)\n\n # teardown\n os.remove(schema_file)\n os.removedirs(schema_dir)\n\n assert not is_valid\n assert errors", "def testGetManifest(self):\n manifest = self.dl_object._GetManifest()\n self.assertEqual(manifest.get('mediaType'),\n 'application/vnd.docker.distribution.manifest.v2+json')\n self.assertIn('layers', manifest)", "def require_manifest(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # Assume the manifest is in the current directory\n try:\n # If we are in a repository, we want to look in\n # the root of that repository for the manifest\n current_repo = vcs_git.RepoTool(Path.cwd(), search_parent=True)\n root_path = current_repo.get_root_path()\n except vcs_git.InvalidRepository:\n # Since we are not in a repository we will look\n # for the manifest in the current directory\n root_path = Path.cwd()\n\n manifest_path = root_path / manifest.MANIFEST_NAME\n\n try:\n loaded_manifest = manifest.load_manifest(manifest_path)\n return func(loaded_manifest, root_path, *args, **kwargs)\n except manifest.NotFound:\n ui.error(f\"Unable to load manifest: Not found: {str(manifest_path)}\")\n sys.exit(1)\n except manifest.ValidationFailed as exc:\n ui.error(f\"Unable to load manifest: Validation failed\")\n ui.error(str(exc))\n sys.exit(1)\n\n return wrapper", "def is_manifest(location):\n return as_posixpath(location).lower().endswith('meta-inf/manifest.mf')", "def test_required_term(self):\n schema = yaml.load(self.yaml_multiple_term, Loader=yaml.FullLoader)\n\n val = DwcaValidator(schema, error_handler=WhipErrorHandler)\n document = {'abundance': 'many'}\n val.validate(document)\n self.assertEqual(val.errors, {'eventDate': ['required field']})", "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }", "async def validate(self):\n pass", "def schema_validate_kubernetes_output(validate_data, cache_dir):\n (kind, version), validate_files = validate_data\n KubernetesManifestValidator(cache_dir).validate(validate_files, kind=kind, version=version)", "def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. 
\"\n \"Validation cannot be performed.\"\n ],\n )", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_validate_schema(schema_path):\n # Make sure that each schema itself is valid.\n schema_tree = schema.load_schema(schema_path, resolve_references=True)\n schema.check_schema(schema_tree)", "def validateMain (self):\n self.mountMainPartition()\n sdMainValidity = self._getMainValidityMarker() \n sdMainValidity.markValid(self._getOscarVersionString())\n self._log(\"validate-main\").notice(\"main partition is validated\")", "def test_is_valid_manifest_with_missing_url_column(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == True", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def _validate_data(self, vms, fw_rules):\n self._validate_vms(vms)\n self._validate_fw_rules(fw_rules)\n self._validated = True", "def validateMetadata(self, cur, hist):\n raise NotImplementedError(\"missing validateMetadata() method\")", "def validate(self, args: argparse.Namespace) -> int:\n trestle_root = args.trestle_root # trestle root is set via command line in args. 
Default is cwd.\n\n # validate by type - all of type or just specified by name\n if 'type' in args and args.type is not None:\n models = []\n if 'name' in args and args.name is not None:\n models = [args.name]\n else:\n models = fs.get_models_of_type(args.type, trestle_root)\n models_path = trestle_root / fs.model_type_to_model_dir(args.type)\n for m in models:\n model_path = models_path / m\n try:\n _, _, model = load_distributed(model_path, trestle_root)\n except TrestleError as e:\n logger.warning(f'File load error {e}')\n return 1\n if not self.model_is_valid(model):\n logger.info(f'INVALID: Model {model_path} did not pass the {self.error_msg()}')\n return 1\n logger.info(f'VALID: Model {model_path} passed the {self.error_msg()}')\n return 0\n\n # validate all\n if 'all' in args and args.all:\n model_tups = fs.get_all_models(trestle_root)\n for mt in model_tups:\n\n model_dir = trestle_root / fs.model_type_to_model_dir(mt[0]) / mt[1]\n extension_type = fs.get_contextual_file_type(model_dir)\n model_path = model_dir / f'{mt[0]}{FileContentType.to_file_extension(extension_type)}'\n _, _, model = load_distributed(model_path, trestle_root)\n if not self.model_is_valid(model):\n logger.info(f'INVALID: Model {model_path} did not pass the {self.error_msg()}')\n return 1\n logger.info(f'VALID: Model {model_path} passed the {self.error_msg()}')\n return 0\n\n # validate file\n if 'file' in args and args.file:\n file_path = trestle_root / args.file\n _, _, model = load_distributed(file_path, trestle_root)\n if not self.model_is_valid(model):\n logger.info(f'INVALID: Model {file_path} did not pass the {self.error_msg()}')\n return 1\n logger.info(f'VALID: Model {file_path} passed the {self.error_msg()}')\n return 0", "def Validate(self, relative_file, contents):\n pass", "def validate_schema(self):\n\n _schema_translator = {\n 'dav': 'http',\n 'davs': 'https',\n }\n\n _logger.debug(\n \"[%s]Validating URN schema: %s\",\n self.id,\n self.uri['scheme']\n )\n\n if self.uri['scheme'] in _schema_translator:\n\n _logger.debug(\n \"[%s]Using URN schema: %s\",\n self.id,\n _schema_translator[self.uri['scheme']]\n )\n\n self.uri['scheme'] = _schema_translator[self.uri['scheme']]\n\n else:\n _logger.debug(\n \"[%s]Using URN schema: %s\",\n self.id,\n self.uri['scheme']\n )", "def test_validators_meta_schemas(self):\n\n with self.assertWarns(DeprecationWarning) as w:\n value = validators.meta_schemas\n self.assertEqual(value, validators._META_SCHEMAS)\n\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.validators.meta_schemas is deprecated\",\n ),\n )", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def validate_bagit_file(bagit_path):\n _assert_zip_file(bagit_path)\n bagit_zip = zipfile.ZipFile(bagit_path)\n manifest_info_list = _get_manifest_info_list(bagit_zip)\n _validate_checksums(bagit_zip, manifest_info_list)\n return True", "def test_validate_invalid(self):\r\n self.assertEqual(get_tree_and_validate(self.invalid_xml, open(self.SCHEMA, 'r').read()), 0)", "def load_validator_schema():\n logger.info('Loading validator schemas')\n SchemaLoader.load_all_from_path(validator_config_path)", "def validate(self):\r\n return self.specs.validate(self)", "def validate_schema_consistent(self, node):\n 
debug(\"validate_schema_consistent() \" + node.name)\n\n response = node.nodetool('describecluster', True)[0]\n schemas = response.split('Schema versions:')[1].strip()\n num_schemas = len(re.findall('\\[.*?\\]', schemas))\n assert num_schemas == 1, \"There were multiple schema versions: \" + pprint.pformat(schemas)" ]
[ "0.70706165", "0.67949265", "0.6726037", "0.6626515", "0.6575376", "0.650649", "0.6169406", "0.6119033", "0.6040567", "0.59697014", "0.59644884", "0.59519655", "0.5935721", "0.591002", "0.5886837", "0.58650386", "0.5852848", "0.5783439", "0.5762712", "0.5759838", "0.5739801", "0.57347035", "0.5726784", "0.5710004", "0.5693958", "0.5693534", "0.5673112", "0.56656915", "0.56504315", "0.5649177", "0.5633841", "0.56313825", "0.56237936", "0.56111836", "0.560569", "0.5600829", "0.55967593", "0.5590038", "0.5589815", "0.55742365", "0.55581766", "0.55367655", "0.553094", "0.55308384", "0.5528924", "0.55285746", "0.5517953", "0.55127114", "0.5511322", "0.5510334", "0.54839706", "0.5457767", "0.5457429", "0.5441134", "0.5440951", "0.5440062", "0.53989565", "0.5391574", "0.53773606", "0.5367243", "0.5364569", "0.53270596", "0.5309413", "0.5289387", "0.5285121", "0.5281236", "0.5279765", "0.5275161", "0.52578974", "0.52578956", "0.52507865", "0.52462655", "0.524545", "0.5240002", "0.5238868", "0.5228455", "0.5219578", "0.5213035", "0.5201603", "0.51988834", "0.519841", "0.51963055", "0.51918846", "0.5188026", "0.5181621", "0.51804537", "0.5174792", "0.5173135", "0.5172104", "0.51489264", "0.51478297", "0.514756", "0.5140056", "0.513984", "0.51387894", "0.51375234", "0.5135871", "0.5135654", "0.51249135", "0.51189345" ]
0.6218101
6
Check that all non-optional datasets specified in the corresponding manifest strand in the twine are present in the given manifest.
def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):
    # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.
    manifest_schema = getattr(self, manifest_kind)

    for expected_dataset_name, expected_dataset_schema in manifest_schema["datasets"].items():
        if expected_dataset_name in manifest["datasets"]:
            continue

        if expected_dataset_schema.get("optional", False):
            continue

        raise exceptions.invalid_contents_map[manifest_kind](
            f"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing."
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def _IsApplicable(self, manifest):\n check_list = [(self.tool, manifest.tool), (self.board, manifest.board)]\n\n return all(fnmatch(text, pattern) for text, pattern in check_list)", "def _warn_for_missing_datasets(self, datasets: set[str]):\n any_missing = False\n for ds in datasets:\n if not self.frames.has_dataset(ds):\n any_missing = True\n logger.warn(f'dataset \"{ds}\" is not in the database')\n if any_missing:\n logger.warn(f\"datasets in the databse: {self.all_datasets()}\")", "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }", "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field 
in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True", "def supports_manifest(manifest):\n pass", "def test_theme_manifest(err, xpi_package=None):\n\n # Don't even both with the test(s) if there's no chrome.manifest.\n chrome = err.get_resource('chrome.manifest')\n if not chrome:\n return\n\n for triple in chrome.triples:\n subject = triple['subject']\n # Test to make sure that the triple's subject is valid\n if subject not in ('skin', 'style'):\n err.warning(\n err_id=('themes', 'test_theme_manifest',\n 'invalid_chrome_manifest_subject'),\n warning='Invalid chrome.manifest subject',\n description=('chrome.manifest files for full themes are only '\n \"allowed to have 'skin' and 'style' items. 
\"\n 'Other types of items are disallowed for '\n 'security reasons.',\n 'Invalid subject: %s' % subject),\n filename=triple['filename'],\n line=triple['line'],\n context=triple['context'])", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def test_is_valid_manifest_with_missing_url_column(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == True", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def test_required_attributes(self):\n\n required_attributes = ('ID', )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Jakob2019))", "def sanity_check(hdf):\n required_paths = ['Analyses', 'UniqueGlobalKey', 'Analyses/EventDetection_000']\n try:\n for p in required_paths:\n if p not in hdf:\n return False\n return True\n except:\n return False", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating 
provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def count_missing_stats(manifest):\n num_missing = 0\n for element in manifest:\n if element.missing_stats():\n num_missing += 1\n return num_missing", "def is_manifest_list(self):\n return False", "def split_manifest(root_path, manifest_file_path):\n\n train_manifest = open(os.path.join(root_path,\"dataset\", \"train_manifest.txt\"), \"w+\")\n test_manifest = open(os.path.join(root_path, \"dataset\",\"test_manifest.txt\"), \"w+\")\n val_manifest = open(os.path.join(root_path,\"dataset\" ,\"valid_manifest.txt\"), \"w+\")\n with open(os.path.join(root_path, manifest_file_path), 'r') as f:\n data_manifest = f.read().strip().split('\\n')\n data_len = len(data_manifest)\n k = 0\n for i in data_manifest:\n if k == 0:\n k = k+1\n continue\n elif k == 1:\n train_manifest.write(i+'\\n')\n test_manifest.write(i+'\\n')\n val_manifest.write(i+'\\n')\n elif k <= data_len*0.6: # 60% on train set\n train_manifest.write(i+'\\n')\n elif k > data_len*0.6 and k <= data_len*0.8: # 20 % on test\n test_manifest.write(i+'\\n')\n else: #20 % on test\n val_manifest.write(i+'\\n')\n k = k+1\n print(\"Spliting attritutes Done!\")", "def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False", "def verify_wilds(self):\n self.check_dataset_duplicate_ids(self.wilds)", "def test_manifest(self):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.iteritems():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n print \"Tested %s, %s names\" % (self.nuke_node.name(), len(manifest))\n print \" \", len(errors), \"non-matching IDs between python and c++.\"\n print \" \", len(collisions), \"hash collisions in manifest.\"\n\n return errors, collisions", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def check_dataset(number_episode_dics):\n\n for env_name in number_episode_dics.keys():\n\n check_folder(env_name, number_episode_dics[env_name])", "def check_for_data():\n if not 
(os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True", "def check_unstructured(extractions):\n if not extractions:\n return True\n for ext in extractions:\n if not hasattr(ext, 'args'):\n return False\n return True", "def cross_validate(self, contents, required=None, forbidden=None):\n if required:\n for item in required:\n self.assertTrue(\n item in contents,\n \"Required entry [{item}] not found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )\n if forbidden:\n for item in forbidden:\n self.assertTrue(\n item not in contents,\n \"Forbidden entry [{item}] found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def _check_availability(self, names: Iterable) -> None:\n unavailable = [x for x in names if x not in self.__by_name.keys()]\n if unavailable:\n raise ValueError(f'datasets: {unavailable} not available in the {self.region} region.')", "def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0", "def _verify(self) -> None:\n # Check if the files already exist\n if os.path.exists(os.path.join(self.root, self.image_root)):\n return\n\n # Check if .zip files already exists (if so extract)\n exists = []\n for filename, md5 in zip(self.filenames, self.md5s):\n filepath = os.path.join(self.root, filename)\n if os.path.isfile(filepath):\n if self.checksum and not check_integrity(filepath, md5):\n raise RuntimeError(\"Dataset found, but corrupted.\")\n exists.append(True)\n extract_archive(filepath)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n raise RuntimeError(\n \"Dataset not found in `root` directory, either specify a different\"\n + \" `root` directory or manually download the dataset to this directory.\"\n )", "def check_data_struct():\n if not os.path.exists(PROJECT_ROOT+'/data'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data')\n\n if not os.path.exists(PROJECT_ROOT+'/data/CUB_200_2011'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data/CUB_200_2011')\n\n if not os.path.exists(PROJECT_ROOT+'/data/segmentations'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data/segmentations')\n\n if not os.path.exists(PROJECT_ROOT+'/data/attributes.txt'):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), PROJECT_ROOT+'/data/attributes.txt')", "def checkMetadata(self):\n super(WorldfileMultiple, self).checkMetadata()\n \n # Check for necessary information in metadata\n if not 'basin_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a basin raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'subbasins_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a sub-basin raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'dem_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a DEM raster in a GRASS mapset\" % 
(self.context.projectDir,)) \n if not 'soil_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a soil raster in a GRASS mapset\" % (self.context.projectDir,))\n if not 'patch_rast' in self.grassMetadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a patch raster in a GRASS mapset\" % (self.context.projectDir,))\n \n if not 'rhessys_dir' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a RHESSys directory\" % (self.context.projectDir,))\n if not 'g2w_bin' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a grass2world executable\" % (self.context.projectDir,))\n if not 'rat_bin' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain an AverageTables executable\" % (self.context.projectDir,))\n if not 'template' in self.metadata:\n raise MetadataException(\"Metadata in project directory %s does not contain a world template\" % (self.context.projectDir,))\n if not 'rhessys_dir' in self.metadata:\n raise MetadataException(\"Metadata in project directory {0} does not contain a RHESSys directory\".format(self.context.projectDir))", "def testGetManifest(self):\n manifest = self.dl_object._GetManifest()\n self.assertEqual(manifest.get('mediaType'),\n 'application/vnd.docker.distribution.manifest.v2+json')\n self.assertIn('layers', manifest)", "def test_existence_checking(self):\n params = [f'2010-{num:0>2}' for num in range(1, 13)] # all month of 2010\n for month in params:\n with self.assertRaises(ValueError) as cm: # ValueError should be raised with proper message\n download_data(month)\n self.assertEqual(f'Dataset from {month} cannot be found on lichess.org', cm.exception.args[0], msg=month)\n # check if error message was as expected", "def test_MergeManifests_missing_files():\n d1 = dpack_pb2.DataPackage()\n f1 = d1.file.add()\n f1.relative_path = \"a\"\n f1.comment = \"abc\"\n d2 = dpack_pb2.DataPackage()\n f2 = d2.file.add()\n f2.relative_path = \"b\"\n f2.comment = \"def\"\n dpack.MergeManifests(d1, d2)\n assert d1.file[0].comment == \"abc\"\n assert d2.file[0].comment == \"def\"", "def has_required_programs(program_list):\n \n returnValue = True\n \n for program in program_list:\n if commands.getstatusoutput(\"which \"+program)[0] != 0:\n log.error(program+\" is required by \"+PROGRAM_NAME)\n returnValue = False\n \n return returnValue", "def dataset_name_matches_in_xif_and_schema(self):\n\n def get_dataset_from_xif(xif_file_path: str) -> List[str]:\n with open(xif_file_path) as xif_file:\n xif_content = xif_file.read()\n dataset = re.findall('dataset[ ]?=[ ]?([\"a-zA-Z_0-9]+)', xif_content)\n if dataset:\n return [dataset_name.strip('\"') for dataset_name in dataset]\n return []\n\n xif_file_path = get_files_in_dir(\n os.path.dirname(self.file_path), [\"xif\"], False\n )\n if xif_file_path and self.schema_content:\n xif_datasets = set(get_dataset_from_xif(xif_file_path[0]))\n schema_datasets = self.schema_content.keys()\n if len(xif_datasets) == len(schema_datasets) and len(xif_datasets) >= 1:\n all_exist = all(dataset in schema_datasets for dataset in xif_datasets)\n if all_exist:\n return True\n\n error_message, error_code = Errors.modeling_rule_schema_xif_dataset_mismatch()\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n self._is_valid = False\n return False", "def test_index_manifest_packages_failure(data, gen3_index, gen3_auth, 
logfile):\n with patch(\n \"gen3.tools.indexing.index_manifest.Gen3Metadata.create\", MagicMock()\n ) as mock_mds_create:\n index_object_manifest(\n manifest_file=f\"{CURRENT_DIR}/test_data/{data['manifest']}\",\n auth=gen3_auth,\n commons_url=gen3_index.client.url,\n thread_num=1,\n replace_urls=False,\n submit_additional_metadata_columns=True,\n )\n mds_records = {\n kwargs[\"guid\"]: kwargs[\"metadata\"]\n for (_, kwargs) in mock_mds_create.call_args_list\n }\n assert len(mds_records) == 0\n\n indexd_records = {r[\"did\"]: r for r in gen3_index.get_all_records()}\n assert len(indexd_records) == 0\n\n for error in data[\"expected_error_msgs\"]:\n assert error in logfile.read()", "def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))", "def check_dataset_presence(dataset, verbose=False):\n quantities = ['name', 'dataset_fraction', 'replica_fraction', 'block_fraction', 'block_completion']\n\n # Get site name and fractions for each site\n # yes, you prob shouldn't use shell=True, but CBA to figure out how to split the string for das_client\n grep_str = ' '.join(['site.%s' % q for q in quantities])\n out = check_output('das_client --query=\"site dataset=%s | grep %s\"' % (dataset, grep_str), shell=True)\n\n site_dicts = [] # hold info about each site\n\n # Process the important lines, store in a dict for each site\n results = [x for x in out.split('\\n') if 'Showing' not in x and x != '']\n for line in results:\n # Don't care if it's at a T1\n if line.startswith('T1'):\n continue\n sdict = {q : p for q, p in zip(quantities, line.split())}\n site_dicts.append(sdict)\n\n if verbose:\n # Print output for each site with fractions, colour coded\n print TColour.BLUE, dataset, TColour.ENDC\n print '\\t %s' % ' - '.join(quantities)\n for sdict in site_dicts:\n fracs = [sdict[k] for k in quantities]\n status_col = TColour.RED\n if all([f == '100.00%' for f in fracs if not f.startswith('T')]):\n status_col = TColour.GREEN\n print '\\t', status_col, ' - '.join(fracs), TColour.ENDC\n else:\n # Figure out if fully transferred anywhere, in which case print in green\n transferred = False\n for sdict in site_dicts:\n fracs = [sdict[k] for k in quantities]\n if all([f == '100.00%' for f in fracs if not f.startswith('T')]):\n transferred = True\n break\n\n status_col = TColour.GREEN if transferred else TColour.RED\n status_letter = 'v' if transferred else 'x'\n print status_col, status_letter, dataset, TColour.ENDC", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def test_no_updated_datasets(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, table.data)", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def requirement_missing(script):\n if \"requires\" in script:\n if 
script[\"requires\"] is None:\n return False\n for package in script[\"requires\"].split():\n try:\n pkg_resources.working_set.require(package)\n except Exception:\n return True\n return False", "def check_structured(extractions):\n if not extractions:\n return True\n for ext in extractions:\n if not hasattr(ext, 'arg1'):\n return False\n return True", "def is_empty(self):\n for key, dataset in self.datasets.items():\n try:\n has_data = dataset.has_data()\n except MFDataException as mfde:\n raise MFDataException(\n mfdata_except=mfde,\n model=self._container_package.model_name,\n package=self._container_package._get_pname(),\n message=\"Error occurred while verifying\"\n ' data of dataset \"{}\" in block '\n '\"{}\"'.format(dataset.structure.name, self.structure.name),\n )\n\n if has_data is not None and has_data:\n return False\n return True", "def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))", "def test_is_valid_manifest_with_wide_row(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_wide_row.tsv\",\n )\n wide_warning = f\"line 3, number of fields (6) in row is unequal to number of column names in manifest (5)\"\n assert wide_warning in caplog.text\n assert result == True", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def test_metadata_atleast_latin_menu_subsets_exist(self):\n self.assertIn('menu', self.metadata.get('subsets', []),\n msg=\"Subsets missing menu\")\n self.assertIn('latin', self.metadata.get('subsets', []),\n msg=\"Subsets missing latin\")", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def checkIfThereIsData(self, i):\n _, amountOfThings = self.weaviate.Get(\"/\" + i)\n if len(amountOfThings[i]) == 0:\n return False\n else:\n return True", "def _check_manifest_resources(self, documents: list) -> str:\n for doc in documents:\n kind = doc.get(\"kind\")\n\n # If this kind defines a job template, pull it out\n if kind in JOB_TEMPLATE_RESOURCES:\n doc = doc.get(\"spec\").get(\"jobTemplate\")\n if doc is None:\n return f\"{kind} resources MUST specify a job template!\"\n\n if kind in POD_TEMPLATE_RESOURCES:\n pod_template = doc.get(\"spec\").get(\"template\")\n if pod_template is None:\n return f\"{kind} resources MUST specify a pod template!\"\n\n pod_spec = pod_template.get(\"spec\")\n if pod_spec is None:\n return f\"{kind} resources MUST specify a pod spec!\"\n\n containers = pod_spec.get(\"containers\")\n if not containers:\n return f\"{kind} resources MUST specify at least one container!\"\n\n init_containers = pod_spec.get(\"initContainers\")\n if init_containers:\n containers = containers + init_containers\n\n 
missing_resources_msg = (\n f\"All containers and initContainers in a {kind}\"\n \"must define resource constraints!\"\n )\n for cont in containers:\n resources = cont.get(\"resources\")\n if not resources:\n return missing_resources_msg\n\n limits = resources.get(\"limits\")\n if not limits or not limits.get(\"cpu\") or not limits.get(\"memory\"):\n return missing_resources_msg\n\n requests = resources.get(\"requests\")\n if (\n not requests\n or not requests.get(\"cpu\")\n or not requests.get(\"memory\")\n ):\n return missing_resources_msg", "def test_check_metadata_fields(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n keys = [(\"name\", str), (\"postScriptName\", str),\n (\"fullName\", str), (\"style\", str),\n (\"weight\", int), (\"filename\", str),\n (\"copyright\", str)]\n\n missing = set([])\n unknown = set([])\n\n for j, itemtype in keys:\n\n for font_metadata in family.fonts:\n if j not in font_metadata:\n missing.add(j)\n\n for k in font_metadata:\n if k not in map(lambda x: x[0], keys):\n unknown.add(k)\n\n if unknown:\n msg = 'METADATA.json \"fonts\" property has unknown items [%s]'\n self.fail(msg % ', '.join(unknown))\n\n if missing:\n msg = 'METADATA.json \"fonts\" property items missed [%s] items'\n self.fail(msg % ', '.join(missing))", "def check_dataset_files_outside_datadir(fix, dataset_gateway: IDatasetGateway, **_):\n invalid_files = []\n for dataset in dataset_gateway.get_provenance_tails():\n if dataset.date_removed:\n continue\n\n data_dir = dataset.get_datadir()\n\n detected_files = []\n\n for file in dataset.files:\n if file.is_external or file.linked:\n continue\n try:\n get_safe_relative_path(project_context.path / file.entity.path, project_context.path / data_dir)\n except ValueError:\n detected_files.append(file)\n\n if not detected_files:\n continue\n\n if fix:\n communication.info(f\"Fixing dataset '{dataset.name}' files.\")\n dataset.unfreeze()\n for file in detected_files:\n dataset.unlink_file(file.entity.path)\n dataset.freeze()\n add_to_dataset(dataset.name, urls=[file.entity.path for file in detected_files], link=True)\n else:\n invalid_files.extend(detected_files)\n\n if invalid_files:\n problems = (\n WARNING\n + \"There are dataset files that aren't inside their dataset's data directory \"\n + \"(use 'renku doctor --fix' to fix them):\\n\\n\\t\"\n + \"\\n\\t\".join(click.style(file.entity.path, fg=\"yellow\") for file in invalid_files)\n + \"\\n\"\n )\n return False, True, problems\n\n return True, False, None", "def is_manifest(location):\n return as_posixpath(location).lower().endswith('meta-inf/manifest.mf')", "def _dist_has_meta_data(dist: pkg_resources.Distribution) -> bool:\n return dist.has_metadata('direct_url.json')", "def test_compatible_data_presence(allparams):\n compatible_data_keys_set = set(['puf', 'cps'])\n\n # Nested function used only in test_compatible_data_presence\n def valid_compatible_data(compatible_data):\n \"\"\"\n Return True if compatible_data is a valid dictionary;\n otherwise return False\n \"\"\"\n if not isinstance(compatible_data, dict):\n return False\n if set(compatible_data.keys()) != compatible_data_keys_set:\n return False\n for key in compatible_data:\n boolean = (compatible_data[key] is True or\n compatible_data[key] is False)\n if not boolean:\n return False\n return True\n\n # Main logic of test_compatible_data_presence function\n problem_pnames = list()\n for pname in allparams:\n if 'compatible_data' in allparams[pname]:\n compatible_data = 
allparams[pname]['compatible_data']\n else:\n compatible_data = None\n if not valid_compatible_data(compatible_data):\n problem_pnames.append(pname)\n if problem_pnames:\n msg = '{} has no or invalid compatible_data field'\n for pname in problem_pnames:\n print(msg.format(pname))\n assert 'list of problem_pnames' == 'empty list'", "def check_expected_present(self, data, expected, premessage):\n # check missing\n missing = list(d for d in expected if d not in data)\n if missing:\n self.raiseAWarning(premessage, '| Expected variables are missing:', missing)\n # check None\n nones = list(d for d, v in data.items() if (v is None and v in expected))\n if nones:\n self.raiseAWarning(premessage, '| Expected variables are None:', nones)\n if missing or nones:\n self.raiseAnError(RuntimeError, 'Some variables were missing or None! See warning messages above for details!')", "def exists():\n check50.include(\"data\")\n check50.exists(\"adventure.py\")\n check50.exists(\"room.py\")", "def test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "def has_extras(self):\n return any(map(utils.assert_package_has_extras, self.pkg_arguments))", "def _verify(self) -> None:\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.data_dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.data_dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()", "def check_data():\n check_docs(\"Training\")\n check_docs(\"dev\")\n check_docs(\"Test\")", "def validate_dataset(self):\n pass", "def test_sanity(self):\n\n parser = ManifestParser()\n mozmill_example = os.path.join(here, 'mozmill-example.ini')\n parser.read(mozmill_example)\n tests = parser.tests\n self.assertEqual(len(tests), len(file(mozmill_example).read().strip().splitlines()))\n\n # Ensure that capitalization and order aren't an issue:\n lines = ['[%s]' % test['name'] for test in tests]\n self.assertEqual(lines, file(mozmill_example).read().strip().splitlines())\n\n # Show how you select subsets of tests:\n mozmill_restart_example = os.path.join(here, 'mozmill-restart-example.ini')\n parser.read(mozmill_restart_example)\n restart_tests = parser.get(type='restart')\n self.assertTrue(len(restart_tests) < len(parser.tests))\n self.assertEqual(len(restart_tests), len(parser.get(manifest=mozmill_restart_example)))\n self.assertFalse([test for test in restart_tests\n if test['manifest'] != os.path.join(here, 'mozmill-restart-example.ini')])\n self.assertEqual(parser.get('name', tags=['foo']),\n ['restartTests/testExtensionInstallUninstall/test2.js',\n 'restartTests/testExtensionInstallUninstall/test1.js'])\n self.assertEqual(parser.get('name', foo='bar'),\n ['restartTests/testExtensionInstallUninstall/test2.js'])", "def _validate_data(self):\n logger.debug(\"Validating directory\")\n root = self.data_dir\n for path in self._walk_cases():\n print(path)\n full_path = os.path.join(root, path)\n logger.debug(\" \" + full_path)\n try:\n assert os.path.exists(full_path)\n except AssertionError:\n raise AssertionError(\n \"Couldn't find data 
on path {}\".format(full_path)\n )", "def _validate_num_profiles(self):\n for fp in [self.solar_fpath, self.wind_fpath]:\n with Resource(fp) as res:\n profile_dset_names = [\n n for n in res.dsets\n if self.__profile_reg_check.match(n)\n ]\n if not profile_dset_names:\n msg = (\"Did not find any data sets matching the regex: \"\n \"{!r} in {!r}. Please ensure that the profile data \"\n \"exists and that the data set is named correctly.\")\n e = msg.format(PROFILE_DSET_REGEX, fp)\n logger.error(e)\n raise FileInputError(e)\n elif len(profile_dset_names) > 1:\n msg = (\"Found more than one profile in {!r}: {}. \"\n \"This module is not intended for hybridization of \"\n \"multiple representative profiles. Please re-run \"\n \"on a single aggregated profile.\")\n e = msg.format(fp, profile_dset_names)\n logger.error(e)\n raise FileInputError(e)\n else:\n self.profile_dset_names += profile_dset_names", "def _detect_files(data):\n return any(attr[\"extra\"].get(\"files\")\n for attr in data[\"attributes\"] if attr[\"extra\"])", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def test_talbot_manifest_fetch():\n request, response = app.test_client.get(\"/iiif/manifest/452d6b51-949c-447d-9880-1108ffdfd96e.json\")\n assert response.status == 200", "def test_check_presence_only(self):\n schema = yaml.load(self.yaml_presence_check, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, allow_unknown=True,\n error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': ''}\n self.assertTrue(val.validate(document))\n document = {'eventDate': ''}\n val.validate(document)\n self.assertEqual(val.errors, {})", "def check_ekin_dataset(cls, ekindatasets):\n for k in ekindatasets.keys():\n ekindataset = ekindatasets[k]\n xdatapoints=ekindataset[0].keys()\n count=0\n for datapoint in xdatapoints:\n if datapoint=='label' or datapoint=='label_widget':\n continue\n else:\n if not isinstance(ekindataset[0][datapoint], dict):\n if ekindataset[0][datapoint] != '':\n return 1\n elif ekindataset[0][datapoint].has_key('var'):\n if ekindataset[0][datapoint]['var']!='':\n return 1\n return 0", "def check_missing_files(dataset_gateway: IDatasetGateway, **_):\n missing = defaultdict(list)\n\n for dataset in dataset_gateway.get_all_active_datasets():\n # NOTE: Datasets with storage backend don't have local copies of files\n if dataset.storage:\n continue\n for file_ in dataset.files:\n path = project_context.path / file_.entity.path\n file_exists = path.exists() or (file_.is_external and os.path.lexists(path))\n if not file_exists:\n missing[dataset.name].append(file_.entity.path)\n\n if not missing:\n return True, False, None\n\n problems = WARNING + \"There are missing files in datasets.\"\n\n for dataset_name, files in missing.items():\n problems += (\n \"\\n\\t\"\n + click.style(dataset_name, fg=\"yellow\")\n + \":\\n\\t \"\n + \"\\n\\t \".join(click.style(path, fg=\"red\") for path in files)\n )\n\n return False, False, problems", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n 
manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def check_existing_dataset(path: str):\n x_path = os.path.join(path, IMG_DIR)\n y_path = os.path.join(path, MSK_DIR)\n\n if os.path.isdir(x_path) and os.path.isdir(y_path):\n _, _, x_files = next(os.walk(x_path))\n _, _, y_files = next(os.walk(y_path))\n x = len(x_files)\n y = len(y_files)\n\n if x != y:\n logger.warning(\n \"Found un-even numbers of x-y for dataset. x = %i, y = %i.\", x, y\n )\n\n return -1\n\n if x == 0:\n logger.info(\"Found 0 existing sets.\")\n\n return 0\n logger.info(\"Found %s sets in existing dataset.\", x)\n\n return x\n logger.error(\"Could not locate x and y folder.\")\n sys.exit()", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def test_exist_entry_on_rebuild(self):\n self.validate_attributes_in_exist_response()", "def is_dataset(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = [\"data_vars\", \"coords\", \"dims\", \"to_array\"]\n\n return all([hasattr(X, name) for name in require_attrs])", "def _check_dataset_consistency(self):\n if not self.multi_dataset: \n raise MutantError(\"_check_dataset_consistency only makes sense for multi-datasets!\")\n def _check_sets_raise_error(set1, set2, set1_name, set2_name):\n if not set1==set2:\n raise MutantError(\"Multi-dataset mutant pool has different %s and %s dataset sets! 
%s, %s\"%(set1_name, \n set2_name, set1, set2))\n datasets_from_summary = set(self.summary.keys())\n datasets_from_mutants = set.union(*[set(m.by_dataset.keys()) for m in self])\n _check_sets_raise_error(datasets_from_summary, datasets_from_mutants, \"from summary\", \"from mutants\")\n try:\n if self._dataset_order is not None: \n datasets_from_order = set(self._dataset_order)\n _check_sets_raise_error(datasets_from_order, datasets_from_summary, \"from dataset_order\", \"from summary\")\n except AttributeError:\n pass", "def check_image_manifest(self,\n idf,\n cids,\n cols = ['md5sum',\n 'storage_urls',\n 'file_size',\n 'case_ids',\n 'study_uid',\n 'series_uid',\n 'file_name']):\n errors = []\n for col in cols:\n missing = len(idf[idf[col].isnull()])\n if missing > 0:\n error = \"'{}' values issing for image manifest column '{}'.\".format(len(missing),col)\n print(error)\n errors.append(error)\n if \"case_ids\" in idf:\n icids = list(set(idf[\"case_ids\"]))\n extra_cids = list(set(icids).difference(cids))\n if len(extra_cids) > 0:\n error = \"The image manifest TSV contains {} case IDs that are not present in the case TSV!\".format(len(extra_cids))\n print(error)\n errors.append(error)\n else:\n error = \"'case_ids' column missing from image manifest!\"\n print(error)\n errors.append(error)\n return errors", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def check_invalid_datasets_derivation(fix, dataset_gateway: IDatasetGateway, **_):\n invalid_datasets = []\n\n def fix_or_report(dataset):\n if fix:\n dataset.unfreeze()\n dataset.derived_from = None\n dataset.freeze()\n communication.info(f\"Fixing dataset '{dataset.name}'\")\n else:\n invalid_datasets.append(dataset.name)\n\n for dataset in dataset_gateway.get_provenance_tails():\n while dataset.derived_from is not None and dataset.derived_from.url_id is not None:\n if dataset.same_as or dataset.derived_from.url_id == dataset.id:\n fix_or_report(dataset)\n break\n\n try:\n dataset = dataset_gateway.get_by_id(dataset.derived_from.url_id)\n except errors.ObjectNotFoundError:\n fix_or_report(dataset)\n break\n\n if not invalid_datasets:\n return True, False, None\n\n problems = (\n WARNING\n + \"There are invalid dataset metadata in the project (use 'renku doctor --fix' to fix them):\"\n + \"\\n\\n\\t\"\n + \"\\n\\t\".join(click.style(name, fg=\"yellow\") for name in invalid_datasets)\n + \"\\n\"\n )\n\n return False, True, problems", "def _check_data_valid(self):\n\n is_valid = (sum(~np.isnan(self.data).flatten()) > 0 and self.data.flatten().sum() != 0)\n if not is_valid:\n raise FITSException(f\"No data in {self.survey}\")", "def check(self):\n missing = []\n for name in self.data[\"locations\"]:\n try:\n n = self.data[\"names\"][name]\n except KeyError:\n missing.append(name)\n if missing:\n raise RuntimeError(\"\\\"names\\\" list lacks:\\n \" + \"\\n \".join(missing))", "def needs_dataset(*names, default=None):\n non_ion_datasets = ['abundance', 'ip', 'ioneq']\n names = 
[f'_{n}' if n not in non_ion_datasets else f'{n}' for n in names]\n\n def decorator(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n if any([args[0].__getattribute__(n) is None for n in names]):\n return default\n else:\n return func(*args, **kwargs)\n return func_wrapper\n return decorator", "def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)", "def isFilePresent(fileName):\n global dataFolder,blackListFiles\n allDataFiles = [f for f in listdir(dataFolder) if (isfile(join(dataFolder, f)) and f.endswith('.zip'))]\n return fileName in allDataFiles and not (fileName in blackListFiles)", "def _CheckFileExistsWithData(self, logs, graph):\n self.assertTrue(graph in logs, 'File %s was not output.' % graph)\n self.assertTrue(logs[graph], 'File %s did not contain data.' % graph)", "def test_get_manifest(self):\n expected_start, expected_end = self.mock_data.month_range.split(\"-\")\n\n manifest, _ = self.downloader._get_manifest(self.mock_data.test_date)\n\n self.assertEqual(manifest.get(\"assemblyId\"), self.mock_data.export_uuid)\n self.assertEqual(manifest.get(\"reportKeys\"), [self.mock_data.export_file])\n self.assertEqual(manifest.get(\"Compression\"), \"PLAIN\")\n self.assertEqual(manifest.get(\"billingPeriod\").get(\"start\"), expected_start)\n self.assertEqual(manifest.get(\"billingPeriod\").get(\"end\"), expected_end)", "def verify_entry(b):\n for f in minimalfields:\n if f not in b:\n return False\n return True", "def check_integrity(self) -> None:\n for subset in self.subsets:\n if not self._check_subset_integrity(subset):\n raise ValueError(f\"subset {subset} not found or corrupt\")", "def check_missing_files(self):\n files = [getattr(self, attr) for attr in self._required]\n try:\n utilities.check_missing_files(files)\n except utilities.MissingConstraintError as err:\n err.message += \"\\nSkipping {}\\n\".format(self.__class__.__name__)\n raise err", "def get_manifest_data(bucket,team, dataset,manifest_key):\n dynamo_config = DynamoConfiguration()\n dynamo_interface = DynamoInterface(dynamo_config)\n s3_interface = S3Interface()\n local_path = s3_interface.download_object(bucket, manifest_key)\n ddb_keys=[]\n items=[]\n with open(local_path, \"r\") as raw_file:\n file_names = [file_name.strip().split(\"/\")[-1]\n for file_name in raw_file]\n for file in file_names:\n ddb_keys.append({\n \"dataset_name\": team+\"-\"+dataset,\n \"manifest_file_name\": manifest_key.split(\"/\")[-1], \"datafile_name\": file\n })\n for ddb_key in ddb_keys:\n try:\n items.append(dynamo_interface.get_item_from_manifests_control_table(\n ddb_key[\"dataset_name\"], ddb_key[\"manifest_file_name\"], ddb_key[\"datafile_name\"]))\n except KeyError:\n logger.error(\"The manifest file has not been processed in Stage A\")\n raise Exception(\"Manifest File has not been processed in Stage A\")\n\n return items" ]
[ "0.68781096", "0.5914848", "0.5894921", "0.58664596", "0.5797446", "0.5790559", "0.575301", "0.57257426", "0.56155634", "0.5589998", "0.55779296", "0.5571627", "0.55710405", "0.5568376", "0.55295604", "0.5499033", "0.5491544", "0.5466877", "0.54596764", "0.5446938", "0.54257554", "0.5356052", "0.533595", "0.5322388", "0.5315691", "0.5311172", "0.5294386", "0.5277329", "0.52738976", "0.52700686", "0.5246219", "0.52426744", "0.5232312", "0.52162683", "0.52074647", "0.5203824", "0.51894635", "0.51869464", "0.517388", "0.5173813", "0.51704115", "0.51655155", "0.51499987", "0.51427585", "0.5139918", "0.51379037", "0.5133375", "0.5124356", "0.5113062", "0.51105046", "0.5108641", "0.5107189", "0.51001096", "0.5097492", "0.50935715", "0.50910616", "0.5078984", "0.50726235", "0.50709295", "0.50663847", "0.5066297", "0.5060553", "0.5056738", "0.5053627", "0.50475043", "0.50458884", "0.5034705", "0.5032661", "0.50280607", "0.50097257", "0.50054884", "0.50051874", "0.5002973", "0.5001303", "0.49932414", "0.49870738", "0.49865437", "0.49675947", "0.49650425", "0.49643654", "0.49510133", "0.4949947", "0.49473888", "0.49438706", "0.4939257", "0.49361664", "0.4929928", "0.4929928", "0.4929879", "0.49209255", "0.49160096", "0.4913835", "0.49129403", "0.49121457", "0.48999614", "0.48975095", "0.48974168", "0.48956493", "0.48917636", "0.48802915" ]
0.7935964
0
Get the names of strands that are found in this twine.
def available_strands(self): return self._available_strands
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_strands(self):\n return iter(self.strand_list)", "def getStationsName(self) :\n names = []\n for sts in self._stations :\n names.append(sts.getName())\n\n return names", "def get_well_aliases(self):\n return self.info_wells['well'].unique()", "def list_available_strains(self):\n return [strain for strain in self.sample_dict]", "def find_hypernyms(self, syns):\n names = set()\n # Find hypernyms of each syn\n for syn in syns:\n hypernyms = syn.hypernyms()\n # find hypernyms one more level up\n for hypernym in hypernyms:\n names.add(hypernym.name())\n hypernyms_second = hypernym.hypernyms()\n for h in hypernyms_second:\n names.add(h.name())\n return names", "def wells_list(self, wtype='all'):\n list_names = []\n for well_data in self.wells:\n if wtype == 'all':\n list_names.append(well_data.drawdown.name)\n elif wtype == well_data._type - 2:\n list_names.append(well_data.drawdown.name)\n return(list_names)", "def names(self) -> list[str]:", "def stl_names(self):\n return [stl.member.get_full_name() for stl in self.stls.all()]", "def _getAllWorklistNames(self):\n log.debug(\"Finding all worklists mentioned in this statemachine.\")\n worklists = {}\n names = [s.getTaggedValue('worklist')\n for s in self.sm.getStates(no_duplicates = 1)\n if s.getTaggedValue('worklist')]\n for name in names:\n worklists[name] = 'just filtering out doubles'\n result = worklists.keys()\n log.debug(\"Found the following worklists: %r.\", result)\n return result", "def synonyms(self):\n\n return [synonym[\"name\"] for synonym in self._get_synonym_json()]", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def getNames(self) -> List[unicode]:\n ...", "def speciesNames(self):\n nsp = self.nSpecies()\n return map(self.speciesName,range(nsp))", "def get_station_names(self):\n station_names = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n station_name = ' '.join(wrapper.find(\"h3\").text.split(' ')[:-1])\n station_names.append(station_name)\n return np.array(station_names).T", "def names(self) -> PlaceNames | None:\n pass", "def get_names(self):\n return self.names", "def get_words_from_sysets(synset):\n synlist = []\n for s in synset:\n syns = s.lemmas()[0].name()\n synlist.append(syns)\n return synlist", "def get_holonyms(synset):\n return set(\n synset.member_holonyms() + synset.substance_holonyms() + synset.part_holonyms()\n )", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def tank_name_list(self):\n return list(self._node_reg.tank_names)", "def names(cls) -> List[str]:", "def names(self) -> List:\n ...", "def synonyms(self) -> List[str]:\n return self._synonyms", "def synonyms(self) -> List[str]:\n return pulumi.get(self, \"synonyms\")", "def return_names(self):\n return self.__name_list", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def available_manifest_strands(self):\n return self._available_manifest_strands", "def namelist(self):\n return set(self.names())", "def wt_strains(df):\n \n ts_plates = []\n dma_plates = []\n for plate in df.Plate.unique():\n if ('_26C_' in plate) or ('_37C_' in plate):\n ts_plates.append(plate)\n else:\n 
dma_plates.append(plate)\n\n wt_strain_ids_dma = df[(df['ORF'].isin(['YOR202W'])) &\n (df['Plate'].isin(dma_plates))]['Strain ID'].unique()\n wt_strain_ids_ts = df[(df['ORF'].isin(['YOR202W', 'YMR271C'])) &\n (df['Plate'].isin(ts_plates))]['Strain ID'].unique()\n wt_strain_ids = np.append(wt_strain_ids_ts, wt_strain_ids_dma)\n \n return wt_strain_ids", "def get_short_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def species(self):\n return self.name", "def list_building_names(self):\n return self.building_names", "def get_names_short(self):\r\n return [p.get_name() for p in self.people]", "def names(self):\n return [da.name for da in self]", "def availableSquares(self):\n List2=[]\n for item in self.all:\n if item.retrieve()==\"\":\n List2.append(item.name())\n return List2", "def get_names(self):\n return self.__names", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def get_strains(names, q_dof):\n strain_functions = []\n for n, this_dof in zip(names, q_dof):\n check_qdof(n, this_dof)\n if n == 'linear_helix':\n strain_functions.append(linear_helix_strain)\n elif n == 'pure_helix':\n strain_functions.append(pure_helix_strain)\n elif n == 'torsion_helix':\n strain_functions.append(torsion_helix_strain)\n elif n == 'torsion_linear_helix':\n strain_functions.append(torsion_linear_helix_strain)\n elif n == 'quadratic':\n strain_functions.append(quadratic_strain)\n elif n == 'linear':\n strain_functions.append(linear_strain)\n elif n == 'constant':\n strain_functions.append(constant_strain)\n elif n == 'full':\n strain_functions.append(full_strain)\n else:\n print(f'{n} is not a defined strain base.')\n return strain_functions", "def names(self):\n return self._names", "def names(self):\n return self._names", "def names(self):\n return self._names", "def sons(self):\n\n return self._sons", "def names(self):\n\t\treturn", "def getNames(self):\n return self._Names", "def get_chebi_synonyms(chebi_ent):\n if hasattr(chebi_ent, 'Synonyms'):\n return [entry.data for entry in chebi_ent.Synonyms]\n else:\n return []", "def get_search_tag_names(self):\n return self._ruleset.keys()", "def get_search_results(self):\n return self.get_list_of_names(self.SEARCH_RESULTS)", "def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names", "def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names", "def names(self):\n return self.__names", "def senses(self):\n sense_set = wordnet.synsets(self.text)\n\n result = list()\n for s in sense_set:\n result.append(s.lemma_names())\n\n return result", "def name(self):\n return [o.name for o in self.obs]", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def names(\n self\n ) -> Tuple[str, ...]:\n return self._names", "def getRegisterNames(self):\n pass", "def get_names(self):\n return sorted(list(self.df[[Data.SDATA_NAME]].drop_duplicates().iloc[:, 0]))", "def fqns(self):\n return [fqn for fqn in self.runinfos]", "def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames", "def getSamplesList(self):\n return self.sample_names", "def species(self):\n return [node.species for node 
in self]", "def train_stations(self) -> List[str]:\n return sorted([train_info['HE'] for train_info in train_api.stations_info.values()])", "def tank_names(self):\n return self._tanks", "def names():\n pass", "def watershedlist():\n opts = watersheds_db()\n return [(opts[opt]['name'] + ' (' + opts[opt]['delineation'] + ')', opt) for opt in opts]", "def names(cls):\n return cls.__by_name.keys()", "def _find_names(place):\n tags = place['tags']\n tags_names = ['name', 'place_name', 'alt_name']\n names = []\n for tag in tags_names:\n try:\n names.extend(tags[tag].split(';'))\n except KeyError:\n pass\n if not names:\n print \"Place has no name (#{})\".format(place['id'])\n return names", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def logicalnames(self):\n return self.__logicalnames.keys()", "def get_words(self):\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT word\"\r\n +\" FROM word_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {word[0] for word in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.word_dict.keys()", "def get_afferents_names(self):\n\t\treturn self._afferentsNames", "def _getWorklistStateNames(self, worklistname):\n results = [s.getName()\n for s in self.sm.getStates(no_duplicates = 1)\n if s.getTaggedValue('worklist') == worklistname]\n log.debug(\"Associated with worklist '%s' are the \"\n \"following states: %r.\", worklistname, results)\n return results", "def names(self):\r\n return resource.Name(self)", "def _get_wells(self):\n wells = []\n for well in self.plate_meta['wells']:\n wells.append(well['path'])\n self.wells = wells", "def getStations(self) :\n return self._stations", "def getOthNames( self ):\n\n if self.othNames:\n return self.othNames.keys()\n \n nSets = self.adb.get( \"nOths\" )\n for id1 in range( nSets ):\n name = self.adb.get( \"othName\", id1 )\n self.othNames[ name ] = id1\n\n return self.othNames.keys()", "def names(filter=None):", "def findBrains():\r\n keys = livingDead.Frankenstein.genKeys()\r\n return keys", "def get_standard_names(self):\n return [s for (i, s) in list(self._I2SMAP.items())\n if (i != self._CUSTOM) and s.strip()]", "def search_brands_by_name(mystr):\n \n return Brand.query.filter(Brand.name.like('%' + mystr + '%')).all()", "def names(self):\n return self.dark_name, self.light_name", "def outputStateNames(self):\n names = []\n for item in self.mechanisms:\n for output_state in item.outputStates:\n names.append(output_state)\n return names", "def names(self):\n if type(self.name) is types.StringType:\n return [self.name]\n else:\n return list(self.name)", "def ligands(self, species=None):\n\n ligands = []\n for interaction in self.interactions(species=species):\n ligand = interaction.ligand()\n if ligand not in ligands:\n ligands.append(ligand)\n return ligands", "def getOqiNames( self ):\n\n if self.oqiNames:\n return self.oqiNames.keys()\n\n n = self.adb.get( \"nSrss\" )\n for indx in xrange( n ):\n name = self.adb.get( \"srsName\", indx )\n self.oqiNames[ name ] = indx\n\n return self.oqiNames.keys()", "def names(self):\n return list(item.name for item in self.mechanisms)", "def obs_names(self):\n return self._obs_names", "def _lookup_bands(platform, wavelengths):\r\n wave_bands = {\r\n Platform.Landsat5: {\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"nir\": \"4\",\r\n 
\"swir1\": \"5\",\r\n \"tirs\": \"6\",\r\n \"swir2\": \"7\",\r\n },\r\n Platform.Landsat7: {\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"nir\": \"4\",\r\n \"swir1\": \"5\",\r\n \"tirs1\": \"6_VCID_1\",\r\n \"tirs2\": \"6_VCID_2\",\r\n \"swir2\": \"7\",\r\n \"pan\": \"8\",\r\n },\r\n Platform.Landsat8: {\r\n \"aerosol\": \"1\",\r\n \"blue\": \"2\",\r\n \"green\": \"3\",\r\n \"red\": \"4\",\r\n \"nir\": \"5\",\r\n \"swir1\": \"6\",\r\n \"swir2\": \"7\",\r\n \"pan\": \"8\",\r\n \"cirrus\": \"9\",\r\n \"tirs1\": \"10\",\r\n \"tirs2\": \"11\",\r\n },\r\n Platform.Sentinel2: {\r\n \"aerosol\": \"0\",\r\n \"blue\": \"1\",\r\n \"green\": \"2\",\r\n \"red\": \"3\",\r\n \"rededge1\": \"4\",\r\n \"rededge2\": \"5\",\r\n \"rededge3\": \"6\",\r\n \"nir\": \"7\",\r\n \"rededge4\": \"8\",\r\n \"watervapor\": \"9\",\r\n \"cirrus\": \"10\",\r\n \"swir1\": \"11\",\r\n \"swir2\": \"12\",\r\n },\r\n }\r\n\r\n return [wave_bands[platform][wavelength.lower()] for wavelength in wavelengths]", "def stations():\n\n return station_list", "def hyponym(self, sense=None):\n s = self._synset(self.text)\n\n if not s:\n return []\n\n hypo = s.hyponyms()\n\n results = list()\n for h in hypo:\n results.append(h.lemma_names())\n\n if not sense:\n return results\n\n # TODO: Exception when not an int\n return results[:sense + 1]", "def getControlTowerNames(self):\n\n table = self.metadata.tables['invTypes']\n\n stmt = select(\n [table.c.typeID, table.c.typeName], \n table.c.marketGroupID == CONTROL_TOWER_MARKET_GROUP,\n ).order_by(\n table.c.typeName)\n\n return self.select(stmt)", "def find_all_ORFs_both_strands(dna):\n \n # YOUR IMPLEMENTATION HERE", "def reservoir_names(self):\n return self._reservoirs", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def get_weathers():\n names = [\n name for name in dir(carla.WeatherParameters)\n if re.match('[A-Z].+', name)\n ]\n weathers = {x: getattr(carla.WeatherParameters, x) for x in names}\n return weathers", "def get_names(self):\n\n return self.mod_suites.keys()" ]
[ "0.6584263", "0.65621185", "0.63851863", "0.62019277", "0.61951375", "0.60891676", "0.60425985", "0.6007916", "0.59496164", "0.59036535", "0.5884974", "0.58607626", "0.57966846", "0.57783294", "0.5773814", "0.5770074", "0.57655287", "0.5721144", "0.5684944", "0.5684944", "0.5680266", "0.56731397", "0.565833", "0.5656931", "0.5639807", "0.5628358", "0.5610615", "0.5599485", "0.5598051", "0.5592574", "0.55816174", "0.55744606", "0.5561899", "0.55568933", "0.5552316", "0.55410725", "0.5533148", "0.5518508", "0.55063796", "0.55063796", "0.5489818", "0.5478198", "0.5478198", "0.5478198", "0.5469389", "0.5453498", "0.53958565", "0.5395759", "0.53941816", "0.5388838", "0.5386587", "0.5386587", "0.5386012", "0.5381485", "0.53766817", "0.5371399", "0.5352772", "0.5347049", "0.53447205", "0.5344479", "0.53146034", "0.5310374", "0.5307961", "0.53049284", "0.5304021", "0.52941394", "0.5290515", "0.52808475", "0.52768147", "0.52710783", "0.52710783", "0.5269443", "0.52683145", "0.5267394", "0.52660096", "0.5265372", "0.52586836", "0.5250815", "0.52506655", "0.5247058", "0.52468354", "0.52247256", "0.5215784", "0.520869", "0.5206171", "0.5202638", "0.5200922", "0.5200802", "0.51960826", "0.51917684", "0.51904", "0.51898015", "0.5185919", "0.5185543", "0.51833427", "0.517914", "0.5177792", "0.5177792", "0.5177746", "0.5177566" ]
0.71010107
0
Get the names of the manifest strands that are found in this twine.
def available_manifest_strands(self): return self._available_manifest_strands
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def available_strands(self):\n return self._available_strands", "def list_manifests():\n import enaml\n with enaml.imports():\n from .pulses.manifest import PulsesManagerManifest\n from .tasks.manifest import PulsesTasksManifest\n from .measure.manifest import PulsesMeasureManifest\n return [PulsesManagerManifest, PulsesTasksManifest, PulsesMeasureManifest]", "def getAtomNames(self):\n return self._raw_data['ATOM_NAME']", "def names(self) -> List[str]:\n names = set()\n for summary_dir in self._summary_dirs:\n for subdir in summary_dir.glob(\"*\"):\n if subdir == _METADATA:\n continue\n if subdir.is_dir():\n names.add(subdir.name)\n return sorted(names)", "def names(self) -> list[str]:", "def get_well_aliases(self):\n return self.info_wells['well'].unique()", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def fqns(self):\n return [fqn for fqn in self.runinfos]", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "def synonyms(self):\n\n return [synonym[\"name\"] for synonym in self._get_synonym_json()]", "def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def get_all_fullpaths(self):\n files = []\n for mf in self.manifests:\n files.extend(self.manifests[mf].get_fullpaths())\n return files", "def onboot_names(self):\n ext_names = []\n for ext in self.extensions.values():\n if not ext.onboot:\n continue\n ext_names.append(ext.name)\n return ', '.join(sorted(ext_names))", "def app_names(self):\n return self.get_app_names()", "def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, self.get_app_ids()))", "def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests", "def RAppNames(self):\n\t\tnames=[]\n\t\tfor item in range(self.rApps.Count):\n\t\t\tnames.append(self.rApps.Item(item).Name)\n\t\treturn names", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def names() -> Tuple[str, ...]:\n return plugins.list_all(package_name=__name__)", "def find_hypernyms(self, syns):\n names = set()\n # Find hypernyms of each syn\n for syn in syns:\n hypernyms = syn.hypernyms()\n # find hypernyms one more level up\n for hypernym in hypernyms:\n names.add(hypernym.name())\n hypernyms_second = hypernym.hypernyms()\n for h in hypernyms_second:\n names.add(h.name())\n return names", "def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def mapped_names(self):\n return [x.distro for x in DistroMapping.distros_mapped_to(self.name, self.version)]", "def get_afferents_names(self):\n\t\treturn self._afferentsNames", "def names(self):\r\n return resource.Name(self)", "def names(self):\n return list(item.name for item in self.mechanisms)", "def getStationsName(self) :\n names = []\n for sts in self._stations :\n names.append(sts.getName())\n\n return names", "def get_short_names(self) -> 
List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[0])\n return result", "def resource_names(self):\n return self._resource_names", "def get_namespaces():\n return list(StaticAsset._load_namespaces().keys())", "def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)", "def names(cls) -> List[str]:", "def getNames(self) -> List[unicode]:\n ...", "def applications(self):\n return [self.app] + self.mounts.values()", "def GetScaffolderNames(cls) -> Iterator[str]:\n for scaffolder_name, _ in cls.GetScaffolders():\n yield scaffolder_name", "def names(self) -> List:\n ...", "def synonyms(self) -> List[str]:\n return self._synonyms", "def names(self):\n\t\treturn", "def namelist(self):\n return set(self.names())", "def get_setup_names(self):\n self.setup_names = list(self._optimetrics.GetSetupNames())\n return self.setup_names.copy()", "def getRegisterNames(self):\n pass", "def getOutputNames(self):\n return self.dirs", "def get_names(self):\n return self.names", "def synonyms(self) -> List[str]:\n return pulumi.get(self, \"synonyms\")", "def monomers(self):\n return sorted(set([self[x.split(\"_\")[-1]][\"name\"] for x in self.keys]), key=lambda x: -len(x))", "def all_present_subsystems(self):\n return _yield_subdir_names(self.subsys_configs)", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "def names(self):\n return self._names", "def names(self):\n return self._names", "def names(self):\n return self._names", "def pump_name_list(self):\n return list(self._link_reg.pump_names)", "def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)", "def get_allref(self):\n return self.__applicationList.keys()", "def masters(self):\n return sorted(self.get_ns_name(ns) for ns in self.profile.masters.all())", "def full_names(self) -> List[str]:\n self.names = [\n \".\".join(prod)\n for prod in product(*self._namespaces, self.terminals)\n ]\n return self.names", "def get_names(self):\n\n return self.mod_suites.keys()", "def _get_all_saves_names(wit_path):\n names = []\n for item in os.listdir(os.path.join(wit_path, '.wit', 'images')):\n path = os.path.join(wit_path, '.wit', 'images', item)\n if os.path.isdir(path):\n names.append(item)\n return names", "def _getAllWorklistNames(self):\n log.debug(\"Finding all worklists mentioned in this statemachine.\")\n worklists = {}\n names = [s.getTaggedValue('worklist')\n for s in self.sm.getStates(no_duplicates = 1)\n if s.getTaggedValue('worklist')]\n for name in names:\n worklists[name] = 'just filtering out doubles'\n result = worklists.keys()\n log.debug(\"Found the following worklists: %r.\", result)\n return result", "def getLocalPluginNames():\r\n return [os.path.basename(f) for f in glob(buildPath('*.dll'))]", "def names(self):\n return self.__names", "def getAllXsSuffixes(self):\n return sorted(set(b.getMicroSuffix() for b in self.getBlocks()))", "def list_minerals():\n return _list_tindyb_unique_values(\"name\", dbpath=__dbpath__)", "def get_names(self):\n return self.__names", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self):\n return [da.name for da in self]", "def resource_names(self) -> 
pulumi.Output[Optional[Mapping[str, Sequence[str]]]]:\n return pulumi.get(self, \"resource_names\")", "def list_building_names(self):\n return self.building_names", "def get_manifest(self):\n return self.manifest", "def names_for(self, name):\n names = [\"%s.%s\" % (self.package, name)]\n if self.prefix:\n names.append(\"%s.%s\" % (self.prefix, names[0]))\n return names", "def get_manifest_extensions_mapping(self):\n return {\"stitching-element\":\n \"http://example.com/stitching-element\"} # /manifest.xsd", "def get_available_data_asset_names(self) -> List[str]:\n raise NotImplementedError", "def get_chebi_synonyms(chebi_ent):\n if hasattr(chebi_ent, 'Synonyms'):\n return [entry.data for entry in chebi_ent.Synonyms]\n else:\n return []", "def nombresHojas(libro):\r\n return [sh.name for sh in libro.sheets()]", "def return_names(self):\n return self.__name_list", "def head_pump_name_list(self):\n return list(self._link_reg.head_pump_names)", "def name(self):\n\n return self.manifest[\"name\"]", "def names(self):\n return [x for x in self._dict.keys()]", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def resource_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resource_names\")", "def resource_names(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"resource_names\")", "def list_builders(self) -> List[str]:\n return sorted(_iter_builder_names(self._ns2data_dir))", "def getModuleNames():\n import setup\n names = [e.name[1:] for e in setup.wxpExtensions]\n return names", "def namelist(self):\n return []", "def get_dashboard_names(cls):\n dashboards = cls._get_all_dashboards()\n return [str(dboard[\"filename\"]) for dboard in dashboards]", "def stl_names(self):\n return [stl.member.get_full_name() for stl in self.stls.all()]", "def get_names(dep):\n res = [dep.name]\n return res", "def iter_strands(self):\n return iter(self.strand_list)", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def list_available_strains(self):\n return [strain for strain in self.sample_dict]", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def tank_name_list(self):\n return list(self._node_reg.tank_names)", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def get_startup_extensions(self):\n final_list = []\n for entry in self.bot_data_file[\"startup_extensions\"]:\n final_list.append(str(entry[\"name\"]))\n return final_list", "def bsw_getAllAssetNames(astType, episode=None):\n # get basic inputs.\n basePath = ProjectNamingInputs().basePath\n projectName = ProjectNamingInputs().projectName\n assetType = ProjectNamingInputs().assetType\n projectType = ProjectNamingInputs().projectType\n if projectType == 'series':\n assetDirectory = '{basePath}/{projectName}/01_pre/{assetType}/{episode}/' \\\n .format(basePath=basePath, projectName=projectName, assetType=assetType[astType], episode=episode)\n else:\n assetDirectory = '{basePath}/{projectName}/01_pre/{assetType}/' \\\n .format(basePath=basePath, projectName=projectName, assetType=assetType[astType])\n # if asset directory is not found then return message.\n if os.path.exists(assetDirectory):\n allAstNames = [each for each in 
os.listdir(assetDirectory) if os.path.isdir(os.path.join(assetDirectory, each))]\n else:\n allAstNames = ['asset is not found']\n return allAstNames", "def get_words_from_sysets(synset):\n synlist = []\n for s in synset:\n syns = s.lemmas()[0].name()\n synlist.append(syns)\n return synlist", "def getAssemblies(pth):\n if pth.lower().endswith(\".manifest\"):\n return []\n # check for manifest file\n manifestnm = pth + \".manifest\"\n if os.path.isfile(manifestnm):\n with open(manifestnm, \"rb\") as fd:\n res = {RT_MANIFEST: {1: {0: fd.read()}}}\n else:\n # check the binary for embedded manifest\n try:\n res = GetManifestResources(pth)\n except winresource.pywintypes.error as exc:\n if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:\n logger.info('Cannot get manifest resource from non-PE '\n 'file %s', pth)\n return []\n raise\n rv = []\n if RT_MANIFEST in res and len(res[RT_MANIFEST]):\n for name in res[RT_MANIFEST]:\n for language in res[RT_MANIFEST][name]:\n # check the manifest for dependent assemblies\n try:\n manifest = Manifest()\n manifest.filename = \":\".join([pth, str(RT_MANIFEST),\n str(name), str(language)])\n manifest.parse_string(res[RT_MANIFEST][name][language],\n False)\n except Exception as exc:\n logger.error(\"Can not parse manifest resource %s, %s\"\n \" from %s\", name, language, pth, exc_info=1)\n else:\n if manifest.dependentAssemblies:\n logger.debug(\"Dependent assemblies of %s:\", pth)\n logger.debug(\", \".join([assembly.getid()\n for assembly in\n manifest.dependentAssemblies]))\n rv.extend(manifest.dependentAssemblies)\n return rv" ]
[ "0.6490598", "0.61462444", "0.6061251", "0.6059069", "0.59545153", "0.59168273", "0.5911148", "0.5903504", "0.5897712", "0.5851716", "0.58406675", "0.5829558", "0.58154243", "0.5781835", "0.57704556", "0.57661283", "0.57341903", "0.56977165", "0.56977165", "0.56939626", "0.5673895", "0.56689405", "0.5650809", "0.5650809", "0.5626021", "0.5612284", "0.55900735", "0.5579343", "0.5575883", "0.55578464", "0.55495936", "0.5538037", "0.55195975", "0.5518561", "0.549977", "0.54934454", "0.54904467", "0.54861504", "0.5473182", "0.5470953", "0.5431308", "0.542964", "0.54272795", "0.54192716", "0.5418092", "0.5402415", "0.54015356", "0.5393227", "0.53920037", "0.5384717", "0.5384717", "0.5384717", "0.5381068", "0.53771704", "0.5375933", "0.53716", "0.5370175", "0.536531", "0.5353076", "0.5349269", "0.534405", "0.5336683", "0.5334404", "0.53322655", "0.53313214", "0.5314561", "0.5314561", "0.5307938", "0.5298948", "0.5283195", "0.5268926", "0.52688473", "0.5265227", "0.526392", "0.52555835", "0.5243347", "0.5241348", "0.52382857", "0.52369785", "0.5225406", "0.52213395", "0.52213395", "0.52176785", "0.52176785", "0.5210682", "0.5209672", "0.5208581", "0.5208516", "0.52082384", "0.52056503", "0.5194234", "0.51907253", "0.5189849", "0.5181321", "0.5178837", "0.5178417", "0.5169882", "0.5169874", "0.51693195", "0.51668674" ]
0.7861807
0