query      : string  (lengths 9 – 9.05k)
document   : string  (lengths 10 – 222k)
negatives  : list    (lengths 19 – 20)
metadata   : dict
Return the two ends of an iterable
def ends(iter):
    li = list(iter)
    return li[0], li[-1]
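A minimal usage sketch: because ends materialises the whole iterable into a list first, it works on any finite iterable (including one-shot iterators) but should not be fed an endless generator.

    >>> ends(range(5))
    (0, 4)
    >>> ends(['a', 'b', 'c'])
    ('a', 'c')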
[ "def _tails(iterable: Iterable[T], *, num_from_each_tail=Union[int, Tuple[int, int]]) -> Tuple[List[T], List[T], int]:\n num_start, num_end = (num_from_each_tail, num_from_each_tail) if isinstance(num_from_each_tail, int) else num_from_each_tail\n iterator = iter(iterable)\n start = list(it.islice(iterator, 0, num_start)) # Convert to list before next line iterates more\n end, num_omitted = _tail(iterator, num_end)\n return start, end, num_omitted", "def every_other(iterable):\n items = iter(iterable)\n while True:\n try:\n yield next(items)\n next(items)\n except StopIteration:\n return", "def every_other_item(my_list):\n # return a slice of the list that skips every 2nd number\n\n every_other_item = my_list[::2]\n \n return every_other_item", "def reverse_enumerate(iterable):\n return izip(reversed(range(len(iterable))), reversed(iterable))", "def intersperse(sep, iterable):\n\n for i, item in enumerate(iterable):\n if i:\n yield sep\n yield item", "def _tail(inp: Iterable[T], num_from_each_tail=int) -> Tuple[List[T], int]:\n assert num_from_each_tail >= 0\n iterable = iter(inp)\n res = collections.deque(it.islice(iterable, 0, num_from_each_tail))\n res\n num_omitted = 0\n for x in iterable:\n res.append(x)\n res.popleft()\n num_omitted += 1\n return list(res), num_omitted", "def even(generator):\n return (i for i in generator if i % 2 == 0)", "def v5_tail(iterable, n):\n items = []\n if n <= 0:\n return []\n for item in iterable:\n items = [*items[-(n-1):], item]\n return items", "def this_and_prev(iterable):\n try:\n item = next(iterable)\n while True:\n next_item = next(iterable)\n yield item, next_item\n item = next_item\n except StopIteration:\n return", "def filter_even(iterable):\n return imap(lambda i: iterable[i],filter(lambda i: i%2 == 0,range(len(iterable))))", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def v7_tail(iterable, n):\n items = []\n if n == 1:\n for item in iterable:\n items = [item]\n elif n > 0:\n for item in iterable:\n items = [*items[-n+1:], item]\n return items", "def reverse_enumerate(iterable):\n\t# Lifted from http://galvanist.com/post/53478841501/python-reverse-enumerate\n\treturn itertools.izip(reversed(xrange(len(iterable))), reversed(iterable))\n\t# Alternative python3 version:\n\t# return zip(reversed(range(len(iterable))), reversed(iterable))", "def lookahead(iterable):\n # Get an iterator and pull the first value.\n it = iter(iterable)\n last = next(it)\n # Run the iterator to exhaustion (starting from the second value).\n for val in it:\n # Report the *previous* value (more to come).\n yield last, True\n last = val\n # Report the last value.\n yield last, False", "def v6_tail(iterable, n):\n items = []\n if n <= 0:\n return []\n for item in iterable:\n if n == 1:\n items = [item]\n else:\n items = [*items[-n+1:], item]\n return items", "def iter_pairs(l, last=True):\r\n i = iter(l)\r\n b = i.next()\r\n done = 0\r\n while not done:\r\n a = b\r\n try:\r\n b = i.next()\r\n except StopIteration:\r\n if not last:\r\n raise\r\n b = None\r\n done = 1\r\n yield a, b", "def double_range(limit1, limit2): #y - x\n for i1 in range(limit1):\n for i2 in range(limit2):\n yield i1, i2", "def reverse(iterator):\n for i in iterator:\n yield from reverse(iterator)\n yield i", "def 
pairs(seq):\n iterable, copied = tee(seq)\n next(copied)\n for x, y in zip(iterable, copied):\n yield x, y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a Course object parsed from a file
def from_file(cls, fn):
    fp = os.path.join('courses', fn)
    with open(fp, 'r') as f:
        lines = f.readlines()
    name = os.path.splitext(fn)[0]
    start, stop = map(date.fromisoformat, lines[0].split())
    nbr_of_exams = int(lines[1].rstrip())
    exercises = [f'{chapter.rstrip()}.{exercise}'
                 for (chapter, exercises) in grouped(lines[2:], 2)
                 for exercise in exercises.split()]
    return cls(name, start, stop, nbr_of_exams, exercises)
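The grouped helper this parser leans on is not shown in the source; below is a minimal sketch with the behaviour the comprehension appears to assume, namely consecutive, non-overlapping pairs of lines. The file layout is likewise inferred: first line the ISO start and stop dates, second line the number of exams, then alternating chapter / exercise-list lines.

    def grouped(iterable, n):
        # Yield consecutive, non-overlapping tuples of n items, e.g.
        # grouped(['1', '3 7 12', '2', '1 5'], 2) -> ('1', '3 7 12'), ('2', '1 5')
        args = [iter(iterable)] * n
        return zip(*args)

Under that assumption, a hypothetical courses/algebra.txt whose lines read '2024-02-01 2024-03-15', '2', '1', '3 7 12', '2', '1 5' would parse to the name 'algebra', two exams, and the exercises ['1.3', '1.7', '1.12', '2.1', '2.5'].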
[ "def load_courses(self, file):\n with open(file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n self.courses.append(Course(row[0], row[1], row[2],row[3],row[4],row[5],row[6],row[7]))\n\n # removing the first row because it contains column headers\n self.courses.pop(0)", "def read_course_file(file):\n with file.open() as f:\n for line in f:\n # if line is empty, continue\n if not line:\n continue\n\n # normalize the string to upper case + trimmed\n course = line.replace('\\n', '').strip().upper()\n courses_to_search.append(course)", "def loadcourseinfo(json_file):\n courseurl = config.SERVER_URL + '/datasources/course_structure/' + json_file\n courseinfofile = urllib2.urlopen(courseurl)\n if courseinfofile:\n courseinfo = json.load(courseinfofile)\n return courseinfo\n return None", "def courses(cls):\n for fn in os.listdir('courses'):\n yield cls.from_file(fn)", "def loadcourseinfo(self, json_file):\n print self\n courseurl = config.SERVER_URL + '/datasources/course_structure/' + json_file\n print \"ATTEMPTING TO LOAD \"+courseurl\n try:\n courseinfofile = urllib2.urlopen(courseurl)\n if courseinfofile:\n courseinfo = json.load(courseinfofile)\n return courseinfo\n except urllib2.HTTPError as e:\n print \"Failed to load %s: %s \" % (courseurl, e.message)\n return None", "def load_course_from_fixture(model, filename):\n\n with (fixture_path / filename).open() as f:\n renderer = DummyRenderer(**yaml.safe_load(f))\n course = models.Course.load_local(\n parent=model,\n repo_info=get_local_repo_info('/dummy'),\n slug='courses/complex',\n renderer=renderer,\n )\n model.add_course(course)\n return course", "def parse_course(course_node, campus):\n (s, *remaining) = course_node.itertext()\n\n p = re.compile(r'^([A-Z& ]+) (\\d+) (.+) \\((.+).*\\)(.*)$')\n m = p.match(s)\n if not m:\n logging.warning('Unable to parse title: %s', s)\n return\n\n department = m.group(1)\n code = m.group(2)\n title = titlecase.titlecase(m.group(3))\n crs = parse_credits(m.group(4))\n knowledge_areas = sorted([j.strip() for j in re.split(',|/', m.group(5))])\n prerequisites = parse_prerequisites(\n ''.join([j for j in remaining if 'Prerequisite:' in j]))\n offered = parse_offered(''.join([j for j in remaining if 'Offered:' in j]))\n return Course(campus, department, code, title, crs, knowledge_areas,\n prerequisites, offered)", "def load_file(filename, attributes):\n \n r = Rel(attributes)\n \n for line in open(\"ccat.txt\"):\n r.add_tuple(tuple(line.strip().split()))\n \n return r", "def __loadFromFile(self):\n try:\n f = open(self.__fName, \"r\")\n except IOError:\n #file not exist\n return\n line = f.readline().strip()\n rez = []\n while line!=\"\":\n attrs = line.split(\";\")\n st = Student(attrs[0], attrs[1], Address(attrs[2], attrs[3], attrs[4]))\n rez.append(st)\n line = f.readline().strip()\n f.close()\n return rez", "def __loadFromFile(self):\r\n try:\r\n f=open(self.__fileName,\"r\")\r\n except IOError:\r\n raise RepositoryError()\r\n linie = f.readline().strip()\r\n while linie!=\"\":\r\n part=linie.split(\",\")\r\n st=Student(part[0],part[1])\r\n self.__listStudents.append(st)\r\n linie=f.readline().strip()\r\n f.close()", "def get_courses():\n basedir = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(basedir, 'data.json')) as file:\n data = json.load(file)\n return data['courses']", "def parse_course(c):\n course_model = {}\n # Data in first row\n course_model['code'] = str(c.contents[2].contents[0].contents[0].string).strip()\n 
course_model['name'] = str(c.contents[3].contents[0].contents[0].string).strip()\n course_model['credits'] = str(c.contents[4].contents[0].contents[0].string).strip()\n\n row = c.next_sibling\n \n course_model['times'] = []\n \n # Data in following rows of the same course\n while (type(row) == Tag and row.has_attr('bgcolor') and row['bgcolor'] != \"LightBlue\" and not (row['bgcolor'] == \"White\" and not row.has_attr('align'))):\n row_data = row.contents\n\n # Prerequisite row, if present\n if str(row_data[2].contents[0].string).find(\"Prerequisite:\") != -1:\n course_model['prereq'] = str(row_data[3].contents[0].string).strip()\n\n # Special note row, if present\n elif str(row_data[2].contents[0].string).find(\"Special Note:\") != -1:\n course_model['special_note'] = str(row_data[3].contents[0].string).strip()\n\n # Row containing a session, name, time, location, prof\n elif len(row_data) > 5 and row_data[2].contents[0].contents and len(row_data) > 5 and (str(row_data[2].contents[0].contents[0].string) == \"/1\" or str(row_data[2].contents[0].contents[0].string) == \"/2\" or str(row_data[2].contents[0].contents[0].string) == \"/3\" or str(row_data[2].contents[0].contents[0].string) == \"/4\"):\n time = parse_time(row_data)\n course_model['times'].append(time)\n\n row = row.next_sibling\n return course_model", "def parseFile(filename):\n game = slippi.Game(filename)\n return game", "def load_courses(filename):\n course_ids = set()\n try:\n in_file = open(filename, mode=\"r\")\n for line in in_file:\n if line and not line.startswith(\"#\"):\n course_ids.add(line.strip())\n except IOError:\n logging.error(\"Can't read file %s\", filename)\n finally:\n in_file.close()\n return course_ids", "def from_file(cls, file=None):\n file = file or cls.file\n file = (line for line in open(file)\n if len(line) > 2) # skip empty lines\n\n def assert_match(regex, line, fail_msg):\n m = re.match(regex, line)\n if not m:\n raise ParseError(fail_msg + ': ' + line)\n\n assert_match(r' {10,}Todo', next(file), 'No Todo')\n assert_match(r'-{10,}', next(file), 'No Todo line')\n\n def parse(line_class, file, break_word):\n break_re = re.compile(r' {10,}%s' % break_word)\n for line in file:\n if break_re.match(line):\n assert_match(r'-{10,}', next(file), 'No line')\n return\n yield line_class.from_string(line)\n\n if break_word is not None:\n # If we get here, we never found the next header.\n raise ParseError('Missing section: ' + break_word)\n\n todo = ((t.name, t) for t in parse(Task, file, 'Time'))\n time = parse(Entry, file, 'Done')\n done = ((t.name, t) for t in parse(Task, file, None))\n\n sheet = TymeSheet(todo, time, done)\n sheet._validate()\n return sheet", "def parse(filename):\n return Solution(filename)", "def fromFile(tc_file_path, tc_name='', desc='', jid=''):\n with open(tc_file_path, 'r') as tcf:\n return TestCase.fromString(tcf.read(), tc_name, desc, jid)", "def loadSubjects(filename):\n\n #loadSubjects(\"shortened_subjects.txt\")\n \n # The following sample code reads lines from the specified file and prints\n # each one.\n\n inputFile = open(filename)\n subject_dict = {}\n parse_list = []\n # read the file, remove formatting, split 3nth elements into lists of vals\n for line in inputFile:\n #print line\n line = line.strip()\n parse_list.append(line.split(','))\n # build a dictionary from lists with course: value, work as key:val pairs\n for tlist in parse_list:\n #print tlist\n subject_dict[tlist[0]] = int(tlist[1]), int(tlist[2])\n\n return subject_dict", "def load(filename):\n\t\tbuffer = 
[];\n\t\tb64_contents = \"\";\n\t\ttry:\n\t\t\thandle = open(filename, \"r\");\n\t\t\traw_contents = handle.readlines();\n\t\t\tfor line in raw_contents:\n\t\t\t\tif line.startswith(\"----\"):\n\t\t\t\t\tcontinue\n\t\t\t\tb64_contents += line.strip();\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Failed to read PEM file: \" + str(e));\n\t\tbuffer = b64decode(b64_contents);\n\t\treturn X509v3Certificate(buffer);" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse each course file in the courses directory into a Course object and yield it.
def courses(cls):
    for fn in os.listdir('courses'):
        yield cls.from_file(fn)
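A hedged usage sketch, assuming courses is a classmethod on the same Course class that from_file belongs to (the cls parameter suggests as much) and that the constructor stores name and exercises under those attribute names:

    for course in Course.courses():
        print(course.name, len(course.exercises))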
[ "def read(cls, filename):\n for item in cls().parse(filename):\n yield item", "def load_courses(self, file):\n with open(file) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n self.courses.append(Course(row[0], row[1], row[2],row[3],row[4],row[5],row[6],row[7]))\n\n # removing the first row because it contains column headers\n self.courses.pop(0)", "def iter(self):\n for seq in SeqIO.parse(self.filename, 'fasta'):\n yield Read(seq.id, str(seq.seq))", "def iter_sentences(self):\n self.download()\n for filename in FILENAMES:\n full_filename = join(self.data_directory(), filename)\n lcc_file = LCCFile(full_filename)\n for sentence in lcc_file.iter_sentences():\n yield sentence", "def process_course(course):\n course_title, course_link = course\n print()\n print(\"PROCESSING COURSE \", course)\n soup = soup_autolab(course_link)\n assns = get_assns(soup)\n for assn in assns:\n process_assn(assn, course_title)", "def get_courses():\n basedir = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(basedir, 'data.json')) as file:\n data = json.load(file)\n return data['courses']", "def get_courses(self) -> dict:\n\n courses = {}\n next_page_token = None\n\n while True:\n response = self.service.courses().list(\n pageToken=next_page_token,\n fields=\"nextPageToken,courses(name,id)\"\n ).execute()\n\n courses.update({course[\"name\"] : course[\"id\"] for course in response[\"courses\"]})\n\n next_page_token = response.get(\"nextPageToken\")\n\n if next_page_token is None:\n break\n\n return courses", "def parse_all_courses(username: str, password: str,\n parsed_users: typing.List[dict]):\n\n # get all the components\n components = component.available()\n\n # find out the fields we need\n fields = set()\n for c in components:\n fields.update(c.fields)\n\n # get all the students with the given fields\n courses = queries.get_all_courses(username, password)\n\n if courses is None:\n return None\n\n # parse all the users individually\n return list(\n map(lambda c: parse_course(c, parsed_users, components), courses))", "def iteritems(self):\n\n number_of_cmty = 0\n converter = self.converter\n names = self.cmtynames()\n label = self.label\n\n file = open(os.path.join(self.abspath_dir, self.fname), 'rU')\n\n cmty_id = 0\n for lineno, line in enumerate(file):\n line = line.strip()\n if not line: continue\n if line[0] == '#':\n if not label and line.startswith('# label: '):\n self.label = line[9:].strip() # maxsplit of 1\n if names is None and line.startswith('# community names: '):\n names = self._cmtynames = line[19:].strip().split()\n # Comment line, do not parse it.\n continue\n\n # If we found labels, then use that as the name of the\n # community. 
This is written out write_clusters.\n if names: cname = names[cmty_id]\n else: cname = cmty_id\n\n nodes = set(converter(x) for x in line.split())\n number_of_cmty += 1\n\n yield cname, nodes\n cmty_id += 1\n\n # Insert this into dict directly, since q is a property of a\n # superclass and doesn't support item assignment.\n self.__dict__['q'] = number_of_cmty", "def _get_courses(cls, spec, fields=None):\n try:\n cursor = cls.coll.find(\n spec, fields, sort=[('_id', ASCENDING)])\n\n courses = yield cursor.to_list(None)\n return courses\n\n except TypeError as te:\n if not isinstance(spec, dict):\n raise NotDictError('spec') from te\n\n if not isinstance(fields, (dict, list)) and \\\n fields is not None:\n e = TypeError(\n 'The fields parameter should be a '\n 'dictionary or a list.'\n )\n raise e from te\n\n else:\n raise", "def get_courses(self):\n return self.q(css='ul.listing-courses .course-item')", "def _discover_courses(session, di, lvl, total):\n query_string = '|'.join((f'di-{di}', # Discipline\n 'en-3002', # Don't know what this is, could be a mechanism for rate limiting\n f'lv-{lvl}', # Degree level\n 'tc-EUR', # Currency\n 'uc-30', # Don't know what this is\n 'ur-38')) # Don't know what this is\n n_pages = (total // PAGE_SIZE) + (total % PAGE_SIZE > 0)\n for page in range(0, n_pages):\n r = session.get(SEARCH_URL, params={'start': page*PAGE_SIZE, 'q': query_string})\n r.raise_for_status()\n for course in r.json():\n # Don't double count sublevels (e.g. preparation is a level & also incl under bachelor)\n if course['level'] != lvl:\n continue\n yield course", "def load_local_courses(self, path):\n self.repo_info = get_local_repo_info(path)\n\n self_study_course_path = path / 'courses'\n run_path = path / 'runs'\n lesson_path = path / 'lessons'\n\n if not lesson_path.exists():\n # At least one of 'runs' or 'courses' should exist for any courses\n # to be loaded. 
But \"lessons\" needs to exist either way.\n raise FileNotFoundError(lesson_path)\n\n def _load_local_course(course_path, slug, canonical_if_local=False):\n link_path = course_path / 'link.yml'\n if link_path.is_file():\n with link_path.open() as f:\n link_info = yaml.safe_load(f)\n checked_url = '{repo}#{branch}'.format(**link_info)\n if any(\n fnmatch(checked_url, l) for l in self.trusted_repo_patterns\n ):\n course = Course.load_remote(\n slug, parent=self, link_info=link_info,\n )\n self.add_course(course)\n else:\n logger.debug(f'Untrusted repo: {checked_url}')\n if (course_path / 'info.yml').is_file():\n course = Course.load_local(\n slug, parent=self, repo_info=self.repo_info, path=path,\n canonical=canonical_if_local,\n )\n self.add_course(course)\n\n if self_study_course_path.exists():\n for course_path in self_study_course_path.iterdir():\n slug = 'courses/' + course_path.name\n _load_local_course(course_path, slug, canonical_if_local=True)\n\n if run_path.exists():\n for year_path in sorted(run_path.iterdir()):\n if year_path.is_dir():\n self.explicit_run_years.add(int(year_path.name))\n for course_path in year_path.iterdir():\n slug = f'{year_path.name}/{course_path.name}'\n _load_local_course(course_path, slug)\n\n self.add_course(Course.load_local(\n 'lessons',\n repo_info=self.repo_info,\n canonical=True,\n parent=self,\n path=path,\n ))\n\n self_study_order_path = self_study_course_path / 'info.yml'\n if self_study_order_path.exists():\n with (path / 'courses/info.yml').open() as f:\n course_info = yaml.safe_load(f)\n self.featured_courses = [\n self.courses[f'courses/{n}'] for n in course_info['order']\n ]\n\n self.edit_info = self.repo_info.get_edit_info('.')\n self.runs_edit_info = self.repo_info.get_edit_info('runs')\n self.course_edit_info = self.repo_info.get_edit_info('courses')", "def read_course_file(file):\n with file.open() as f:\n for line in f:\n # if line is empty, continue\n if not line:\n continue\n\n # normalize the string to upper case + trimmed\n course = line.replace('\\n', '').strip().upper()\n courses_to_search.append(course)", "def iter_lines(module):\n for filename in module_files(module):\n with open(filename) as f:\n yield from f", "def parse(self,response):\n self.add_qipu_list(response)\n for href in response.css('.courselist ul li.c a::attr(href)'):\n url = response.urljoin(href.extract()).replace('/..','')\n yield scrapy.Request(url, callback=self.parse_qipu_text)", "def list_courses(context: SolidExecutionContext) -> List[String]:\n access_token = get_access_token(\n client_id=context.solid_config[\"edx_client_id\"],\n client_secret=context.solid_config[\"edx_client_secret\"],\n edx_url=context.solid_config[\"edx_base_url\"],\n token_type=context.solid_config[\"edx_token_type\"],\n )\n course_ids = []\n course_id_generator = get_edx_course_ids(\n context.solid_config[\"edx_base_url\"],\n access_token,\n page_size=context.solid_config[\"edx_course_api_page_size\"],\n )\n for result_set in course_id_generator:\n course_ids.extend([course[\"id\"] for course in result_set])\n yield ExpectationResult(\n success=bool(course_ids),\n label=\"edx_course_list_not_empty\",\n description=\"Ensure course list is not empty.\",\n metadata_entries=[\n EventMetadataEntry.text(\n text=str(len(course_ids)),\n label=\"number_of_course_ids\",\n description=\"The number of course IDs retrieved from the course API.\",\n )\n ],\n )\n yield Output(course_ids, \"edx_course_ids\")", "def gen_lines(files):\n for file in files:\n with open(file, \"r\") as f:\n yield from 
f", "def Scan(self):\n line = self.file_obj.readline()\n while line:\n yield self.__scanner.scan(line)\n line = self.file_obj.readline()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Chunk the exercises over a stretch of days
def chunk_over_days(self):
    # - 1 for full-day repetition
    # - nbr_of_exams for studying exams
    return self._chunk_over_days(self.duration - self.nbr_of_exams - 1)
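A worked instance of that arithmetic with hypothetical values (duration and nbr_of_exams are attributes set elsewhere and not shown here): a 14-day course with 2 exams chunks its exercises over

    14 - 2 - 1 = 11 days

leaving one day for full-day repetition and two days for studying the exams, as the comments state.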
[ "def _chunk_over_days(self, days):\n x = len(self.exercises) # see docs\n d = x % days # see docs\n n = x // days # see docs\n\n sliced_at = (days - d) * n\n pt1 = self.exercises[:sliced_at]\n pt2 = self.exercises[sliced_at:]\n\n return list(grouped(pt1, n)) + list(grouped(pt2, n + 1))", "def _generate_extra_rounds(self) -> None:\n for doc in self.index:\n if \"Rounds\" in doc:\n if doc[\"Rounds\"][0] != \"normal\":\n exercise_template = copy.deepcopy(doc[\"Exercises\"])\n roundskip = 0\n else:\n exercise_template = doc[\"Exercises\"]\n roundskip = 1\n else:\n doc[\"Rounds\"] = [\"normal\"]\n roundskip = 1\n\n for eround in doc[\"Rounds\"][roundskip:]:\n tmp_exercises = copy.deepcopy(exercise_template)\n if eround == \"transpose random\":\n transpose: Union[bool, str] = \"random\"\n else:\n transpose = False\n\n for exer in tmp_exercises:\n if transpose == \"random\":\n transpose_key = get_random_transpose_key()\n exer[\"transpose\"] = transpose_key\n for alt in exer[\"confusers\"]:\n alt[\"transpose\"] = transpose_key\n elif transpose: # Unknown transpose type\n assert False\n\n doc[\"Exercises\"].extend(tmp_exercises)", "def test_with_remainder(self):\n data = range(21)\n grouped = util.make_even_groups(data, 5)\n self.assertEqual(len(grouped), 4)\n for group in grouped:\n self.assertEqual(len(group), 5)\n full = sorted(flatten(grouped))\n self.assertEqual(full, data[:-1])", "def __extract_samples_exercise(self, exercise: Exercise) -> List[MoneySample]:\n major_earned = exercise.major_earned()\n minor_earned = exercise.minor_earned()\n\n return [self.__extract_single_sample(exercise, player, major_earned, minor_earned) for player in exercise.alive]", "def next_round(self):\r\n\t\ttesting_round = []\r\n\t\tif self.schedule[self.day]:\r\n\t\t\ttesting_round = next(self._tested_chunks)\r\n\t\tself.day = (self.day + 1) % len(self.schedule)\r\n\t\treturn testing_round", "def do_exercise(self, group, i, reps):\n for j in range(reps):\n for pos in self.conf[\"exercises\"][group][i]:\n args = (pos[\"head\"], pos[\"torso\"], pos[\"arms\"], pos[\"speed\"])\n self.go_to_position(*args)", "def split_into_sections(self, formats=None):\n if formats is None:\n formats = ['html', 'xhtml', 'mobile', 'html5']\n\n for form in formats:\n if 'tex' in form:\n continue\n if form == 'xhtml':\n ext = '.xhtml'\n else:\n ext = '.html'\n\n chapterfilepath = os.path.join('build', form, self.file + ext)\n\n with open(chapterfilepath) as chapterfile:\n html = etree.HTML(chapterfile.read())\n # add unique IDs to all the section titles.\n html = add_unique_ids(html)\n # make a copy of the html, want to use as template.\n html_template = copy.deepcopy(html)\n for bodychild in html_template.find('.//body'):\n bodychild.getparent().remove(bodychild)\n\n if form != 'html5':\n # build up a list of the sections\n sections = []\n chapter = [c.getparent() for c in html.findall('.//div[@class=\"section\"]/h1')][0]\n\n thissection = []\n for child in chapter:\n if (child.tag != 'div'):\n thissection.append(child)\n else:\n if len(child) == 0:\n pass\n elif (child[0].tag == 'h2') or (child.attrib.get('class') == 'exercises'):\n thissection.append(child)\n sections.append(thissection)\n thissection = []\n else:\n thissection.append(child)\n else:\n # build up a list of the sections\n sections = []\n try:\n chapter = [c.getparent() for c in html.findall('.//section[@class=\"section\"]/h1')][0]\n except IndexError:\n continue\n\n thissection = []\n for child in chapter:\n if (child.tag != 'section'):\n thissection.append(child)\n 
else:\n if len(child) == 0:\n pass\n elif (child[0].tag == 'h2'):\n thissection.append(child)\n sections.append(thissection)\n thissection = []\n else:\n thissection.append(child)\n #sections.append(thissection)\n # write each section to a separate file\n for num, section in enumerate(sections):\n template = copy.deepcopy(html_template)\n body = template.find('.//body')\n for child in section:\n body.append(child)\n secfilename = self.file.replace('.cnxmlplus',\n '-{:02d}.cnxmlplus'.format(num))\n secfilepath = os.path.join('build', form, secfilename + ext)\n\n # add css to head\n css = '<link rel=\"stylesheet\" type=\"text/css\" href=\"css/stylesheet.css\"></link>'\n css = etree.fromstring(css)\n template.find('.//head').append(css)\n\n with open(secfilepath, 'w') as outfile:\n outfile.write(etree.tostring(template))\n\n # remove the original html\n os.remove(chapterfilepath)\n # create the ToC file.\n self.create_toc(os.path.dirname(chapterfilepath))", "def test_day_24_puzzle_solve():\n with open('tests/test_data/day_24_part_1_test.txt') as local_file:\n read_data = local_file.read()\n example_result = day_24_puzzle_solve(read_data)\n print(example_result)\n assert example_result['Part 1'] == 10\n assert example_result['Part 2'] == 2208", "def exercises_from_workout(workout_id):\n\n exercises_from_workout = Workout_exercise.query.filter(Workout_exercise.workout_id == workout_id).all()\n \n return exercises_from_workout", "def chunk(elist, size):\n for i in range(0, len(elist), size):\n yield elist[i:i + size]", "def chunks(collection, chunkSize):\n \n for i in range(0, len(collection), chunkSize):\n yield collection[i:i + chunkSize]", "def split_input_into_sections(input_objects, outputdir, num_divisions=20, \n verbose=True, filespec='division%s.gz', opener=GzipFile):\n import itertools\n num_lines = 0\n input_objects = [open_file_or_filename(obj) for obj in input_objects]\n temp_output = tempfile.NamedTemporaryFile(mode='r+w')\n for line in itertools.chain(input_objects):\n num_lines += 1\n temp_output.write(line.rstrip() + '\\n')\n temp_output.seek(0)\n \n # the last section gets any extra lines\n section_size, extra_lines = divmod(num_lines, num_divisions)\n last_section_size = section_size + extra_lines\n \n if verbose:\n print \"Making %d divisions of input data...\" % num_divisions\n print num_divisions, num_lines\n print section_size, last_section_size\n print section_size * (num_divisions - 1)\n print (section_size * (num_divisions - 1)) + last_section_size\n \n # make a directory to put the division files\n mkdirparents(outputdir)\n \n # now we'll make a file for each division\n num_lines = 0\n current_division = -1 # this will get incremented soon\n current_division_file = None\n for line in temp_output:\n if (num_lines % section_size == 0) and \\\n (current_division < (num_divisions - 1)):\n current_division += 1\n div_filename = os.path.join(outputdir,\n filespec % zfill_by_num(current_division, num_divisions))\n if current_division_file:\n current_division_file.flush()\n current_division_file = opener(div_filename, mode='w')\n if verbose:\n print \"division\", current_division, \"at\", num_lines, \"lines\"\n num_lines += 1\n current_division_file.write(line)\n current_division_file.flush()\n \n return section_size, last_section_size", "def create_section():\n dummies = [DummyOperator(task_id=f'task-{i + 1}') for i in range(5)]\n\n with TaskGroup(\"inside_section_1\") as inside_section_1:\n _ = [DummyOperator(task_id=f'task-{i + 1}',) for i in range(3)]\n\n with 
TaskGroup(\"inside_section_2\") as inside_section_2:\n _ = [DummyOperator(task_id=f'task-{i + 1}',) for i in range(3)]\n\n dummies[-1] >> inside_section_1\n dummies[-2] >> inside_section_2", "def chunk(points: List[DesignPoint],\n size: int) -> Generator[List[DesignPoint], None, None]:\n\n for i in range(0, len(points), size):\n yield points[i:i + size]", "def split_by_episode(self, key: Optional[str] = None) -> List[\"SampleBatch\"]:\n\n assert key is None or key in [SampleBatch.EPS_ID, SampleBatch.DONES], (\n f\"`SampleBatch.split_by_episode(key={key})` invalid! \"\n f\"Must be [None|'dones'|'eps_id'].\"\n )\n\n def slice_by_eps_id():\n slices = []\n # Produce a new slice whenever we find a new episode ID.\n cur_eps_id = self[SampleBatch.EPS_ID][0]\n offset = 0\n for i in range(self.count):\n next_eps_id = self[SampleBatch.EPS_ID][i]\n if next_eps_id != cur_eps_id:\n slices.append(self[offset:i])\n offset = i\n cur_eps_id = next_eps_id\n # Add final slice.\n slices.append(self[offset : self.count])\n return slices\n\n def slice_by_terminateds_or_truncateds():\n slices = []\n offset = 0\n for i in range(self.count):\n if self[SampleBatch.TERMINATEDS][i] or (\n SampleBatch.TRUNCATEDS in self and self[SampleBatch.TRUNCATEDS][i]\n ):\n # Since self[i] is the last timestep of the episode,\n # append it to the batch, then set offset to the start\n # of the next batch\n slices.append(self[offset : i + 1])\n offset = i + 1\n # Add final slice.\n if offset != self.count:\n slices.append(self[offset:])\n return slices\n\n key_to_method = {\n SampleBatch.EPS_ID: slice_by_eps_id,\n SampleBatch.DONES: slice_by_terminateds_or_truncateds,\n }\n\n # If key not specified, default to this order.\n key_resolve_order = [SampleBatch.EPS_ID, SampleBatch.DONES]\n\n slices = None\n if key is not None:\n # If key specified, directly use it.\n if key == SampleBatch.EPS_ID and key not in self:\n raise KeyError(f\"{self} does not have key `{key}`!\")\n slices = key_to_method[key]()\n else:\n # If key not specified, go in order.\n for key in key_resolve_order:\n if key == SampleBatch.DONES or key in self:\n slices = key_to_method[key]()\n break\n if slices is None:\n raise KeyError(f\"{self} does not have keys {key_resolve_order}!\")\n\n assert (\n sum(s.count for s in slices) == self.count\n ), f\"Calling split_by_episode on {self} returns {slices}\"\n f\"which should in total have {self.count} timesteps!\"\n return slices", "def splitByDay(self, offset=0, skip_factor=0):\n\n print(\"Spliting DataFrame into daily chunks...\")\n\n self.data_x['date'] = pd.to_datetime(\n self.data_x['date'], format=\"%Y-%m-%d %H:%M:%S\")\n\n first_date = self.data_x.date.iloc[0]\n end_date = self.data_x.date.iloc[-1]\n\n list_of_day_dfs = []\n\n for result in perdelta(first_date+timedelta(hours=offset), end_date-timedelta(hours=offset), timedelta(hours=24+skip_factor)):\n print(result)\n mask = (self.data_x['date'] > result) & (\n self.data_x['date'] < (result+timedelta(hours=24)))\n list_of_day_dfs.append(self.data_x.loc[mask])\n\n print(len(list_of_day_dfs))\n return list_of_day_dfs", "def main():\n aoc_day = 5\n exercise_input = get_exercise_input_from_file(aoc_day)\n part_one, part_two = solution_part_one(exercise_input)\n print(\"Advent of Code part one:\", part_one)\n print(\"Advent of Code part two:\", solution_part_two(part_two))", "def get_num_n_day_slices(start_date, end_date, days_per_chunk):\n start = dateparser.parse(start_date)\n end = dateparser.parse(end_date)\n if (not isinstance(start, datetime)) or (not 
isinstance(end, datetime)):\n raise RuntimeError(\"invalid time strings\")\n td = end - start\n if td.days <= 0:\n raise RuntimeError(\"end_date must be after start_date\")\n n = td.days//days_per_chunk\n\n return n", "def _get_equipped_exercises(self, exercises: Dict[str, Equipment]) -> List[str]:\n equipped_exercises = []\n\n for ex, eq in exercises.items():\n if eq is None and not self._equipment_only:\n equipped_exercises.append(ex)\n continue\n if self._equipment:\n if Equipment.dumbbells in self._equipment and eq == Equipment.dumbbells:\n equipped_exercises.append(ex)\n elif Equipment.kettle_bell in self._equipment and eq == Equipment.kettle_bell:\n equipped_exercises.append(ex)\n elif Equipment.trx in self._equipment and eq == Equipment.trx:\n equipped_exercises.append(ex)\n elif Equipment.jump_rope in self._equipment and eq == Equipment.jump_rope:\n equipped_exercises.append(ex)\n\n random.shuffle(equipped_exercises)\n return equipped_exercises" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Chunk the exercises over a stretch of days. Imagine the list of exercises [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] to be chunked over 3 days: after doing a certain number of exercises each day (the day's chunk size), all exercises will be completed after 3 days.
def _chunk_over_days(self, days):
    x = len(self.exercises)  # see docs
    d = x % days             # see docs
    n = x // days            # see docs

    sliced_at = (days - d) * n
    pt1 = self.exercises[:sliced_at]
    pt2 = self.exercises[sliced_at:]

    return list(grouped(pt1, n)) + list(grouped(pt2, n + 1))
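Tracing the docstring's example of ten exercises over three days (a worked sketch; grouped is assumed to yield consecutive groups of the given size):

    x = 10, days = 3
    d = 10 % 3  = 1           (one chunk gets an extra exercise)
    n = 10 // 3 = 3           (base chunk size)
    sliced_at = (3 - 1) * 3 = 6
    pt1 = [1, 2, 3, 4, 5, 6]  -> grouped in threes: [1, 2, 3], [4, 5, 6]
    pt2 = [7, 8, 9, 10]       -> grouped in fours:  [7, 8, 9, 10]
    result: [[1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]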
[ "def chunk_over_days(self):\n \n # - 1 for full-day repetition\n # - nbr_of_exams for studying exams\n\n return self._chunk_over_days(self.duration - self.nbr_of_exams - 1)", "def chunk(elist, size):\n for i in range(0, len(elist), size):\n yield elist[i:i + size]", "def chunks(collection, chunkSize):\n \n for i in range(0, len(collection), chunkSize):\n yield collection[i:i + chunkSize]", "def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "def chunks(lst, amount):\n return [lst[i:i + amount] for i in range(0, len(lst), amount)]", "def repeat_or_chunk(data, chunk_size):\n if len(data) < chunk_size:\n repeats = chunk_size // len(data)\n if (repeats * len(data)) != chunk_size:\n logging.info('skipping something that does not divide four bars')\n data = []\n else:\n data = list(data) * repeats\n return [data]\n return chunk_iterator(data, chunk_size)", "def splitByDay(self, offset=0, skip_factor=0):\n\n print(\"Spliting DataFrame into daily chunks...\")\n\n self.data_x['date'] = pd.to_datetime(\n self.data_x['date'], format=\"%Y-%m-%d %H:%M:%S\")\n\n first_date = self.data_x.date.iloc[0]\n end_date = self.data_x.date.iloc[-1]\n\n list_of_day_dfs = []\n\n for result in perdelta(first_date+timedelta(hours=offset), end_date-timedelta(hours=offset), timedelta(hours=24+skip_factor)):\n print(result)\n mask = (self.data_x['date'] > result) & (\n self.data_x['date'] < (result+timedelta(hours=24)))\n list_of_day_dfs.append(self.data_x.loc[mask])\n\n print(len(list_of_day_dfs))\n return list_of_day_dfs", "def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]", "def slice_time_period_into_n(start_date, end_date, n):\n start = dateparser.parse(start_date)\n end = dateparser.parse(end_date)\n if (not isinstance(start, datetime)) or (not isinstance(end, datetime)):\n raise RuntimeError(\"invalid time strings\")\n td = end - start\n if td.days <= 0:\n raise RuntimeError(\"end_date must be after start_date\")\n days_per_chunk = td.days // n\n output_list = []\n for i in range(n):\n chunk_start = start + timedelta(days=(i*days_per_chunk))\n chunk_end = start + timedelta(days=((i+1)*days_per_chunk))\n ## unless we are in the last chunk, which should finish at end_date\n if i == n-1:\n chunk_end = end\n output_list.append((chunk_start.isoformat().split(\"T\")[0],\n chunk_end.isoformat().split(\"T\")[0]))\n return output_list", "def divide_list_in_n_equal_chunks(_list, n):\n for i in range(0, len(_list), n):\n yield _list[i : i + n]", "def get_num_n_day_slices(start_date, end_date, days_per_chunk):\n start = dateparser.parse(start_date)\n end = dateparser.parse(end_date)\n if (not isinstance(start, datetime)) or (not isinstance(end, datetime)):\n raise RuntimeError(\"invalid time strings\")\n td = end - start\n if td.days <= 0:\n raise RuntimeError(\"end_date must be after start_date\")\n n = td.days//days_per_chunk\n\n return n", "def _chunks(iterable, size=100):\n iterator = iter(iterable)\n for first in iterator:\n yield chain([first], islice(iterator, size - 1))", "def split(self, duration: Decimal = 1) -> List['TaskChunk']:\n assert self.duration > duration\n assert not self.finished\n\n relevant_chunks = TaskChunk.objects.filter(\n task__user_id=self.task.user_id,\n day=self.day, day_order__gte=self.day_order).order_by(\n 'day_order').select_for_update()\n\n # force evaluation of queryset\n relevant_chunks = list(relevant_chunks)\n\n new_chunk = TaskChunk.objects.create(\n task=self.task,\n day=self.day,\n 
day_order=self.day_order + 1,\n duration=self.duration - duration)\n self.duration = duration\n self.save(update_fields=('duration',))\n\n # update duration in relevant_chunks\n for chunk in relevant_chunks:\n if chunk.id == self.id:\n chunk.duration = self.duration\n\n # increase all future day orders\n for chunk in relevant_chunks:\n if chunk.pk == self.pk:\n continue\n chunk.day_order += 1\n chunk.save(update_fields=('day_order',))\n\n return [new_chunk] + relevant_chunks", "def Chunk(iterable, n, container=None):\n chunks = itf.chunked(iterable, n)\n return map(container, chunks) if container else chunks", "def chunks(l, n):\n \n if n<1:\n n=1\n return [l[i:i+n] for i in range(0, len(l), n)]", "def do_chunkify(lst,n):\n return [lst[i::n] for i in range(n)]", "def IterChunks(iterable, chunk_size, fill=None):\n for _, group in itertools.groupby(\n enumerate(iterable), lambda pair: pair[0] // chunk_size\n ):\n items = list(pair[1] for pair in group)\n while len(items) < chunk_size:\n items.append(fill)\n yield tuple(items)", "def chunks(iterator, size):\n for item in iterator:\n yield [item] + list(islice(iterator, size - 1))", "def chunk_by_n_minutes(ds, n):\n from itertools import groupby\n\n if not (60%n == 0):\n print \"ERROR, n (=\", n, \") can only be a fraction of 60\"\n return []\n\n chunks = []\n for year_k, year in groupby(ds, key = lambda x : x['Date'].year):\n #year = dataset per year\n for month_k, month in groupby(year, key = lambda x: x['Date'].month):\n #month = dataset per month\n for day_k, day in groupby(month, key = lambda x: x ['Date'].day):\n #day = dataset per day\n for hour_k, hour in groupby(month, key = lambda x: x ['Date'].hour):\n #hour = dataset per hour\n for minute_k, minutes in groupby(month, key = lambda x: x ['Date'].minute/n):\n #minutes = dataset per n minutes\n chunks.append([d for d in minutes])\n return chunks" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the condition_type of this ConditionRequireBookingData.
def condition_type(self) -> str:
    return self._condition_type
[ "def _GetConditionForType(obj, condition_type):\n conditions = _GetPathValue(obj, ['status', 'conditions'])\n if not conditions:\n return False\n for condition in conditions:\n if condition['type'] == condition_type:\n return condition\n return None", "def getCondition(self):\r\n return self.condition", "def contract_type(self):\n if \"contractType\" in self._prop_dict:\n return self._prop_dict[\"contractType\"]\n else:\n return None", "def condition(self):\n return self.__condition", "def condition(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"condition\")", "def getType ():\n return self.constraintType", "def condition(self):\n if self.is_cocked():\n return COCKED_PISTOL\n return self.calculate_condition()", "def type(self):\n return self.recipe_settings[\"type\"]", "def get_type(self) -> str:\n return self.request_type", "def get_request_type(self, ):\n return self._request_type", "def get_betting_type(self):\n return constants.BETTING_TYPE", "def get_type(self):\n return self.scope_type", "def get_type(self) -> ModelType:\n pass", "def bond_type(self):\n return self._bond_type", "def model_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"model_type\")", "def getStatusType(self):\n return self.base.get(\"status_type\", [])", "def is_conditional(self, func_type):\n _validate_func_type(func_type)\n return self.conditionals[func_type]", "def pending_type(self):\n return self._pending_type", "def type(self):\n return self._service_type", "def calendarType(self):\n unit = booking_types[self.booking_unit][1]\n\n if unit in (\"day\", \"week\"):\n return \"month\"\n else:\n return \"week\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the condition_type of this ConditionRequireBookingData.
def condition_type(self, condition_type: str):
    if condition_type is None:
        raise ValueError("Invalid value for `condition_type`, must not be `None`")  # noqa: E501

    self._condition_type = condition_type
[ "def relaxation_type(self, relaxation_type):\n\n self._relaxation_type = relaxation_type", "def set_conditional(self, func_type):\n _validate_func_type(func_type)\n self.conditionals[func_type] = True", "def set_type(self, the_type: [bool, int, float, str]):\n if self._value:\n raise CloudioModificationException('The Attribute has already a type (Changing the type is not allowed)!')\n\n if the_type in (bool, int, float, bytes, str):\n self._value = the_type()\n\n # Init to invalid\n self._type = AttributeType(AttributeType.Invalid)\n\n # Set cloudio attribute type accordingly\n if the_type in (bool,):\n self._type = AttributeType(AttributeType.Boolean)\n elif the_type in (int,):\n self._type = AttributeType(AttributeType.Integer)\n elif the_type in (float,):\n self._type = AttributeType(AttributeType.Number)\n else:\n assert the_type in (bytes, str), 'Seems we got a new type!'\n self._type = AttributeType(AttributeType.String)\n else:\n raise InvalidCloudioAttributeException(the_type)", "def price_type(self, price_type: str):\n\n self._price_type = price_type", "def setCondAttribute(self, cond: 'char const *') -> \"void\":\n return _coin.ScXMLIfElt_setCondAttribute(self, cond)", "def set_rock_type( self, rock_type_list ):\r\n self.rock_type_selections = rock_type_list\r\n if len(rock_type_list) > 0:\r\n self.conditions_set = True", "def rental_event_type(self, rental_event_type):\n\n self._rental_event_type = rental_event_type", "def type(self, type):\n allowed_values = [\"simple\", \"complex\", \"collection\"]\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\"\n .format(type, allowed_values)\n )\n\n self._type = type", "def set_type(self, type):\n return _raw_util.raw_message_set_type(self, type)", "def set_type(self, type):\n return _raw_util.raw_message_sptr_set_type(self, type)", "def setCondAttribute(self, cond: 'char const *') -> \"void\":\n return _coin.ScXMLElseIfElt_setCondAttribute(self, cond)", "def add_condition(self, owner, condition):\n self.conditions.add_condition(owner, condition)", "def test_set_cond_type(self, setup):\n # To remember to update the test\n assert setup[\"widget\"].c_bar_type.count() == 2\n # Check init position\n assert type(setup[\"widget\"].w_bar) is PCondType21\n assert type(setup[\"test_obj\"].rotor.winding.conductor) is CondType21\n setup[\"widget\"].c_bar_type.setCurrentIndex(1)\n assert type(setup[\"widget\"].w_bar) is PCondType22\n assert type(setup[\"test_obj\"].rotor.winding.conductor) is CondType22\n setup[\"widget\"].c_bar_type.setCurrentIndex(0)\n assert type(setup[\"widget\"].w_bar) is PCondType21\n assert type(setup[\"test_obj\"].rotor.winding.conductor) is CondType21", "def set_data_type(self, a_data_type):\n self.parameters[\"type\"] = str(a_data_type)\n return self", "def setCCM_TYPE(self, ccm_type) -> None:\n ...", "def client_type(self, client_type):\n \n self._client_type = client_type", "def _GetConditionForType(obj, condition_type):\n conditions = _GetPathValue(obj, ['status', 'conditions'])\n if not conditions:\n return False\n for condition in conditions:\n if condition['type'] == condition_type:\n return condition\n return None", "def type_version(self, type_version: str):\n\n self._type_version = type_version", "def email_template_type(self, email_template_type):\n self._email_template_type = email_template_type", "def set_sync_type(self, sync_type, operator):\n if sync_type not in VulnerabilityQuery.VALID_SYNC_TYPE:\n raise ApiError(\"Invalid sync type\")\n 
self._update_criteria(\"sync_type\", sync_type, operator)\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the required_fields of this ConditionRequireBookingData.
def required_fields(self) -> List[str]:
    return self._required_fields
[ "def check_required_fields(self):\n required_fields = set()\n for field_id in self.REQUIRED_FIELDS:\n if not getattr(self, field_id):\n required_fields.add(field_id)\n return required_fields", "def get_required_keys(self):\n return self.REQUIRED_KEYS", "def check_required_fields(self) -> None:\n required_fields = DF_BIB_ENTRY_TYPES[\n DF_BIB_ENTRY_TYPES[\"entry_type\"] == self.entry_type\n ].iloc[0][\"required_fields\"]\n for item in required_fields:\n # \"xx|yy\" means \"xx or yy\"\n # \"xx+|yy\" means \"xx and/or yy\"\n check_num = sum([rf in self.__fields for rf in re.findall(\"\\\\w+\", item)])\n if re.search(\"[\\\\+\\\\|]\", item):\n assert check_num in [\n 1,\n 2,\n ], f\"required field(s) \\042{item}\\042 is (are) missing\"\n else:\n assert check_num == 1, f\"required field \\042{item}\\042 is missing\"", "def getRequiredAttrs(self):\n required = []\n for type_uri, attribute in self.requested_attributes.items():\n if attribute.required:\n required.append(type_uri)\n\n return required", "def get_required_fields(self) -> Iterable[fields.Field]:\n for model_field in self.get_fields():\n if model_field.required:\n\n if isinstance(\n model_field,\n (\n fields.RelationListField,\n fields.GenericRelationListField,\n fields.BaseTemplateField,\n ),\n ):\n raise NotImplementedError(\n f\"{self.collection.collection}.{model_field.own_field_name}\"\n )\n yield model_field", "def required_fields(self, required_fields: List[str]):\n allowed_values = [\"FROM_ADDRESS\", \"TO_ADDRESS\", \"BIRTHDATE\", \"EMAIL\", \"PERSONAL_ADDRESS\", \"PHONE_NUMBERS\", \"LICENSES\", \"BANK_CARDS\", \"DISCOUNT_CARDS\", \"TRAVEL_CARDS\", \"ID_CARDS\", \"CREDIT_CARDS\", \"NAME\", \"AGE\", \"BLOCKCHAIN_CLAIMS\"] # noqa: E501\n if not set(required_fields).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `required_fields` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(required_fields) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._required_fields = required_fields", "def __validate_required_fields(self, data, field_name):\n\n errs = []\n try:\n if data[field_name] == '':\n errs.append(self.return_field_message(field_name, \"required\"))\n except KeyError:\n errs.append(self.return_no_field_message(field_name, 'required'))\n\n return errs", "def _get_invalid_required_fields(self, row, required_fields):\n return [f for f in required_fields if row[f] is BLANK_VALUE]", "def get_required_components(self):\n return []", "def _check_required_fields(self, **kwargs):\n\n for field in self._required_fields:\n if not hasattr(self, field):\n raise NotImplementedError(f\"[{self.__class__.__name__}] requires attr '{field}'\")", "def get_required_attrs():\n default_required_attrs = []\n return getattr(settings, 'REQUIRED_ATTRS', default_required_attrs)", "def required_parameters(self):\n required = []\n for k, v in self.parameters.items():\n if not hasattr(v, \"Default\"):\n required.append((k, v))\n return required", "def getRequiredHeaders(self):\n requiredHeaders = {'string.h'}\n for member in self._members:\n requiredHeaders = requiredHeaders.union(member.getRequiredHeaders())\n return requiredHeaders", "def getRequirements(self, registry=DEFAULT_BOOKING_REGISTRY):\n if self.requirements:\n # Note that booking requirements are stored in the DEFAULT_EQUIPMENT_REGISTRY\n return BookingReqsInfo( reqs_id=self.requirements, registry=registry )\n else:\n return None", "def required_parts(self):\n parts = []\n\n for item in 
self.part.bom_items.all():\n part = {'part': item.sub_part,\n 'per_build': item.quantity,\n 'quantity': item.quantity * self.quantity\n }\n\n parts.append(part)\n\n return parts", "def _required_global_field_names(self):\n cache_key = '%s-%d.required_global_field_names' % (self.__class__.__name__, self.pk)\n result = cache.get(cache_key)\n if result:\n return result\n \n req_fields = settings.REQUIRED_FIELDS[self.site_type] \n field_names = {}\n for key, internal_name in req_fields.items():\n try:\n dsf = self.datasheetfield_set.get(field_id__internal_name=internal_name)\n except DataSheetField.DoesNotExist:\n raise DataSheetError(\"DataSheet (id=%d) should have a field with internal_name of '%s'\" % (self.pk, internal_name,))\n field_names[key] = {'internal_name': internal_name, 'field_name': dsf.field_name}\n\n cache.set(cache_key, field_names)\n return field_names", "def required_colnames(self):\n return self._required_colnames[:]", "def required(self):\n required_step = False\n\n for element in self.functional_elements:\n if element.required:\n required_step = True\n break\n\n return required_step", "def testRequiredFields(self):\n required = Cytokine.required_fields()\n\n self.assertEqual(type(required), tuple,\n \"required_fields() returns a tuple.\")\n\n self.assertTrue(len(required) > 0,\n \"required_field() did not return empty value.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the required_fields of this ConditionRequireBookingData.
def required_fields(self, required_fields: List[str]):
    allowed_values = ["FROM_ADDRESS", "TO_ADDRESS", "BIRTHDATE", "EMAIL", "PERSONAL_ADDRESS", "PHONE_NUMBERS", "LICENSES", "BANK_CARDS", "DISCOUNT_CARDS", "TRAVEL_CARDS", "ID_CARDS", "CREDIT_CARDS", "NAME", "AGE", "BLOCKCHAIN_CLAIMS"]  # noqa: E501
    if not set(required_fields).issubset(set(allowed_values)):
        raise ValueError(
            "Invalid values for `required_fields` [{0}], must be a subset of [{1}]"  # noqa: E501
            .format(", ".join(map(str, set(required_fields) - set(allowed_values))),  # noqa: E501
                    ", ".join(map(str, allowed_values)))
        )

    self._required_fields = required_fields
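A minimal usage sketch of the subset check, assuming the method is wired up as a @property setter on a ConditionRequireBookingData instance named cond (the decorator and constructor are not shown in the source):

    cond.required_fields = ['EMAIL', 'NAME']        # accepted: both values appear in allowed_values
    cond.required_fields = ['EMAIL', 'FAX_NUMBER']  # raises ValueError: 'FAX_NUMBER' is not allowed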
[ "def required(self):\n self.is_required = True\n return self", "def check_required_fields(self):\n required_fields = set()\n for field_id in self.REQUIRED_FIELDS:\n if not getattr(self, field_id):\n required_fields.add(field_id)\n return required_fields", "def setRequired(self, required):\n self.__isRequired = required", "def _check_required_fields(self, **kwargs):\n\n for field in self._required_fields:\n if not hasattr(self, field):\n raise NotImplementedError(f\"[{self.__class__.__name__}] requires attr '{field}'\")", "def check_required_fields(self) -> None:\n required_fields = DF_BIB_ENTRY_TYPES[\n DF_BIB_ENTRY_TYPES[\"entry_type\"] == self.entry_type\n ].iloc[0][\"required_fields\"]\n for item in required_fields:\n # \"xx|yy\" means \"xx or yy\"\n # \"xx+|yy\" means \"xx and/or yy\"\n check_num = sum([rf in self.__fields for rf in re.findall(\"\\\\w+\", item)])\n if re.search(\"[\\\\+\\\\|]\", item):\n assert check_num in [\n 1,\n 2,\n ], f\"required field(s) \\042{item}\\042 is (are) missing\"\n else:\n assert check_num == 1, f\"required field \\042{item}\\042 is missing\"", "def required_fields(self) -> List[str]:\n return self._required_fields", "def all_mandatory_bpk_fields_are_set(self):\n assert self.ensure_one(), _(\"all_mandatory_bpk_fields_are_set() is only allowed for one partner at once\")\n # HINT: For r in self is just done for better readability but not really needed since this should only operate\n # for a single partner: see assert above\n for r in self:\n if any(r[f] for f in self._bpk_forced_fields()):\n if all(r[f] for f in self._bpk_forced_fields()):\n # TODO: Maybe we should also check here if Birthdate is not in the future?\n return True\n elif all(r[f] for f in self._bpk_regular_fields()):\n # TODO: Maybe we should also check here if Birthdate is not in the future?\n return True\n\n return False", "def fpolicy_set_required(self, policy_name, required):\n return self.request( \"fpolicy-set-required\", {\n 'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],\n 'required': [ required, 'required', [ bool, 'None' ], False ],\n }, {\n } )", "def required_flag_level(self, required_flag_level):\n\n self._required_flag_level = required_flag_level", "def update_conditions(self, **required_variables):\n if sorted(list(required_variables)) == sorted(self.required_variables):\n parameters = self.condition_func(self.reference_params, **required_variables)\n for key, value in parameters.items():\n setattr(self, key, value)\n elif len(required_variables) == 0:\n return\n else:\n raise IllegalRequiredVariablesException(\"Expected kwargs for {}. 
Got kwargs for {} instead.\"\n .format(self.required_variables,\n list(required_variables.keys())))", "def _confirm_field_supplied(_fields_dict):\n _field_supplied = False\n for _field_value in _fields_dict.values():\n if _field_value[0]:\n _field_supplied = True\n break\n if not _field_supplied:\n _error_msg = \"At least one field must be enabled to retrieve a response.\"\n logger.error(_error_msg)\n raise errors.exceptions.MissingRequiredDataError(_error_msg)", "def check_required_params(self):\n for param in self.REQUIRED_FIELDS:\n if param not in self.params:\n raise ValidationError(\"Missing parameter: {}\".format(param))", "def check_required_fields(self, ignore_fields: List[str] = list()) -> None:\n self._check_required_fields(\"user\", ignore_fields)", "def _get_invalid_required_fields(self, row, required_fields):\n return [f for f in required_fields if row[f] is BLANK_VALUE]", "def attach_required(self, value: bool):\n self._properties[\"attachRequired\"] = value", "def test_required_attr(self):\n # Make sure all of the required attributes are required\n self.assertTrue(models.UserModel.fname.required)\n self.assertTrue(models.UserModel.lname.required)\n self.assertTrue(models.UserModel.cwruid.required)\n self.assertTrue(models.UserModel.salt.required)\n self.assertTrue(models.UserModel.hash.required)", "def get_required_fields(self) -> Iterable[fields.Field]:\n for model_field in self.get_fields():\n if model_field.required:\n\n if isinstance(\n model_field,\n (\n fields.RelationListField,\n fields.GenericRelationListField,\n fields.BaseTemplateField,\n ),\n ):\n raise NotImplementedError(\n f\"{self.collection.collection}.{model_field.own_field_name}\"\n )\n yield model_field", "def validate_required(self, value):\n if self.status == STATUS.active and self.required and (\n value is None or len(str(value)) == 0):\n raise InputError('The field %s is required.' % self.name)", "def set_required_value_upload(self, required_value):\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the claims of this ConditionRequireBookingData.
def claims(self) -> List[str]:
    return self._claims
[ "def getClaims(self, property):\r\n if not hasattr(self, 'claims'):\r\n self.get()\r\n if self.claims:\r\n if property in self.claims:\r\n return self.claims[property]\r\n return []", "def getClaim():", "def value(self):\n return getattr(self, 'claim')", "def _get_required_empty_claims(self) -> Set[str]:\n return {name for (name, value) in self._get_claim_set(with_empty_claims=True).items()\n if not self._is_optional_claim(name) and value is None\n }", "def claimed(self):\n return self.concepts.exclude(status='p')", "def claim(self) -> str:\n return self._claim", "def get_patent_claims(soup):\n try:\n claim_header = next(t for t in soup.find_all('center')\n if t.text.lower() == 'claims')\n except StopIteration:\n return None\n\n claim_tag_w_junk = claim_header.find_next('hr')\n claim_text = str(claim_tag_w_junk).split('<hr>')[1]\n\n claims = [text.lstrip() for text in claim_text.split('<br><br>')\n if re.match(r'^\\d+\\. ', text.strip())]\n\n return claims if claims else None", "def aud(self):\n return self.claims.get('aud')", "async def getConsentInfo(self):\n try:\n await self.set_token('connect')\n atoken = self._session_tokens['connect']['access_token']\n # Try old pyJWT syntax first\n try:\n subject = jwt.decode(atoken, verify=False).get('sub', None)\n except:\n subject = None\n # Try new pyJWT syntax if old fails\n if subject is None:\n try:\n exp = jwt.decode(atoken, options={'verify_signature': False}).get('sub', None)\n except:\n raise Exception(\"Could not extract sub attribute from token\")\n\n data = {'scopeId': 'commonMandatoryFields'}\n response = await self.post(f'https://profileintegrityservice.apps.emea.vwapps.io/iaa/pic/v1/users/{subject}/check-profile', json=data)\n if response.get('mandatoryConsentInfo', False):\n data = {\n 'consentInfo': response\n }\n return data\n elif response.get('status_code', {}):\n _LOGGER.warning(f'Could not fetch realCarData, HTTP status code: {response.get(\"status_code\")}')\n else:\n _LOGGER.info('Unhandled error while trying to fetch consent information')\n except Exception as error:\n _LOGGER.debug(f'Could not get consent information, error {error}')\n return False", "def get_full_claims(self, request, id_claims: Dict, access_token: str) -> Dict:\n if settings.OIDC_OP_FETCH_USER_INFO and constants.SESSION_OP_USERINFO_URL in request.session and access_token:\n claims = id_claims.copy()\n claims.update(request_get(\n request.session[constants.SESSION_OP_USERINFO_URL],\n headers={'Authorization': f'{settings.OIDC_AUTHORIZATION_HEADER_PREFIX} {access_token}'},\n ).json())\n return claims\n else:\n return id_claims", "def getClaim(self):\n\n if self._vb:\n print(f\"Calling {__name__}.getClaim\")\n if self._claimCounter == 0:\n # Caller shouldn't be calling if there are no expected\n # answers, but is anyway, so just return\n return {'query': {'sql': 'None'}, 'error': 'Nothing to do',\n 'stillToCome': 0, 'claimResult': 'Error'}\n job = self._claimQ.get()\n claim = job['claim']\n self._claimQ.task_done()\n self._claimCounter -= 1\n job['stillToCome'] = self._claimCounter\n self._addToAtkRes('claimTrials', job['spec'], 1)\n # The claim is tested against the first reply\n reply = job['replies'][0]\n job['claimResult'] = 'Wrong'\n if claim:\n self._addToAtkRes('claimMade', job['spec'], 1)\n if 'error' in reply:\n self._addToAtkRes('claimError', job['spec'], 1)\n job['claimResult'] = 'Error'\n else:\n if self._cr == 'singlingOut':\n claimIsCorrect = self._checkSinglingOut(reply['answer'])\n elif self._cr == 'inference':\n claimIsCorrect = 
self._checkInference(reply['answer'])\n elif self._cr == 'linkability':\n claimIsCorrect = self._checkLinkability(reply['answer'])\n if claim == 1 and claimIsCorrect:\n self._addToAtkRes('claimCorrect', job['spec'], 1)\n job['claimResult'] = 'Correct'\n elif claim == 0 and claimIsCorrect:\n self._addToAtkRes('claimPassCorrect', job['spec'], 1)\n job['claimResult'] = 'Correct'\n if self._cr == 'singlingOut' or self._cr == 'inference':\n # Then measure confidence against the second and third replies\n if 'answer' in job['replies'][1]:\n if job['replies'][1]['answer']:\n guessedRows = job['replies'][1]['answer'][0][0]\n else:\n guessedRows = 0\n elif 'error' in job['replies'][1]:\n self._pp.pprint(job)\n print(f\"Error: conf query:\\n{job['replies'][1]['error']}\")\n self.cleanUp(cleanUpCache=False, doExit=True)\n if 'answer' in job['replies'][2]:\n if job['replies'][2]['answer']:\n totalRows = job['replies'][2]['answer'][0][0]\n else:\n totalRows = 0\n elif 'error' in job['replies'][2]:\n self._pp.pprint(job)\n print(f\"Error: conf query:\\n{job['replies'][2]['error']}\")\n self.cleanUp(cleanUpCache=False, doExit=True)\n if totalRows:\n self._addToAtkRes('sumConfidenceRatios', job['spec'],\n guessedRows / totalRows)\n self._addToAtkRes('numConfidenceRatios', job['spec'], 1)\n self._atrs['tableStats']['totalRows'] = totalRows\n else:\n # For linkability, the confidence is always 1/2\n self._addToAtkRes('sumConfidenceRatios', job['spec'], 0.5)\n self._addToAtkRes('numConfidenceRatios', job['spec'], 1)\n if 'q' in job:\n del job['q']\n return (job)", "def get_claim(self, claim):\r\n return self._claim_manager.get(claim)", "def _get_access_token_claims(self, user, **options):\n\n return {}", "def kingdom_has_claim(self, kingdom):\n\n\ttry:\n\t\tclaim = self.offended_set.get(offender=kingdom)\n\t\treturn claim.level\n\texcept Claim.DoesNotExist:\n\t\treturn None", "def _get_refresh_token_claims(self, user, **options):\n\n return {}", "def getchildren(self):\n data_claims = self.data['patient']['claims']['claim']\n claims = []\n if isinstance(data_claims, dict):\n # only a single claim exists\n claim = Claim(data_claims) \n claims.append(claim)\n else:\n # claims is a list\n for claim in data_claims:\n claims.append(Claim(claim))\n return claims", "def _get_claim_names(self) -> Set[str]:\n\n return set(self._get_claim_set(with_empty_claims=True).keys())", "def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrometheusSpecStorageVolumeClaimTemplateStatusConditionsArgs']]]]:\n return pulumi.get(self, \"conditions\")", "def get_fcas_requirements(self):\n return self.fcas_requirements" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the claims of this ConditionRequireBookingData.
def claims(self, claims: List[str]):
    self._claims = claims
[ "def claim(self, claim: str):\n\n self._claim = claim", "def edit_claim(self, claim):\n token = self.get_csrf_token()\n params = {\n \"action\": \"wbsetclaim\",\n \"claim\": json.dumps(claim),\n \"token\": token,\n }\n r1 = self.session.post(self.WIKIBASE_API, data=params)\n r1.json = r1.json()\n\n if \"error\" in r1.json.keys():\n raise WBAPIException(r1.json[\"error\"])", "def desired_defenders(self, desired_defenders):\n\n self._desired_defenders = desired_defenders", "def claim_name(self, claim_name):\n if claim_name is None:\n raise ValueError(\"Invalid value for `claim_name`, must not be `None`\")\n\n self._claim_name = claim_name", "def set_participant_properties(self,iSurveyID,iTokenID,aTokenData):", "def claim(self, item_id, name, email):\n raise NotImplementedError(\"'claim' is required to be implemented on a subclass\")", "def cas_required(self, cas_required):\n\n self._cas_required = cas_required", "def consent_token(self, consent_token):\n\n self._consent_token = consent_token", "def claimed_at(self, claimed_at):\n if claimed_at is None:\n raise ValueError(\"Invalid value for `claimed_at`, must not be `None`\")\n\n self._claimed_at = claimed_at", "def consent_basis(self, consent_basis):\n\n self._consent_basis = consent_basis", "def set_as_president(self):\n with transaction.atomic():\n self.is_member = False\n self.is_secretary = False\n self.is_treasurer = False\n self.is_president = True\n self.is_inactive = False", "def fillin_allowed(self, fillin_allowed):\n\n self._fillin_allowed = fillin_allowed", "def claim_updating(self, metadata):\n\n ttl = metadata['ttl']\n\n if not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl):\n msg = _(u'The TTL for a claim may not exceed {0} seconds, and '\n 'must be at least {1} seconds long.')\n\n raise ValidationFailed(\n msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)", "def update_claim(self, claim, ttl=None, grace=None):\r\n return self._claim_manager.update(claim, ttl=ttl, grace=grace)", "def _enable_consent_checking(self) -> None:\n self.event_creator._block_events_without_consent_error = \"No consent from user\"\n consent_uri_builder = Mock()\n consent_uri_builder.build_user_consent_uri.return_value = \"http://example.com\"\n self.event_creator._consent_uri_builder = consent_uri_builder", "def _restore_claim_set(self, claim_set: Dict[str, Any]) -> None:\n\n for name, value in claim_set.items():\n # Find the corresponding instance variable.\n name = self._map_claim_name_to_instance_var(name)\n\n # Restore the value (if necessary).\n restore_method = self._get_restore_method_for_claim(name)\n if restore_method is not None and value is not None:\n value = restore_method(value)\n\n # Actually set the value.\n setattr(self, name, value)", "def set_survey_properties(self,iSurveyID,aSurveyData):", "def _verify_claim_set(self, claim_set: Dict[str, Any]) -> bool:\n\n # Check the token's class: it must be specified and be this class.\n if self.strict_verification:\n class_name = self._get_class_name()\n claim_class_name = claim_set.get('_easyjwt_class', None)\n if claim_class_name is None:\n raise UnspecifiedClassError()\n\n if claim_class_name != class_name:\n raise InvalidClassError(expected_class=class_name, actual_class=claim_class_name)\n\n # Determine missing and unexpected claims. Missing claims are those specified in this class but not given in the\n # claim set. 
Unexpected claims are those given in the claim set but not specified in this class.\n expected_claims = self._get_claim_names()\n actual_claims = set(claim_set.keys())\n\n # Use the name of the instance variable for missing claims to avoid confusion.\n # For unexpected claims, use the name of the claim.\n missing_claims = {self._map_claim_name_to_instance_var(name) for name\n in expected_claims.difference(actual_claims) if not self._is_optional_claim(name)}\n unexpected_claims = actual_claims.difference(expected_claims)\n\n # If there are no missing or unexpected claims, everything is fine.\n if len(missing_claims) == 0 and len(unexpected_claims) == 0:\n return True\n\n # Otherwise, raise an exception.\n raise InvalidClaimSetError(missing_claims, unexpected_claims)", "def _validate_and_set_permitted_range(self, params):\r\n self.permitted_range = None\r\n if 'permitted_range' in params:\r\n self.permitted_range = params['permitted_range']\r\n # PY: checks and set the features range for the rest of the parameters for\r\n # which the range is not provided.\r\n if not self.check_features_range():\r\n raise ValueError(\r\n \"permitted range of features should be within their original range\")\r\n else:\r\n self.permitted_range, feature_ranges_orig = self.get_features_range(self.permitted_range)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Little helper routine that will return a UTCDateTime object with the beginning of the next month of the given UTCDateTime object.
def _getNextMonth(self, datetime):
    year = datetime.year
    month = datetime.month
    next_month = month + 1
    if next_month != 12:
        next_month = next_month % 12
    if next_month == 1:
        year += 1
    return UTCDateTime(year, next_month, 1)
[ "def _getBeginningOfMonth(self, datetime):\n return UTCDateTime(datetime.year, datetime.month, 1)", "def get_next_month(date):\n if date.month == 12:\n return date.replace(year=date.year + 1, month=1, day=1)\n else:\n return date.replace(month=date.month + 1, day=1)", "def _get_end_of_january():\n return datetime(datetime.utcnow().year + 1, 1, 31, 23, 59, 59, 999999).replace(tzinfo=tz_utc)", "def next_first_of_month_in_20th():\n first = date(1901, 1, 1)\n yield first\n while first.year < 2001:\n if first.month == 12:\n first = first.replace(year=first.year + 1)\n first = first.replace(month=1)\n else:\n first = first.replace(month=first.month + 1)\n yield first", "def get_next_month_year() -> str:\n today = datetime.datetime.today()\n year = today.year\n\n # Make sure January follows December!\n if today.month + 1 == 13:\n month = 1\n year += 1\n else:\n month = today.month + 1\n\n future = datetime.datetime.replace(today, month=month, year=year)\n return datetime.datetime.strftime(future, \"%b-%Y\")", "def get_next_cashflow_date(settle_date, delay, offset_months=0):\r\n delay_plus_one = delay+1\r\n day = settle_date.day\r\n offset = 1 + offset_months if (delay_plus_one <= day) and (delay > 0) else offset_months\r\n date = settle_date + relativedelta(months=offset)\r\n date = date.replace(day=delay_plus_one)\r\n return date", "def next_month(self, start_date):\n current = start_date.month\n potential = [m for m in self.months if m >= current]\n year_wraps = 0\n\n while True:\n if not potential:\n year_wraps += 1\n potential = list(self.months)\n\n yield potential.pop(0), start_date.year + year_wraps", "def start_date(date):\n if date.month < 12:\n return datetime.date(date.year - 1, date.month + 1, 1)\n else:\n return datetime.date(date.year, 1, 1)", "def getnextdate(date):\n day,month,year=breakdate(str(date))\n tdate = datetime.datetime(year, month, day)\n tdate=tdate+datetime.timedelta(1)\n year=zfill(tdate.year, 4)\n month=zfill(tdate.month, 2)\n day=zfill(tdate.day, 2)\n return year+month+day", "def get_month_start(dt):\n return dt.replace(day=1)", "def next_hour_in_tz():\n now = pendulum.now().in_tz(\"UTC\")\n next_hour = now.start_of(\"hour\").add(hours=1)\n\n return next_hour", "def start_month(self, month):", "def test_naive_ceil_day_next_month(self):\n t = datetime.datetime(2013, 2, 28, 12, 23, 4, 40)\n t = fleming.ceil(t, day=1)\n self.assertEquals(t, datetime.datetime(2013, 3, 1))", "def month_offset(dt, offset):\n month_seq = (dt.year * 12 + dt.month - 1) + offset\n year, month0 = divmod(month_seq, 12)\n try:\n return dt.replace(year=year, month=month0 + 1)\n except ValueError:\n # Clip day to last day of month.\n return dt.replace(year=year, month=month0 + 2, day=1) - timedelta(1)", "def IncMonth(self):\n self.month = self.month + 1\n if self.month > 12:\n self.month = 1\n self.year = self.year + 1\n self.set_day = None", "def incrementMonth(month, n=1):\n\tdate = monthToDate(month)\n\tnext_month = date + relativedelta(months=n)\n\n\treturn next_month.strftime(\"%Y-%m-%d\")", "def month_date_range(date: datetime.date) -> Tuple[datetime.datetime, datetime.datetime]:\n start = datetime.datetime(date.year, date.month, 1, 0, 0, 0, tzinfo=utc)\n y: int = date.year\n m: int = date.month + 1\n if m == 13:\n m = 1\n y = y + 1\n end = datetime.datetime(y, m, 1, 0, 0, 0, tzinfo=utc) - datetime.timedelta(days=1)\n return start, end", "def last_month() -> date:\n return date.today().replace(day=1) - timedelta(1)", "def make_release_date(**kwargs) -> pendulum.DateTime:\n\n return 
kwargs[\"next_execution_date\"].subtract(days=1).start_of(\"day\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as _getNextMonth but this one will return the beginning of the month as a UTCDateTime object.
def _getBeginningOfMonth(self, datetime):
    return UTCDateTime(datetime.year, datetime.month, 1)
[ "def _getNextMonth(self, datetime):\n year = datetime.year\n month = datetime.month\n next_month = month + 1\n if next_month != 12:\n next_month = next_month % 12\n if next_month == 1:\n year += 1\n return UTCDateTime(year, next_month, 1)", "def get_month_start(dt):\n return dt.replace(day=1)", "def start_month(self, month):", "def get_next_month(date):\n if date.month == 12:\n return date.replace(year=date.year + 1, month=1, day=1)\n else:\n return date.replace(month=date.month + 1, day=1)", "def last_month() -> date:\n return date.today().replace(day=1) - timedelta(1)", "def _get_end_of_january():\n return datetime(datetime.utcnow().year + 1, 1, 31, 23, 59, 59, 999999).replace(tzinfo=tz_utc)", "def get_start_month(self) -> int:\n return self.start_date.month", "def start_date(date):\n if date.month < 12:\n return datetime.date(date.year - 1, date.month + 1, 1)\n else:\n return datetime.date(date.year, 1, 1)", "def next_first_of_month_in_20th():\n first = date(1901, 1, 1)\n yield first\n while first.year < 2001:\n if first.month == 12:\n first = first.replace(year=first.year + 1)\n first = first.replace(month=1)\n else:\n first = first.replace(month=first.month + 1)\n yield first", "def next_month(self, start_date):\n current = start_date.month\n potential = [m for m in self.months if m >= current]\n year_wraps = 0\n\n while True:\n if not potential:\n year_wraps += 1\n potential = list(self.months)\n\n yield potential.pop(0), start_date.year + year_wraps", "def get_previous_month(self):\r\n end = utils.get_month_start() - relativedelta(days=1)\r\n end = utils.to_datetime(end)\r\n start = utils.get_month_start(end)\r\n return start, end", "def get_previous_month(self, date):\r\n first_day, last_day = _month_bounds(date)\r\n prev = (first_day - datetime.timedelta(days=1)).replace(day=1)\r\n return _get_next_prev_month(self, prev, is_previous=True, use_first_day=True)", "def generate_dates(self):\n last_month = []\n today = DT.date.today()\n\n for day in range(1,31):\n new_day = today - DT.timedelta(days=day)\n date_to_add = str(new_day.year)+self.format_date(str(new_day.month))+self.format_date(str(new_day.day))\n last_month.append(date_to_add)\n return last_month", "def start_of_month(adate, holidays=[]):\r\n\tthe1st = date(adate.year, adate.month, 1)\r\n\treturn business_day(the1st, 0, holidays)", "def set_allowed_month(self, schedule):\n month = get_next_value(schedule.month, self.months)\n if month > schedule.month:\n year = schedule.year\n else:\n year = schedule.year + 1\n return datetime.datetime(year, month, 1)", "def each_month(self):\n start = date(self.start_date.year, self.start_date.month, 1)\n if self.months() == 0:\n days = calendar.monthrange(start.year, start.month)[1]\n end = date(start.year, start.month, days)\n yield (start, end)\n else:\n for n in xrange(self.months()):\n days = calendar.monthrange(start.year, start.month)[1]\n end = date(start.year, start.month, days)\n yield (start, end)\n start = end + timedelta(days=1)", "def _shift_index_by_month(self, current_idx):\n\n dt = date.fromordinal(np.int(self.time[current_idx]))\n if dt.month < 12:\n mi = dt.month + 1\n y = dt.year\n else:\n mi = 1\n y = dt.year + 1\n\n return self.find_date_ndx(date(y, mi, dt.day))", "def start_first_of_the_month(df):\r\n first_date_gap = df.iloc[0].gap_in_day\r\n try:\r\n first_of_month_gap = min([i - 1 for i in firsts_of_the_month if i - 1 >= first_date_gap])\r\n except:\r\n return df\r\n\r\n last_date_gap = df.iloc[-1].gap_in_day\r\n try:\r\n last_of_month_gap = min([i - 1 for i 
in firsts_of_the_month if i >= last_date_gap])\r\n except:\r\n return df\r\n\r\n df = df[df['gap_in_day'] >= first_of_month_gap]\r\n df = df[df['gap_in_day'] <= last_of_month_gap]\r\n\r\n return df", "def get_next_month_year() -> str:\n today = datetime.datetime.today()\n year = today.year\n\n # Make sure January follows December!\n if today.month + 1 == 13:\n month = 1\n year += 1\n else:\n month = today.month + 1\n\n future = datetime.datetime.replace(today, month=month, year=year)\n return datetime.datetime.strftime(future, \"%b-%Y\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the relative position of datetime within the graph in respect to self.starttime and self.time_range.
def _getRelativePosition(self, datetime):
    return (datetime - self.starttime) / self.time_range *\
        parent.graph_width
[ "def position_timed(self):\r\n actual_time = time.time()\r\n self.position[0] = self.speed[0] * self.time_speed + self.position[0]\r\n self.position[1] = self.speed[1] * self.time_speed + self.position[1]\r\n self.last_time_position = actual_time\r\n return self.position", "def get_position(self, time: datetime) -> Tuple[float, float]:\n # Get the amount of time elapsed since the bikes journey started\n current_time = (time - self.start_time).total_seconds()\n # Get the total time that will elapse in the bikes journey\n total_time = (self.end_time - self.start_time).total_seconds()\n # Get the longitudinal distance the bike will travel\n lon_distance = (self.end.location[0] - self.start.location[0])\n # Get the latitudinal distance the bike will travel\n lat_distance = (self.end.location[1] - self.start.location[1])\n # the lon and lat positions will be equal to\n # the entire distance * the fraction of time elapsed\n # + starting position\n return (current_time*lon_distance/total_time + self.start.location[0],\n current_time*lat_distance/total_time + self.start.location[1])", "def get_start_time(self):\n\n return self.time_vector[0]", "def getResultPosition(self, pTime):\n return _almathinternal.AnticipationTracker_getResultPosition(self, pTime)", "def start_time(self):\n # if this hunt is configured for full coverage, then the starting time for the search\n # will be equal to the ending time of the last executed search\n if self.full_coverage:\n # have we not executed this search yet?\n if self.last_end_time is None:\n return local_time() - self.time_range\n else:\n return self.last_end_time\n else:\n # if we're not doing full coverage then we don't worry about the last end time\n return local_time() - self.time_range", "def start(self):\n return self.end - timedelta(minutes=self.minutes)", "def curr_curve_start_xyt(self):\n if self._curr_curve_start_index is None:\n return None\n else:\n return self._recent_near_coords[self._curr_curve_start_index]", "def planned_start(self):\n all_task_estimations = Estimation.objects.filter(task=self.id)\n planned_task_start = None\n if len(all_task_estimations) == 0:\n planned_task_start = None\n else:\n for estimation in all_task_estimations:\n if planned_task_start is None:\n planned_task_start = estimation.date_from\n elif estimation.date_from < planned_task_start:\n planned_task_start = estimation.date_from\n return planned_task_start", "def start_coord(self):\n return self.lat_s, self.lon_s", "def current_position(self):\n try:\n pos, format = self.player.query_position(gst.FORMAT_TIME)\n except:\n position = 0\n else:\n position = pos * 1.0 / gst.MSECOND\n return position", "def position(self):\n return self.polargraph.position()", "def getStartPos(self):\n return self.startPos", "def abs_time_points(self):\n return self.get_abs_time_points(box_open=True)", "def span(self):\n return self.end - self.start", "def getResultPosition(self, pTime):\n return _almathinternal.OpenLoopFeedBack_getResultPosition(self, pTime)", "def get_start(self) -> int:\n return self.__pos_x", "def duration(self):\n return self.points[self.end].time - self.points[self.start].time", "def observation_time_start(self):\n return self.time_ref + u.Quantity(self.table.meta[\"TSTART\"], \"second\")", "def GetMinPoint(self):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the tick positions for the months in relative units, e.g. 0 is at the left border of the graph and 1 at the right border.
def _calculateMonthlyTicks(self):
    first_tick = self._getNextMonth(self.starttime)
    last_tick = self._getBeginningOfMonth(self.endtime)
    self.ticks = [self._getRelativePosition(first_tick)]
    # Loop and get the relative positions.
    while first_tick < last_tick:
        first_tick = self._getNextMonth(first_tick)
        self.ticks.append(self._getRelativePosition(first_tick))
[ "def setup_ticks(self):\r\n ndana = self.zavrsnoVrijeme - self.pocetnoVrijeme\r\n #major ticks\r\n majorLocator = HourLocator(interval=ndana.days+1)\r\n majorFormat = DateFormatter('%H:%M')\r\n #minor ticks\r\n minorLocator = AutoMinorLocator(n=4)\r\n minorFormat = NullFormatter()\r\n\r\n self.axes.xaxis.set_major_locator(majorLocator)\r\n self.axes.xaxis.set_major_formatter(majorFormat)\r\n self.axes.xaxis.set_minor_locator(minorLocator)\r\n self.axes.xaxis.set_minor_formatter(minorFormat)\r\n\r\n self.fig.autofmt_xdate()\r\n allXLabels = self.axes.get_xticklabels(which='both') #dohvati sve labele\r\n for label in allXLabels:\r\n #label.set_rotation(30)\r\n label.set_fontsize(8)", "def xticks_every_3months(ax_to_update, day0_date, time_values, include_tick_labels):\n import datetime as dt\n from dateutil.relativedelta import relativedelta # add 3 months and check not after end\n from matplotlib.ticker import AutoMinorLocator \n \n \n xtick_label_angle = 315\n \n tick_labels_days = ax_to_update.get_xticks().tolist() # get the current tick labels\n day0_date_dt = dt.datetime.strptime(day0_date, \"%Y%m%d\") \n dayend_date_dt = day0_date_dt + dt.timedelta(int(time_values[-1])) # the last time value is the number of days we have, so add this to day0 to get the end. \n \n # 1: find first tick date (the first of the jan/ april/jul /oct) \n date_tick0 = day0_date_dt \n while not ( (date_tick0.day) == 1 and (date_tick0.month == 1 or date_tick0.month == 4 or date_tick0.month == 7 or date_tick0.month == 10 )):\n date_tick0 += dt.timedelta(1)\n \n # 2: get all the other first of the quarters\n ticks = {'datetimes' : [date_tick0],\n 'yyyymmdd' : [],\n 'n_day' : []}\n \n while ticks['datetimes'][-1] < (dayend_date_dt - relativedelta(months=+3)): # subtract 3 months to make sure we don't go one 3 month jump too far. \n ticks['datetimes'].append(ticks['datetimes'][-1] + relativedelta(months=+3))\n \n # 3: work out what day number each first of the quarter is. \n for tick_dt in ticks['datetimes']: # find the day nubmers from this. \n ticks['yyyymmdd'].append(dt.datetime.strftime(tick_dt, \"%Y/%m/%d\"))\n ticks['n_day'].append((tick_dt - day0_date_dt).days)\n \n # 4: Update the figure. \n ax_to_update.set_xticks(ticks['n_day']) # apply major tick labels to the figure\n minor_locator = AutoMinorLocator(3) # there are three months in each quarter, so a minor tick every month\n ax_to_update.xaxis.set_minor_locator(minor_locator) # add to figure. \n if include_tick_labels:\n ax_to_update.set_xticklabels(ticks['yyyymmdd'], rotation = xtick_label_angle, ha = 'left') # update tick labels, and rotate\n plt.subplots_adjust(bottom=0.15)\n ax_to_update.set_xlabel('Date')\n else:\n ax_to_update.set_xticklabels([]) # remove any tick lables if they aren't to be used. \n \n # add vertical lines every year. 
\n for major_tick_n, datetime_majortick in enumerate(ticks['datetimes']):\n if datetime_majortick.month == 1:\n ax_to_update.axvline(x = ticks['n_day'][major_tick_n], color='k', alpha=0.1, linestyle='--')", "def get_month_control (self):\n return self.__months", "def _get_tick_frac_labels(self):\n minor_num = 4 # number of minor ticks per major division\n if (self.axis.scale_type == 'linear'):\n domain = self.axis.domain\n if domain[1] < domain[0]:\n flip = True\n domain = domain[::-1]\n else:\n flip = False\n offset = domain[0]\n scale = domain[1] - domain[0]\n\n transforms = self.axis.transforms\n length = self.axis.pos[1] - self.axis.pos[0] # in logical coords\n n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi\n\n # major = np.linspace(domain[0], domain[1], num=11)\n # major = MaxNLocator(10).tick_values(*domain)\n major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)\n\n labels = ['%g' % x for x in major]\n majstep = major[1] - major[0]\n minor = []\n minstep = majstep / (minor_num + 1)\n minstart = 0 if self.axis._stop_at_major[0] else -1\n minstop = -1 if self.axis._stop_at_major[1] else 0\n for i in range(minstart, len(major) + minstop):\n maj = major[0] + i * majstep\n minor.extend(np.linspace(maj + minstep,\n maj + majstep - minstep,\n minor_num))\n major_frac = (major - offset) / scale\n minor_frac = (np.array(minor) - offset) / scale\n major_frac = major_frac[::-1] if flip else major_frac\n use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)\n major_frac = major_frac[use_mask]\n labels = [l for li, l in enumerate(labels) if use_mask[li]]\n minor_frac = minor_frac[(minor_frac > -0.0001) &\n (minor_frac < 1.0001)]\n elif self.axis.scale_type == 'logarithmic':\n return NotImplementedError\n elif self.axis.scale_type == 'power':\n return NotImplementedError\n return major_frac, minor_frac, labels", "def tick_per_month(ticks):\r\n ticks.groupby(by = 'totalpermonth')['success_rate'].mean().plot(kind = 'bar')\r\n plt.xlabel('Total Sweeps per Month')\r\n plt.ylabel('Tickets per Mile Swept')\r\n plt.title('Average Tickets per Mile Swept by Number od Sweeps Per Month')\r\n plt.savefig(image_loc + 'TicksbySweep.png')\r\n plt.show()\r\n return", "def set_days_in_month(month_picked):\n if month_picked in ['July', 'August']:\n days = 31\n marks = {1: '1', 10: '10', 20: '20', 31: '31'}\n else:\n days = 30\n marks = {1: '1', 10: '10', 20: '20', 30: '30'}\n\n return days, marks", "def getFYMonths(self, m: list, fy_start: int):\n delta_if_true = fy_start - 2 * fy_start + 1\n delta_if_false = fy_start - 2 + fy_start + 13\n fy_months = []\n for x in m:\n if x >= fy_start and x <= 12:\n fy_months.append(x - abs(delta_if_true))\n else: \n fy_months.append(x + abs(delta_if_false))\n return fy_months", "def faxes_this_month(self):\n return self._faxes_this_month", "def start_month(self, month):", "def test_timeseries_months(self):\n ts = Timeseries()\n ts.dseries = datetime(2015, 12, 31).toordinal() + np.arange(1000)\n ts.tseries = np.arange(1000)\n\n self.assertDictEqual(\n ts.months(),\n {\n \"2015-12\": 0,\n \"2016-01\": 31,\n \"2016-02\": 60,\n \"2016-03\": 91,\n \"2016-04\": 121,\n \"2016-05\": 152,\n \"2016-06\": 182,\n \"2016-07\": 213,\n \"2016-08\": 244,\n \"2016-09\": 274,\n \"2016-10\": 305,\n \"2016-11\": 335,\n \"2016-12\": 366,\n \"2017-01\": 397,\n \"2017-02\": 425,\n \"2017-03\": 456,\n \"2017-04\": 486,\n \"2017-05\": 517,\n \"2017-06\": 547,\n \"2017-07\": 578,\n \"2017-08\": 609,\n \"2017-09\": 639,\n \"2017-10\": 670,\n \"2017-11\": 700,\n \"2017-12\": 731,\n 
\"2018-01\": 762,\n \"2018-02\": 790,\n \"2018-03\": 821,\n \"2018-04\": 851,\n \"2018-05\": 882,\n \"2018-06\": 912,\n \"2018-07\": 943,\n \"2018-08\": 974,\n \"2018-09\": 999,\n },\n )", "def months_passed(self):\n\n return relativedelta(self.start_date, date.today()).months", "def createMonthCalendar(self, month):\n cal = calendar.monthcalendar(self.year, month + 1)\n self.createLayout()\n self.createHeader(self.months[month])\n # weeks\n rowCnt = 2\n for i in cal:\n colCnt = 0\n for j in i: # days\n cel = createText(self.marginl + colCnt * self.colSize,\n self.calHeight + rowCnt * self.rowSize,\n self.colSize, self.rowSize)\n colCnt += 1\n if j != 0:\n self.applyTextToFrame(str(j), cel)\n rowCnt += 1", "def each_month(self):\n start = date(self.start_date.year, self.start_date.month, 1)\n if self.months() == 0:\n days = calendar.monthrange(start.year, start.month)[1]\n end = date(start.year, start.month, days)\n yield (start, end)\n else:\n for n in xrange(self.months()):\n days = calendar.monthrange(start.year, start.month)[1]\n end = date(start.year, start.month, days)\n yield (start, end)\n start = end + timedelta(days=1)", "def __exp_x_ticklabels(self) -> list:\n x_tick_labels = []\n for i in range(-1*self.interval, self.interval):\n x_tick_labels.append(i)\n return x_tick_labels", "def molad(cls, month, year):\n y = year + 1 if month < HebrewMonth.TISHRI else year\n months_elapsed = month - HebrewMonth.TISHRI + quotient(235 * y - 234, 19)\n return cls.EPOCH - Fraction(876, 25920) + months_elapsed * (29 + Clock.days_from_hours(12) + Fraction(793,25920))", "def test_linear_positions_change_margin(self):\n pass", "def get_dummy_month_of_year(self):\n moy = pd.get_dummies(pd.to_datetime(self.calendar_df.index).month).values\n x = torch.from_numpy(moy).type(torch.get_default_dtype())\n assert x.shape == (self.num_days, 12)\n return x", "def sweep_per_month(ticks):\r\n by_street = ticks.groupby('lineid')[['totalpermonth', 'distance', 'TicketNumber']].sum()\r\n by_street['miles_sweeped_year'] = by_street['totalpermonth'] * 12 * by_street['distance']\r\n by_street['success_rate'] = by_street['TicketNumber'] / by_street['miles_sweeped_year']\r\n by_street.groupby('totalpermonth')['success_rate'].mean().plot(kind = 'bar')\r\n plt.xlabel('Total Sweeps per Month')\r\n plt.ylabel('Sweep Success Rate')\r\n plt.savefig(image_loc + 'SweepsPerMonth.png')\r\n plt.show()\r\n return", "def set_ticks_number(self, axis, n):\n if axis == \"x\":\n xlim = self.ax.get_xlim()\n ticks = np.linspace(xlim[0], xlim[1]*1.1, n)\n self.ax.set_xticks(ticks)\n if axis == \"y\":\n ylim = self.ax.get_ylim()\n ticks = np.linspace(ylim[0], ylim[1]*1.1, n)\n self.ax.set_yticks(ticks)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sets in __objects the obj with key <obj class name>.id
def new(self, obj):
    key = str((type(obj).__name__) + '.' + (obj.id))
    return self.__objects.update({key: obj})
[ "def update(self, obj, id):", "def register_object( self, obj ):\n obj_id = id( obj )\n self.objects[ obj_id ] = obj\n return obj_id", "def test_patch_obj_id_put(self):\n pass", "def id_dict(self):\n return {obj.id: obj for obj in self}", "def test_patch_obj_id_get(self):\n pass", "def _restore_objs_from_IDs(self):\n if isinstance(self.location, str):\n self.location = Thing.ID_dict[self.location] # XXX will this work correctly for the room if it isn't loaded yet? \n if self.contents != None:\n self.contents = [Thing.ID_dict[id] for id in self.contents if (isinstance(id, str) and id in Thing.ID_dict)]", "def _id(self, _id):\n self.__id = _id", "def set_object(self, obj_id, model_id=None):\n self.grasp_planner.load_object(obj_id=obj_id, model_id=model_id)\n self.root_node = self.grasp_planner.get_root_node()", "def fetch( self, obj, id ):\n\t\treturn obj.ById( id )", "def _UpdateObject(self, _id, obj):\n if self._vbo is None:\n return\n index = self._indices.get(_id, -1)\n # have to check since object could have been deleted since marked as\n # dirty\n if index < 0:\n return\n num_values = type(self).__num_values\n self._vbo[index * num_values:\n (index + 1) * num_values] = narray(self.__descToArray(obj), \"f\")", "def setObjectId(self, idnum):\r\n oldnum = self._idnum\r\n deallocateIdNum(oldnum)\r\n self._idnum = idnum", "def set_id(self):\n self._id = hash(\n (self.__class__, self.name)\n + tuple([child.id for child in self.children])\n + tuple([(k, tuple(v)) for k, v in self.domains.items() if v != []])\n )", "def add(uid, obj):", "def _add_identities(self):\r\n for name,catobject in sorted(self.objects.items()):\r\n identity_morphism = CatMorphism(\"id_\"+name,catobject,catobject)\r\n identity_morphism.set_to_identity()\r\n self._add_morphisms([identity_morphism])", "def store(self, objs, keys, complete_sets=[]):\r\n pass", "def id(obj):\n try:\n return key(obj).id_or_name()\n except AttributeError:\n return obj", "def object_id(obj):\n if isinstance(obj,ObjectWrapper):\n return obj.id()\n else:\n return id(obj)", "def set_object(bucket, key, data):\n _objstore_backend.set_object(bucket, key, data)", "def _MarkDirty(self, _id, obj):\n if self._vbo is None:\n return\n self._dirty_objects[_id] = obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get starting hash for given device
def get_hash_for_device(uuid: int, location: str) -> ElementModQ:
    return hash_elems(uuid, location)
[ "def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()", "def hash(self):\n return self.wh", "def get_hash(self):\r\n path = self.files[self.idx_image]\r\n filename = path.split(\"/\")[-1]\r\n with open(path,\"rb\") as f:\r\n hash_object = hashlib.sha512(f.read())\r\n hex_dig = hash_object.hexdigest()\r\n hash = filename + \" \"+ hex_dig\r\n return hash", "def _get_hash_number(self, prefix, start):\n hash_num = 0 if start is None else start\n md5_hash = hashlib.md5(self._puzzle_input.encode('utf-8'))\n while True:\n hash_input = md5_hash.copy()\n hash_input.update(str(hash_num).encode('utf-8'))\n if hash_input.hexdigest().startswith(prefix):\n break\n hash_num += 1\n return hash_num", "def _getfingerprint(self):\n\n return base64.b64encode(encryption.sha512(self.publickeyxml.encode())).decode()", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def get_digest(clue):\n return clue.info['digest']", "def get_latest_hash(self):\n db_query = u\"SELECT hash_block FROM block_chain ORDER BY ROWID DESC LIMIT 1;\"\n db_result = self.execute(db_query).fetchone()\n\n return str(db_result[0]) if db_result else ''", "def getHash(self):\n # using following attributes to find the block hash\n # version, priorBlockHash, target, time and nonce\n blockHash = hashlib.sha256()\n blockHash.update(self.version.to_bytes(32,\"big\"))\n blockHash.update(self.parentBlockHash.to_bytes(32,\"big\"))\n blockHash.update(self.target.to_bytes(32,\"big\"))\n blockHash.update(self.time.to_bytes(32,\"big\"))\n blockHash.update(self.nonce.to_bytes(32,\"big\"))\n\n return int.from_bytes(blockHash.digest(),\"big\")", "def short_hash(self, length=8):\n return self.hash[:length]", "def header_hash(self): \n return hashlib.sha256((str(self.index) + str(self.timestamp) + str(self.tx) + str(self.previous_block)).encode('utf-8')).hexdigest()", "def hash(self):\n\n return hash_barcode(self.data)", "def _get_device_start_time(self):\n result = self.shell(command='date \\\"+%Y%m%d %H:%M:%S\\\"').response()\n #result = result+'.000'\n epoch = float(time.mktime(time.strptime(result, '%Y%m%d %H:%M:%S')))\n self._device_start_time = epoch", "def host_fingerprint(cls) -> str:\n hasher = sha256()\n for component in os.uname():\n hasher.update(component.encode())\n return hasher.hexdigest()[:12]", "def calculate_hash(self):\n digests = {s.header.section_digest: s.hash() for s in self.sections}\n\n header_size = self.header.size_of_headers\n self._xbe_stream.seek(0)\n header_bytes = self._xbe_stream.read(header_size)\n header_bytearray = bytearray(header_bytes)\n section_header_addr = self.header.section_headers_addr - self.header.base_addr\n\n for i in range(section_header_addr + 36,\n section_header_addr + (XbeSectionHeader.size * self.header.sections),\n XbeSectionHeader.size):\n header_bytearray[i:i + 20] = digests[header_bytes[i:i + 20]]\n\n sha1 = hashlib.sha1()\n sha1.update(struct.pack('I', header_size-260))\n header_bytes = bytes(header_bytearray)\n sha1.update(header_bytes[260:])\n return sha1.digest()", "def get(self) -> str:\n platform_hashes = self.get_for_platform()\n platform_hash = 
platform_hashes.get(\n get_platform_version(), platform_hashes[\"generic\"]\n )\n assert isinstance(platform_hash, str)\n\n return platform_hash", "def hash(self):\n assert self.__hash, \\\n \"Tried to use hash() after spent. See:\\n\" \\\n + TREEPRNG_DOC_URL + \"#the-treeprng-life-cycle\"\n hash = self.__hash.copy()\n hash.update(\"h\")\n self.is_dict = True\n return long(hash.hexdigest(), 16)", "def get_hash(image):\n import hashlib\n \n hashobj = hashlib.md5(image.read()).hexdigest()\n print(hashobj)\n return hashobj", "def get_hash(self):\n return hashlib.md5(next(iter(self.get_clusters())).encode('utf-8') + '-'.join(sorted(host.host_id for host in set(self.hosts))).encode('utf-8')).hexdigest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the rotated tracker hash for a particular ballot.
def get_rotating_tracker_hash(
    prev_hash: ElementModQ, timestamp: int, ballot_hash: ElementModQ
) -> ElementModQ:
    return hash_elems(prev_hash, timestamp, ballot_hash)
[ "def get_hash(self, label):\n labelhash = self._hasher.copy()\n labelhash.update(pickle.dumps(label))\n return labelhash.digest()", "def hash(polygon):\n crc = zlib.adler32(polygon.wkb)\n return crc", "def zobrist_hash(self) -> int:\n return chess.polyglot.zobrist_hash(self)", "def __get_current_hash__(self):\n hasher = hashlib.sha256()\n hasher.update(self.previous_hash.encode() + self.data.encode())\n return hasher.hexdigest()", "def __getHash(self, hspl):\n subject = hspl.find(\"{%s}subject\" % getHSPLNamespace())\n action = hspl.find(\"{%s}action\" % getHSPLNamespace())\n trafficConstraints = hspl.find(\"{%s}traffic-constraints\" % getHSPLNamespace())\n h = 1\n h = 37 * h + hash(etree.tostring(subject))\n h = 37 * h + hash(etree.tostring(action))\n h = 37 * h + hash(etree.tostring(trafficConstraints))\n return h", "def get_reactant_hash(self,reactant_alias):\n\t\treactant_hash=hash(reactant_alias)\n\t\tif reactant_hash in self.reactant:\n\t\t\treturn reactant_hash\n\t\telse:\n\t\t\treturn self.insert_new_reactant(reactant_alias,unexpected=1)", "def hash(self):\n return self.wh", "def get_hash(self, descriptor):", "def hash_spin(self, challenge, spin):\n spinArray = bytearray.fromhex(spin);\n byteChallenge = bytearray.fromhex(challenge);\n spinArray.extend(byteChallenge)\n return hashlib.sha512(spinArray).hexdigest()", "def get_id(torrent):\n\n return torrent.get_status([\"hash\"])[\"hash\"]", "def get_commit_hash():\n return git.Repo().head.object.hexsha", "def get_hash(self):\r\n if not hasattr(self, 'signed_tx') or not self.signed_tx:\r\n raise AttributeError('You need to sign transaction before')\r\n\r\n # Create SHA256\r\n sha = hashlib.sha256()\r\n sha.update(bytes.fromhex(self.signed_tx))\r\n\r\n # Return first 64 symbols with prefix\r\n return MinterHelper.prefix_add(sha.hexdigest()[:64], PREFIX_TX)", "def hash(self):\n\n return hash_barcode(self.data)", "def hash(self):\n assert self.__hash, \\\n \"Tried to use hash() after spent. See:\\n\" \\\n + TREEPRNG_DOC_URL + \"#the-treeprng-life-cycle\"\n hash = self.__hash.copy()\n hash.update(\"h\")\n self.is_dict = True\n return long(hash.hexdigest(), 16)", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def _getfingerprint(self):\n\n return base64.b64encode(encryption.sha512(self.publickeyxml.encode())).decode()", "def jsonrpc_sync_hash(self, wallet_id=None):\n wallet = self.wallet_manager.get_wallet_or_default(wallet_id)\n return hexlify(wallet.hash).decode()", "def header_hash(self): \n return hashlib.sha256((str(self.index) + str(self.timestamp) + str(self.tx) + str(self.previous_block)).encode('utf-8')).hexdigest()", "def __hash__(self):\n x = self.flatten()\n if self.x_turn:\n x.append(1)\n else:\n x.append(-1)\n return hash(tuple(x))", "def get_hash(self) -> int:\n return encode_base62(id_dec=self.pk)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
shows board with PyGame functions
def show_board(self) -> None:
    pygame.display.set_caption("Qwixx Board")
    if self.is_turn_invalid:
        self.screen.fill(PyGameUi.red_vibrant)
    else:
        self.screen.fill(PyGameUi.white)
    font = pygame.font.SysFont('Comic Sans MS', PyGameUi.font_numbers_size, True, False)
    lock = pygame.font.SysFont('Comic Sans MS', PyGameUi.font_lock_size, True, False)
    self._render_colored_rows(font, lock)
    self._render_penalties(font)
    self._render_skip_button(font)
    self._render_dice(font)
    self._show_player_mode(font)
    clock = pygame.time.Clock()
    clock.tick(60)
    pygame.display.flip()
[ "def drawBoard(self):\n\n # \"board\" is a list of 10 strings representing the board (ignore index 0)\n print(' | |')\n print(' ' + self.board[7] + ' | ' + self.board[8] + ' | ' + self.board[9])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + self.board[4] + ' | ' + self.board[5] + ' | ' + self.board[6])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + self.board[1] + ' | ' + self.board[2] + ' | ' + self.board[3])\n print(' | |')", "def show_game(self):\n self.G.show_board() # Call the graph's show_board.", "def displayBoard(board):\n print(\"\\n\\t\",board[0],\"|\",board[1],\"|\",board[2])\n print(\"\\t ----------\")\n print(\"\\n\\t\",board[3],\"|\",board[4],\"|\",board[5])\n print(\"\\t---------\")\n print(\"\\n\\t\",board[6],\"|\",board[7],\"|\",board[8])", "def displayBoard():\n return render_template('board.html',\n clubs=clubs)", "def draw_board(self):\r\n for i in range(9):\r\n for j in range(9):\r\n # Draw black lines to demarkate the 'boxes'\r\n if j%3 == 0 and j != 0:\r\n pygame.draw.line(self.window, BLACK, ((j//3)*180, 0), ((j//3)*180, 540), 4)\r\n if i%3 == 0 and i != 0:\r\n pygame.draw.line(self.window, BLACK, (0, (i//3)*180), (540, (i//3)*180), 4)\r\n \r\n # Draw the cells \r\n self.cells[i][j].draw(BLACK, 1)\r\n\r\n # Don't draw the placeholder 0s on the grid\r\n if self.cells[i][j].value != 0:\r\n self.cells[i][j].display(self.cells[i][j].value, (21+(j*60), (16+(i*60))), (0, 0, 0))\r\n \r\n # Bottom most line\r\n pygame.draw.line(self.window, (0, 0, 0), (0, ((i+1) // 3) * 180), (540, ((i+1) // 3) * 180), 4)", "def draw_board(self) -> None:\n for row in range(LENGTH):\n print('--------------')\n print('| ', end='')\n for col in range(LENGTH):\n if self.board[row, col] == self.x:\n print(' x |', end='')\n elif self.board[row, col] == self.o:\n print(' o |', end='')\n else:\n print(' |', end='')\n print('') # End of column\n print('--------------') # End of rows", "def drawBoard(self):\r\n board = Canvas(self.mainWindow, width=longPlus[9] +\r\n TILE_LONG, height=longPlus[9]+TILE_LONG, bg=\"#c0e2ca\")\r\n for tile in self.game.getBoard():\r\n self._drawTile(tile, board)\r\n board.grid(row=0, column=0, rowspan=3)\r\n self._drawPlayers(board)", "def draw_pieces(self):\n for i in range(8):\n for j in range(8):\n if self.get_board_array()[i, j].get_content() is not None:\n self.screen.blit(\n self.get_board_array()[i, j].get_content().get_visual(),\n (int(j * self.h / 8), int(i * self.h / 8))\n )", "def print_board():\n \n print \"\"\n print \" | | \"\n print \" \" + grid_status[(1,1)] + \" | \" + grid_status[(1,2)] + \" | \" + grid_status[(1,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(2,1)] + \" | \" + grid_status[(2,2)] + \" | \" + grid_status[(2,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(3,1)] + \" | \" + grid_status[(3,2)] + \" | \" + grid_status[(3,3)]\n print \" | | \"\n print \"\"", "def show(self,surface):\n f=self.getForm()\n for case in self.cases:\n case.show(surface,side_color=mycolors.BLACK)\n f.side_color=mycolors.BLACK\n f.side_width=3\n f.show(surface)\n f[0].showText(surface,\"Board\")", "def display_board(self):\n board = self.get_board()\n\n # print the column headers\n print(' a b c d e f g h i')\n\n # print the board, one row at a time, by first making a list for the row\n for row in range(1, 11):\n\n # start row with number\n row_list = [str(row)]\n\n # extra space after single digits so they match the '10 '\n if row < 10:\n row_list.append('')\n\n # 
fill the row_list\n for col in 'abcdefghi':\n position = col + str(row)\n row_list.append(board[position])\n\n # display piece_ids in row_list separated by spaces\n print(*row_list)\n\n # finish with blank line\n print()", "def render(self):\n # Clear the old board.\n self.clear()\n\n # Draw the board in a single batch.\n batch = Batch()\n batch = self.draw_board(batch)\n batch.draw()\n\n # Send to screen.\n self.flip()", "def draw(self):\r\n self._board_view = BoardView(self._master, self._grid_size, self._board, self.move_to, self.flag_cell)\r\n self._board_view.pack()", "def _show(self):\n\t\t#print ('showing rect')\n\t\tpg.draw.rect(gameDisplay, self._c, [self._x, self._y, self._w, self._h])", "def draw_whole_screen(board):\n Screen.draw_top_bar()\n Screen.draw_btm_bar()\n Screen.draw_checkered_board()\n Screen.draw_all_chessmen(board)", "def print_board(self):\n print\n print \"%s %56s\" % (\"My Board:\", self.opponent_name + \"'s Board:\"),\n\n print\n print \"%-3s\" % \"\",\n for i in range(BOARD_SIZE):\n print \"%-3s\" % str(i+1),\n\n print(\" ||| \"),\n print \"%-3s\" % \"\",\n for i in range(BOARD_SIZE):\n print \"%-3s\" % str(i+1),\n\n print\n\n for i in range(BOARD_SIZE):\n print \"%-3s\" % Client.letters[i],\n for j in range(BOARD_SIZE):\n print \"%-3s\" % self.board[i, j],\n\n print(\" ||| \"),\n print \"%-3s\" % Client.letters[i],\n for j in range(BOARD_SIZE):\n print \"%-3s\" % self.enemy_board[i, j],\n print\n\n print", "def draw(self, players):\r\n\r\n\t\tfor line in self.board:\r\n\t\t\tprint(line)\r\n\r\n\t\tprint('Name : Space')\r\n\t\tprint('------------')\r\n\t\tfor player in players:\r\n\t\t\tif player.isPlaying():\r\n\t\t\t\tprint(player.getName() + ': ' + str(player.getSpace()))", "def show_board():\n print('Player 1 Cards:')\n show_hand(player_one.cards)\n print('Player 2 Cards:')\n show_hand(player_two.cards)\n print('Discard:')\n print(Deck.discard)\n print('\\n')", "def draw_board(self, board):\n self.delete(tk.ALL)\n for y in range(self._grid_size):\n x = 0\n for char in board[:self._grid_size]:\n if char == UNEXPOSED:\n\n self.create_image(self._grid_width * (x + 0.5), self._grid_width * (y + 0.5),\n image=self.images['unrevealed'])\n elif char == FLAG:\n self.create_image(self._grid_width * (x + 0.5), self._grid_width * (y + 0.5),\n image=self.images['pokeball'])\n elif char == POKEMON:\n self.create_image(self._grid_width * (x + 0.5), self._grid_width * (y + 0.5),\n image=self.images[self._pokemon_list[random.randint(0, 5)]])\n else:\n self.create_image(self._grid_width * (x + 0.5), self._grid_width * (y + 0.5),\n image=self.images[char])\n x += 1\n board = board[self._grid_size:]\n self.pack()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
draws a skip button
def _render_skip_button(self, font) -> None:
    pygame.draw.rect(self.screen, PyGameUi.light_grey,
                     [PyGameUi.skip_button_x, PyGameUi.penalty_box_y,
                      PyGameUi.skip_button_x_length, PyGameUi.penalty_box_y_length], 0)
    self.button(0, PyGameUi.skip_button_x_length, PyGameUi.penalty_box_y_length,
                PyGameUi.light_grey, PyGameUi.dark_grey)
    text = font.render("skip", True, PyGameUi.white)
    self.screen.blit(text, [PyGameUi.skip_button_x + PyGameUi.penalty_text_y_offset,
                            PyGameUi.penalty_box_y + PyGameUi.penalty_text_y_offset])
[ "def btn_func_skip(self):\n self.new_point(QtCore.QPoint(-1, -1))", "def story_skip(self):\r\n #if self.skip.displayed(max_wait=5):\r\n self.skip.click()\r\n # return not self.skip.displayed(max_wait=5)\r", "def skip():\n Playlist.skip_song()\n current_song_text.setText(\"{}\".format(Playlist.get_current_song()))\n play_pause_btn.setText(\"Pause\")", "def __init__(self, controller, parent=None):\n super(SkipTool, self).__init__(controller, parent)\n self._skipItem = SkipItem()\n _pen = QPen(styles.redstroke, 2)\n self.baseWidth = styles.PATH_BASE_WIDTH\n self.hide()\n self.setZValue(styles.ZPATHTOOL)", "def OnIgnore(self, evt):\r\n self.Advance()", "def main_printSkip(pName, name, tup, rest):\n main_printRest(pName, rest)\n print \"{\", pName, \"} Skip image:\\t\", (name)\n print \"{\", pName, \"} Tuple value:\\t\", tup\n print \"-----------------------------------------------------------------\"", "def drawContinue():\n msg = \"Press any to continue...\"\n w = len(str(msg)) + 2\n l = 3\n y = 40\n x = 20\n win = curses.newwin(l, w, y, x)\n win.addstr(1,1,msg)\n win.box()\n\n pan = curses.panel.new_panel(win)\n curses.panel.update_panels()\n win.noutrefresh();curses.doupdate()\n STDSCR.getch()\n pan.hide()\n return pan", "def white_draw(self):\n val = messagebox.askyesno(title=\"Info\", message=\"White Offered Draw, Accept ?\")\n if val == True:\n self.adraw()", "def black_draw(self):\n val = messagebox.askyesno(title=\"Info\", message=\"Black Offered Draw, Accept ?\")\n if val == True:\n self.adraw()", "def button3(self, event):\n\n if self.inline:\n pass\n else:\n self.cs.pop_label()\n self.cs.ax.figure.canvas.draw()", "def Down(self):\n \n self.Draw = True", "def click_continue(self):\n QTest.mouseClick(self.config.q_continue, Qt.LeftButton)", "def next_on_click(layer, event):\n if layer.mode == 'add':\n next_label()\n\n # by default, napari selects the point that was just added\n # disable that behavior, as the highlight gets in the way\n layer.selected_data = []", "def skip():\n _stop_player_process()", "def disable_button(self, display):\r\n self.button.destroy()\r\n self.create_label(display)", "async def skip(self, inter: disnake.GuildCommandInteraction):\r\n if not self.bot.data.save['gw']['skip']:\r\n with self.bot.data.lock:\r\n self.bot.data.save['gw']['skip'] = True\r\n self.bot.data.pending = True\r\n await inter.response.send_message(embed=self.bot.util.embed(title=\"The command ran with success\", color=self.color), ephemeral=True)\r\n else:\r\n await inter.response.send_message(embed=self.bot.util.embed(title=\"Error\", description=\"The next set of buffs is already beind skipped\", color=self.color), ephemeral=True)", "def _draw_discard(self) -> Card:\n return self._discards.draw()", "def skipped(self):\n return self.get_progress()==\"skip\"", "def _draw(self):\n self._connection.write(\"3\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
renders the dice onto the board
def _render_dice(self, font) -> None:
    for dice in range(len(self.lst_eyes)):
        text = font.render(f"{self.lst_eyes[dice]}", True,
                           self.convert_number_to_color(dice, True))
        self.screen.blit(text, [PyGameUi.button_length + PyGameUi.button_x_distance * dice,
                                PyGameUi.penalty_box_y + PyGameUi.penalty_text_y_offset])
    text = font.render("your dice", True, PyGameUi.dark_grey)
    self.screen.blit(text, [PyGameUi.box_x + PyGameUi.dice_text_x_offset,
                            PyGameUi.penalty_box_y + PyGameUi.dice_text_y_offset])
[ "def roll_two_dice(self, graphical=True):\n\n die_1 = self.get_random_die() # Set random value between 1 - 6\n die_2 = self.get_random_die() # Set random value between 1 - 6\n\n # -------------------\n # DIE MAP DESCRIPTION\n # -------------------\n # The die map holds the layout sequence for a dice roll (1 - 6).\n # Use the Dictionary's key to produce a map that\n # can be replaced with whatever symbol you want. (i.e. *)\n die_map = {1: [\" \", \" \", \" \", \" \", \"*\", \" \", \" \", \" \", \" \"],\n 2: [\" \", \" \", \"*\", \" \", \" \", \" \", \"*\", \" \", \" \"],\n 3: [\"*\", \" \", \" \", \" \", \"*\", \" \", \" \", \" \", \"*\"],\n 4: [\"*\", \" \", \"*\", \" \", \" \", \" \", \"*\", \" \", \"*\"],\n 5: [\"*\", \" \", \"*\", \" \", \"*\", \" \", \"*\", \" \", \"*\"],\n 6: [\"*\", \" \", \"*\", \"*\", \" \", \"*\", \"*\", \" \", \"*\"]}\n\n # Prepare the ASCII print sequence for each die.\n left = die_map[die_1]\n right = die_map[die_2]\n\n if graphical is True:\n print(\n f\"+-----------+ +-----------+\\n\" +\n f\"| {left[0]} {left[1]} {left[2]} | \" +\n f\"| {right[0]} {right[1]} {right[2]} |\\n\" +\n f\"| {left[3]} {left[4]} {left[5]} | \" +\n f\"| {right[3]} {right[4]} {right[5]} |\\n\" +\n f\"| {left[6]} {left[7]} {left[8]} | \" +\n f\"| {right[6]} {right[7]} {right[8]} |\\n\" +\n f\"+-----------+ +-----------+\"\n )\n\n return (die_1, die_2)", "def print_dice_row(dice):\n top_row = \" .---------. \" * 5 + \"\\n\"\n bottom_row = \" '---------' \" * 5 + \"\\n\"\n one_rows = [\" | | \", \" | O | \", \" | | \"]\n two_rows = [\" | O | \", \" | | \", \" | O | \"]\n three_rows = [\" | O | \", \" | O | \", \" | O | \"]\n four_rows = [\" | O O | \", \" | | \", \" | O O | \"]\n five_rows = [\" | O O | \", \" | O | \", \" | O O | \"]\n six_rows = [\" | O O | \", \" | O O | \", \" | O O | \"]\n dice_rows = [one_rows, two_rows, three_rows, four_rows, five_rows, six_rows]\n second_row = \"\"\n third_row = \"\"\n fourth_row = \"\"\n\n dice_numbers = \"\"\n for x in range(1, 6):\n dice_numbers += str(x).center(13)\n\n for die in dice:\n second_row += dice_rows[die.value - 1][0]\n third_row += dice_rows[die.value - 1][1]\n fourth_row += dice_rows[die.value - 1][2]\n\n print(\"\\n \" + dice_numbers + \"\\n \" + top_row + \" \" + second_row + \"\\n \" + third_row + \"\\n \" + fourth_row + \"\\n \" + bottom_row)", "def display_dice(dice):\n dice1, dice2 = dice # unpack the tuple nito variables dice1 and dice2\n print(f'Player rolled {dice1} + {dice2} = {sum(dice)}')", "def draw_dice(c, f, b, s, dot):\n assert len(dot) == 1, 'Dot must be a single symbol'\n border = ' -------'\n def draw(b):\n return dot if b else ' '\n c, f, b, s = map(draw, [c, f, b, s])\n top = ' '.join(['|', b, ' ', f, '|'])\n middle = ' '.join(['|', s, c, s, '|'])\n bottom = ' '.join(['|', f, ' ', b, '|'])\n return '\\n'.join([border, top, middle, bottom, border])", "def add_dice(self, dice_movement, win, dice_number):\n win.blit(pygame.image.load(os.path.join(sys.path[0]) + \"/graphics/dice\" + str(dice_number) + \".png\"), (0, 650))\n dice_movement.move_player(win, dice_number)", "def drawBoard(self):\n\n # \"board\" is a list of 10 strings representing the board (ignore index 0)\n print(' | |')\n print(' ' + self.board[7] + ' | ' + self.board[8] + ' | ' + self.board[9])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + self.board[4] + ' | ' + self.board[5] + ' | ' + self.board[6])\n print(' | |')\n print('-----------')\n print(' | |')\n print(' ' + self.board[1] + ' | ' + self.board[2] + ' | ' + 
self.board[3])\n print(' | |')", "def draw_cells(self, surface):\n for cell in self.cells:\n pygame.draw.rect(surface, cell.color, cell, 2)", "def draw_food(self):\n\n pygame.draw.rect(self.screen, self.food_color, self.rect)", "def draw_board(self):\r\n for i in range(9):\r\n for j in range(9):\r\n # Draw black lines to demarkate the 'boxes'\r\n if j%3 == 0 and j != 0:\r\n pygame.draw.line(self.window, BLACK, ((j//3)*180, 0), ((j//3)*180, 540), 4)\r\n if i%3 == 0 and i != 0:\r\n pygame.draw.line(self.window, BLACK, (0, (i//3)*180), (540, (i//3)*180), 4)\r\n \r\n # Draw the cells \r\n self.cells[i][j].draw(BLACK, 1)\r\n\r\n # Don't draw the placeholder 0s on the grid\r\n if self.cells[i][j].value != 0:\r\n self.cells[i][j].display(self.cells[i][j].value, (21+(j*60), (16+(i*60))), (0, 0, 0))\r\n \r\n # Bottom most line\r\n pygame.draw.line(self.window, (0, 0, 0), (0, ((i+1) // 3) * 180), (540, ((i+1) // 3) * 180), 4)", "def _draw_cell(x, y, color):\n px = x * length\n py = y * length\n\n rectangle = pygame.Rect((px, py), size)\n pygame.draw.rect(pygame.display.get_surface(), color, rectangle)", "def draw_board(self) -> None:\n for row in range(LENGTH):\n print('--------------')\n print('| ', end='')\n for col in range(LENGTH):\n if self.board[row, col] == self.x:\n print(' x |', end='')\n elif self.board[row, col] == self.o:\n print(' o |', end='')\n else:\n print(' |', end='')\n print('') # End of column\n print('--------------') # End of rows", "def draw_score_board(self):\r\n self.score_board.draw()", "def drawDucks(duckSize):\n pass #TODO drawduck ", "def draw(self):\n\n # Top-left corner of the world\n width, height = self.size\n x = 0 - width // 2\n y = height // 2\n\n turtle.clear()\n for person in self.people:\n person.draw()\n draw_rect(x, y, width, height)\n draw_text(x, y, f'Hours: {self.hours}')\n draw_text(0, y, f'Infected: {self.count_infected()}', align='center')", "def draw(self):\n #Set colors\n WHITE = (255, 255, 255)\n GREEN = (25, 200, 25)\n\n #Set text\n score_text = self.HUD_font.render(\"Score: \" + str(self.score), True, WHITE)\n score_rect = score_text.get_rect()\n score_rect.topleft = (10, WINDOW_HEIGHT - 50)\n\n health_text = self.HUD_font.render(\"Health: \" + str(self.player.health), True, WHITE)\n health_rect = health_text.get_rect()\n health_rect.topleft = (10, WINDOW_HEIGHT - 25)\n\n title_text = self.title_font.render(\"Zombie Knight\", True, GREEN)\n title_rect = title_text.get_rect()\n title_rect.center = (WINDOW_WIDTH//2, WINDOW_HEIGHT - 25)\n\n round_text = self.HUD_font.render(\"Night: \" + str(self.round_number), True, WHITE)\n round_rect = round_text.get_rect()\n round_rect.topright = (WINDOW_WIDTH - 10, WINDOW_HEIGHT - 50)\n\n time_text = self.HUD_font.render(\"Sunrise In: \" + str(self.round_time), True, WHITE)\n time_rect = time_text.get_rect()\n time_rect.topright = (WINDOW_WIDTH - 10, WINDOW_HEIGHT - 25)\n\n #Draw the HUD\n display_surface.blit(score_text, score_rect)\n display_surface.blit(health_text, health_rect)\n display_surface.blit(title_text, title_rect)\n display_surface.blit(round_text, round_rect)\n display_surface.blit(time_text, time_rect)", "def draw_number(n, dot='*'):\n if n == 1:\n return draw_dice(1,0,0,0,dot)\n elif n == 2:\n return draw_dice(0,1,0,0,dot)\n elif n == 3:\n return draw_dice(1,1,0,0,dot)\n elif n == 4:\n return draw_dice(0,1,1,0,dot)\n elif n == 5:\n return draw_dice(1,1,1,0,dot)\n elif n == 6:\n return draw_dice(0,1,1,1,dot)\n else:\n return ''", "def render_colored(room_state):\n for x in 
range(room_state.shape[0]):\n for y in range(room_state.shape[1]):\n end = \"\" if y < room_state.shape[0] - 1 else \" \"\n bg_color = BG_COLORS[room_state[x][y]]\n color = \"white\" if bg_color == \"black\" else \"black\"\n if room_state[x][y] == 5:\n colored_print(\" P \", \"red\", bg_color, end)\n elif room_state[x][y] == 0:\n colored_print(f\" \", color, bg_color, end)\n else:\n colored_print(f\" {room_state[x][y]} \", color, bg_color, end)\n\n return", "def __str__(self):\n return \" | \".join([str(die) for die in self.dice])", "def DrawNum(self, DC):\n f = wx.Font(10, self.fontfamily, self.fontstyle, self.fontweight) # initial font setting\n self._CalcFontSize(DC, f)\n\n cnt_x = 0\n cnt_y = 1\n for val in self.cal_days:\n x = self.gridx[cnt_x]\n y = self.gridy[cnt_y]\n\n self._DrawDayText(x, y, val, f, DC)\n\n if cnt_x < 6:\n cnt_x = cnt_x + 1\n else:\n cnt_x = 0\n cnt_y = cnt_y + 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
shows whether the player is active or passive
def _show_player_mode(self, font) -> None: if self.is_active_player: player_mode = "active player" else: player_mode = "passive player" text = font.render(f"{player_mode}", True, PyGameUi.dark_grey) self.screen.blit(text, [PyGameUi.box_x + PyGameUi.player_mode_x_offset, PyGameUi.penalty_box_y + PyGameUi.player_mode_y_offset])
[ "def external_player(self, player_obj):\n return plugin_addon.getSetting('external_player').lower() == 'true'", "def is_paused(self) -> bool:", "def is_playing(self, user):\r\n\t\treturn user is self.player1 or user is self.player2", "def test_toggle_active(self):\n the_game = game.Game()\n the_game.create_player('player1')\n the_game.create_player('player2')\n the_game.add_player('player1')\n the_game.add_player('player2')\n the_game.start()\n res = the_game.active_player.get_name()\n exp = 'player1'\n self.assertEqual(res, exp)\n\n # Toggle the active player\n the_game.toggle_active()\n res = the_game.active_player.get_name()\n exp = 'player2'\n self.assertEqual(res, exp)", "def check_state_game(stats, play_button, pause_button):\n if stats.game_pause:\n pause_button.draw_button()\n pygame.mouse.set_visible(True)\n elif not stats.game_active:\n play_button.draw_button()\n pygame.mouse.set_visible(True)", "def get_player_status(self):\n\n return self.isin", "def is_paused():\n return paused", "def show_playing(self):\n #if there is a playing video and no paused one\n if self._current_video and not self._paused_video:\n video_info = self._video_library.get_video(self._current_video.video_id)\n tagString = str(video_info.tags).strip(\"()\")\n print('Currently playing: '+ video_info.title + \" (\" + video_info.video_id + \")\", \"[\" + (tagString.translate({39: None})).replace(',', '') + \"]\")\n else:\n #no video is playing or paused\n if not self._paused_video:\n print(\"No video is currently playing\")\n #no video is playing but there is a paused one\n else:\n video_info = self._video_library.get_video(self._paused_video.video_id)\n tagString = str(video_info.tags).strip(\"()\")\n print('Currently playing: ' + video_info.title + \" (\" + video_info.video_id + \")\", \"[\" + (tagString.translate({39: None})).replace(',', '') + \"]\"+ \" - PAUSED\")\n return\n return", "def check_win():\n if MY.player1_hp < 1:\n Manager.current = 1\n MY.state = 1\n MY.display_text = TextObject(WHITE, 24, \"Player 2 wins! Play again?\")\n \n elif MY.player2_hp < 1:\n Manager.current = 1\n MY.state = 1\n MY.display_text = TextObject(WHITE, 24, \"Player 1 wins! 
Play again?\")", "def isPlayer(self):\n \n return False # Player class contains an implementation that returns True", "def isplaying(self):\n\t\treturn self.play is not None", "def player_alive(self)->bool:\n return self.tries_used < 9", "def current_status(self):\n self.screen.blit(f\"Humanoid is currently {self.model.action()}\\\n \\n Try to keep them alive.\")", "def show_playing(self):\n\n video_id = self.playing;\n video = self._video_library.get_video(video_id);\n \n if video:\n out = 'Currently playing: {title} ({id}) [{tags}]'.format(title=video.title, id=video.video_id, tags=' '.join(video.tags))\n if self.is_paused:\n out = out + \" - PAUSED\"\n print(out)\n else:\n print(\"No video is currently playing\")", "def is_alive(self):\n # if self.current_health <= 0:\n # False\n # else:\n # True", "def playback_available(self):\n return 1", "def show_playing(self):\r\n\r\n if self.current_playing == \"\":\r\n print(\"No video is currently playing\")\r\n else:\r\n v = self._video_library.get_video(self.current_playing)\r\n paused_status = \"\" if self.fl_playing_video else \" - PAUSED\"\r\n tags = \"[\" + ' '.join(v.tags) + \"]\"\r\n video_info = v.title + \" (\" + v.video_id + \") \" + tags\r\n print(\"Currently playing: \" + video_info + paused_status)", "def IsPositionActive( self, position ):\r\n singlePlayer = lambda x: self.__currentActivePosition == x\r\n multiPlayer = lambda x: True\r\n unbound = lambda x: False\r\n return {\r\n GameState.UNBOUND : unbound,\r\n GameState.SINGLE : singlePlayer,\r\n GameState.SINGLE_INVITE : singlePlayer,\r\n GameState.MULTIPLE : multiPlayer,\r\n GameState.VICTORY : unbound,\r\n }[self.__currentGameState](position)", "def display_status(self):\n\n if self.game.is_end:\n if self.game.status == \"win\":\n text = \"\\nYou won !\\nPress any key to continue...\"\n elif self.game.status == \"lose\":\n text = f\"\\nYou lost ! You only had {str(self.game.player.inventory)}/3 items.\\nPress any key to continue...\"\n\n print(text)\n self.game.is_running = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
converts the color of a button to a row number
def convert_color_to_row(color) -> int: if color == PyGameUi.red_vibrant: return 0 if color == PyGameUi.yellow_vibrant: return 1 if color == PyGameUi.green_vibrant: return 2 if color == PyGameUi.blue_vibrant: return 3 if color == PyGameUi.black: return 4 if color == PyGameUi.dark_grey: return 5
[ "def __get_row_ids(self, r) -> Tuple[int, int, int]:\n return r*self.col, r*self.col+self.col, 1", "def pressed(self):\n\n for i, col in enumerate(self.cols):\n col.value(1)\n for j, row in enumerate(self.rows):\n if row.value() == 1:\n col.value(0)\n return self.buttons[j][i]\n col.value(0)\n return None", "def buttonAtRow(self, row):\n return self.layout.itemAtPosition(row, 3).widget()", "def number_from_label(label):\n row_number = 0\n row_label_chars = list(label.upper())\n for i, c in enumerate(reversed(row_label_chars)):\n colnum = ord(c) - 64\n row_number += colnum * pow(26, i)\n return row_number", "def get_row(master):\r\n\r\n try:\r\n row = int(master.grid_slaves()[0].grid_info()[\"row\"]) + 1\r\n except IndexError:\r\n # If master widget does not have any widgets plotted yet\r\n row = 0\r\n return row", "def add_number_buttons(self):\r\n add_rowspace, add_colspace = False, False\r\n for i in range(len(self.board)): # for each row i of the board\r\n add_rowspace = ((i+1)%self.base_size == 1 and i != 0)\r\n if add_rowspace:\r\n [self.widget_board.add_widget(Label(text='')) for _ in range(self.side_size + self.base_size - 1)] # add sudoku row spacing\r\n for j in range(len(self.board[i])): # for each index j in row i of the board\r\n add_colspace = ((j+1)%self.base_size == 1 and j != 0)\r\n if add_colspace:\r\n self.widget_board.add_widget(Label(text='')) # add sudoku spacing\r\n button = NumberButton(row=i, column=j, number=self.board[i][j])\r\n button.bind(on_release=button.use_button)\r\n self.widget_board.add_widget(button)", "def row_number(self) -> int:\n return pulumi.get(self, \"row_number\")", "def _color_to_number(self, color):\n if color == 'black':\n return 1\n elif color == 'blue':\n return 2\n elif color == 'green':\n return 3\n elif color == 'yellow':\n return 4\n elif color == 'red':\n return 5\n else: # color == 'white'\n return 6", "def getColorIndex(arg1: 'SoState', num: 'int') -> \"int32_t\":\n return _coin.SoLazyElement_getColorIndex(arg1, num)", "def get_excel_row_index(row: Union[str, int]) -> int:\n\treturn int(row)-1", "def color_column(self):\n return 14", "def getRandomBtnIdx(self):\r\n N = len(self.btns)\r\n if N < 1:\r\n return -1\r\n else:\r\n return randint(0, N-1)", "def get_row_col(this):\n return int(this[:-1]), ord(this[-1]) - ord('A')", "def getNumColorIndices(self) -> \"int32_t\":\n return _coin.SoLazyElement_getNumColorIndices(self)", "def countButtonLeft(self):\n count = 0\n for i in range(self.grid.count()):\n try:\n if isinstance(self.grid.itemAt(i).widget(), QPushButton):\n count +=1\n except Exception:\n pass\n return count", "def radiobutton_getToggled(self):\n for row in range(0, len(self.invest_widget_radiobutton)):\n if self.invest_widget_radiobutton[row].isChecked() == True:\n break\n if row == (len(self.invest_widget_radiobutton)-1):\n row = -1\n return row", "def getNum(self) -> \"int32_t\":\n return _coin.SoGLColorIndexElement_getNum(self)", "def opponent_row(self):\n return self.player_row ^ 1", "def _col_index(elt):\n selector = 'td, th' # accept both td and th elements\n col_elt = elt.closest(selector)\n tr = col_elt.parent('tr')\n return 1 + tr.children(selector).index(col_elt[0]) # [0] gets bare element" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
converts numbers of dice or rows to one or a tuple of colors
def convert_number_to_color(number, is_dice=False) -> tuple: if is_dice: if number in (0, 1): return PyGameUi.dark_grey if number == 2: return PyGameUi.red if number == 3: return PyGameUi.yellow_vibrant if number == 4: return PyGameUi.green_vibrant if number == 5: return PyGameUi.blue_vibrant else: # inactive, background, active if number == 0: return PyGameUi.red, PyGameUi.light_red, PyGameUi.red_vibrant if number == 1: return PyGameUi.yellow, PyGameUi.light_yellow, PyGameUi.yellow_vibrant if number == 2: return PyGameUi.green, PyGameUi.light_green, PyGameUi.green_vibrant if number == 3: return PyGameUi.blue, PyGameUi.light_blue, PyGameUi.blue_vibrant if number == 4: return PyGameUi.light_grey, PyGameUi.dark_grey, PyGameUi.black
[ "def get_colours(self, number):\n if number <= 0:\n number = 1\n ix = int(log2(number))\n return self.colours[ix]", "def compute_colors(number):\n return COLORS[number % len(COLORS)]", "def _next_colour():\n return tuple(numpy.concatenate(\n (numpy.random.choice(range(256), size=3) / 256, [1.0])))", "def getColor(rgb=None, hsv=None):\n # recursion, return a list if input is list of colors:\n if _isSequence(rgb) and (len(rgb) > 3 or _isSequence(rgb[0])):\n seqcol = []\n for sc in rgb:\n seqcol.append(getColor(sc))\n return seqcol\n\n # because they are most common:\n if rgb=='r':\n return (0.9960784313725, 0.11764705882352, 0.121568627450980)\n elif rgb=='g':\n return (0.0156862745098, 0.49803921568627, 0.062745098039215)\n elif rgb=='b':\n return (0.0588235294117, 0.0, 0.984313725490196)\n\n if str(rgb).isdigit():\n rgb = int(rgb)\n\n if hsv:\n c = hsv2rgb(hsv)\n else:\n c = rgb\n\n if _isSequence(c):\n if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:\n return c # already rgb\n else:\n if len(c) == 3:\n return list(np.array(c) / 255.0) # RGB\n else:\n return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA\n\n elif isinstance(c, str): # is string\n c = c.replace(\"grey\", \"gray\").replace(\" \", \"\")\n if 0 < len(c) < 3: # single/double letter color\n if c.lower() in color_nicks.keys():\n c = color_nicks[c.lower()]\n else:\n vedo.logger.warning(f\"Unknown color nickname {c}\\nAvailable abbreviations: {color_nicks}\")\n return (0.5, 0.5, 0.5)\n\n if c.lower() in colors.keys(): # matplotlib name color\n c = colors[c.lower()]\n # from now format is hex!\n\n if c.startswith(\"#\"): # hex to rgb\n h = c.lstrip(\"#\")\n rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))\n rgbh = np.array(rgb255) / 255.0\n if np.sum(rgbh) > 3:\n vedo.logger.error(f\"in getColor(): Wrong hex color {c}\")\n return (0.5, 0.5, 0.5)\n return tuple(rgbh)\n\n else: # vtk name color\n namedColors = vtk.vtkNamedColors()\n rgba = [0, 0, 0, 0]\n namedColors.GetColor(c, rgba)\n return (rgba[0]/255.0, rgba[1]/255.0, rgba[2]/255.0)\n\n elif isinstance(c, int): # color number\n if c >= 0:\n return colors1[c % 10]\n else:\n return colors2[-c % 10]\n\n elif isinstance(c, float):\n if c >= 0:\n return colors1[int(c) % 10]\n else:\n return colors2[int(-c) % 10]\n\n # print(\"Unknown color:\", c)\n return (0.5, 0.5, 0.5)", "def colorTuple(c):\n return c.getRgb()", "def color_to_tuple(value):\n if isinstance(value, tuple):\n return value\n if isinstance(value, int):\n if value >> 24:\n raise ValueError(\"Only bits 0->23 valid for integer input\")\n r = value >> 16\n g = (value >> 8) & 0xFF\n b = value & 0xFF\n return [r, g, b]\n\n raise ValueError(\"Color must be a tuple or 24-bit integer value.\")", "def convert_color_to_row(color) -> int:\n if color == PyGameUi.red_vibrant:\n return 0\n if color == PyGameUi.yellow_vibrant:\n return 1\n if color == PyGameUi.green_vibrant:\n return 2\n if color == PyGameUi.blue_vibrant:\n return 3\n if color == PyGameUi.black:\n return 4\n if color == PyGameUi.dark_grey:\n return 5", "def _color_int2tuple(bitmask):\n return (\n (bitmask >> 2) & 1,\n (bitmask >> 1) & 1,\n bitmask & 1\n )", "def check_color(c_tuple):\n for i in range(len(c_tuple)):\n if c_tuple[i]>255:\n c_tuple[i] = 255\n elif c_tuple[i]<0:\n c_tuple[i] = 0\n return c_tuple", "def make_colors(n,noGrey=True):\n k = int(np.ceil(n**(1.0/3)))\n if noGrey and n > (k**3-k):\n k += 1\n assert k > 1\n basis = np.arange(k)/(k-1)\n combinations = np.array([np.array(i) for i in product(basis,basis,basis)])\n stddevs = 
np.array([np.std(i) for i in combinations])\n return [(255.0*i).astype(int) for i in combinations[stddevs.argsort()][::-1]]", "def roll_dice():\n dice1 = random.randrange(1, 7)\n dice2 = random.randrange(1, 7)\n return (dice1, dice2) # pack dice face values into a tuple", "def color_by_value( red, green, blue ):\n red = hexify( red )\n grn = hexify( green )\n blu = hexify( blue ) \n return red+grn+blu", "def roll_dice():\r\n die1 = random.randrange(1, 7)\r\n die2 = random.randrange(1, 7)\r\n return (die1, die2) # pack die face values into a tuple\r", "def colorize_list(numbers, tolerance=0.001):\n retval = []\n for number in numbers:\n if abs(number) <= tolerance:\n retval.append(green('%12.4e' % number))\n else:\n retval.append(red('%12.4e' % number))\n return tuple(retval)", "def color_to_triple(color: Optional[str] = None) -> Tuple[int, int, int]:\n if color is None:\n r = np.random.randint(0, 0x100)\n g = np.random.randint(0, 0x100)\n b = np.random.randint(0, 0x100)\n return (r, g, b)\n else:\n return ImageColor.getrgb(color)", "def getColorIndices(arg1: 'SoState') -> \"int32_t const *\":\n return _coin.SoLazyElement_getColorIndices(arg1)", "def to_rgb(value: TypeColor) -> Tuple[int, int, int]:\n if isinstance(value, tuple):\n return value\n\n if isinstance(value, int):\n value = (value, value, value)\n\n elif isinstance(value, str):\n value = COLORS[value]\n\n return value", "def _background_color(s):\n seed(s)\n r = v = b = 255\n while r + v + b > 255*2:\n r = randint(0, 255)\n v = randint(0, 255)\n b = randint(0, 255)\n return (r, v, b)", "def rendered_color(self, color_id):\n return tuple([round(float(x)/255, 2) for x in self.color_palette['color'][str(color_id)]])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the values of the hidden layer
def get_hidden_values(self, input): # print T.dot(input, self.W).eval() return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
[ "def get_hidden(self, layer):", "def compute_visible(self, h): \n hidden = tf.placeholder(tf.float32, [None, self.n_hidden], name=\"hidden\")\n compute = sample(tf.sigmoid(tf.matmul(hidden, tf.transpose(self.W)) + self.vb))\n \n x = self.sess.run(compute, feed_dict={hidden:h})\n return x", "def _compute_hidden_acts(self, X, W, b):\n A_h = self.activation(T.dot(X, W) + b)\n return A_h", "def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n values = initializer(shape=(self.batch, self.units))\n\n return values", "def run_hidden(self, data):\r\n\r\n num_examples = data.shape[0]\r\n\r\n #print (num_examples) ;\r\n\r\n # Create a matrix, where each row is to be the visible units (plus a bias unit)\r\n # sampled from a training example.\r\n visible_states = np.ones((num_examples, self.num_visible + 1))\r\n\r\n #print (visible_states.shape);\r\n\r\n # Insert bias units of 1 into the first column of data.\r\n #data = np.insert(data, 0, 1, axis = 1)\r\n\r\n data[:,0] = 1 ;\r\n\r\n #print (data.shape) ;\r\n #print (self.weights.shape)\r\n\r\n # Calculate the activations of the visible units.\r\n visible_activations = np.dot(data, self.weights.T)\r\n # Calculate the probabilities of turning the visible units on.\r\n visible_probs = self._logistic(visible_activations)\r\n # Turn the visible units on with their specified probabilities.\r\n #visible_states[:,:] = visible_probs > np.random.rand(num_examples, self.num_visible + 1)\r\n visible_states[:,:] = visible_probs ; \r\n # Always fix the bias unit to 1.\r\n # visible_states[:,0] = 1\r\n\r\n # Ignore the bias units.\r\n visible_states = visible_states[:,1:]\r\n return visible_states", "def value(self, states_summary):\n hidden_1 = self.critic_dense_1(states_summary) # (batch_sz * self.critic_H1)\n hidden_2 = self.critic_dense_2(hidden_1) # (batch_sz * self.critic_H2)\n value_out = self.critic_dense_3(hidden_2) # (batch_sz * 1)\n return value_out", "def evaluateHidden2(self, inputs):\n # Get hidden layer 1 output\n hidden1_output = self.evaluateHidden1(inputs)\n # Get hidden layer 2 output\n if self.bias:\n inputs = np.concatenate((hidden1_output, np.ones((hidden1_output.shape[0], 1))), axis=1)\n output = np.dot(inputs, self.w2.T)\n # Linear thresholding activation\n output[output <= 0] = 0\n output[output > 0] = 1\n return output", "def init_hidden(self):\n weight = next(self.parameters())\n nlayers = self.eta_nlayers\n nhid = self.eta_hidden_size\n return (weight.new_zeros(nlayers, 1, nhid), weight.new_zeros(nlayers, 1, nhid))", "def evaluateFinal(self, inputs):\n # Get hidden layer 1 output\n hidden1_output = self.evaluateHidden1(inputs)\n # Get hidden layer 2 output\n hidden2_output = self.evaluateHidden2(inputs)\n # Get final output\n if self.bias:\n inputs = np.concatenate((hidden2_output, np.ones((hidden2_output.shape[0], 1))), axis=1)\n output = np.dot(inputs, self.w3.T)\n # Linear thresholding activation\n output[output <= 0] = 0\n output[output > 0] = 1\n return output", "def evaluateHidden1(self, inputs):\n if self.bias:\n inputs = np.concatenate((inputs, np.ones((inputs.shape[0], 1))), axis=1)\n output = np.dot(inputs, self.w1.T)\n # Linear thresholding activation\n output[output <= 0] = 0\n output[output > 0] = 1\n return output", "def get_output(self, input_, mask_, hidden_init):\n # input_ are (n_batch, n_timesteps, n_features)\n # change to (n_timesteps, n_batch, n_features)\n input_ = input_.dimshuffle(1, 0, 2)\n # mask_ are (n_batch, n_timesteps)\n masks = masks.dimshuffle(1, 0, 'x')\n sequence_length 
= input_.shape[0]\n batch_num = input_.shape[1]\n\n # precompute input\n if self.precompute:\n additional_dims = tuple(input.shape[k] for k in range(2, input.ndim)) # (output_dim,)\n input = T.reshape(input, (sequence_length*batch_num,) + additional_dims)\n input = T.dot(input, self.W)\n additional_dims = tuple(input.shape[k] for k in range(1, input.ndim)) # (output_dim,)\n input = T.reshape(input, (sequence_length, batch_num,) + additional_dims)\n\n # step function\n def step(input_, hidden):\n if self.precompute:\n return self.out_activation.get_output(input_ + T.dot(hidden, self.U) + self.b)\n else:\n return self.out_activation.get_output(T.dot(input_, self.W) + T.dot(hidden, self.U) + self.b)\n\n # step function, with mask\n def step_masked(input_, mask_, hidden):\n hidden_computed = step(input_, hidden)\n return T.switch(mask_, hidden_computed, hidden)\n\n # main operation\n if self.unroll:\n counter = range(self.gradient_steps)\n if self.backward:\n counter = counter[::-1] # reversed index\n iter_output = []\n outputs_info = [hidden_init]\n for index in counter:\n step_input = [input_[index], mask_[index]] + outputs_info\n step_output = step_masked(*step_input)\n iter_output.append(step_output)\n outputs_info = [iter_output[-1]]\n hidden_output = T.stack(iter_output, axis=0)\n\n else:\n hidden_output = theano.scan(fn=step_masked,\n sequences=[input_, mask_],\n outputs_info=[hidden_init],\n go_backwards=self.backward,\n n_steps = None,\n truncate_gradient=self.gradient_steps)[0] # only need outputs, not updates\n\n # computed output are (n_timesteps, n_batch, n_features)\n # select only required\n if self.output_return_index is None:\n hidden_output_return = hidden_output\n else:\n hidden_output_return = hidden_output[self.output_return_index]\n # change to (n_batch, n_timesteps, n_features)\n hidden_output_return = hidden_output_return.dimshuffle(1, 0, *range(2, hidden_output_return.ndim))\n\n # backward order straight\n if self.backward:\n hidden_output_return = hidden_output_return[:, ::-1]\n\n return hidden_output_return", "def compute_output(self):\n s = 0\n if self._selfw:\n s += self._selfw * self._value\n for (w, i) in zip(self._weights, self._inputs):\n s += w * i.value()\n self._value = self._f(s)\n _logger.info('Neuron {0}: activation: {1}'.format(self._name, self._value))", "def get_bias_hidden(self, visibles, hiddens):\n\n B_H = np.zeros((self.num_hidden,1)) + self.bias_hidden\n\n for n in range(self.num_delays):\n B_H = B_H + np.dot(self.B[n].transpose(), hiddens[n])\n B_H = B_H + np.dot(self.C[n].transpose(), visibles[n])\n\n return B_H", "def calculate_hidden_layer_delta(self):\n downstream_delta = reduce(lambda ret, conn: ret + conn.downstream_node.delta * conn.weight, self.downstream, 0.0)\n self.delta = self.output * (1 - self.output) * downstream_delta", "def hidden(self, sess, batcher, layers=None):\n\n if layers is None:\n layers = list(range(self.cnn_layers))\n\n # initialize layer representation data structure\n layer_reprs = []\n for li in range(1 + np.max(layers)):\n layer_reprs.append([])\n preds = []\n\n # setup feed dict\n fd = self.set_mode('test')\n\n # get first batch\n Xb, _, _, Nb = batcher.next()\n\n while Xb is not None:\n # update feed dict\n fd[self.inputs] = Xb\n\n # compute predictions\n layer_reprs_batch, preds_batch = sess.run(\n [self.layer_reprs, self.preds_op], feed_dict=fd)\n\n # accumulate representationsmakes the number of members for self smaller and also\n for li in layers:\n # squeeze (conv_2d-expanded) second dimension\n if 
layer_reprs_batch[li].shape[1] == 1:\n layer_reprs_batch[li] = layer_reprs_batch[li].squeeze(axis=1)\n\n # append\n layer_reprs[li].append(layer_reprs_batch[li][:Nb].astype('float16'))\n\n # accumualte predictions\n preds.append(preds_batch[:Nb])\n\n # next batch\n Xb, _, _, Nb = batcher.next()\n\n # reset batcher\n batcher.reset()\n\n # accumulate representations\n for li in layers:\n layer_reprs[li] = np.vstack(layer_reprs[li])\n\n preds = np.vstack(preds)\n\n return layer_reprs, preds", "def _make_outputs(self, hidden):\n if self.return_sequences:\n outputs = self.final_linear(hidden)\n\n # If rectilinear and return sequences, return every other value\n if (self.interpolation == \"rectilinear\") and self.return_filtered_rectilinear:\n outputs = outputs[:, ::2]\n else:\n outputs = self.final_linear(hidden[:, -1, :])\n return outputs", "def Q_values(x, W1, W2, bias_W1, bias_W2):\n\n def ReLU(x): # activation function\n return x * (x > 0)\n\n hiddenLayerInput1=np.dot(x,W1)\n hiddenLayerInput=hiddenLayerInput1 + bias_W1\n hiddenLayerActivations = ReLU(hiddenLayerInput)\n\n outputLayerInput1=np.dot(hiddenLayerActivations,W2)\n outputLayerInput= outputLayerInput1+ bias_W2\n Q = ReLU(outputLayerInput)\n\n # YOUR CODE ENDS HERE\n\n return Q, outputLayerInput, hiddenLayerActivations, hiddenLayerInput", "def _get_gate_values(self) -> Tensor:\n gate_values = (\n torch.sigmoid(self.log_alpha_param) * (self.upper_bound - self.lower_bound)\n + self.lower_bound\n )\n return gate_values", "def gradient(self, error, hidden_output):\n error = np.reshape(error, (len(error), 1))\n hidden_output = np.reshape(hidden_output, (len(hidden_output), 1))\n return np.matmul(error, hidden_output.T)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the reconstructed input given the values of the hidden layer
def get_reconstructed_input(self, hidden): return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
[ "def get_hidden_values(self, input):\n# print T.dot(input, self.W).eval()\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def get_output(self, input_, mask_, hidden_init):\n # input_ are (n_batch, n_timesteps, n_features)\n # change to (n_timesteps, n_batch, n_features)\n input_ = input_.dimshuffle(1, 0, 2)\n # mask_ are (n_batch, n_timesteps)\n masks = masks.dimshuffle(1, 0, 'x')\n sequence_length = input_.shape[0]\n batch_num = input_.shape[1]\n\n # precompute input\n if self.precompute:\n additional_dims = tuple(input.shape[k] for k in range(2, input.ndim)) # (output_dim,)\n input = T.reshape(input, (sequence_length*batch_num,) + additional_dims)\n input = T.dot(input, self.W)\n additional_dims = tuple(input.shape[k] for k in range(1, input.ndim)) # (output_dim,)\n input = T.reshape(input, (sequence_length, batch_num,) + additional_dims)\n\n # step function\n def step(input_, hidden):\n if self.precompute:\n return self.out_activation.get_output(input_ + T.dot(hidden, self.U) + self.b)\n else:\n return self.out_activation.get_output(T.dot(input_, self.W) + T.dot(hidden, self.U) + self.b)\n\n # step function, with mask\n def step_masked(input_, mask_, hidden):\n hidden_computed = step(input_, hidden)\n return T.switch(mask_, hidden_computed, hidden)\n\n # main operation\n if self.unroll:\n counter = range(self.gradient_steps)\n if self.backward:\n counter = counter[::-1] # reversed index\n iter_output = []\n outputs_info = [hidden_init]\n for index in counter:\n step_input = [input_[index], mask_[index]] + outputs_info\n step_output = step_masked(*step_input)\n iter_output.append(step_output)\n outputs_info = [iter_output[-1]]\n hidden_output = T.stack(iter_output, axis=0)\n\n else:\n hidden_output = theano.scan(fn=step_masked,\n sequences=[input_, mask_],\n outputs_info=[hidden_init],\n go_backwards=self.backward,\n n_steps = None,\n truncate_gradient=self.gradient_steps)[0] # only need outputs, not updates\n\n # computed output are (n_timesteps, n_batch, n_features)\n # select only required\n if self.output_return_index is None:\n hidden_output_return = hidden_output\n else:\n hidden_output_return = hidden_output[self.output_return_index]\n # change to (n_batch, n_timesteps, n_features)\n hidden_output_return = hidden_output_return.dimshuffle(1, 0, *range(2, hidden_output_return.ndim))\n\n # backward order straight\n if self.backward:\n hidden_output_return = hidden_output_return[:, ::-1]\n\n return hidden_output_return", "def get_hidden(self, layer):", "def reconstruct(self, input):\n # Perform the feed-forward pass for testing:\n deconstructed_layer_value = self.forward_encoding(input, 0,\n self.architecture.shape[0])\n reconstructed_layer_value = self.forward_decoding(\n deconstructed_layer_value,\n 0, self.architecture.shape[0])\n\n n_row_input = input.get_value(borrow=True).shape[0]\n n_column_input = input.get_value(borrow=True).shape[1]\n\n # If the input is a column\n if n_row_input == 0:\n error = T.sum(T.sum(abs(reconstructed_layer_value - input)))\\\n / (n_column_input) * 100\n # If the input is a row\n elif n_column_input == 0:\n error = T.sum(T.sum(abs(reconstructed_layer_value - input)))\\\n / (n_row_input) * 100\n # If the input is a matrix\n else:\n error = T.sum(T.sum(abs(reconstructed_layer_value - input)))\\\n / (n_row_input * n_column_input) * 100\n\n return reconstructed_layer_value, error", "def reshape_input(self):\n import numpy as np\n\n if self.rnn:\n\n while len(self.x_train) % self.lookback != 0:\n self.x_train = self.x_train[:-1, :]\n 
self.y_train = self.y_train[:-1]\n\n self.x_train = np.array([self.x_train]).reshape(int(len(self.x_train)/self.lookback), self.lookback, self.indim)\n self.y_train = self.y_train[::self.lookback]\n\n while len(self.x_val) % self.lookback != 0:\n self.x_val = self.x_val[:-1, :]\n self.y_val = self.y_val[:-1]\n\n self.x_val = np.array([self.x_val]).reshape(int(len(self.x_val)/self.lookback), self.lookback, self.indim)\n self.y_val = self.y_val[::self.lookback]", "def _build_inner_layer(hyp, encoded_features, train):\n\n grid_size = hyp['grid_width'] * hyp['grid_height']\n outer_size = grid_size * hyp['batch_size']\n\n num_ex = hyp['batch_size'] * hyp['grid_width'] * hyp['grid_height']\n # 512\n channels = int(encoded_features.shape[-1])\n hyp['cnn_channels'] = channels\n # batch_size,12,39,512 -> [batch_size*468,512]\n hidden_input = tf.reshape(encoded_features, [num_ex, channels])\n # 0.1\n scale_down = hyp['scale_down']\n # hidden_input中值变为scale_down倍\n hidden_input = tf.reshape(hidden_input * scale_down, (hyp['batch_size'] * grid_size, channels))\n # 均匀分布初始化器\n initializer = tf.random_uniform_initializer(-0.1, 0.1)\n\n model_2D_path = os.path.join(hyp['dirs']['data_dir'], 'model_2D.pkl')\n with open(model_2D_path, 'rb') as file:\n data_dict = pickle.load(file, encoding='latin1')\n file.close()\n\n with tf.variable_scope('Overfeat'):\n # 常量初始化器(512,512)\n trained_ip = tf.constant_initializer(value=data_dict['ip'])\n # 创建新的tensorflow变量\n w = tf.get_variable(name='ip', initializer=trained_ip, shape=data_dict['ip'].shape)\n # 矩阵乘法 (batch_size*468,512)\n output = tf.matmul(hidden_input, w)\n\n if train:\n # Adding dropout during training\n # 减轻过拟合带来的问题\n # 让每个神经元按照一定的概率停止工作,这次训练过程中不更新权值,也不参加神经网络的计算。但是它的权重依然存在,下次更新时可能会使用到它。\n # 每一个元素被保存下的概率keep_prob\n output = tf.nn.dropout(output, 0.5)\n return output, data_dict", "def reduce_2DtoVec(self, U, mask, num_hidden, num_steps, reuse=None):\n with tf.variable_scope(\"Reduction\"):\n batch_size = tf.shape(U)[0]\n num_in_channels = U.get_shape().as_list()[3]\n h = tf.zeros([tf.shape(U)[0], num_hidden])\n for i in xrange(num_steps):\n with tf.name_scope(\"Step\" + str(i+1)):\n reuse_layer = None if reuse is None and i is 0 else True\n # Emit query\n q = tf.layers.dense(\n h, num_in_channels, activation=tf.nn.tanh,\n name=\"Query1\", reuse=reuse_layer\n )\n q = tf.layers.dense(\n q, num_in_channels, activation=None,\n name=\"Query2\", reuse=reuse_layer\n )\n q = tf.reshape(q, [batch_size, 1, 1, num_in_channels])\n\n # Compute attention weights\n a = tf.reduce_sum(U * q, axis=3, keep_dims=True)\n a_max = tf.reduce_max(a, axis=[1, 2], keep_dims=True)\n a_weights = mask * tf.exp(a - a_max)\n Z = tf.reduce_sum(a_weights, axis=[1, 2], keep_dims=True)\n a_weights = a_weights / (Z + 1E-3)\n\n # Compute attention-weighted result\n U_avg = tf.reduce_sum(a_weights * U, axis=[1, 2])\n\n # GRU update\n hU = tf.concat(axis=1, values=[h, U_avg])\n gate1 = tf.layers.dense(\n hU, num_hidden, activation=tf.nn.sigmoid,\n name=\"Gate1\", reuse=reuse_layer\n )\n gate2 = tf.layers.dense(\n hU, num_hidden, activation=tf.nn.sigmoid,\n name=\"Gate2\", reuse=reuse_layer\n )\n update = tf.nn.tanh(tf.layers.dense(\n gate2 * h, num_hidden, activation=None,\n name=\"h\", reuse=reuse_layer\n ) + tf.layers.dense(\n U_avg, num_hidden, activation=None,\n name=\"U\", reuse=reuse_layer\n ))\n h = gate1 * h + (1. 
- gate1) * update\n return h", "def calc_activation(self, inp):\n inp_rightform = ny.matrix( inp ).T\n self.a = [inp_rightform]\n tmp = ny.dot( self.weights_layer[0], inp_rightform ) + self.bias[0]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n\n for i in range(self.number_hidden_layers-1):\n\n tmp = ny.dot( self.weights_layer[i+1], tmp ) + self.bias[i+1]\n tmp = self.activation_function(tmp)\n self.a.append(tmp)\n\n tmp = ny.dot( self.weights_layer[self.number_hidden_layers], tmp )+self.bias[self.number_hidden_layers]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n #eventuell muss shape von tmp angepasst werden", "def autoencoder_feedforward(theta, visible_size, hidden_size, data):\n\n ### YOUR CODE HERE ###\n # theta is an array with order [{W(1)}, {W(2)}, {b(1)}, {b(2)}]\n # in W, ROWS INDICATE \"TO\" NODES AND COLUMNS INDICATE \"FROM\" NODES\n # Pull values from theta vector and reshape:\n W1 = theta[0:(hidden_size * visible_size)]\n W1 = numpy.reshape(W1, (hidden_size, visible_size))\n \n W2 = theta[(hidden_size * visible_size):((hidden_size * visible_size) + (visible_size * hidden_size))]\n W2 = numpy.reshape(W2, (visible_size, hidden_size))\n \n b1 = theta[((hidden_size * visible_size) + (visible_size * hidden_size)):(((hidden_size * visible_size) + (visible_size * hidden_size)) + hidden_size)]\n b2 = theta[(((hidden_size * visible_size) + (visible_size * hidden_size)) + hidden_size) : (((hidden_size * visible_size) + (visible_size * hidden_size)) + hidden_size + visible_size)]\n \n ##########################################################################################################################################\n # FEED FORWARD/FORWARD PROPOGATION:\n # in W, ROWS INDICATE \"TO\" NODES (i) AND COLUMNS INDICATE \"FROM\" NODES (j)\n # Activations at layer 1 = inputs, i.e., aSup1 = x\n # Number of neurons = number of input data points (pixels), e.g. 
784, which we can also say is the visible size?\n \n # In the sequel, we also let z^{(l)}_i denote the total weighted sum of inputs to unit i in layer l, including the bias term (e.g., \\textstyle z_i^{(2)} = \\sum_{j=1}^n W^{(1)}_{ij} x_j + b^{(1)}_i), so that a^{(l)}_i = f(z^{(l)}_i).\n # http://ufldl.stanford.edu/wiki/index.php/Neural_Networks\n \n # Number of training points\n m = data.shape[1]\n \n # note that activations at the first layer are equal to the input data:\n # a_i^{(1)} = x_i\n # Compute z values at second layer\n # zSup2 (i.e., z^{(2)}) is the matrix of z values at layer 2\n # zSup2 = W^{(1)} x + b^{(1)}\n zSup2 = W1.dot(data) + numpy.tile(b1, (m, 1)).transpose()\n \n # Compute activations at second layer by mapping z^{(2)} to sigmoid(z^{(2)})\n aSup2 = sigmoid(zSup2)\n \n #Compute z at third layer, z^{(3)}\n zSup3 = W2.dot(aSup2) + numpy.tile(b2, (m, 1)).transpose()\n # z at third layer is the total weighted sum of inputs to unit i in layer 3,\n # hypothesis = activation at the third layer: hypothesis = f(z^{(3)})\n output_activations = sigmoid(zSup3)\n \n return output_activations", "def preprocess_inputs(self, state, goal):\n #state, goal = self.clip_states_goals(state, goal)\n state_norm = self.state_normalizer.normalize(state)\n goal_norm = self.goal_normalizer.normalize(goal)\n inputs = np.concatenate([state_norm, goal_norm])\n return torch.tensor(inputs, dtype=torch.float32).unsqueeze(0)", "def build_decoder_output(self, X):\n decoder_outlayer = layers.Conv2DTranspose(filters=1,\n kernel_size=self.num_kernel[0],\n strides=self.num_strides[0],\n padding=\"same\",\n name=\"Decoder_output\")\n X = decoder_outlayer(X)\n #decoder_final = layers.Activation(\"sigmoid\", name=\"Decoder_activation\")\n return X # decoder_final(X)", "def build_decoder_input(self):\n decoder_input_layer = layers.Input(\n shape=(self.latent_dim,),\n name=\"decoder_input\")\n\n return decoder_input_layer", "def _inverse_prediction(self):\n embed_t0 = self._inverse_embedding(self.state_t0)\n embed_t1 = self._inverse_embedding(self.state_t1)\n x = concatenate([embed_t0, embed_t1])\n x = self.dense1(x)\n x = self.dense2(x)\n #x = self.flatten(x)\n\n return x", "def _decoder(self, z):\n nn = fully_connected(z,50,activation_fn=tf.nn.relu)\n nn = fully_connected(nn,100,activation_fn=tf.nn.relu)\n f = fully_connected(nn,self._ndims,activation_fn=tf.nn.sigmoid)\n ####### Implementation Here ######\n return f", "def backward(ctx, grad_output):\n print(\"MYrelu\")\n input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n #grad_input[input < 0] = 0\n #grad_input[input < 0] = 0\n return grad_input", "def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)\n\n # expland all the tensors to 3D tensor \n for idx, node in enumerate(input_node):\n if len(node.shape) == 1:\n input_node[idx] = tf.expand_dims(tf.expand_dims(node, -1), -1)\n elif len(node.shape) == 2:\n input_node[idx] = tf.expand_dims(node, 1) \n elif len(node.shape) > 3:\n raise ValueError(\n \"Unexpected inputs dimensions %d, expect to be smaller than 3\" % len(node.shape)\n )\n\n # align the embedding_dim of input nodes if they're not the same\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim',\n [4, 8, 16],\n default=8)\n output_node = [tf.keras.layers.Dense(embedding_dim)(node)\n if node.shape[2] != embedding_dim else node for node in input_node]\n output_node = tf.concat(output_node, axis=1)\n\n att_embedding_dim = self.att_embedding_dim or hp.Choice('att_embedding_dim',\n [4, 8, 16],\n default=8)\n 
head_num = self.head_num or hp.Choice('head_num',\n [1, 2, 3, 4],\n default=2)\n residual = self.residual or hp.Choice('residual',\n [True, False],\n default=True)\n outputs = []\n for _ in range(head_num):\n query = tf.keras.layers.Dense(att_embedding_dim, use_bias=False)(output_node) \n key = tf.keras.layers.Dense(att_embedding_dim, use_bias=False)(output_node) \n value = tf.keras.layers.Dense(att_embedding_dim, use_bias=False)(output_node) \n \n outputs.append(self._scaled_dot_product_attention(query, key, value))\n\n outputs = tf.concat(outputs, axis=2)\n\n if self.residual:\n outputs += tf.keras.layers.Dense(att_embedding_dim * head_num, use_bias=False)(output_node)\n \n return output_node", "def decoder_initial_inputs(self, batch_size):\r\n\t\tinputs = variable(np.full((1,), self.init_idx)).expand((batch_size,))", "def call(self, dec_input, enc_output, state_h, state_c):\n\n # dec_hidden - (batch_size, encoder_units)\n dec_hidden = [state_h, state_c]\n\n # expand state_h to match enc_output dimension\n # state_h_time, state_c_time - (batch_size, 1, encoder_units)\n # state_hc_time - (batch_size, 1, 2 * encoder_units)\n state_h_time = tf.expand_dims(state_h, axis=1)\n state_c_time = tf.expand_dims(state_c, axis=1)\n state_hc_time = tf.concat([state_h_time, state_c_time], axis=-1)\n\n # additive score\n # score_s - (batch_size, timesteps_in, conv1d_units)\n # score_hc - (batch_size, 1, conv1d_units)\n # score - (batch_size, timesteps_in, 1)\n score_s = self.conv1d_s(enc_output)\n score_h = self.conv1d_h(state_hc_time)\n score = self.tanh(score_s + score_h)\n score = self.conv1d_v(score)\n\n # attention weights - (batch_size, timesteps_in, 1)\n attention_weights = self.softmax(score)\n \n # content vector - before sum (batch_size, timesteps_in, encoder_units)\n # content vector - after sum (batch_size, encoder_units)\n # content vector - after expansion (batch_size, 1, encoder_units)\n # content vector - after concatenation (batch_size, 1, encoder_units + 1)\n content_vector = attention_weights * enc_output\n content_vector = tf.reduce_sum(content_vector, axis=1)\n content_vector = tf.expand_dims(content_vector, axis=1)\n content_vector = tf.concat([content_vector, dec_input], axis=-1) \n \n # compute the output and hidden and cell state\n # dec_output - (batch_size, 1, decoder_units)\n # state_h, state_c - (batch_size, encoder_units)\n dec_output, state_h, state_c = self.lstm(content_vector, initial_state=dec_hidden)\n\n # output - (batch_size, 1, 1) \n output = self.conv1d_output(dec_output)\n\n return output, attention_weights, state_h, state_c", "def update(self):\r\n \r\n # find the total input current to this population of neurons\r\n input_current = numpy.tile(self.bias, (self.array_size, 1)) # apply respective biases to neurons in the population \r\n X = numpy.zeros(self.dimensions * self.array_size) # set up matrix to store accumulated decoded input, same size as decoded_input\r\n\r\n for a in self.accumulators.values(): \r\n if hasattr(a, 'new_decoded_input'): # if there's a decoded input in this accumulator,\r\n X += a.new_decoded_input # add its values to the total decoded input\r\n if hasattr(a, 'new_encoded_input'): # if there's an encoded input in this accumulator\r\n # encoded input is the same to every array network\r\n input_current += a.new_encoded_input # add its values directly to the input current \r\n\r\n #TODO: optimize for when nothing is added to X (ie there are no decoded inputs)\r\n X = X.reshape((self.array_size, self.dimensions)) # reshape decoded input for 
network arrays\r\n # find input current caused by decoded input signals \r\n input_current += TT.dot(X, self.encoders.T) # calculate input_current for each neuron as represented input signal x preferred direction\r\n \r\n # pass that total into the neuron model to produce the main theano computation\r\n updates = self.neurons.update(input_current) # updates is an ordered dictionary of theano internal variables to update\r\n\r\n for a in self.accumulators.values(): \r\n # also update the filtered decoded and encoded internal theano variables for the accumulators\r\n if hasattr(a, 'new_decoded_input'): # if there's a decoded input in this accumulator,\r\n updates[a.decoded_input] = a.new_decoded_input.astype('float32') # add accumulated decoded inputs to theano internal variable updates\r\n if hasattr(a, 'new_encoded_input'): # if there's an encoded input in this accumulator,\r\n updates[a.encoded_input] = a.new_encoded_input.astype('float32') # add accumulated encoded inputs to theano internal variable updates\r\n\r\n # and compute the decoded origin decoded_input from the neuron output\r\n for o in self.origin.values():\r\n # in the dictionary updates, set each origin's output decoded_input equal to the self.neuron.output() we just calculated\r\n updates.update(o.update(updates[self.neurons.output]))\r\n \r\n return updates", "def forward(self, t):\r\n\r\n t = F.relu(self.conv1(t))\r\n t = self.pool(t)\r\n t = F.relu(self.conv2(t))\r\n #t = self.pool(t)\r\n t = F.relu(self.conv3(t))\r\n #t = F.relu(self.conv4(t))\r\n t = t.flatten(start_dim = 1)\r\n t = F.relu(self.fc(t))\r\n t = self.out(t)\r\n return t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
configuration interaction for hydrogen molecule
def configuration_interaction(R,Z): # Hartree Fock computations yield a set of MOs C, Hcore, nuclear_energy, two_electron = hartree_fock(R, Z, CI=True) # number of configurations considered in the calculation ND = 2 P = np.zeros(Hcore.shape) K = Hcore.shape[0] print('number of MOs = ', K) # density matrix for mu in range(K): for v in range(K): P[mu,v] = 2*C[mu,1]*C[v,1] coulomb = np.zeros(Hcore.shape) exchange = np.zeros(Hcore.shape) for i in range(K): for j in range(K): for mu in range(K): for v in range(K): for lamb in range(K): for sigma in range(K): coulomb[i,j] += two_electron[mu, v, sigma, lamb]\ * C[mu,i] *\ C[v,i] * C[sigma,j] * C[lamb,j] exchange[i,j] += two_electron[mu, v, sigma, lamb] \ * C[mu,i] *\ C[v,j] * C[sigma,j] * C[lamb,i] F = np.matmul(C.T, np.matmul(Hcore, C)) electronic_energy = F[0,0]*2 + coulomb[0,0] electronic_energy1 = F[1,1]*2 + coulomb[1,1] H = np.zeros((ND,ND)) # construct the Hamiltonian # for i in range(1, ND): # for j in range(i,ND): # H[i,j] = H[0,0] = electronic_energy H[1,1] = electronic_energy1 H[0,1] = H[1,0] = exchange[0,1] # diagonalizing the matrix eigvals, U = scipy.linalg.eigh(H) # density matrix represented in terms of Slater Determinants Temp = 50000. # K # transfer to Hartree Temp *= 3.1667909e-6 print('Temperature = {} au.'.format(Temp)) energy_SD = np.array([electronic_energy, electronic_energy1]) Z = sum(np.exp(-energy_SD/Temp)) naive_rho = np.diagflat(np.exp(-energy_SD/Temp)) print('naive density matrix = \n',naive_rho/Z) # density matrix represented in terms of Slater Determinants Z = sum(np.exp(- eigvals/Temp)) D = np.diagflat(np.exp(- eigvals/Temp))/Z rho = np.matmul(U, np.matmul(D, U.T)) print('full density matrix = \n', rho) total_energy = eigvals + nuclear_energy print('nuclear energy = {} \n'.format(nuclear_energy)) print('total energy = ', total_energy) return total_energy
[ "def PlotConfig(self) -> _n_1_t_3:", "def hxconfig(self, cmd):\n \n if self.backend is not 'hxhal' or self.controller is None:\n cmd.fail('text=\"No hxhal controller\"')\n return\n\n cmdKeys = cmd.cmd.keywords\n configName = cmdKeys['configName'].values[0]\n \n sam = self.sam\n\n try:\n configGroup, configName = configName.split('.')\n except:\n configGroup = 'h4rgConfig' if self.actor.instrument == 'PFS' else 'h2rgConfig'\n \n sam.updateHxRgConfigParameters(configGroup, configName)\n cmd.finish()", "def help_config(self):\n print(help_msg.cmds['config'])", "def SetupConfig(**args):\n\n\t#config file\n\tif 'configFile' in args:\n\t\tconfigFile = args['config']\n\telse:\n\t\tconfigFile = \"h2p.ini\"\n\tconf = pyprop.Load(configFile)\n\t\n\t#Radial Grid size\n\tif 'radialGridSize' in args:\n\t\tradialGridSize = args['radialGridSize']\n\t\tprint \"Using Grid Size = \", radialGridSize\n\t\t#set grid size of both representations, but we will\n\t\t#only use one of them in a given run\n\t\tconf.CartesianRadialRepresentation.rank0[2] = radialGridSize\n\t\tconf.TransformedRadialRepresentation.n = radialGridSize\n\n\t#Radial Grid type\n\tif 'radialGridType' in args:\n\t\tradialGridType = args['radialGridType']\n\telse:\n\t\tradialGridType = None\n\tSetRadialGridType(conf, radialGridType)\n\n\t#Nuclear Grid size\n\tif 'nuclearGridSize' in args:\n\t\tnuclearGridSize = args['nuclearGridSize']\n\t\tprint \"Using Nuclear Grid Size = \", nuclearGridSize\n\t\t#set grid size of both representations, but we will\n\t\t#only use one of them in a given run\n\t\tconf.CartesianNuclearRepresentation.rank0[2] = nuclearGridSize\n\t\tconf.TransformedNuclearRepresentation.n = nuclearGridSize\n\n\t#Nuclear Grid type\n\tif 'nuclearGridType' in args:\n\t\tnuclearGridType = args['nuclearGridType']\n\telse:\n\t\tnuclearGridType = None\n\tSetNuclearGridType(conf, nuclearGridType)\n\t\n\t#TimeStep\n\tif 'dt' in args:\n\t\tdt = args['dt']\n\t\tprint \"Using TimeStep = \", dt\n\t\tconf.Propagation.timestep = abs(dt)\n\n\t#Imaginary Time Propagation\n\tif 'imTime' in args:\n\t\timTime = args['imTime']\n\t\tprint \"Using ImaginaryTime = \", imTime\n\t\tdt = conf.Propagation.timestep\n\t\tif imTime:\n\t\t\tconf.Propagation.renormalization = True\n\t\t\tconf.Propagation.timestep = -1.0j * abs(dt)\n\t\telse:\n\t\t\tconf.Propagation.renormalization = False\n\t\t\tconf.Propagation.timestep = abs(dt)\n\n\tif 'softing' in args:\n\t\tsofting = args['softing']\n\t\tprint \"Using softing \", softing\n\t\tconf.Potential.softing = softing\n\n\tif 'lmax' in args:\n\t\tlmax = args['lmax']\n\t\tprint \"Using LMax = \", lmax\n\t\tconf.AngularRepresentation.maxl = lmax\n\n\tif 'duration' in args:\n\t\tduration = args['duration']\n\t\tconf.Propagation.duration = duration\n\n\tif 'orientation' in args:\n\t\torientation = args['orientation']\n\t\tconf.Potential.nuclear_orientation = orientation\n\t\t\n\n\tprint \"Setup Config Complete\"\n\tprint \"\"\n\treturn conf", "def experiment_configurations(self):\n pass", "def setup_configuration(config_num):\n \n mask = max_power\n \n for pos in pos_list:\n current_value = config_num / mask\n lattice[pos] = current_value\n \n config_num -= current_value*mask\n mask /= q", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HSimpleConst_opt, self).__init__(name='HSimpleConst_opt', num_nodes=24, edges=[])\n \n # Add the edges\n self.add_edges([(1, 14), (2, 16), (3, 23), (3, 22), (3, 21), (3, 20), (11, 5), (12, 6), (13, 7), (8, 18), (9, 17), (10, 19), (0, 11), 
(1, 12), (1, 13), (14, 8), (15, 9), (16, 10), (17, 6), (18, 5), (19, 7), (20, 0), (21, 1), (22, 4), (23, 2), (4, 15)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'Simulink'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"HSimpleConst_opt\"\"\"\n self[\"GUID__\"] = UUID('228d200f-ffdc-4c0e-bc88-61f51a74216f')\n \n # Set the node attributes\n self.vs[0][\"Name\"] = \"\"\"Out1\"\"\"\n self.vs[0][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[0][\"mm__\"] = \"\"\"Outport\"\"\"\n self.vs[0][\"Position\"] = pickle.loads(\"\"\"(lp1\nF390\naF118\naF420\naF132\na.\"\"\")\n self.vs[0][\"Port\"] = 1\n self.vs[0][\"GUID__\"] = UUID('85b89a53-92e7-4a49-98b9-ade6251eff77')\n self.vs[1][\"Name\"] = \"\"\"Product\"\"\"\n self.vs[1][\"SampleTime\"] = -1.0\n self.vs[1][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"Product\"\"\"\n self.vs[1][\"Position\"] = pickle.loads(\"\"\"(lp1\nF305\naF132\naF335\naF163\na.\"\"\")\n self.vs[1][\"GUID__\"] = UUID('46ea4a04-b727-4b0d-90bd-c82db7ccd7ee')\n self.vs[2][\"Name\"] = \"\"\"In1\"\"\"\n self.vs[2][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[2][\"mm__\"] = \"\"\"Inport\"\"\"\n self.vs[2][\"Position\"] = pickle.loads(\"\"\"(lp1\nF170\naF178\naF200\naF192\na.\"\"\")\n self.vs[2][\"Port\"] = 1\n self.vs[2][\"GUID__\"] = UUID('9747bb91-1feb-4f90-9a9f-b5fa912ce93d')\n self.vs[3][\"Name\"] = \"\"\"HSimpleConst\"\"\"\n self.vs[3][\"mm__\"] = \"\"\"SubSystem\"\"\"\n self.vs[3][\"Position\"] = pickle.loads(\"\"\"(lp1\n.\"\"\")\n self.vs[3][\"GUID__\"] = UUID('8cb30c14-54b2-4127-b62a-66894052c034')\n self.vs[4][\"Name\"] = \"\"\"Constant23\"\"\"\n self.vs[4][\"SampleTime\"] = \"\"\"inf\"\"\"\n self.vs[4][\"value\"] = 545.54\n self.vs[4][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[4][\"GUID__\"] = UUID('ca0d82da-c399-491d-a2ef-47cce29260b8')\n self.vs[5][\"Name\"] = \"\"\"1\"\"\"\n self.vs[5][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[5][\"GUID__\"] = UUID('d6c8213f-7671-4399-bb17-4544bd189947')\n self.vs[6][\"Name\"] = \"\"\"1\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[6][\"GUID__\"] = UUID('d8ec0094-2bd3-4fa1-a451-22f598058f1d')\n self.vs[7][\"Name\"] = \"\"\"2\"\"\"\n self.vs[7][\"mm__\"] = \"\"\"Port_Input\"\"\"\n self.vs[7][\"GUID__\"] = UUID('1ae7bae5-a888-4d2d-b0da-44c5148f25ac')\n self.vs[8][\"Name\"] = \"\"\"1\"\"\"\n self.vs[8][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[8][\"GUID__\"] = UUID('c0eecf70-8bc1-4bb7-b26c-9fab522ab97a')\n self.vs[9][\"Name\"] = \"\"\"1\"\"\"\n self.vs[9][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[9][\"GUID__\"] = UUID('7923f3b4-c14a-4b1f-afe1-ea84f047bfc8')\n self.vs[10][\"Name\"] = \"\"\"1\"\"\"\n self.vs[10][\"mm__\"] = \"\"\"Port_Output\"\"\"\n self.vs[10][\"GUID__\"] = UUID('4e504525-e7e4-4213-96b7-4b72ab33e667')\n self.vs[11][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[11][\"GUID__\"] = UUID('c9f626f5-b777-4681-88aa-4e62be54ddef')\n self.vs[12][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[12][\"GUID__\"] = UUID('87b35ee0-6d8e-45f5-86aa-483684061f38')\n self.vs[13][\"mm__\"] = \"\"\"__Block_Inport__\"\"\"\n self.vs[13][\"GUID__\"] = UUID('e0486d40-a1fb-4da6-aad4-60e94caf2289')\n self.vs[14][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[14][\"GUID__\"] = UUID('afef3de3-d565-48be-8c61-51111ab2f43d')\n self.vs[15][\"mm__\"] = \"\"\"__Block_Outport__\"\"\"\n self.vs[15][\"GUID__\"] = UUID('9166314d-4fe0-4ea5-8b86-7f456bbdd150')\n self.vs[16][\"mm__\"] = 
\"\"\"__Block_Outport__\"\"\"\n self.vs[16][\"GUID__\"] = UUID('158943c1-b83a-4b4c-8a9f-98801bcf3a90')\n self.vs[17][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[17][\"GUID__\"] = UUID('0502ebca-71f3-43ae-9525-d97fd9cd24bf')\n self.vs[18][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[18][\"GUID__\"] = UUID('5f8605d5-c7e2-421d-a9d3-e349b0e0351a')\n self.vs[19][\"mm__\"] = \"\"\"__Relation__\"\"\"\n self.vs[19][\"GUID__\"] = UUID('47311d73-33ef-4e05-84c8-ae54810328be')\n self.vs[20][\"Name\"] = \"\"\"None\"\"\"\n self.vs[20][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[20][\"GUID__\"] = UUID('c11e849e-70d3-4b22-933f-54aa755b441a')\n self.vs[21][\"Name\"] = \"\"\"None\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[21][\"GUID__\"] = UUID('99e19103-6a62-42b6-b7da-016d14f02b20')\n self.vs[22][\"Name\"] = \"\"\"None\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[22][\"GUID__\"] = UUID('1bde3c3f-6bba-4112-a44a-a30df40b737b')\n self.vs[23][\"Name\"] = \"\"\"None\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"__Contains__\"\"\"\n self.vs[23][\"GUID__\"] = UUID('a37caaea-f506-4637-a8c9-1dde6c07bc87')", "def _initialise_configure(self, _):\n # Create a configuration for the controller:\n configuration = controller.ControllerConfiguration(self.host)\n\n # The motor controller will start once 8 frames are enqueued:\n configuration.mc_prefill_frames = 8\n\n # Set the motor control output pin polarities and the function of the\n # \"other\" or \"shutdown\" pin. The other pin can be forced high or low\n # permanently, or set to output a clock pulse on each frame or\n # to go high when the controller is running:\n configuration.mc_pin_flags = \\\n controller.MC_PIN_FLAG_INVERT_MCA_SP | \\\n controller.MC_PIN_FLAG_INVERT_MCA_DP | \\\n controller.MC_PIN_FLAG_INVERT_MCB_SP | \\\n controller.MC_PIN_FLAG_INVERT_MCB_DP | \\\n controller.MC_PIN_FLAG_MCA_O_FUNCTION_LOW | \\\n controller.MC_PIN_FLAG_MCB_O_FUNCTION_LOW\n\n # Set the length of a frame, in cycles of the controller clock frequency. In\n # this example a frame is 50ms, or 1/20th of a second:\n configuration.mc_frame_period = self.host.clock_frequency / 20\n\n # Set the velocity limit (in steps per frame) on each axis:\n configuration.mc_a_velocity_limit = 6000\n configuration.mc_b_velocity_limit = 6000\n\n # Set the acceleration limit (in steps per frame per frame) on each axis:\n configuration.mc_a_acceleration_limit = 800 #Should be at least three times the maximum add_to_vel,\n configuration.mc_b_acceleration_limit = 800 # so up to six times MOTOR_ACCEL\n\n # Set the deceleration (in steps per frame per frame) to use when shutting down:\n configuration.mc_a_shutdown_acceleration = 250\n configuration.mc_b_shutdown_acceleration = 250\n\n # Set the pulse width, in cycles of the clock frequency (12MHz). In this\n # example the pulse width is 50 clock cycles, and the off time is 50 clock\n # cycles, for a 100 clock cycle period. 
At the maximum velocity of 6000\n # steps per frame, this would be a 120kHz square wave:\n configuration.mc_pulse_width = self.host.clock_frequency / 240000\n configuration.mc_pulse_minimum_off_time = self.host.clock_frequency / 240000\n\n # Invert all the GPIO inputs, so they are active when pulled low:\n for pin in configuration.pins[0:40]:\n pin.invert_input = True\n\n # Set all of the motor control pins to motor control instead of just GPIO:\n for pin in configuration.pins[48:60]:\n pin.function = controller.CONTROLLER_PIN_FUNCTION_SPECIAL\n\n # Set the limit switch inputs to the specific pins they are connected to:\n # configuration.mc_a_positive_limit_input = controller.PIN_GPIO_0\n # configuration.mc_a_negative_limit_input = controller.PIN_GPIO_1\n # configuration.mc_b_positive_limit_input = controller.PIN_GPIO_2\n # configuration.mc_b_negative_limit_input = controller.PIN_GPIO_3\n\n # Set the guider sample interval, in cycles of the controller clock frequency.\n # In this example, the guider is polled every 1ms, giving a maximum of\n # 100 for the guider value in each 100ms frame:\n self.mc_guider_counter_divider = self.host.clock_frequency / 1000\n\n # Each guider value is multiplied by a fractional scale factor to get\n # the number of steps. The resulting value then has a maximum applied before\n # being added to the next available frame:\n configuration.mc_guider_a_numerator = 4\n configuration.mc_guider_a_denominator = 50 #4 steps per 50ms slot\n configuration.mc_guider_a_limit = 20\n configuration.mc_guider_b_numerator = 4\n configuration.mc_guider_b_denominator = 50\n configuration.mc_guider_b_limit = 20\n\n if SITE == 'NZ':\n # Set the guider input pins. The SBIG socket has pins: 1=+RA, 2=+DEC, 3=-DEC, 4=-RA, 5=ground:\n configuration.mc_a_positive_guider_input = controller.PIN_GPIO_32 # +RA\n configuration.mc_a_negative_guider_input = controller.PIN_GPIO_35 # -RA\n configuration.mc_b_positive_guider_input = controller.PIN_GPIO_33 # +DEC\n configuration.mc_b_negative_guider_input = controller.PIN_GPIO_34 # -DEC\n\n # Set 8 pins to outputs, the rest to inputs, with values reported (paddles, limits, power state):\n for pin in configuration.pins[0:8] + configuration.pins[16:48]:\n pin.direction = controller.CONTROLLER_PIN_INPUT\n pin.report_input = True\n for pin in configuration.pins[8:16]:\n pin.direction = controller.CONTROLLER_PIN_OUTPUT\n pin.report_input = False\n for pin_number in [16,17,18, 21,22]: # Pin numbers for limit inputs, which (unlike paddles) are active HIGH\n configuration.pins[pin_number].invert_input = False # Normally all inputs to be inverted, see top of this method\n# configuration.shutdown_0_input = 21 # The 'Power' input triggers a hardware shutdown if it goes active\n elif SITE == 'PERTH':\n # Set the guider input pins. 
The SBIG socket has pins: 1=+RA, 2=+DEC, 3=-DEC, 4=-RA, 5=ground:\n configuration.mc_a_positive_guider_input = controller.PIN_GPIO_0 # +RA\n configuration.mc_a_negative_guider_input = controller.PIN_GPIO_1 # -RA\n configuration.mc_b_positive_guider_input = controller.PIN_GPIO_2 # +DEC\n configuration.mc_b_negative_guider_input = controller.PIN_GPIO_3 # -DEC\n\n for pin in configuration.pins[0:48]: # Set all pins to inputs with values reported (paddles)\n pin.direction = controller.CONTROLLER_PIN_INPUT\n pin.report_input = True\n #Set the actual hand-paddle bits to NOT inverted, as they are active high.\n for pin_number in [24,25,26,27,28, 40,41,42,43,44, 0,1,2,3]: #[0,1,2,3,4, 16,17,18,19,20, 24,25,26,27]\n configuration.pins[pin_number].invert_input = False # Normally all inputs to be inverted, see top of this method\n\n # Set the shutdown pins to outputs:\n for pin_number in (52, 53, 58, 59):\n configuration.pins[pin_number].direction = controller.CONTROLLER_PIN_OUTPUT\n configuration.pins[pin_number].function = controller.CONTROLLER_PIN_FUNCTION_GPIO\n\n # Send the configuration to the controller:\n d = self.host.configure(configuration)\n self.configuration = configuration # Save the configuration for later reference.\n\n # The deferred is completed once the configuration is written:\n d.addCallback(self._initialise_configuration_written)\n d.addErrback(self._initialise_error_occurred)\n\n return d", "def _patch_hydrogens(self):\n to_patch = defaultdict(int)\n for idx in self.exclude:\n h = self.mol.GetAtomWithIdx(idx)\n atom = h.GetNeighbors()[0]\n if atom.GetSymbol() != \"C\":\n to_patch[atom.GetIdx()] += 1\n for idx, nH in to_patch.items():\n node = self.nodes[idx]\n h_str = \"H\" if nH == 1 else f\"H{nH}\"\n label = re.sub(r\"(\\w+)(.*)\", rf\"\\1{h_str}\\2\", node[\"label\"])\n node[\"label\"] = label\n node[\"shape\"] = \"ellipse\"", "def configure(manager):\r\n\r\n ###########################################################################\r\n # configuration of numerics\r\n ###########################################################################\r\n conf = manager.num_conf # DO NOT EDIT THIS LINE\r\n ###########################################################################\r\n\r\n # usage of mpi\r\n conf.mpi_acceleration = False \r\n \r\n # here one can prevent competing multithreating if necessary\r\n conf.cpu_acceleration = True\r\n conf.num_threads = -1 # -1 means to be determined optimally\r\n \r\n # use gpu acceleration (if gpu available)\r\n # this requires pytorch, but it does\r\n # not switch on pytorch usage for\r\n # other than GPU computations\r\n conf.gpu_acceleration = False \r\n \r\n # restrict yourself only to certain GPUs\r\n conf.available_gpus = [0, 1]\r\n \r\n # enables pytorch as an alternative\r\n # to numpy even without GPUs\r\n conf.enable_pytorch = False \r\n \r\n\r\n ###########################################################################\r\n # logging configuration\r\n ###########################################################################\r\n conf = manager.log_conf # DO NOT EDIT THIS LINE\r\n ###########################################################################\r\n conf.log_on_screen = True\r\n conf.log_to_file = False\r\n #conf.log_file_name = \"./qrhei.log\"\r\n \r\n # verbosity is a number from 0 to 10\r\n # 0 == no information written\r\n # 10 == all information is written\r\n conf.verbosity = 5 \r\n conf.verbose=True\r\n\r\n ###########################################################################\r\n # general 
configuration\r\n ###########################################################################\r\n conf = manager.gen_conf # DO NOT EDIT THIS LINE\r\n ###########################################################################\r\n conf.legacy_relaxation = False", "def get_xc_config():\n config = config_dict.ConfigDict()\n # The names of features for the exchange enhancement factor.\n # Comma-separated string.\n config.feature_names_x = 'x2,w'\n # The names of features for the same-spin correlation enhancement factor.\n # Comma-separated string.\n config.feature_names_css = 'x2,w'\n # The names of features for the opposite-spin correlation enhancement factor.\n # Comma-separated string.\n config.feature_names_cos = 'x2,w'\n # The functional form from which mutations take place. Can be either a\n # functional name defined in xc_functionals, or a path to a json file that\n # includes the definition of a functional form.\n config.mutation_base = ''\n # The training, validation and test losses of the mutation base.\n # Comma separated string of 3 integers.\n config.mutation_base_losses = ''\n # The number of shared parameters in the functional form.\n # Comma-separated string of 1 integer or 3 integers (for 3\n # enhancement_factors). Defaults to those of the mutation_base.\n config.num_shared_parameters = ''\n # The number of temporary variables in the functional form.\n # Comma-separated string of 1 integer or 3 integers (for 3\n # enhancement_factors). Defaults to those of the mutation_base.\n config.num_variables = ''\n # The path to the specification of instruction pool for the experiment.\n config.instruction_pool = (\n '/namespace/gas/primary/california/instruction_pool'\n '/arithmetic_power_transform_functional.json')\n # The specification of mutation pool for the experiment.\n # Comma separated string of mutation rule and probabilities.\n config.mutation_pool = (\n 'insert_instruction,0.25,remove_instruction,0.25,'\n 'replace_instruction,0.25,change_argument,0.25')\n # The mutation probabilities for exchange, same-spin correlation and\n # opposite-spin correlation enhancement factors.\n # Comma separated string of 3 floats.\n config.component_mutation_probabilities = (\n '0.333333333333,0.333333333333,0.333333333333')\n # The maximum number of bound parameters. If less than zero, no constraint\n # is applied to the number of bound parameters.\n config.max_num_bound_parameters = 2\n # The maximum number of instructions per enhancement factor. 
If less than\n # zero, no constraint is applied to the number of instructions.\n config.max_num_instructions = -1\n # The number of fixed instructions per enhancement factor.\n config.num_fixed_instructions = 0\n return config", "def system_cfg(cls):\r\n #attention_this_may_be_wrong\r\n #gamma is different from that in system.cfg but...\r\n\r\n cls.model_param_ising= {\r\n \"gamma\": 1.0, \r\n \"h\": 1.0, \r\n \"J_NN\": -1.0, \r\n \"J_NNN\": 1.0, \r\n \"alpha\": 2.0, \r\n \"beta\": 0.0 \r\n }\r\n cls.model_param_heisenberg= {\r\n \"h\": 0.0, \r\n \"Jzz\": 1.0, \r\n \"J_NN\": 1.0, \r\n \"J_NNN\": 0.0, #cls.J_NNN=0.241186\r\n \"alpha\": 2.0, \r\n \"beta\": 1.0 \r\n }\r\n\r\n if cls.MODEL == \"Ising\": \r\n cls.model_param.update(cls.model_param_ising)\r\n\r\n cls.Layer=3\r\n cls.SI_Layer=2\r\n cls.D_max=2\r\n \r\n elif cls.MODEL == \"Heisenberg\":\r\n cls.model_param.update(cls.model_param_heisenberg)\r\n \r\n\r\n cls.D_max=2\r\n cls.Layer=3\r\n else:\r\n print \"error, cls.MODEL is not defined\"\r\n print cls.MODEL", "def __init__(self):\n DiffusionConfig.__init__(self)\n self.dipy_recon_config = Dipy_recon_configUI(\n imaging_model=self.diffusion_imaging_model,\n recon_mode=self.diffusion_model,\n tracking_processing_tool=self.tracking_processing_tool,\n )\n self.mrtrix_recon_config = MRtrix_recon_configUI(\n imaging_model=self.diffusion_imaging_model, recon_mode=self.diffusion_model\n )\n self.dipy_tracking_config = Dipy_tracking_configUI(\n imaging_model=self.diffusion_imaging_model,\n tracking_mode=self.diffusion_model,\n SD=self.mrtrix_recon_config.local_model,\n )\n self.mrtrix_tracking_config = MRtrix_tracking_configUI(\n tracking_mode=self.diffusion_model, SD=self.mrtrix_recon_config.local_model\n )\n\n self.mrtrix_recon_config.on_trait_change(\n self.update_mrtrix_tracking_SD, \"local_model\"\n )\n self.dipy_recon_config.on_trait_change(\n self.update_dipy_tracking_SD, \"local_model\"\n )\n self.dipy_recon_config.on_trait_change(\n self.update_dipy_tracking_sh_order, \"lmax_order\"\n )", "def __init__(self, encut, spinaxis, ldaul, Uparam, Jparam, nupdown=None, name='DFTCL_settings'):\n ncl_settings = {\"ISPIN\": 2, \"MAGMOM\": None, \"SAXIS\": spinaxis, \"LSORBIT\": \".TRUE.\", \"LNONCOLLINEAR\": \".TRUE.\", \"NUPDOWN\":nupdown}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=ncl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"ENCUT\", encut)", "def sense_conf():\n\n title = 'SENSE'\n\n content = {\n\n title: {\n\n 'always_emulate': 'False',\n 'never_emulate': 'False',\n 'temp_scale': 'f',\n 'always_auto_refresh': 'False'\n\n },\n }\n\n return content", "def config_2d_mesh(hardware):\n config = hardware.config\n ch_idx = config[\"network_channel_index\"]\n # configure x-dim link\n for p_id_y in range(config[\"num_processor_y\"]):\n for c_id_y in range(config[\"num_core_y\"]):\n for p_id_x in range(config[\"num_processor_x\"]):\n for c_id_x in range(config[\"num_core_x\"]):\n if (\n p_id_x == config[\"num_processor_x\"] - 1\n and c_id_x == config[\"num_core_x\"] - 1\n ):\n # This is an edge node\n continue\n # get current router\n cur_router = hardware\\\n .processor_array[(p_id_x, p_id_y)]\\\n .core_array[(c_id_x, c_id_y)]\\\n .niu.router\n # get next router\n next_c_id_x = (c_id_x + 1) % config[\"num_core_x\"]\n next_p_id_x = \\\n (c_id_x + 1) // config[\"num_core_x\"] + p_id_x\n assert next_p_id_x < 
config[\"num_processor_x\"]\n next_router = hardware\\\n .processor_array[(next_p_id_x, p_id_y)]\\\n .core_array[(next_c_id_x, c_id_y)]\\\n .niu.router\n # configure bus\n is_onchip = (c_id_x + 1 == config[\"num_core_x\"])\n bus = NetworkBus(\n env=hardware.env,\n log=hardware.log,\n config=config,\n is_onchip=is_onchip,\n uplink_router=next_router,\n downlink_router=cur_router,\n bus_id=(\n (p_id_x, p_id_y), (c_id_x, c_id_y),\n (next_p_id_x, p_id_y), (next_c_id_x, c_id_y)\n )\n )\n bus.config_queue(\n uplink_in_chan=next_router\n .in_packet_chan[ch_idx[\"W\"]],\n uplink_out_chan=next_router\n .out_packet_chan[ch_idx[\"W\"]],\n downlink_in_chan=cur_router\n .in_packet_chan[ch_idx[\"E\"]],\n downlink_out_chan=cur_router\n .out_packet_chan[ch_idx[\"E\"]]\n )\n hardware.bus_array[\n (p_id_x, p_id_y), (c_id_x, c_id_y),\n (next_p_id_x, p_id_y), (next_c_id_x, c_id_y)\n ] = bus\n\n # configure y-dim link\n for p_id_x in range(config[\"num_processor_x\"]):\n for c_id_x in range(config[\"num_core_x\"]):\n for p_id_y in range(config[\"num_processor_y\"]):\n for c_id_y in range(config[\"num_core_y\"]):\n if (\n p_id_y == config[\"num_processor_y\"] - 1\n and c_id_y == config[\"num_core_y\"] - 1\n ):\n # This is an edge node\n continue\n # get current router\n cur_router = hardware\\\n .processor_array[(p_id_x, p_id_y)]\\\n .core_array[(c_id_x, c_id_y)]\\\n .niu.router\n # get next router\n next_c_id_y = (c_id_y + 1) % config[\"num_core_y\"]\n next_p_id_y = \\\n (c_id_y + 1) // config[\"num_core_y\"] + p_id_y\n assert next_p_id_y < config[\"num_processor_y\"]\n next_router = hardware\\\n .processor_array[(p_id_x, next_p_id_y)]\\\n .core_array[(c_id_x, next_c_id_y)]\\\n .niu.router\n # configure bus\n is_onchip = (c_id_y + 1 == config[\"num_core_y\"])\n bus = NetworkBus(\n env=hardware.env,\n log=hardware.log,\n config=config,\n is_onchip=is_onchip,\n uplink_router=next_router,\n downlink_router=cur_router,\n bus_id=(\n (p_id_x, p_id_y), (c_id_x, c_id_y),\n (p_id_x, next_p_id_y), (c_id_x, next_c_id_y)\n )\n )\n bus.config_queue(\n uplink_in_chan=next_router\n .in_packet_chan[ch_idx[\"S\"]],\n uplink_out_chan=next_router\n .out_packet_chan[ch_idx[\"S\"]],\n downlink_in_chan=cur_router\n .in_packet_chan[ch_idx[\"N\"]],\n downlink_out_chan=cur_router\n .out_packet_chan[ch_idx[\"N\"]]\n )\n hardware.bus_array[\n (p_id_x, p_id_y), (c_id_x, c_id_y),\n (p_id_x, next_p_id_y), (c_id_x, next_c_id_y)\n ] = bus", "def setupconfig():\n from Manager import Studio\n studio = Studio.Instance\n cfgeff = studio.configEffect_st\n cfgeff.bloomToggle.isOn = False\n cfgeff.vignetteToggle.isOn = False\n cfgeff.sunShaftsToggle.isOn = False\n cfgeff.fogToggle.isOn = False\n cfgeff.depthOfFieldToggle.isOn = False\n #cfgeff.ssaoToggle.isOn = True\n #cfgeff.selfShadowToggle.isOn = True\n \n # Turn off backgrounds\n studio.uiBGChanger.onOffToggle.isOn = False", "def add_hydrogen(mol):\n pbmol = mol_to_pybel(mol)\n pbmol.OBMol.AddHydrogens()\n newmol = pybel_to_mol(pbmol, reorder_atoms_by_residue=True)\n mdt.helpers.assign_unique_hydrogen_names(newmol)\n return newmol", "def configfilepopulator(self):\n # Set the number of cycles for each read and index using the number of reads specified in the sample sheet\n self.forwardlength = int(self.metadata.header.forwardlength)\n self.reverselength = int(self.metadata.header.reverselength)\n # Create a list of lists containing [cycle start, cycle end, and :runid] for each of forward reads, index 1\n # index 2, and reverse reads\n cycles = [[1, self.forwardlength, self.runid],\n 
[self.forwardlength + 1, self.forwardlength + 8, self.runid],\n [self.forwardlength + 9, self.forwardlength + 16, self.runid],\n [self.forwardlength + 17, self.forwardlength + 16 + self.reverselength, self.runid]]\n # A dictionary of parameters (keys) and the values to use when repopulating the config file\n parameters = {'RunFolder': self.runid, 'RunFolderDate': self.metadata.date.replace(\"-\", \"\"),\n 'RunFolderId': self.metadata.runnumber, 'RunFlowcellId': self.metadata.flowcell}\n # Load the xml file using element tree\n config = ElementTree.parse(os.path.join(self.miseqpath, self.miseqfolder, 'Data', 'Intensities', 'BaseCalls',\n 'config.xml'))\n # Get the root of the tree\n configroot = config.getroot()\n # The run node is the only child node of the root\n for run in configroot:\n # Iterate through the child nodes. There are three nodes sections that must be populated\n for child in run:\n # Find the cycles tag\n if child.tag == 'Cycles':\n # Set the attributes with a dictionary containing the total reads\n child.attrib = {'Last': '{}'.format(self.forwardlength + 16 + self.reverselength),\n 'Number': '{}'.format(self.totalreads), 'First': '1'}\n elif child.tag == 'RunParameters':\n # Name the child as runparameter for easier coding\n runparameters = child\n for runparameter in runparameters:\n # This replaces data in both 'ImagingReads' and 'Reads' nodes\n if 'Reads' in runparameter.tag:\n # Enumerate through the run parameters\n for indexcount, reads in enumerate(runparameter):\n # The values for the index are 1, 2, 3, 4. Subtract one to get the index of the first\n # list in cycles\n index = int(runparameter.attrib['Index']) - 1\n # Set the text value as the appropriate value from cycles\n reads.text = str(cycles[index][indexcount])\n # Populate the instrument value\n if runparameter.tag == 'Instrument':\n runparameter.text = self.instrument\n # Iterate through the parameters in the parameter dictionary\n for parameter in parameters:\n # If the key is encountered\n if runparameter.tag == parameter:\n # Replace the text with the value\n runparameter.text = parameters[parameter]\n if 'Barcode' in runparameter.tag:\n for cycle, barcode in enumerate(runparameter):\n # Add the barcode cycles. These are the number of forward reads (+ 1 as the barcode\n # starts 1 cycle after the first run) plus the current iterator\n barcode.text = str(self.forwardlength + 1 + cycle)\n # Write the modified config file to the desired location\n config.write(os.path.join(self.miseqfolder, 'Data', 'Intensities', 'BaseCalls', 'config.xml'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the string to print with the recipe info
def __str__(self): text = "Recipe for: " + self.name + "\nIt's a level "+str(self.cooking_lvl)+" recipe that takes "+str(self.cooking_time)+"min to prepare.\n" text = text + "The ingredient list is :" + str(self.ingredients) + "\nRecipe Description:\n" + self.description + "\nIt's a " + self.type return text
[ "def __str__(self):\n return \"\"\"Recipe class containing info about name, cooking_lvl,\n ingredients, recipe_type and description\"\"\"\n return txt", "def test_print_recipe():\n recipe = Recipe(\"Tuna pasta\", ingreds)\n assert str(recipe) == 'Recipe \"Tuna pasta\"\\n - tuna\\n - sweetcorn\\n - pasta'", "def get_recipe_str(recipe, meal_str):\r\n\r\n recipe_object = requests.get_meal_by_name(meal_str)\r\n if recipe_object:\r\n recipe += \"Recipe: \" + recipe_object.get_meal() + \"\\n\\n\"\r\n my_wrap = textwrap.TextWrapper(width=80)\r\n\r\n # Get and format the instructions\r\n wrap_list = my_wrap.wrap(\"Instructions: \" + recipe_object.get_instructions())\r\n for line in wrap_list:\r\n recipe += line + \"\\n\"\r\n\r\n # Get and format the ingredient/measurements\r\n ingredients = requests.get_ingredients_and_measurements(meal_str)\r\n formatting = \"{:<30}\"\r\n recipe += \"\\n\\nIngredients:\\n\"\r\n recipe += \"-\" * 80 + \"\\n\"\r\n\r\n try:\r\n for i in range(len(ingredients)):\r\n ingredient = ingredients[i]\r\n item = formatting.format(ingredient.get_measure() + \" \" + ingredient.get_ingredient())\r\n recipe += item + \"\\n\"\r\n except TypeError:\r\n recipe = \"Error in ingredient format. Try another recipe.\"\r\n\r\n else:\r\n recipe = \"A recipe for this meal was not found.\"\r\n\r\n return recipe", "def __str__(self):\n text = (self.amount + ' ' + (self.measure + ' ' + self.ingredient).strip()).strip()\n if preparation_method:\n text += ' -- ' + preparation_method\n return text", "def info(self):\n s = ''\n s = s + '%-16s = %s\\n'%('pdbfile', self.pdbfile)\n s = s + '%-16s = %s\\n'%('seqfile', self.seqfile)\n s = s + '%-16s = %s\\n'%('salt', self.salt)\n s = s + '%-16s = %s\\n'%('saltconc', self.saltconc)\n s = s + '%-16s = %s\\n'%('pH', self.pH)\n s = s + '%-16s = %s\\n'%('boxProtocol', self.boxProtocol)\n s = s + '%-16s = %s\\n'%('modelPDB_out', self.modelPDBout)\n s = s + '%-16s = %s\\n'%('mccePDBout', self.mccePDBout)\n return s", "def __str__(self):\n return \"Product:\\n\" + '\\n'.join(\"%s : %r\" % (key2, str(val2)) for (key2, val2)\n in self.__get_dictionary().items()) + \"\\n\"", "def __str__(self):\n price = '${:,.2f}'.format(self.total_price)\n i_list = [i.name for i in self.ingredients][::-1]\n return f\"Ingredient List: {', '.join(i_list)}\\n\" \\\n f\"Total Price: {price}\\n\"", "def __str__(self) -> str: #__str__:a built-in function that computes the \"informal\" string representations of an object\n s = \"\"\n # Initialize with cofactor name\n s += \"Cofactor Name: {}\\n\".format(self.name) #\\n:new line in string\n s += \"------------ \\n\" #Draw a line between cofactor info (looks cuter!)\n # Print cofactor info, with state_id and relative redox potential\n for i in range(len(self.redox)):\n s += \"Redox State ID: {}, Oxidation Potential: {}\\n\".format(i, self.redox[i])\n\n return s", "def make_recipe(self, recipe: str) -> str:\n return f\"\"\"make PLATFORM={self.PLATFORM} TARGET_PROJECT={self.TARGET_PROJECT} DESIGN={self.DESIGN} TARGET_CONFIG={self.TARGET_CONFIG} PLATFORM_CONFIG={self.PLATFORM_CONFIG} {recipe}\"\"\"", "def _description_string(self) -> str:", "def __str__(self):\n # Probably some performance issues with this code because of Python's\n # immutable strings. 
This code is only ever called in development or\n # testing, so it should be fine.\n i = 1\n result_string = \"\"\n for line in self._instructions:\n result_string += \"{0}: {1}\\n\".format(i, line)\n i += 1\n\n result_string += 'Labels:'\n for label, line_number in self._labels.iteritems():\n result_string += \"\\n\\t{0}: {1}\".format(label, line_number+1)\n # Added 1 because the line numbers are stored 0-indexed,\n # but we are printing 1-indexed line numbers.\n\n return result_string", "def __str__(self):\n details = \"{} - Level {} {} spell.\\n\" \\\n \"{} \\n\\n\" \\\n \"It has a duration of {} and a casting time of {}, with a range of {}.\\n\" \\\n \"It requires the components {} and has the tags: {}\\n\".format(self.name, self.level, self.school,\n self.description, self.duration,\n self.castingTime, self.range,\n self.components, self.tags)\n if self.area is not None:\n details += \"It has an area of {}.\".format(self.area)\n if self.damage is not None:\n if self.attack is not None:\n details += \"It is a {} attack. \".format(self.attack)\n if self.save is not None:\n details += \"It required a {} spell save. \".format(self.save)\n details += \"It deals {} damage.\\n\".format(self.damage)\n return details", "def summary(self):\n\n name='name:%s'%self.getName()\n damage='dégat:%s'%self.getDamage()\n ammos='munitions:%s'%self.getAmmos()\n return '\\n'.join([name, damage, ammos])", "def print_resource_details(self):\n print(f\"E-resource Details:\\n{super().get_resource_details()}\")", "def __repr__(self):\n\n result = \"\"\n for dessert in self.desserts:\n result += f\"{dessert}\\n\"\n return result", "def format_recipe_data_as_html(cls, recipe_data):\n ingredients = \"\\n\".join([\n strip_tags(ingredient[\"originalString\"])\n for ingredient in recipe_data[\"extendedIngredients\"]\n ])\n\n raw_instructions = recipe_data['instructions']\n if not raw_instructions:\n instructions = \"This recipe didn't have instructions! 
=O\"\n else:\n # Clean up instructions\n instructions = re.sub(\" +\", \" \",\n strip_tags(raw_instructions)).strip()\n\n formatted = (f\"<b>{strip_tags(recipe_data['title'])}</b>\\n\"\n f\"Cooktime: {recipe_data['readyInMinutes']} minutes\\n\\n\"\n f\"<u>Ingredients</u>\\n\"\n f\"{ingredients}\\n\\n\"\n f\"<u>Instructions</u>\\n\"\n f\"{instructions}\")\n\n return formatted", "def _getDiagnosticString():\n text = '\\n## Diagnostic output from tacos2 ## \\n\\n'\n text += 'Tacos2 version: ' + __version__ + '\\n'\n text += 'Tacos2 status: ' + __status__ + '\\n'\n text += 'File name (with relative path): ' + __file__ + '\\n'\n text += 'Full file path: ' + os.path.abspath(__file__) + '\\n\\n'\n text += 'pySerial version: ' + serial.VERSION + '\\n'\n text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\\n\\n'\n text += 'Platform: ' + sys.platform + '\\n'\n text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\\n'\n text += 'Byteorder: ' + sys.byteorder + '\\n'\n text += 'Python version: ' + sys.version + '\\n'\n text += 'Python version info: ' + repr(sys.version_info) + '\\n'\n text += 'Python flags: ' + repr(sys.flags) + '\\n'\n text += 'Python argv: ' + repr(sys.argv) + '\\n'\n text += 'Python prefix: ' + repr(sys.prefix) + '\\n'\n text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\\n'\n text += 'Python executable: ' + repr(sys.executable) + '\\n'\n try:\n text += 'Long info: ' + repr(sys.long_info) + '\\n'\n except:\n text += 'Long info: (none)\\n' # For Python3 compatibility\n try:\n text += 'Float repr style: ' + repr(sys.float_repr_style) + '\\n\\n'\n except:\n text += 'Float repr style: (none) \\n\\n' # For Python 2.6 compatibility\n text += 'Variable __name__: ' + __name__ + '\\n'\n text += 'Current directory: ' + os.getcwd() + '\\n\\n'\n text += 'Python path: \\n'\n text += '\\n'.join(sys.path) + '\\n'\n text += '\\n## End of diagnostic output ## \\n'\n return text", "def recipes(self, args):\n ctx = self.ctx\n if args.compact:\n print(\" \".join(set(Recipe.list_recipes(ctx))))\n else:\n for name in sorted(Recipe.list_recipes(ctx)):\n try:\n recipe = Recipe.get_recipe(name, ctx)\n except (IOError, ValueError):\n warning('Recipe \"{}\" could not be loaded'.format(name))\n except SyntaxError:\n import traceback\n traceback.print_exc()\n warning(('Recipe \"{}\" could not be loaded due to a '\n 'syntax error').format(name))\n version = str(recipe.version)\n print('{Fore.BLUE}{Style.BRIGHT}{recipe.name:<12} '\n '{Style.RESET_ALL}{Fore.LIGHTBLUE_EX}'\n '{version:<8}{Style.RESET_ALL}'.format(\n recipe=recipe, Fore=Out_Fore, Style=Out_Style,\n version=version))\n print(' {Fore.GREEN}depends: {recipe.depends}'\n '{Fore.RESET}'.format(recipe=recipe, Fore=Out_Fore))\n if recipe.conflicts:\n print(' {Fore.RED}conflicts: {recipe.conflicts}'\n '{Fore.RESET}'\n .format(recipe=recipe, Fore=Out_Fore))\n if recipe.opt_depends:\n print(' {Fore.YELLOW}optional depends: '\n '{recipe.opt_depends}{Fore.RESET}'\n .format(recipe=recipe, Fore=Out_Fore))", "def phits_print(self):\n\t\txyz0 = \" \".join(str(i) for i in self.xyz0)\n\t\th = \" \".join(str(i) for i in self.h)\n\t\ttxt = \\\n\t\t\tf\" {self.sn} {self.trn} \" + \\\n\t\t\tf\"{self.symbol} {xyz0} {h} {self.r}\" + \\\n\t\t\tf\" $ name: '{self.name}' (cylinder) [x0 y0 z0] [Hx Hy Hz] R\"\n\n\t\tif self.trn != \"\":\n\t\t\ttxt += f\" with tr{self.trn}\"\n\t\treturn txt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in the initial position (X_o) of the system along with an optional set of parameters (params), and runs DDP to meet some desired state. params
def __init__(self,X_o,**params): #------------------------------------------------> #----------> Possible Parameter Values ----------> #------------------------------------------------> # Horizon - Number of timesteps into the future we wish to program self.Horizon = params.get("Horizon",300) # NumberOfIterations - Number of times to iterate the DDP self.NumberOfIterations = params.get("NumberOfIterations",100) # dt - Discrete timestep self.dt = params.get("dt",0.01) # U_o (U) - Initial input to the system (set to U when intializing DDP) self.U_o = params.get("U_o",None) if self.U_o is None: self.U = np.zeros((self.Horizon-1,)) else: self.U = self.U_o # p_target - Target state for the system to reach. self.p_target = params.get("p_target",np.matrix([[np.pi/2,0]]).T) # LearningRate - rate at which the system converges to the new input. self.LearningRate = params.get("LearningRate",0.2) # Q_f - Terminal cost matrix self.Q_f = params.get("Q_f",50*np.matrix(np.eye(2))) # R - Running cost scalar (only one input). self.R = params.get("R",1e-3) # X_o - Initial state vector is the system self.X_o = X_o
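The document cell above only shows the `__init__` of the DDP runner described by the query. A minimal sketch of how such a class would be instantiated is given below; the class name `DDP`, the sample initial state, and the overridden keyword values are assumptions, while the keyword names and defaults are taken from the `__init__` above.

import numpy as np

# Sketch only: condensed copy of the __init__ above inside a hypothetical DDP class.
class DDP:
    def __init__(self, X_o, **params):
        self.Horizon = params.get("Horizon", 300)            # timesteps per rollout
        self.NumberOfIterations = params.get("NumberOfIterations", 100)
        self.dt = params.get("dt", 0.01)                      # discrete timestep
        self.U_o = params.get("U_o", None)                    # initial input sequence
        self.U = np.zeros((self.Horizon - 1,)) if self.U_o is None else self.U_o
        self.p_target = params.get("p_target", np.matrix([[np.pi / 2, 0]]).T)
        self.LearningRate = params.get("LearningRate", 0.2)
        self.Q_f = params.get("Q_f", 50 * np.matrix(np.eye(2)))  # terminal cost
        self.R = params.get("R", 1e-3)                        # running input cost
        self.X_o = X_o

# Pendulum-style initial state [angle, angular velocity], with a few overrides.
ddp = DDP(np.matrix([[0.0], [0.0]]), Horizon=500, dt=0.005, LearningRate=0.1)
print(ddp.U.shape, ddp.p_target.T)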
[ "def qp_controller(current_state, desired_state, dt, dim=2):\n\n # torque PD controller values\n wheel_kp = 50.0\n wheel_kd = 10.0\n max_torque = 20.0\n\n # cost on obtaining next state and velocity\n kp = 0.0\n kd = 1.0\n\n # half state length\n hl = len(current_state) / 2\n\n mp = MathematicalProgram()\n\n x = mp.NewContinuousVariables(len(current_state), \"x\")\n u = mp.NewContinuousVariables(1, \"u\")\n force = mp.NewContinuousVariables(8, \"force\")\n\n # set the initial state\n set_initial_state(mp, x, current_state, dim)\n # enforce the dynamics with linearized theta\n state = x + get_nd_dynamics(x, u, force, dim, current_state[dim])*dt\n\n # stay on floor\n # add_floor_constraint(mp, state, dim)\n # for corner to ground\n # fix_corner_to_ground(mp, state, 0, -0.5, dim)\n # don't pull on ground\n dont_pull_on_ground(mp, force, dim)\n # bounded to not leave the ground\n # stay_on_ground(mp, state, dim)\n # only force when on ground\n complimentarity_constraint(mp, state, force, dim)\n\n # linearize theta to set this cost\n add_corner_cost(mp, state, 0, -0.5, dim, current_state[dim])\n\n # unpack the states\n x_s = state[0]\n y = state[1]\n theta = state[dim]\n alpha = state[hl-1]\n xdot = state[0+hl]\n ydot = state[1+hl]\n theta_dot = state[dim+hl]\n alpha_dot = state[-1]\n\n # unpack the desired states\n x_des = desired_state[0]\n y_des = desired_state[1]\n theta_des = desired_state[dim]\n alpha_des = desired_state[hl-1]\n xdot_des = desired_state[0+hl]\n ydot_des = desired_state[1+hl]\n theta_dot_des = desired_state[dim+hl]\n alpha_dot_des = desired_state[-1]\n\n # current_pos = np.asarray([x_s,y,theta,alpha])\n # des_pos = np.asarray([x_des,y_des,theta_des,alpha_des])\n # pos_diff = current_pos - des_pos\n current_pos = np.asarray([x_s,y,theta,0])\n des_pos = np.asarray([x_des,y_des,theta_des,0])\n pos_diff = current_pos - des_pos\n\n # current_vel = np.asarray([xdot,ydot,theta_dot,alpha_dot])\n # des_vel = np.asarray([xdot_des,ydot_des,theta_dot_des,alpha_dot_des])\n # vel_diff = current_vel - des_vel\n current_vel = np.asarray([xdot,ydot,theta_dot,0])\n des_vel = np.asarray([xdot_des,ydot_des,theta_dot_des,0])\n vel_diff = current_vel - des_vel\n\n pos = pos_diff.dot(pos_diff)\n vel = vel_diff.dot(vel_diff)\n\n mp.AddQuadraticCost(kp*pos)\n mp.AddQuadraticCost(kd*vel)\n\n # torque PD controller\n input_torque = wheel_kp*(current_state[dim] - np.pi/4.0) + wheel_kd*current_state[dim+hl]\n input_torque = np.clip(input_torque, -max_torque, max_torque)\n mp.AddConstraint(u[0] == input_torque)\n\n sol = mp.Solve()\n # print(sol)\n\n my_torque = mp.GetSolution(u)\n my_force = mp.GetSolution(force)\n my_start = mp.GetSolution(x)\n\n return my_start, my_torque, my_force", "def init_distributed(self):\n self.model = DDP(self.model, device_ids=[self.device])", "def der_cost ( self, x_dict, state_config ):\n i = 0\n cost = 0.\n n = 0\n n = 0\n for param, typo in state_config.iteritems():\n if typo == CONSTANT:\n n += 1\n elif typo == VARIABLE:\n n_elems = len ( x_dict[param] )\n n += n_elems\n der_cost = np.zeros ( n )\n x_params = np.empty ( ( len( x_dict.keys()), self.nt ) )\n j = 0\n ii = 0\n the_derivatives = np.zeros ( ( len( x_dict.keys()), self.nt ) )\n for param, typo in state_config.iteritems():\n \n if typo == FIXED or typo == CONSTANT:\n x_params[ j, : ] = x_dict[param]\n \n elif typo == VARIABLE:\n x_params[ j, : ] = x_dict[param]\n\n j += 1\n \n\n for itime, tstep in enumerate ( self.state_grid ):\n if self.mask[itime, 0] == 0:\n # No obs here\n continue\n # We use the 
`get_emulator` method to select the required\n # emulator for this geometry, spectral setting etc\n obs_ops = self.get_emulator ( itime, self.mask, self.emulators )\n sigma_obs_vis, sigma_obs_vis = self.bu[ :, itime ]\n # forward model the proposal\n x = x_params[:, itime]\n model_albedo_vis, vis_var, vis_der = \\\n obs_ops[0] ( np.atleast_2d(x) )\n model_albedo_nir, nir_var, nir_der = \\\n obs_ops[1] ( np.atleast_2d(x) )\n # Calculate the actual cost\n this_cost = 0.5*( model_albedo_vis - albedo_vis )**2/sigma_obs_vis**2 + \\\n 0.5*( model_albedo_nir - albedo_nir )**2/sigma_obs_nir**2\n \n # The partial derivatives of the cost function are then\n this_der= (1./sigma_obs_vis**2)*( model_albedo_vis - \\\n albedo_vis )*vis_der + \\\n (1./sigma_obs_nir**2)*( model_albedo_nir - albedo_nir )*nir_der \n \n\n cost += this_cost\n the_derivatives[ :, itime] = this_der\n \n \n j = 0\n for i, (param, typo) in enumerate ( state_config.iteritems()) :\n if typo == CONSTANT:\n der_cost[j] = the_derivatives[i, 0]\n j += 1\n elif typo == VARIABLE:\n n_elems = len ( x_dict[param] )\n der_cost[j:(j+n_elems) ] = the_derivatives[i, :]\n j += n_elems\n \n return cost, der_cost", "def generateMDP(v,a,G, p =0.9):\n debug = False;\n P= np.zeros((v,v,a)); d = np.zeros((v,a))\n for node in range(v):#x_now = node\n nodeInd = node+1;\n neighbours = list(G.neighbors(nodeInd));\n totalN = len(neighbours);\n # chance of not reaching action\n pNot = (1.-p)/(totalN);\n actionIter = 0;\n if debug: \n print (neighbours);\n for neighbour in neighbours: # neighbour = x_next\n neighbourInd = neighbour - 1;\n P[neighbourInd,node,actionIter] = p;\n # chance of ending somewhere else\n for scattered in neighbours:\n scatteredInd = scattered -1;\n if debug:\n print (scattered);\n if scattered != neighbour:\n # probablity of ending up at a neighbour\n P[scatteredInd,node,actionIter] = pNot;\n # some probability of staying stationary\n P[node,node,actionIter] =pNot;\n actionIter += 1; \n while actionIter < a: # chances of staying still \n P[node, node, actionIter] = 1.0;\n# P[node, node, actionIter] = p;\n# pNot = (1.-p)/(totalN);\n# for scattered in neighbours: \n# scatteredInd = scattered -1;\n# P[scatteredInd,node,actionIter] = pNot;\n actionIter += 1;\n # test the cost function\n c = 1000.*np.ones((v,a))\n c[6] = 0.;\n\n return P,c", "def dfield_dpar(self, X, par):\r\n (D, M) = np.shape(X)\r\n deriv_par = np.zeros((D,M,len(par))) # initialize the output\r\n\r\n #=========================type your code below=========================\r\n no need to change this line if using 'lib_dynamics'\r\n #===============================end here===============================\r\n return deriv_par", "def dsde_NC_opt(E_1,E_2,param):\n if NUDSDE_ERROR :\n quit()\n print \"NC:NEU:XSECTIONS:ERROR: Loading NUSIGMA interface : nudsde.\"\n \n if PC.act_dsde_NC_n_inter == 0 or PC.act_dsde_NC_a_inter == 0 or PC.E_NC_act != E_1 :\n E_lep = gt.LogSpaceEnergies(0.1,E_1,200)\n \n dsde_n = [oxs.dsde(E_1,EE,1,'N','NC')*(param.cm**2/param.GeV) for EE in E_lep]\n dsde_a = [oxs.dsde(E_1,EE,2,'N','NC')*(param.cm**2/param.GeV) for EE in E_lep]\n \n inter_n = interpolate.interp1d(E_lep,dsde_n)\n inter_a = interpolate.interp1d(E_lep,dsde_a)\n \n PC.E_NC_act = E_1\n PC.act_dsde_NC_n_inter = inter_n\n PC.act_dsde_NC_a_inter = inter_a\n \n if param.neutype == \"neutrino\":\n inter = PC.act_dsde_NC_n_inter\n elif param.neutype == \"antineutrino\":\n inter = PC.act_dsde_NC_a_inter\n \n return inter(E_2)", "def run(self, **kwargs):\n\n # Check if we want Matlab struct style 
output dictionary, remove from kwargs\n matlab_config = kwargs.pop('matlab',False)\n\n # Change design variables if user supplies them from remaining keyword\n # entries or dictionary, validate input\n # kwargs = self.validate_input_vars(kwargs)\n geo_vars = ['wing.A','wing.Iy','wing.Iz','wing.J','fuelburn']\n for name, val in iteritems(kwargs):\n # print('var=',var,' val=',val)\n self.set_var(name, val)\n # self.prob[var] = val\n\n # Have more verbose output about optimization convergence\n if self.prob_dict['print_level']:\n self.prob.print_all_convergence()\n\n # Save an N2 diagram for the problem\n if self.prob_dict['record_db']:\n view_model(self.prob, outfile=self.prob_dict['prob_name']+\".html\", show_browser=False)\n\n # If `optimize` == True in prob_dict, perform optimization. Otherwise,\n # simply pass the problem since analysis has already been run.\n if not self.prob_dict['optimize']:\n # Run a single analysis loop. This shouldn't actually be\n # necessary, but sometimes the .db file is not complete unless we do this.\n self.prob.run_once()\n else:\n # Perform optimization\n self.prob.run()\n\n # If the problem type is aero or aerostruct, we can compute the static margin.\n # This is a naive tempoerary implementation that currently finite differences\n # over the entire model to obtain the static margin.\n if self.prob_dict['compute_static_margin'] and 'aero' in self.prob_dict['type']:\n\n # Turn off problem recording (so nothing for these computations\n # appears in the .db file) and get the current CL and CM.\n self.prob.driver.recorders._recorders = []\n CL = self.prob['wing_perf.CL']\n CM = self.prob['CM'][1]\n step = 1e-5\n\n # Perturb alpha and run an analysis loop to obtain the new CL and CM.\n self.prob['alpha'] += step\n self.prob.run_once()\n CL_new = self.prob['wing_perf.CL']\n CM_new = self.prob['CM'][1]\n\n # Un-perturb alpha and run a single analysis loop to get the problem\n # back to where it was before we finite differenced.\n self.prob['alpha'] -= step\n self.prob.run_once()\n\n # Compute, print, and save the static margin in metadata.\n static_margin = -(CM_new - CM) / (CL_new - CL)\n print(\"Static margin is:\", static_margin)\n self.prob.root.add_metadata('static_margin', static_margin)\n\n # Uncomment this to check the partial derivatives of each component\n # self.prob.check_partial_derivatives(compact_print=True)\n\n # Return dictionary of output values for easy access\n if matlab_config:\n output = {} # Return standard dict for Matlab output\n else:\n output = OrderedDict()\n\n # Note: could also check in self.root._unknowns_dict and self.root._params_dict\n # in OpenMDAO Group() object\n\n \t# Add design variables to output dict\n for name in self.prob.driver._desvars:\n output[name] = self.get_var(name)\n\n # Get overall output variables and constraints, return None if not there\n overall_vars = ['fuelburn','CD','CL','L_equals_W','CM','v','rho','cg',\n 'weighted_obj','total_weight']\n for item in overall_vars:\n try:\n output[item] = self.get_var(item)\n except:\n pass\n\n var_map = OrderedDict()\n # get lifting surface specific variables and constraints, return None if not there\n var_map.update({\n 'mesh' : '<name>.mesh',\n 'thickness' : '<name>.thickness',\n 'twist' : '<name>.twist',\n 'chord' : '<name>.chord'\n })\n if self.prob_dict[\"type\"] == 'struct':\n var_map.update({\n 'structural_weight' : '<name>.structural_weight',\n 'CD' : '<name>.CD',\n 'CL' : '<name>.CL',\n 'failure' : '<name>.failure',\n 'vonmises' : '<name>.vonmises',\n 
'thickness_intersects' : '<name>.thickness_intersects',\n 'cg' : '<name>.cg_location',\n })\n elif self.prob_dict[\"type\"] in ['aerostruct','aero']:\n var_map.update({\n 'structural_weight' : '<name>_perf.structural_weight',\n 'CD' : '<name>_perf.CD',\n 'CL' : '<name>_perf.CL',\n 'failure' : '<name>_perf.failure',\n 'vonmises' : '<name>_perf.vonmises',\n 'thickness_intersects' : '<name>_perf.thickness_intersects',\n 'cg' : '<name>_perf.cg_location',\n })\n\n # lifting surface coupling variables\n var_map.update({\n 'loads' : 'coupled.<name>.loads',\n 'def_mesh' : 'coupled.<name>.def_mesh'\n })\n\n for surf in self.surfaces:\n surf_name = surf[\"name\"][:-1]\n for key, val in iteritems(var_map):\n try:\n var_value = self.prob[val.replace('<name>',surf_name)]\n output.update({surf_name+'.'+key : var_value})\n except:\n pass\n\n # Change output dictionary keys to repalce '.' with '_' so that they\n # will work in Matlab struct object\n if matlab_config:\n output_keys = list(output.keys())\n for key in output_keys:\n newkey = key.replace('.','_')\n val = output.pop(key)\n output[newkey] = val\n\n return output", "def main():\r\n\r\n def dxdt_equals_x(t, x):\r\n \"\"\"\r\n Function defining ODE dxdt = x\r\n :param t: t value\r\n :param x: x value\r\n :return: returns value of dxdt at (t,x)\r\n \"\"\"\r\n dxdt = x\r\n return dxdt\r\n\r\n def dxdt_equals_x_true(t):\r\n \"\"\"\r\n Returns true values of x for the ODE dxdt = x for given values of t\r\n :param t: t value(s) to return solution for\r\n :return: Returns true values of x for the ODE dxdt = x for given values of t\r\n \"\"\"\r\n x = np.exp(t)\r\n return x\r\n\r\n t = np.linspace(0, 1, 100)\r\n \"\"\"\r\n Euler, h = 0.01\r\n \"\"\"\r\n ex1_euler_sol = solve_ode(dxdt_equals_x, 1, t, 'euler', 0.01, False)\r\n\r\n \"\"\"\r\n 4th Order Runge-Kutta, h = 0.01\r\n \"\"\"\r\n ex1_rk4_sol = solve_ode(dxdt_equals_x, 1, t, 'rk4', 0.01, False)\r\n\r\n \"\"\"\r\n Plotting solutions and true solution\r\n \"\"\"\r\n plt.plot(t, ex1_euler_sol, label='Euler')\r\n plt.plot(t, ex1_euler_sol, label='RK4')\r\n plt.plot(t, dxdt_equals_x_true(t), label='True')\r\n plt.xlabel('t')\r\n plt.ylabel('x')\r\n plt.legend()\r\n plt.show()\r\n\r\n \"\"\"\r\n Example 2 - System of ODEs\r\n\r\n d2x/dt2 = -x, initial condition x(0) = 1\r\n \r\n This is equivalent to the system of ODEs:\r\n \r\n dx/dt = y, dy/dt = -x, initial conditions x(0) = 1, y(0) = 1\r\n\r\n Solving for t = 0 to t = 10\r\n \"\"\"\r\n\r\n def d2xdt2_equals_minus_x(t, u):\r\n \"\"\"\r\n Function defining system of ODEs dx/dt = y, dy/dt = -x\r\n :param t: t value\r\n :param u: vector u = [x, y]\r\n :return: returns value of dx/dt and dy/dt at (t,u)\r\n \"\"\"\r\n x = u[0]\r\n y = u[1]\r\n\r\n dxdt = y\r\n dydt = -x\r\n\r\n return np.array([dxdt, dydt])\r\n\r\n def d2xdt2_equals_minus_x_true(t):\r\n \"\"\"\r\n Function returning true value of system of ODEs dxdt = y, dy/dt = -x\r\n :param t: t value\r\n :return: returns true value of x and y at t\r\n \"\"\"\r\n x = np.sin(t) + np.cos(t)\r\n y = np.cos(t) - np.sin(t)\r\n return np.array([x, y])\r\n\r\n t = np.linspace(0, 10, 100)\r\n \"\"\"\r\n Euler, h = 0.01\r\n \"\"\"\r\n ex2_euler_sol = solve_ode(d2xdt2_equals_minus_x, [1, 1], t, 'rk4', 0.01, True)\r\n ex2_euler_sol_x = ex2_euler_sol[0]\r\n ex2_euler_sol_y = ex2_euler_sol[1]\r\n\r\n \"\"\"\r\n 4th Order Runge-Kutta, h = 0.01\r\n \"\"\"\r\n ex2_rk4_sol = solve_ode(d2xdt2_equals_minus_x, [1, 1], t, 'rk4', 0.01, True)\r\n ex2_rk4_sol_x = ex2_rk4_sol[0]\r\n ex2_rk4_sol_y = ex2_rk4_sol[1]\r\n\r\n \"\"\"\r\n 
Plotting solutions and true solution\r\n \"\"\"\r\n true = d2xdt2_equals_minus_x_true(t)\r\n true_x = true[0]\r\n true_y = true[1]\r\n\r\n plt.subplot(2, 1, 1)\r\n plt.plot(t, ex2_euler_sol_x, label='Euler')\r\n plt.plot(t, ex2_rk4_sol_x, label='RK4')\r\n plt.plot(t, true_x, label='True')\r\n plt.legend()\r\n plt.xlabel('t')\r\n plt.ylabel('x')\r\n\r\n plt.subplot(2, 1, 2)\r\n plt.plot(t, ex2_euler_sol_y, label='Euler')\r\n plt.plot(t, ex2_rk4_sol_y, label='RK4')\r\n plt.plot(t, true_y, label='True')\r\n plt.legend()\r\n plt.xlabel('t')\r\n plt.ylabel('y (dx/dt)')\r\n plt.show()", "def ddqn_learning(args):\n algorithm = args.algo\n # make environement and define observation format with Wrappers\n env = gym.make(args.env_name)\n\n # partial obs wrapper for agent \n env = RGBImgPartialObsWrapper(env)\n env = ImgObsWrapper(env) # Get rid of the 'mission' field\n\n env.seed(0) # sets the seed\n\n obs = env.reset()\n #print(obs.shape)\n obsListener = env_unwrapper(env)\n agent = DDQNAgent(gamma=0.99, epsilon=1, lr=0.0001, input_dims=(obs.shape),\n n_actions=env.action_space.n, mem_size=50000, eps_min=0.1,\n batch_size=32, replace=1000, eps_dec=1e-5,\n chkpt_dir='../models/', algo=algorithm, env_name=args.env_name)\n\n load_checkpoint = False\n\n if load_checkpoint:\n agent.load_models()\n\n printFile = open('../results/' + 'results_' +algorithm+ args.env_name + '.txt', 'w')\n\n scores, avg_scores, std_scores, eps_history, steps_array = [], [], [], [], []\n\n best_score = -np.inf\n\n # logical symbols class\n ls = logical_symbols()\n \n print('Episode\\t','Steps\\t','Score\\t',\n 'Best_Score\\t','Epsilon\\t', file=printFile)\n \n for i in range(args.num_games):\n done = False\n observation = env.reset()\n #print(\"Episode: \", i)\n obsListener = env_unwrapper(env)\n \n state_label = \"\"\n special_symbols = ls.get_special_symbols(obsListener)\n u1 = agent.rm.u0 # initial state from reward machine\n\n n_steps = 0\n score = 0\n while not done:\n action = agent.choose_action(observation)\n observation_, reward, done, info = env.step(action)\n score += reward\n \n # Run parallel environment to check for environment objects states\n obsListener_ = env_unwrapper(env)\n special_symbols_ = ls.get_special_symbols(obsListener_)\n state_label = ls.return_symbol(special_symbols, special_symbols_, state_label)\n # Get reward machine state \n u2 = agent.rm.get_next_state(u1, state_label)\n reward_rm = agent.rm.delta_r[u1][u2].get_reward()\n \n if not load_checkpoint:\n agent.store_transition(observation, action,\n reward_rm, observation_, done)\n agent.learn()\n\n if score>0:\n print(\"||\",state_label,\"||\")\n #print(obsListener)\n #print(obsListener_)\n print(u1,u2,\"\\n\",reward_rm,score)\n\n\n # Update params\n u1 = deepcopy(u2)\n special_symbols = deepcopy(special_symbols_)\n obsListener = deepcopy(obsListener_)\n observation = deepcopy(observation_)\n n_steps += 1\n\n scores.append(score)\n steps_array.append(n_steps)\n avg_scores.append(np.mean(scores[-100:]))\n std_scores = np.append(std_scores, np.std(scores[-100:]))\n\n print('%d\\t' %i, '%d\\t' %n_steps,\n '%.2f\\t' %score,'%.2f\\t' %best_score,\n '%.2f\\t' %agent.epsilon, file=printFile)\n\n if score > best_score:\n if not load_checkpoint:\n agent.save_models()\n best_score = score\n\n eps_history.append(agent.epsilon)\n\n return avg_scores", "def run_adjoints(self):\n # Set the design variables\n self.assembler.setDesignVars(self.dv0)\n\n # Set node locations\n self.assembler.setNodes(self.xpts0)\n\n # Assemble the transpose stiffness matrix\n 
self.assembler.assembleJacobian(\n self.alpha, self.beta, self.gamma, None, self.mat, TACS.TRANSPOSE\n )\n self.pc.factor()\n\n # Solve for the adjoint variables\n self.assembler.addSVSens(\n self.func_list, self.dfdu_list, self.alpha, self.beta, self.gamma\n )\n for i in range(len(self.func_list)):\n self.gmres.solve(self.dfdu_list[i], self.adjoint_list[i])", "def corestationary(self,guess=None):\n if guess is None: guess = np.array(self.y0[:-1])\n else: guess = np.array(guess)\n y = self.model.inputSX(cs.DAE_X)\n t = self.model.inputSX(cs.DAE_T)\n p = self.model.inputSX(cs.DAE_P)\n ode = self.model.outputSX()\n fn = cs.SXFunction([y,t,p],[ode])\n kfn = cs.KinsolSolver(fn)\n abstol = 1E-10\n kfn.setOption(\"abstol\",abstol)\n kfn.setOption(\"constraints\",(2,)*self.NEQ)\n kfn.setOption(\"linear_solver\",\"dense\")\n kfn.setOption(\"numeric_jacobian\",True)\n kfn.setOption(\"u_scale\",(100/guess).tolist())\n kfn.setOption(\"numeric_hessian\",True)\n kfn.setOption(\"disable_internal_warnings\",True)\n kfn.init()\n kfn.setInput(self.paramset,1)\n kfn.setOutput(guess)\n kfn.evaluate()\n y0out = kfn.output().toArray()\n \n if any(np.isnan(y0out)):\n raise RuntimeError(\"findstationary: KINSOL failed to find \\\n acceptable solution\")\n \n self.ss = y0out.flatten()\n \n if np.linalg.norm(self.dydt(self.ss)) >= abstol or any(y0out <= 0):\n raise RuntimeError(\"findstationary: KINSOL failed to reach \\\n acceptable bounds\")\n \n self.eigs = np.linalg.eigvals(self.dfdy(self.ss))", "def generateSystem(num_points, odes, initial_state, parameters, dt):\n # Create matrix to store results [n_states x data_length)\n results = np.zeros([initial_state.shape[0], num_points])\n\n # Store Initial State\n results[:, 0] = initial_state\n\n # Initalize state as initial_state\n state = initial_state\n\n # Calculate Remaining States\n for point in range(num_points-1):\n # Calculate next State from current State using Runge-Kutta 4th order method\n state = rk4_singleStep(odes, state, parameters, dt)\n\n # Append state to results\n results[:, point+1] = state\n\n return results", "def __init__(self,\n mdp,\n abstr_epsilon_list=(),\n corruption_list=(),\n error_dicts=None,\n num_corrupted_mdps=10,\n num_agents=10,\n num_episodes=200,\n results_dir='exp_results',\n agent_type='abstraction',\n agent_exploration_epsilon=0.1,\n agent_learning_rate=0.1,\n decay_exploration=True,\n exploring_starts=False,\n step_limit=10000,\n detach_interval=None,\n prevent_cycles=False,\n variance_threshold=None,\n reset_q_value=False,\n agent_detach='abstr',\n detach_reassignment='group',\n detach_points=None,\n states_to_track=[],\n record_trajectories=False,\n seed=None,\n detach_only=False,\n track_error_states=False,\n include_online_abstraction=False,\n online_abstraction_training_episodes=10,\n online_abstraction_epsilon=0.05,\n noisy_abstr_types=[],\n abstr_error_distribution=None,\n abstr_error_parameters=None,\n per_state_abstr_error_distribution=None,\n per_state_abstr_error_parameters=None,\n noisy_abstr_epsilon=0.0,\n ground_only=False,\n skip_true=False,\n neighbor_factor=1,\n states_per_detach=1):\n # Check that agent_type is valid\n if agent_type not in ['standard', 'abstraction', 'tracking']:\n raise ValueError('\"agent_type\" variable must be \"standard\", \"abstraction\", or \"tracking\". 
Is currently '+str(agent_type))\n\n self.ground_mdp = mdp\n if abstr_epsilon_list is not None:\n for val in abstr_epsilon_list:\n if val[0] not in Abstr_type or val[1] < 0 or val[1] > 1:\n raise ValueError('Abstraction Epsilon List is invalid', abstr_epsilon_list)\n self.abstr_epsilon_list = abstr_epsilon_list\n self.corruption_list = corruption_list\n self.error_dicts = error_dicts\n self.num_agents = num_agents\n self.num_corrupted_mdps = num_corrupted_mdps\n self.results_dir = results_dir\n self.num_episodes = num_episodes\n self.agent_type = agent_type\n self.decay_exploration = decay_exploration\n self.exploring_starts = exploring_starts\n self.step_limit = step_limit\n self.agent_exploration_epsilon = agent_exploration_epsilon\n self.agent_learning_rate = agent_learning_rate\n self.detach_interval = detach_interval\n self.prevent_cycle = prevent_cycles\n self.variance_threshold = variance_threshold\n self.reset_q_value = reset_q_value\n self.agent_detach = agent_detach\n self.detach_reassignment = detach_reassignment\n self.detach_points = detach_points\n self.states_to_track = states_to_track\n self.record_trajectories = record_trajectories\n self.seed = seed\n self.detach_only = detach_only\n self.include_online_abstraction = include_online_abstraction\n self.online_abstraction_training_episodes = online_abstraction_training_episodes\n self.online_abstraction_epsilon = online_abstraction_epsilon\n self.abstr_error_distribution = abstr_error_distribution\n self.abstr_error_parameters = abstr_error_parameters\n self.noisy_abstr_types = noisy_abstr_types\n self.per_state_abstr_error_distribution = per_state_abstr_error_distribution\n self.per_state_abstr_error_parameters = per_state_abstr_error_parameters\n self.noisy_abstr_epsilon = noisy_abstr_epsilon\n self.include_noisy_abstractions = abstr_error_distribution or per_state_abstr_error_distribution\n self.ground_only = ground_only\n self.skip_true = skip_true\n self.neighbor_factor = neighbor_factor\n self.states_per_detach = 1\n\n # Create results dir if it doesn't exist\n if not os.path.exists(self.results_dir):\n os.makedirs(self.results_dir)\n\n # Clear out contents of results directory\n for filename in os.listdir(self.results_dir):\n full_file = os.path.join(self.results_dir, filename)\n if os.path.isdir(full_file):\n shutil.rmtree(full_file, ignore_errors=True)\n else:\n os.remove(full_file)\n\n # Make sure directory has all the necessary folders (true, corrupted, corrupted_w_detach)\n for folder_to_make in ['true', 'corrupted', 'corrupted_w_detach']:\n if not os.path.exists(os.path.join(self.results_dir, folder_to_make)):\n os.makedirs(os.path.join(self.results_dir, folder_to_make))\n\n\n # Agent ensembles will be stored in a dict where key is the (abstr_type, epsilon) tuple ('ground' in the case\n # of the ground MDP) and values are lists of agents. In the case of corrupted MDPs, the key will be\n # (abstr_type, epsilon, corruption_type, proportion)\n self.agents = {}\n\n all_ground_states = mdp.get_all_possible_states()\n\n # Run Value Iteration to get q-table for abstractions and to hold value of optimal policies\n vi = ValueIteration(mdp)\n vi.run_value_iteration()\n q_table = vi.get_q_table()\n self.vi_table = q_table\n self.vi = vi\n\n print('Finished vi')\n # Create abstract MDPs from each element of abstr_epsilon_list. 
val[0] is abstraction type, val[1] is epsilon\n self.abstr_mdp_dict = {}\n file_string = 'true/abstractions.csv'\n if not os.path.exists(os.path.join(self.results_dir, 'true')):\n os.makedirs(os.path.join(self.results_dir, 'true'))\n with open(os.path.join(self.results_dir, file_string), 'w', newline='') as abstr_file:\n abstr_writer = csv.writer(abstr_file)\n for val in abstr_epsilon_list:\n print('Making abstraction', val)\n state_abstr = make_abstr(q_table, val[0], val[1])\n self.abstr_mdp_dict[(val[0], val[1])] = AbstractMDP(mdp, state_abstr)\n abstr_writer.writerow((val[0], val[1], AbstractMDP(mdp, state_abstr).abstr_to_string()))\n\n # Create (self.num_corrupted_mdps) corrupted versions of MDPs (if applicable) from each element of\n # corruption_list\n # This is stored in a dictionary, mapping tuples of (abstractMDP type, abstraction_epsilon, corruption_type,\n # proportion, number) to a corrupted abstract MDP\n # This is a messy way of storing things, but it doesn't really matter because the self.agents dictionary\n # is what we use to run the experiment\n # This also writes the corrupted state abstractions to a file\n # These are generated in such a way that all corrupt MDPs with the same batch number will have errors\n # in the same ground states\n self.corrupt_mdp_dict = {}\n if self.corruption_list is not None:\n if not os.path.exists(os.path.join(self.results_dir, 'corrupted')):\n os.makedirs(os.path.join(self.results_dir, 'corrupted'))\n corr_abstr_file = open(os.path.join(self.results_dir, 'corrupted/corrupted_abstractions.csv'), 'w', newline='')\n err_state_file = open(os.path.join(self.results_dir, 'corrupted/error_states.csv'), 'w', newline='')\n abstr_writer = csv.writer(corr_abstr_file)\n err_writer = csv.writer(err_state_file)\n for val in corruption_list:\n # Unpack the values in corruption list; first is corruption type and second is proportion\n corr_type = val[0]\n prop = val[1]\n # We generate 1 corrupt MDP per abstraction type at a time. This way, we can ensure that all\n # corrupt MDPs with the same key and batch number have the same error states. 
Also write to file\n # so they can be visualized later\n for i in range(self.num_corrupted_mdps):\n states_to_corrupt = np.random.choice(self.ground_mdp.get_all_possible_states(),\n #size=int(np.floor(len(self.ground_mdp.get_all_possible_states()) * prop)),\n size=int(prop),\n replace=False)\n for state in states_to_corrupt:\n while (state.x, state.y) in mdp.goal_location or len(np.unique(states_to_corrupt)) < prop:\n state = np.random.choice(self.ground_mdp.get_all_possible_states())\n # Record in states_to_track if track_error_states is true\n if track_error_states:\n self.states_to_track.append(state)\n for key in self.abstr_mdp_dict.keys():\n # Create a corrupt state abstraction for this batch number and list of states\n abstr_mdp = self.abstr_mdp_dict[key]\n c_s_a = make_corruption(abstr_mdp, states_to_corrupt, corr_type=corr_type)\n # Get the correct and incorrect abstract states for the corrupted ground states\n # and write all these to a file\n error_line = []\n for state in states_to_corrupt:\n true_state = abstr_mdp.get_abstr_from_ground(state)\n corr_state = c_s_a.get_abstr_from_ground(state)\n error_line.append(((state.x, state.y), true_state.data, corr_state.data))\n temp_key = (abstr_mdp.abstr_type, abstr_mdp.abstr_epsilon, corr_type, prop, i)\n # Record the error states\n err_writer.writerow((temp_key, error_line))\n # Make an abstract MDP with this corrupted state abstraction\n corrupt_abstr_mdp = AbstractMDP(self.ground_mdp, c_s_a)\n # Record the dictionary describing the state abstraction\n abstr_writer.writerow((temp_key, corrupt_abstr_mdp.abstr_to_string()))\n self.corrupt_mdp_dict[(abstr_mdp.abstr_type,\n abstr_mdp.abstr_epsilon,\n corr_type,\n prop,\n i)] = corrupt_abstr_mdp\n elif self.error_dicts is not None:\n # Record in states_to_track if applicable\n #for error_state in self.error_dicts.keys():\n # self.states_to_track.append(error_state)\n\n corr_abstr_file = open(os.path.join(self.results_dir, 'corrupted/corrupted_abstractions.csv'), 'w', newline='')\n err_state_file = open(os.path.join(self.results_dir, 'corrupted/error_states.csv'), 'w', newline='')\n abstr_writer = csv.writer(corr_abstr_file)\n err_writer = csv.writer(err_state_file)\n\n # We generate 1 corrupt MDP per abstraction type at a time. This way, we can ensure that all\n # corrupt MDPs with the same key and batch number have the same error states. 
Also write to file\n # so they can be visualized later\n for j in range(len(self.error_dicts)):\n error_dict = self.error_dicts[j]\n for i in range(self.num_corrupted_mdps):\n for key in self.abstr_mdp_dict.keys():\n # Create a corrupt state abstraction for this batch number and list of states\n abstr_mdp = self.abstr_mdp_dict[key]\n c_s_a = make_corruption(abstr_mdp, reassignment_dict=error_dict)\n # Get the correct and incorrect abstract states for the corrupted ground states\n # and write all these to a file\n error_line = []\n for state in error_dict.keys():\n true_state = abstr_mdp.get_abstr_from_ground(state)\n corr_state = c_s_a.get_abstr_from_ground(state)\n error_line.append(((state.x, state.y), true_state.data, corr_state.data))\n temp_key = (abstr_mdp.abstr_type, abstr_mdp.abstr_epsilon, 'explicit errors', j, i)\n # Record the error states\n err_writer.writerow((temp_key, error_line))\n # Make an abstract MDP with this corrupted state abstraction\n corrupt_abstr_mdp = AbstractMDP(self.ground_mdp, c_s_a)\n # Record the dictionary describing the state abstraction\n abstr_writer.writerow((temp_key, corrupt_abstr_mdp.abstr_to_string()))\n self.corrupt_mdp_dict[(abstr_mdp.abstr_type,\n abstr_mdp.abstr_epsilon,\n 'explicit errors',\n j,\n i)] = corrupt_abstr_mdp\n\n # Create random noise abstractions if that parameter is set\n if self.abstr_error_distribution or self.per_state_abstr_error_distribution:\n self.noisy_abstr_dict = {}\n if not os.path.isdir(os.path.join(self.results_dir, 'noisy')):\n os.mkdir(os.path.join(self.results_dir, 'noisy'))\n file_string = 'noisy/abstractions.csv'\n noisy_abstr_file = open(os.path.join(self.results_dir, file_string), 'w', newline='')\n noisy_abstr_writer = csv.writer(noisy_abstr_file)\n for abstr_type in self.noisy_abstr_types:\n for i in range(self.num_corrupted_mdps):\n #TODO Check that this is returning what you think it is. This is where the problem is\n noisy_mdp = apply_noise_from_distribution(self.ground_mdp,\n abstr_type,\n approximation_epsilon=self.noisy_abstr_epsilon,\n distribution=self.abstr_error_distribution,\n distribution_parameters=self.abstr_error_parameters,\n per_state_distribution=self.per_state_abstr_error_distribution,\n per_state_parameters=self.per_state_abstr_error_parameters\n )\n print(type(mdp), type(noisy_mdp))\n self.noisy_abstr_dict[(abstr_type, i)] = noisy_mdp\n noisy_abstr_writer.writerow((abstr_type, i, noisy_mdp.abstr_to_string()))\n\n print('Making agents')\n # Create agents on ground mdp\n ground_agents = []\n for i in range(self.num_agents):\n if self.seed:\n temp_seed = self.seed + i\n else:\n temp_seed = None\n temp_mdp = self.ground_mdp.copy()\n # If agent_type == 'standard', use regular q-learning. 
If 'abstraction', use abstraction agent\n if self.agent_type == 'standard':\n agent = Agent(temp_mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n\n # Because tracking agent doesn't support agent w/o abstraction, so make it abstraction agent\n elif self.agent_type == 'abstraction':\n agent = AbstractionAgent(temp_mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n else:\n print('tracking agent', i)\n agent = TrackingAgent(temp_mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed,\n ground_states=all_ground_states)\n ground_agents.append(agent)\n self.agents['ground'] = ground_agents\n\n # Create MDPs for online abstraction construction if applicable\n self.online_abstr_agents = []\n if self.include_online_abstraction:\n self.online_abstr_agents = {}\n file_string = 'online/abstractions.csv'\n if not os.path.exists(os.path.join(self.results_dir, 'online')):\n os.makedirs(os.path.join(self.results_dir, 'online'))\n online_abstr_file = open(os.path.join(self.results_dir, file_string), 'w', newline='')\n online_abstr_writer = csv.writer(online_abstr_file)\n #for abstr_type, eps in self.abstr_epsilon_list:\n for j in range(self.num_corrupted_mdps):\n ensemble = []\n for i in range(self.num_agents):\n mdp = copy.deepcopy(self.ground_mdp)\n if self.agent_type == 'standard':\n agent = Agent(mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration)\n elif self.agent_type == 'abstraction':\n agent = AbstractionAgent(mdp, None,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration)\n elif self.agent_type == 'tracking':\n agent = TrackingAgent(mdp, s_a=None, epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n ground_states=all_ground_states)\n else:\n raise ValueError(\"Agent Type '\" + str(self.agent_type) + \"' is not supported\")\n ensemble.append(agent)\n self.online_abstr_agents[(j, 'online')] = ensemble\n\n # Create agents on abstract MDPs\n for abstract_mdp in self.abstr_mdp_dict.values():\n abstract_mdp_ensemble = []\n for i in range(self.num_agents):\n if self.seed:\n temp_seed = self.seed + i\n else:\n temp_seed = None\n # Create agents according to agent_type parameter\n if self.agent_type == 'standard':\n temp_mdp = abstract_mdp.copy()\n agent = Agent(temp_mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n abstract_mdp_ensemble.append(agent)\n elif self.agent_type == 'abstraction':\n temp_mdp = self.ground_mdp.copy()\n s_a = copy.deepcopy(abstract_mdp.state_abstr)\n agent = AbstractionAgent(temp_mdp,\n s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n abstract_mdp_ensemble.append(agent)\n elif self.agent_type == 'tracking':\n temp_mdp = self.ground_mdp.copy()\n s_a = copy.deepcopy(abstract_mdp.state_abstr)\n agent = TrackingAgent(temp_mdp,\n s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration)\n abstract_mdp_ensemble.append(agent)\n self.agents[(abstract_mdp.abstr_type, abstract_mdp.abstr_epsilon)] = abstract_mdp_ensemble\n\n # Create agents on 
noisily-abstracted MDPs if that parameter is set\n if self.abstr_error_distribution:\n self.noisy_abstr_agents = {}\n self.noisy_abstr_detach_agents = {}\n for (abstr_type, num) in self.noisy_abstr_dict.keys():\n ensemble = []\n ensemble2 = []\n for i in range(self.num_agents):\n #mdp = copy.deepcopy(self.noisy_abstr_dict[(abstr_type, num)])\n mdp = copy.deepcopy(self.ground_mdp)\n noisy_s_a = self.noisy_abstr_dict[(abstr_type, num)].state_abstr\n if self.agent_type == 'tracking':\n agent = TrackingAgent(mdp,\n s_a=noisy_s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration)\n else:\n raise ValueError(\"Agent type \" + str(self.agent_type) + \" not supported with noisy abstraction\")\n ensemble.append(agent)\n t_agent = copy.deepcopy(agent)\n ensemble2.append(t_agent)\n self.noisy_abstr_agents[(abstr_type, num)] = ensemble\n self.noisy_abstr_detach_agents[(abstr_type, num)] = ensemble2\n\n # Create agents on corrupted abstract MDPs. Remember that we have self.num_corrupted_mdps ensembles for each\n # combination of abstractMDP type and entry in self.corruption_list.\n # self.corr_agents is now a dictionary mapping (abstr_type, epsilon, corr_type, proportion, batch_num) to\n # a list of agents\n self.corr_agents = {}\n for corr_key in self.corrupt_mdp_dict.keys():\n corr_ensemble = []\n for i in range(self.num_agents):\n if self.seed:\n temp_seed = self.seed + i\n else:\n temp_seed = None\n # Create agents according to agent_type parameter\n if self.agent_type == 'standard':\n temp_mdp = self.corrupt_mdp_dict[corr_key].copy()\n agent = Agent(temp_mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n corr_ensemble.append(agent)\n else:\n temp_mdp = copy.deepcopy(self.ground_mdp.copy())\n corr_mdp = copy.deepcopy(self.corrupt_mdp_dict[corr_key])\n s_a = copy.deepcopy(corr_mdp.state_abstr)\n if self.agent_type == 'abstraction':\n agent = AbstractionAgent(temp_mdp,\n s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n elif self.agent_type == 'tracking':\n agent = TrackingAgent(temp_mdp,\n s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration)\n else:\n raise ValueError(\"Agent type \\'\" + str(self.agent_type) + \"\\' is not supported\")\n corr_ensemble.append(agent)\n self.corr_agents[corr_key] = corr_ensemble\n\n # If detach_state_interval is set, create another set of agents which will detach inconsistent abstract states\n # every (detach_state_interval) episodes\n self.corr_detach_agents = {}\n for corr_key in self.corrupt_mdp_dict.keys():\n corr_ensemble = []\n for i in range(self.num_agents):\n if self.seed:\n temp_seed = self.seed + i\n else:\n temp_seed = None\n # Create agents according to agent_type parameter\n if self.agent_type == 'standard':\n temp_mdp = self.corrupt_mdp_dict[corr_key].copy()\n agent = Agent(temp_mdp,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n seed=temp_seed)\n corr_ensemble.append(agent)\n else:\n temp_mdp = copy.deepcopy(self.ground_mdp.copy())\n corr_mdp = copy.deepcopy(self.corrupt_mdp_dict[corr_key].copy())\n s_a = copy.deepcopy(corr_mdp.state_abstr)\n if self.agent_type == 'abstraction':\n agent = AbstractionAgent(temp_mdp,\n s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n 
decay_exploration=decay_exploration,\n consistency_check=self.agent_detach,\n detach_reassignment=self.detach_reassignment,\n seed=temp_seed)\n elif self.agent_type == 'tracking':\n agent = TrackingAgent(temp_mdp,\n s_a,\n epsilon=agent_exploration_epsilon,\n alpha=self.agent_learning_rate,\n decay_exploration=decay_exploration,\n consistency_check=self.agent_detach,\n detach_reassignment=self.detach_reassignment)\n else:\n raise ValueError(\"Agent type \\'\" + str(self.agent_type) + \"\\' is not supported\")\n corr_ensemble.append(agent)\n self.corr_detach_agents[corr_key] = corr_ensemble\n print('Done making agents')", "def perform_pdl(self):\n pd = pdl.ParameterDownload()\n if not pd.request():\n tc_fail(\"Failed the PDL\")", "def build_pddl(env: MultiViewEnv, transition_data: pd.DataFrame, operators: List[LearnedOperator], verbose=False,\n **kwargs) -> Tuple[List[List[int]], UniquePredicateList, List[PDDLOperator]]:\n n_jobs = kwargs.get('n_jobs', 1)\n dist_comparator = kwargs.get('dist_comparator', _overlapping_dists)\n vocabulary = UniquePredicateList(dist_comparator)\n # Factorise the state space: see JAIR paper for more\n show(\"Factorising state space...\", verbose)\n factors = _factorise(operators, env.n_dims(kwargs.get('view', View.PROBLEM)), verbose=verbose)\n\n show(\"Final factors:\\n\\n{}\".format(factors), verbose)\n #\n # generate a distribution over start states\n start_symbols = _generate_start_symbols(transition_data, factors, verbose=verbose, **kwargs)\n for new_dist in start_symbols:\n vocabulary.append(new_dist, start_predicate=True)\n\n n_start_propositions = len(vocabulary)\n show(\"Start position generated {} propositions\".format(n_start_propositions), verbose)\n\n # TODO: leaving this out for now\n # # generate a distribution over goal states\n # goal_symbols = _generate_goal_symbols(transition_data, factors, verbose=verbose, **kwargs)\n # for new_dist in goal_symbols:\n # vocabulary.append(new_dist, goal_predicate=True)\n # show(\"Goal condition generated {} propositions\".format(len(vocabulary) - n_start_propositions), verbose)\n\n show(\"Running on {} CPUs\".format(n_jobs), verbose)\n\n show(\"Generating propositions...\", verbose)\n # get propositions directly from effects\n operator_predicates = _generate_vocabulary(vocabulary, operators, factors, verbose=verbose, n_jobs=n_jobs)\n show(\"Total propositions: {}\".format(len(vocabulary)), verbose)\n\n save((factors, vocabulary, operator_predicates))\n (factors, vocabulary, operator_predicates) = load()\n\n show(\"Generating full PDDL...\", verbose)\n\n splits = np.array_split(operators, n_jobs)\n functions = [\n partial(_build_pddl_operators, env, factors, splits[i], vocabulary, operator_predicates, verbose, **kwargs)\n for i in range(n_jobs)]\n schemata = sum(run_parallel(functions), [])\n\n show(\"Found {} PDDL operators\".format(len(schemata)), verbose)\n return factors, vocabulary, schemata", "def SymplecticPropagatorStep1_GPU( X_GPU , dt , dTdp, dVdq):\n NextX = SymplecticDV_GPU(X_GPU, dt,dVdq,1.)\n return SymplecticDT_GPU( NextX ,dt,dTdp,1.)", "def configure_ddp(self):\n self.pre_configure_ddp()\n self._model = DistributedDataParallel(\n LightningDistributedModule(self.model),\n **self._ddp_kwargs,\n )\n self._register_ddp_hooks()", "def executeParamStudy(self):\n\n # --- Create vectors of varying values for each respective parameter that was selected for the study\n if self.inputData.c['paramA']:\n aRange = np.linspace(self.inputData.c['aStart'], self.inputData.c['aEnd'], self.inputData.paramSteps)\n 
else:\n aRange = [self.inputData.c['a']]\n if self.inputData.c['paramB']:\n bRange = np.linspace(self.inputData.c['bStart'], self.inputData.c['bEnd'], self.inputData.paramSteps)\n else:\n bRange = [self.inputData.c['b']]\n\n # --- Run the parameter study for each combination of parameters\n i = 0\n for a in aRange:\n for b in bRange:\n print(\"\\nExecuting combination a: {0} and b: {1}\" .format(a, b))\n i = i + 1\n self.inputData.c['a'] = float(a)\n self.inputData.c['b'] = float(b)\n if i < 10:\n num_str = \"0\" + str(i)\n else:\n num_str = str(i)\n vtk_filename = self.inputData.paramFilename + \"_\" + num_str\n self.execute()\n self.exportVtk(vtk_filename)\n print(\"Successfully completed %i studies.\" % i)\n self.outputData.paramnum = i", "def test_auto_unit_ddp(self) -> None:\n\n spawn_multi_process(\n 2,\n \"gloo\",\n self._test_stochastic_weight_averaging_with_ddp,\n )\n spawn_multi_process(\n 2,\n \"gloo\",\n self._test_ddp_comm_hook,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
NumberOfIterations Number of times to iterate the DDP. Must be an integer. Default is 100.
def set_NumberOfIterations(self,NumberOfIterations):
    self.NumberOfIterations = NumberOfIterations
[ "def getNIterations(self):\n return self.n_iterations", "def num_passed_iterations(self) -> int:\n\n return self._num_passed_iterations", "def getNrTimesteps():\n\n timesteps = 25\n return timesteps", "def iterations_count(loop_node: Node):\n assert loop_node.soft_get('type') == 'Loop'\n\n if loop_node.is_in_port_connected(1):\n execution_condition = loop_node.in_port(1).data.get_value()\n if not is_fully_defined(execution_condition): # dynamic execution condition\n return dynamic_dimension_value\n execution_condition = execution_condition.item()\n if not execution_condition: # 0 iterations\n return 0\n num_iterations = loop_node.in_port(0).data.get_value()\n if not is_fully_defined(num_iterations):\n return dynamic_dimension_value\n if num_iterations is not None:\n num_iterations = num_iterations.item(0)\n # in some ONNX models the num_iterations input is equal to max(int64) meaning dynamic number of iterations\n if num_iterations < 0 or num_iterations == np.iinfo(np.int64).max:\n return dynamic_dimension_value\n return num_iterations", "def nsteps(self):\n return self._nsteps", "def number_of_days(iteration):\r\n return iteration // 24", "def nsteps(self): #: Number of steps of Quad Scan: 20\n return deepcopy(int(self.__nsteps))", "def kdf_max_iterations(self):\n return self.config.getint(self.section, 'kdf_max_iterations')", "def maxIterations(number):\n return lambda iterationNumber, corrections, values, datasetSize: iterationNumber < number", "def n_timesteps(self) -> int:\n if self.total_time < self.timestep:\n warnings.warn(\n f\"No simulation possible: you asked for {self.total_time} \"\n f\"simulation time but the timestep is {self.timestep}\"\n )\n return floor(self.total_time.total_seconds() / self.timestep.total_seconds())", "def max_iterations(self) -> int:\n return self._max_epochs", "def run_iters(self, N, logger=None):\n for i in range(N):\n res = None\n if self.PT :\n self.chain_states = do_inference(self.model, self.rng,\n self.kernel_config, self.iters,\n states_at_temps = self.chain_states)\n else:\n res = do_inference(self.model, self.rng, self.kernel_config, self.iters, \n threadpool = self.threadpool)\n self.iters += 1\n\n if logger:\n logger(self.iters, self.model, res)", "def iterations(N):\n i = N\n count = 1\n while i > 1:\n # print(\"\\nDrawing a sample from [0, %d]\" %(i-1))\n i = R(i)\n # print(\"Counts: \", count)\n # print(\"i: \", i)\n if i == 0:\n break\n else:\n count += 1\n return count", "def min_num_iterations():\n err = 1e6\n count = 0\n ERROR_BOUND = 1e-4\n while (err > ERROR_BOUND):\n bkp_utils = utilities.copy()\n update_utils(utilities, map_shape, map_arr, rewards, final_arr, actions, gamma)\n # calc euclidean error norm\n d = bkp_utils.flatten() - utilities.flatten()\n err = np.sqrt(np.dot(d,d)) \n count += 1\n return count", "def iteratorCount(*args, **kwargs):\n \n pass", "def test(self, failure_rate, iteration_n):\n pass", "def get_number_of_workers():", "def set_geom_max_iterations(self, iterations):\n if not isinstance(iterations, int):\n raise Exception(\"max iterations must be an integer\")\n self.keywords[\"CYCLES\"] = iterations", "def run_for_n(self,steps):\n for step in range(steps):\n self.step_all()\n self.steps += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
U_o Initial input to the system. Must be either None (meaning zero initial input to the system) or an array with shape (Horizon-1,). Default is None.
def set_U_o(self,U_o):
    self.U_o = U_o
    if self.U_o is None:
        self.U = np.zeros((self.Horizon-1,))
    else:
        self.U = self.U_o
[ "def populate_model(self, **kwargs):\n T = self._opts['T']\n nu = self._dims['u']\n if self._changed['T']: # first time def or change of horizon\n # make sure to delete old optimization variables if they exist\n self._removeOld()\n # define optimization variables for input\n u = {}\n for i in range(nu):\n for t in range(T):\n u[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='u[{},{}]'.format(t, i))\n # update the model so it knows the variables\n self._model.update()\n # add control constraints\n umin, umax = self._opts['umin'], self._opts['umax']\n has_umin, has_umax = ~np.isnan(umin), ~np.isnan(umax)\n for i in range(nu):\n for t in np.arange(has_umin.shape[0])[has_umin[:, i]]:\n u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n for t in np.arange(has_umax.shape[0])[has_umax[:, i]]:\n u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._model.update()\n # indicate that model is up to date\n for name in ['T', 'umin', 'umax']:\n self._changed[name] = False\n # make variables accessible as object variables\n self.u = u\n else:\n # change input constraints\n if self._changed['umin']:\n umin = self._opts['umin']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n self._changed['umin'] = False\n if self._changed['umax']:\n umax = self._opts['umax']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._changed['umax'] = False\n # finally update and include all changes\n self._model.update()", "def zero(self):\n v = np.zeros(self.get_dimension())\n self.set_vector(v)", "def populate_model(self, v=None, **kwargs):\n T = self._opts['T']\n nu, nx = self._dims['u'], self._dims['x']\n if self._changed['T']: # first time def or change of horizon\n # make sure to delete old optimization variables if they exist\n self._removeOld()\n # define optimization variables for input and state\n u, x = {}, {}\n for i in range(nu):\n for t in range(T):\n u[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='u[{},{}]'.format(t, i))\n for i in range(nx):\n for t in range(T+1):\n x[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='x[{},{}]'.format(t, i))\n # update the model so it knows the variables\n self._model.update()\n # add control constraints\n umin, umax = self._opts['umin'], self._opts['umax']\n has_umin, has_umax = ~np.isnan(umin), ~np.isnan(umax)\n for i in range(nu):\n for t in np.arange(has_umin.shape[0])[has_umin[:, i]]:\n u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n for t in np.arange(has_umax.shape[0])[has_umax[:, i]]:\n u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n # update intitial state, if provided\n if 'x0' in kwargs:\n self.x0 = kwargs['x0']\n # add constraint on initial state\n self.x0con = {}\n for i in range(nx):\n self.x0con[0, i] = self._model.addConstr(\n lhs=x[0, i], sense=GRB.EQUAL, rhs=self.x0[i],\n name='dyn[0,{}]'.format(i))\n # add system dynamics\n A, B = self._mats['A'], self._mats['B']\n if ('E' in self._mats):\n w = np.inner(v, self._mats['E'])\n else:\n w = np.zeros((T, nx))\n # dynamic evolution of state and output\n self.dyncon = {}\n for t in range(T):\n for i in range(nx):\n # put w on RHS to speed up constraint updates\n self.dyncon[t, i] = self._model.addConstr(\n lhs=(x[t+1, i] - quicksum([A[i, k] * x[t, k]\n for k in range(nx)]) -\n quicksum([B[i, k] * u[t, k] for k in range(nu)])),\n sense=GRB.EQUAL, rhs=w[t, i],\n name='dyn[{},{}]'.format(t+1, i))\n self._model.update()\n # add state constraints\n xmin, xmax = self._opts['xmin'], self._opts['xmax']\n has_xmin, has_xmax = 
~np.isnan(xmin), ~np.isnan(xmax)\n for i in range(nx):\n for t in np.arange(has_xmin.shape[0])[has_xmin[:, i]]:\n x[t+1, i].setAttr(GRB.Attr.LB, xmin[t, i])\n for t in np.arange(has_xmax.shape[0])[has_xmax[:, i]]:\n x[t+1, i].setAttr(GRB.Attr.UB, xmax[t, i])\n self._model.update()\n # indicate that model is up to date\n for name in ['T', 'x0', 'umin', 'umax', 'xmin', 'xmax', 'v']:\n self._changed[name] = False\n # make variables accessible as object variables\n self.u, self.x, self.v = u, x, v\n else:\n # change input constraints\n if self._changed['umin']:\n umin = self._opts['umin']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n self._changed['umin'] = False\n if self._changed['umax']:\n umax = self._opts['umax']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._changed['umax'] = False\n # change state constraints\n if self._changed['xmin']:\n xmin = self._opts['xmin']\n # xmin[np.isnan(xmin)] = - np.Inf\n for i in range(nx):\n for t in range(T):\n self.x[t+1, i].setAttr(GRB.Attr.LB, xmin[t, i])\n self._changed['xmin'] = False\n if self._changed['xmax']:\n xmax = self._opts['xmax']\n # xmax[np.isnan(xmax)] = np.Inf\n for i in range(nx):\n for t in range(T):\n self.x[t+1, i].setAttr(GRB.Attr.UB, xmax[t, i])\n self._changed['xmax'] = False\n # change initial state\n if self._changed['x0']:\n for i in range(nx):\n self._model.getConstrByName('dyn[0,{}]'.format(i)).setAttr(\n GRB.Attr.RHS, self.x0[i])\n self._changed['x0'] = False\n # change effect of disturbance vector on dynamics (if any)\n if v is not None:\n if not np.all(v == self.v):\n self.v = v\n w = np.inner(v, self._mats['E'])\n for i in range(nx):\n for t in range(T):\n self._model.getConstrByName(\n 'dyn[{},{}]'.format(t+1, i)).setAttr(\n GRB.Attr.RHS, w[t, i])\n # finally update and include all changes\n self._model.update()", "def initial_obs(self):\n pass", "def init_input(self):\n m1 = np.diagflat([-1] * (self.n - 1), -1)\n m2 = np.diagflat([-1] * (self.n - 1), 1)\n m3 = np.diagflat([self.gamma] * self.n)\n self.A = np.matrix((m1 + m2 + m3).astype(np.double))\n\n self.b = np.matrix(\n np.full((self.n, 1), self.gamma - 2).astype(np.double)\n )\n self.b[0] = self.gamma - 1\n self.b[self.n - 1] = self.gamma - 1\n\n self.x0 = np.matrix(\n np.full((self.n, 1), 0).astype(np.double)\n )", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y", "def __init__(self,X_o,**params):\n #------------------------------------------------>\n #----------> Possible Parameter Values ---------->\n #------------------------------------------------>\n\n # Horizon - Number of timesteps into the future we wish to program\n self.Horizon = params.get(\"Horizon\",300)\n\n # NumberOfIterations - Number of times to iterate the DDP\n self.NumberOfIterations = params.get(\"NumberOfIterations\",100)\n\n # dt - Discrete timestep\n self.dt = params.get(\"dt\",0.01)\n\n # U_o (U) - Initial input to the system (set to U when intializing DDP)\n self.U_o = params.get(\"U_o\",None)\n if self.U_o is None:\n self.U = np.zeros((self.Horizon-1,))\n else:\n self.U = self.U_o\n\n # p_target - Target state for the system to reach.\n self.p_target = params.get(\"p_target\",np.matrix([[np.pi/2,0]]).T)\n\n # LearningRate - rate at which the system converges to the new input.\n self.LearningRate = params.get(\"LearningRate\",0.2)\n\n # Q_f - Terminal cost matrix\n self.Q_f = 
params.get(\"Q_f\",50*np.matrix(np.eye(2)))\n\n # R - Running cost scalar (only one input).\n self.R = params.get(\"R\",1e-3)\n\n # X_o - Initial state vector is the system\n self.X_o = X_o", "def setInputTimeSeries(self, vTime, vU):\r\n vTime = np.asarray(vTime)\r\n vU = np.asarray(vU)\r\n if self.nInputs==1:\r\n vU = vU.reshape((1,-1))\r\n if vU.shape[0]!=self.nInputs:\r\n raise Exception('Wrong first dimension for Inputs time series ({} instead of {} )'.format(vU.shape[0],self.nInputs))\r\n # Call parent class (create interpolant)\r\n StateSpace.setInputTimeSeries(self, vTime, vU)", "def SetInput1(self, input: 'itkImageUS3') -> \"void\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUS3IUC3_SetInput1(self, input)", "def initialize_t(self):\n\t\tprint(\"Initializing t uniformly\")\n\t\tself.t = 1. / self.V_e_size * np.ones((self.V_f_size, self.V_e_size))", "def set_x0(self,x0):\n self.x0 = np.array(x0)", "def __init__(self, x_0, t_0):\n # set the model parameters for the observer\n self.A = matrix([[-0.01546814,0.00639784],\n [0.03924884,-0.03924884]])\n self.B = matrix([[5.71428571429e-3],[0]])\n self.C = matrix([[0,1]])\n self.L = matrix([[1],[0.2]])\n\n self.x = x_0\n self.t_prev = t_0", "def initialize(self):\n F = len(self.inputs[0])\n min_val = np.min(self.inputs)\n max_val = np.max(self.inputs)\n \n np.random.seed(1)\n if self.init=='random':\n # create 3D array storing initial models\n self.M = np.random.uniform(min_val, max_val, size=(self.J*self.K, F))\n self.M = np.array(self.M)", "def SetInput1(self, input: 'itkImageUS2') -> \"void\":\n return _itkHistogramThresholdImageFilterPython.itkHistogramThresholdImageFilterIUS2IUC2_SetInput1(self, input)", "def __init__(self, init_pose):\n \n print \"(Basic Simulator) Initializing Basic Simulator...\"\n self.pose = array(init_pose) # current pose\n self.curVel = array([0.0,0.0]) # current velocity\n self.time = 0.0 # used to calculate time elapsed\n self.inertia = 1 # scale from 0 to 1, the bigger the scale the smaller the \"inertia\" is ", "def __init__(self):\n self.array = None\n self.target = None", "def __init_U(self):\n for hot_stream in self.H:\n for cold_stream in self.C:\n self.U[(hot_stream, cold_stream)] = min(self.heats[hot_stream], self.demands[cold_stream])", "def zeroUVArea(*args, **kwargs):\n \n pass", "def initAnim():\n solPlt.set_data(x,uO)\n return solPlt," ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
p_target Target state for the system to reach. Must be a (2,1) numpy matrix. Default is numpy.matrix([[np.pi/2,0]]).T.
def test_p_target(self):
    assert hasattr(self,'p_target'), "p_target is undefined."
    assert (str(type(self.p_target))=="<class 'numpy.matrixlib.defmatrix.matrix'>"
            and np.shape(self.p_target)==(2,1)), \
        "p_target must be a (2,1) numpy matrix."
[ "def find_target(self):\n\n # Logic if the path was not provided\n if self.tracking == -1:\n\n #check if current target the robot has just arrived at is the goal\n if self.astar.check_for_goal(self.robot_target_node) == 1:\n arrived = 1\n\n else:\n\n # analyze neighbor nodes and add the lowest cost to open list\n self.astar.analyze_neighbors(self.robot_target_node)\n\n # retrive result form neighbor analysis and set target state\n self.robot_target_node = heapq.heappop(self.astar.open_list)\n self.astar.closed_list.append(self.robot_target_node)\n self.target_state = [self.robot_target_node.x_pos_w,\n self.robot_target_node.y_pos_w]\n\n self.grid.ax.plot(self.robot_target_node.x_pos_w,\n self.robot_target_node.y_pos_w,\n 'ro', markersize=1, zorder=6)\n arrived = 0\n\n # Logic if the path was provided\n else:\n self.tracking += 1\n\n if self.tracking >= len(self.path):\n arrived = 1\n\n else:\n arrived = 0\n self.target_state = [self.path[self.tracking].x_pos_w,\n self.path[self.tracking].y_pos_w]\n return arrived", "def cost_to_target(self, state) -> int:\n raise NotImplementedError", "def get_target_state():\n sdp_state = SDPState()\n errval, errdict = _check_status(sdp_state)\n if errval == \"error\":\n LOG.debug(errdict['reason'])\n return dict(\n current_target_state=\"unknown\",\n last_updated=\"unknown\",\n reason=errdict['reason']\n )\n LOG.debug('Getting target state')\n target_state = sdp_state.target_state\n LOG.debug('Target state = %s', target_state)\n return dict(\n current_target_state=target_state,\n allowed_target_states=sdp_state.allowed_target_states[\n sdp_state.current_state],\n last_updated=sdp_state.target_timestamp.isoformat())", "def compute_target(self, data):\n s,a,r,n,_ = data\n target =r + self.gamma*self.model_target.predict(n) \n target = target - np.mean(target)\n target = target/np.std(target)*20\n return target", "def target_position(self):\n torso_frame = self.data.xmat['torso'].reshape(3, 3)\n torso_pos = self.data.xpos['torso']\n torso_to_target = self.data.site_xpos['target'] - torso_pos\n return torso_to_target.dot(torso_frame)", "def _get_target_location(self):\r\n current_target_locs = np.argwhere(self.rewards == 1)\r\n\r\n if len(current_target_locs) == 0:\r\n raise ValueError(\"No target location on map\")\r\n elif len(current_target_locs) > 1:\r\n raise ValueError(\"Multiple target locations on map\")\r\n \r\n return current_target_locs[0]", "def target_transform(target):\n return torch.from_numpy(np.array(target)).long()", "def target(self, idx):\n if idx == 'coef' or idx == 0:\n return self.V\n elif idx == 'coef1' or idx == 1:\n return self.V1\n raise utils.MFError(\"Unknown specifier for the target matrix.\")", "def target(self):\n return self.params[0] if self.params else None", "def _get_target(self) -> \"adsk::core::Ptr< adsk::core::Point3D >\" :\n return _core.Camera__get_target(self)", "def _initial_target_setup(self):\n # Targets\n self.target = []\n n_targets = self.config['simulation']['n_targets']\n for target in self.config['simulation']['target_building_id']:\n info = {}\n info['target_id'] = target\n info['probability_goals'] = 1 / n_targets\n info['progress_goals'] = 0\n info['probability_goals_indoor'] = 1 / n_targets\n info['progress_goals_indoor'] = 0\n info['defence_perimeter'] = 0\n\n building_info = self.building_info(target)\n info['position'] = building_info['position']\n info['perimeter'] = building_info['perimeter']\n info['area'] = building_info['area']\n info['n_floors'] = building_info['n_floors']\n 
info['n_defence_perimeter'] = building_info['perimeter'] / (\n self.config['ugv']['defense_radius'] * 2)\n\n self.target.append(info)", "def lookup_transition_prob_matrix(self, action, nextState):\n curState = deepcopy(self)\n action = tuple(action)\n if (curState, action, nextState) in GameState.tpm:\n return GameState.tpm[(curState, action, nextState)]\n else:\n prob = self.transition_prob(curState, action, nextState)\n GameState.tpm[(curState, action, nextState)] = prob\n return prob", "def dim_target(self) -> int:\n return 1", "def jacobian(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass", "def set_target(self, target, slew_only=False):\n target = katpoint.Target(target)\n target.body.compute(self.observer)\n return target", "def qp_controller(current_state, desired_state, dt, dim=2):\n\n # torque PD controller values\n wheel_kp = 50.0\n wheel_kd = 10.0\n max_torque = 20.0\n\n # cost on obtaining next state and velocity\n kp = 0.0\n kd = 1.0\n\n # half state length\n hl = len(current_state) / 2\n\n mp = MathematicalProgram()\n\n x = mp.NewContinuousVariables(len(current_state), \"x\")\n u = mp.NewContinuousVariables(1, \"u\")\n force = mp.NewContinuousVariables(8, \"force\")\n\n # set the initial state\n set_initial_state(mp, x, current_state, dim)\n # enforce the dynamics with linearized theta\n state = x + get_nd_dynamics(x, u, force, dim, current_state[dim])*dt\n\n # stay on floor\n # add_floor_constraint(mp, state, dim)\n # for corner to ground\n # fix_corner_to_ground(mp, state, 0, -0.5, dim)\n # don't pull on ground\n dont_pull_on_ground(mp, force, dim)\n # bounded to not leave the ground\n # stay_on_ground(mp, state, dim)\n # only force when on ground\n complimentarity_constraint(mp, state, force, dim)\n\n # linearize theta to set this cost\n add_corner_cost(mp, state, 0, -0.5, dim, current_state[dim])\n\n # unpack the states\n x_s = state[0]\n y = state[1]\n theta = state[dim]\n alpha = state[hl-1]\n xdot = state[0+hl]\n ydot = state[1+hl]\n theta_dot = state[dim+hl]\n alpha_dot = state[-1]\n\n # unpack the desired states\n x_des = desired_state[0]\n y_des = desired_state[1]\n theta_des = desired_state[dim]\n alpha_des = desired_state[hl-1]\n xdot_des = desired_state[0+hl]\n ydot_des = desired_state[1+hl]\n theta_dot_des = desired_state[dim+hl]\n alpha_dot_des = desired_state[-1]\n\n # current_pos = np.asarray([x_s,y,theta,alpha])\n # des_pos = np.asarray([x_des,y_des,theta_des,alpha_des])\n # pos_diff = current_pos - des_pos\n current_pos = np.asarray([x_s,y,theta,0])\n des_pos = np.asarray([x_des,y_des,theta_des,0])\n pos_diff = current_pos - des_pos\n\n # current_vel = np.asarray([xdot,ydot,theta_dot,alpha_dot])\n # des_vel = np.asarray([xdot_des,ydot_des,theta_dot_des,alpha_dot_des])\n # vel_diff = current_vel - des_vel\n current_vel = np.asarray([xdot,ydot,theta_dot,0])\n des_vel = np.asarray([xdot_des,ydot_des,theta_dot_des,0])\n vel_diff = current_vel - des_vel\n\n pos = pos_diff.dot(pos_diff)\n vel = vel_diff.dot(vel_diff)\n\n mp.AddQuadraticCost(kp*pos)\n mp.AddQuadraticCost(kd*vel)\n\n # torque PD controller\n input_torque = wheel_kp*(current_state[dim] - np.pi/4.0) + wheel_kd*current_state[dim+hl]\n input_torque = np.clip(input_torque, -max_torque, max_torque)\n mp.AddConstraint(u[0] == input_torque)\n\n sol = mp.Solve()\n # print(sol)\n\n my_torque = mp.GetSolution(u)\n my_force = mp.GetSolution(force)\n my_start = mp.GetSolution(x)\n\n return my_start, my_torque, my_force", "def grasp_target_pos(self):\n return 
pybullet.getLinkState(self._arm_id, self.get_link_id_from_name('panda_grasptarget'))[0]", "def _get_estimated_target_position(self):\n return self.estimated_targets_pos", "def _get_output_matrix(self, data):\n output_matrix = data[self.actual_output].as_matrix()\n return output_matrix.T" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Q_f Terminal cost matrix. Must be a (2,2) numpy matrix. Default is 50*numpy.matrix(numpy.eye(2)). Each element should be positive.
def test_Q_f(self):
    assert hasattr(self,'Q_f'), "Q_f is undefined."
    assert (str(type(self.Q_f))=="<class 'numpy.matrixlib.defmatrix.matrix'>"
            and np.shape(self.Q_f)==(2,2)), \
        "Q_f must be a (2,2) numpy matrix. Default is 50*numpy.matrix(numpy.eye(2))."
[ "def init_Q(self):\n self.Q = np.matrix(np.tril(self.A))", "def init_Q(self):\n self.Q = np.matrix(np.diagflat(np.diag(self.A)))", "def constructRateMatrix(self):\n # initialize the rate matrix with proper dimension\n self.K = np.zeros((self.num_state, self.num_state), dtype=float) #The dimension of the rate matrix is basically equal to the total number of states\n # loop through cofactor_id in adjacency matrix\n \"\"\"\n Take the adjacency matrix which is weighted by the distance to construct the full rate matrix\n \"\"\"\n for i in range(self.num_cofactor):\n for j in range(i+1, self.num_cofactor): # These two \"for\" loops take care of (upper triangular - diagonal) part of the adjacency matrix\n if self.D[i][j] != 0: # cofactor i and j are connected! !=:not equal to\n cof_i = self.id2cofactor[i]\n cof_f = self.id2cofactor[j] # Finding the name of cofactor of the ijth of the adjacency matrix\n dis = self.D[i][j] #Distance between cof_i and cof_f is the ij th element of the adjacency matrix\n \"\"\"\n Looping through all the possible transfers from donor to acceptor to find their reduction potentials to get deltaG of that transfer. \n You use that deltaG to get the Marcus rate of that transfer, and then add that rate constant to the rate matrix.\n \"\"\"\n for donor_state in range(1, cof_i.capacity+1): #This is correct!!!! Python is weird #cof.capacity=maximum number of electrons the cofactor can occupy\n for acceptor_state in range(0, cof_f.capacity): #This is correct!!!! Python is weird\n deltaG = cof_i.redox[donor_state-1] - cof_f.redox[acceptor_state] #This is correct!!!! Python is weird\n k = self.ET(deltaG, dis, self.reorgE, self.beta, self.V)\n self.connectStateRate(i, donor_state, j, acceptor_state, k, deltaG,1) #Adding the rate constant to rate matrix. 
The last parameter is 1 because these are all 1-electron transfers!\n # loop through reservoirInfo to add reservoir-related rate\n for reservoir_id, info in self.reservoirInfo.items():\n name, cofactor, redox_state, num_electron, deltaG, rate = info\n cof_id = self.cofactor2id[cofactor]\n final_redox_state = redox_state - num_electron\n self.connectReservoirRate(cof_id, redox_state, final_redox_state, rate, deltaG)", "def full_matrix(self):\n self.sim_array = self.b + self.b_u[:,np.newaxis] + self.b_i[np.newaxis:,] + self.P.dot(self.Q.T)\n return self.sim_array", "def return_quadratic_cost_function_expansion_variables(self):\n # returns a list of length len(Time)-1, each element with shape (1,1), where n is the number of states.\n l = list(\n map(\n lambda x,u: u.T * self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,1), where n is the number of states.\n lx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,1), where n is the number of states.\n lu = list(\n map(\n lambda x,u: self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,n), where m is the number of inputs and n is the number of states.\n lux = list(\n map(\n lambda x,u: np.matrix(np.zeros((1,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,m), where n is the number of states and m is the number of inputs.\n lxu = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,m), where m is the number of inputs.\n luu = list(\n map(\n lambda x,u: self.R*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,n), where n is the number of states.\n lxx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n return(l,lx,lu,lux,lxu,luu,lxx)", "def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])", "def _gen_QDmatrix(self):\n\n def q_delta(tau):\n n = tau.shape[0]\n Q_delta = np.zeros((n, n))\n i = 0\n for t in tau:\n Q_delta[i:, i] = np.ones(n - i) * t\n i += 1\n # print Q_delta\n return Q_delta\n\n # self.left_is_node = False\n # self.right_is_node = True\n\n\n if self.left_is_node and self.right_is_node:\n # e.g. GaussLobatto\n tau = np.concatenate([np.zeros(1), self.nodes[1:] - self.nodes[:-1]])\n elif not self.left_is_node and self.right_is_node:\n # e.g. GaussRadau_Right\n tau = np.concatenate([self.nodes[0:1], self.nodes[1:] - self.nodes[:-1]])\n elif self.left_is_node and not self.right_is_node:\n # e.g. GaussRadau_Left\n tau = np.concatenate([self.nodes[1:] - self.nodes[:-1], 1. - self.nodes[-1:]])\n else:\n # e.g. GaussLegendre (using right deltas)\n tau = np.concatenate([self.nodes[1:] - self.nodes[:-1], 1. - self.nodes[-1:]])\n # e.g. 
GaussLegendre (using left deltas)\n # tau = np.concatenate([self.nodes[0:1], self.nodes[1:]-self.nodes[:-1]])\n\n return q_delta(tau)", "def generate_Q(transition_matrix, q_power):\n n = transition_matrix.shape[0]\n q_tmp = tr_0 = identity(n)\n for k in range(1, q_power + 1):\n q_tmp += LA.matrix_power(transition_matrix, k)\n return array(normalize(q_tmp, norm='l1', axis=1), dtype='float64') # float 32 works inappropriately", "def make_q(self):\n self.q = np.zeros((self.Nx+2,self.Ny+2))\n\n\n for i in range(1, self.Nx+1):\n for j in range(1, self.Ny+1):\n self.q[i,j] = self.qq(self.x[i-1], self.y[j-1])\n\n for i in range(1,self.Nx+1):\n self.q[i,0] = 2*self.q[i,1] - self.q[i,2]\n self.q[i,self.Ny +1] = 2*self.q[i,self.Ny] - self.q[i,self.Ny-1]\n\n\n for j in range(1,self.Ny+1):\n self.q[0,j] = 2*self.q[1,j] - self.q[2,j]\n self.q[self.Nx+1,j] = 2*self.q[self.Nx,j] - self.q[self.Nx-1,j]\n\n \"\"\"\n\n self.q[1:-1, 1:-1] = self.qq(self.X, self.Y)\n self.q[1:-1, 0] = 2*self.q[1:-1, 1] - self.q[1:-1, 2]\n self.q[1:-1, self.Ny +1] = 2*self.q[1:-1, self.Ny] - self.q[1:-1, self.Ny-1]\n self.q[0, 1:-1] = 2*self.q[1, 1:-1] - self.q[2, 1:-1]\n self.q[self.Nx+1, 1:-1] = 2*self.q[self.Nx, 1:-1] - self.q[self.Nx-1, 1:-1]\n \"\"\"\n self.stability()", "def calc_target_q(self, **kwargs):\n t_q_1, e_q_1 = self.sess.run([self.t_q, self.e_q], {self.obs_input: kwargs['obs']})\n\n feed_dict = {\n self.obs_input_M: kwargs['obs'],\n }\n if self.use_mf:\n assert kwargs.get('prob', None) is not None\n feed_dict[self.act_prob_input] = kwargs['prob']\n\n t_q_M, e_q_M = self.sess.run([self.t_q_M, self.e_q_M], feed_dict=feed_dict)\n ##e_q = e_q_1 + e_q_M\n ##t_q = t_q_1 + t_q_M\n act_idx_1 = np.argmax(e_q_1, axis=1)\n act_idx_M = np.argmax(e_q_M, axis=1)\n q_values_1 = t_q_1[np.arange(len(t_q_1)), act_idx_1]\n q_values_M = t_q_M[np.arange(len(t_q_M)), act_idx_M]\n\n target_q_value_1 = kwargs['rewards'] + (1. - kwargs['dones']) * q_values_1.reshape(-1) * self.gamma\n target_q_value_M = kwargs['rewards'] + (1. - kwargs['dones']) * q_values_M.reshape(-1) * self.gamma\n\n return target_q_value_1,target_q_value_M", "def testQMatrix(self):\n # The data we have available is only accurate to the 4th decimal place. This should\n # be sufficient. 
kx and ky are given in the setup, fixed by our angles theta and phi.\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n kx = 1.0006;\n ky = 0.4247;\n\n # Zeroth, we actually have data for our gap layer\n er = 1.0 + sq(kx) + sq(ky);\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 1.1804],[-2.0013, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # First, we have some data for layer 1\n er = 2.0;\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 0.9987],[-1.8196, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # Now, we have some data for layer 2.\n er = 1.0;\n ur = 3.0;\n\n Q_actual = complexArray([[0.1417, 0.6662],[-0.9399, -0.1417]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);", "def jacobian(Q, d):\n return zeros([n, n])", "def test_to_matrix_5q(self):\n labels = [\"IXIXI\", \"YZIXI\", \"IIXYZ\"]\n targets = [pauli_mat(i) for i in labels]\n with self.assertWarns(DeprecationWarning):\n values = PauliTable.from_labels(labels).to_matrix()\n self.assertTrue(isinstance(values, list))\n for target, value in zip(targets, values):\n self.assertTrue(np.all(value == target))", "def _to_matrix_func(self) -> np.ndarray:\n empty_matrix = np.diag(np.ones(4, dtype=float))\n\n empty_matrix[2, 3] += self._meta[\"bias\"]\n\n return empty_matrix", "def test_density_matrix_qnode_tf_jit(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev, interface=\"tf\")\n def circuit(x):\n qml.IsingXX(x, wires=[0, 1])\n return qml.state()\n\n density_matrix = tf.function(\n qml.qinfo.reduced_dm(circuit, wires=[0]),\n jit_compile=True,\n input_signature=(tf.TensorSpec(shape=(), dtype=tf.float32),),\n )\n density_matrix = density_matrix(tf.Variable(0.0, dtype=tf.float32))\n assert np.allclose(density_matrix, [[1, 0], [0, 0]])", "def constraint_matrix(self, fq):\n frozen = self.frozen_intco_list\n\n range_frozen = self.ranged_frozen_intco_list(fq)\n frozen = np.logical_or(frozen, range_frozen)\n\n if np.any(frozen):\n return np.diagflat(frozen)\n else:\n return None", "def _compute_Q_vector(self):\n\n self.QVector = list(it.product([fsc.Q for fsc in self.fscs]))", "def test_fcom_simple(self):\n\n rate_matrix = np.array([[-1, 1, 0], [100, -300, 200], [0, 1, -1]]).astype(float)\n K = scipy.linalg.expm(rate_matrix)\n TPT = TransitionPathTheory(K, [0], [1])\n\n fcom = TPT.fcom\n ref_fcom = np.array([0, 1., 0.0156032])\n\n self.assertTrue(np.allclose(fcom, ref_fcom, rtol=1.e-5))", "def liste_Qx (self):\n liste_matrices_Qx = [self.Qx(self.liste_J[self.liste_angles[pli]][1],\n self.liste_J[self.liste_angles[pli]][3],\n self.liste_Q0[pli]) \n for pli in range(len(self.liste_angles))]\n return liste_matrices_Qx" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in the state vector (x), the input vector (u) and returns the discretized and linearized state matrix, Phi.
def return_Phi(self,x,u):
    # assert (str(type(x)) in ["<class 'numpy.ndarray'>"]
    #     and np.shape(x)==(2,)), "Error with the type and shape of x ["+ return_Phi.__name__+"()]."
    # assert str(type(u)) in ["<class 'int'>",
    #     "<class 'float'>",
    #     "<class 'numpy.float'>",
    #     "<class 'numpy.float64'>",
    #     "<class 'numpy.int32'>",
    #     "<class 'numpy.int64'>"],\
    #     "u must be a number. Not " + str(type(u)) + "."

    # Removed the U split into two scalars because U is already a scalar.

    h1 = np.array([h,0])
    h2 = np.array([0,h])

    # Build the dFx matrix

    dFx = np.zeros((2,2))

    # dFx[0,0] = 0 # dF1/dx1⋅dx1 = (F1(x,u)-F1(x-h1,u))/h = 0
    dFx[0,1] = 1 # dF1/dx2⋅dx2 = (F1(x,u)-F1(x-h2,u))/h = 1

    # F2 is the angular acceleration of the pendulum.
    dFx[1,0] = (self.F2(x,u)-self.F2(x-h1,u))/h
    dFx[1,1] = (self.F2(x,u)-self.F2(x-h2,u))/h

    Phi = np.matrix(np.eye(2) + dFx*self.dt)
    # assert np.shape(Phi)==(2,2) \
    #     and str(type(Phi))=="<class 'numpy.matrixlib.defmatrix.matrix'>", \
    #     "Phi must be a (2,2) numpy matrix. Not " + str(type(Phi)) + " of shape " + str(np.shape(Phi)) + "."
    return(Phi)
[ "def gate_decomposition(u_):\n assert (u_.shape == (2, 2))\n assert(is_unitary(u_))\n\n det = np.linalg.det(u_)\n delta = 0.5 * cmath.phase(det)\n p = phase(delta)\n\n u = np.linalg.inv(p) @ u_\n\n if u[0][0] == 0:\n theta = math.pi\n alpha = 2 / 1j * cmath.log(u[0][1])\n beta = 0\n elif u[0][1] == 0:\n theta = 0\n alpha = 2 / 1j * cmath.log(u[0][0])\n beta = 0\n else:\n arg = float((u[0][0] * u[1][1]) ** 0.5)\n theta = 2 * math.acos(arg)\n a_p_b = 2 / 1j * cmath.log(u[0][0] / math.cos(theta / 2))\n a_m_b = 2 / 1j * cmath.log(u[0][1] / math.sin(theta / 2))\n\n alpha = 0.5 * (a_p_b + a_m_b)\n beta = 0.5 * (a_p_b - a_m_b)\n\n z1 = z_rotation(alpha)\n y = y_rotation(theta)\n z2 = z_rotation(beta)\n\n assert(np.allclose(p @ z1 @ y @ z2, u_ ))\n\n return delta, alpha, theta, beta", "def forward(s, x, u=None):\n # mean control\n mu = s.m(x)\n # Build u_theta(cdot | x)\n n = Normal(mu, s.std)\n # sample a u if we are simulating the system, use the argument\n # we are calculating the policy gradient\n if u is None:\n u = n.rsample()\n logp = n.log_prob(u)\n return u, logp", "def return_Phi(X,U,dt):\n assert (str(type(X)) in [\"<class 'numpy.ndarray'>\"]\n and np.shape(X)==(2,)), \"Error with the type and shape of X [\"+ return_Phi.__name__+\"()].\"\n assert (str(type(U)) in [\"<class 'numpy.ndarray'>\"]\n and np.shape(U)==(2,)), \"Error with the type and shape of U [\"+ return_Phi.__name__+\"()].\"\n\n # Removed the U split into two scalars because U is already a scalar.\n\n h1 = np.array([h,0])\n h2 = np.array([0,h])\n\n # Build the dFx matrix\n\n dFx = np.zeros((2,2))\n\n # dFx[0,0] = 0 # dF1/dx1⋅dx1 = (F1(X,U)-F1(X-h1,U))/h = 0\n dFx[0,1] = 1 # dF1/dx4⋅dx4 = (F1(X,U)-F1(X-h2,U))/h = 1\n\n # F2 is the angular acceleration.\n dFx[1,0] = (F2(X,U)-F2(X-h1,U))/h\n dFx[1,1] = (F2(X,U)-F2(X-h2,U))/h\n\n Phi = np.matrix(np.eye(2) + dFx*dt)\n assert np.shape(Phi)==(2,2) \\\n and str(type(Phi))==\"<class 'numpy.matrixlib.defmatrix.matrix'>\", \\\n \"Phi must be a (2,2) numpy matrix. 
Not \" + str(type(Phi)) + \" of shape \" + str(np.shape(Phi)) + \".\"\n return(Phi)", "def calculate(self, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass", "def HamSN1D_Hamiltonian(t, u):\n x, y = u.T\n return 0.5*y*y + x**3/3 + 0.5*x*x", "def dcmotor_model(x, u=0.0, Vb=8.00):\n v = Gu * (mu * Vb - Vo) * u\n\n return np.array([x[1],\n -(fm / Jm) * x[1] + (Kt / Jm) * x[2],\n -(Kb / L) * x[1] - (Rm / L) * x[2] + v / L\n ])", "def Duffing1D_Hamiltonian(t, u, PARAMETERS = [1, 1]):\n x, p_x = u.T\n alpha, beta = PARAMETERS\n return 0.5*(p_x**2 - alpha*x**2 + 0.5*beta*x**4)", "def discretize_state(self, state):\n x, x_dot, phi, phi_dot = state\n if x > 1.:\n x = 1\n elif x < -1.:\n x = -1\n else: \n x = 0\n\n if x_dot < -0.1:\n x_dot = -2\n elif x_dot > 0.1:\n x_dot = 2\n elif x_dot < -0.03:\n x_dot = -1\n elif x_dot > 0.03:\n x_dot = 1\n else:\n x_dot = 0\n\n if phi > 0.1:\n phi = 1\n elif phi < -0.1:\n phi = -1\n else: \n phi = 0\n\n if phi_dot < -0.1:\n phi_dot = -2\n elif phi_dot > 0.1:\n phi_dot = 2\n elif phi_dot < -0.03:\n phi_dot = -1\n elif phi_dot > 0.03:\n phi_dot = 1\n else:\n phi_dot = 0\n \n return (x, x_dot, phi, phi_dot)", "def f(self, x , u , t = 0 ):\n \n # from state vector (x) to angle and speeds (q,dq)\n [ q , dq ] = self.x2q( x ) \n \n # compute joint acceleration \n ddq = self.ddq( q , dq , u , t ) \n \n # from angle and speeds diff (dq,ddq) to state vector diff (dx)\n dx = self.q2x( dq , ddq ) \n \n return dx", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y", "def jacobian(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass", "def update_state(self, u=0, y=0):\n\n fb = self.w_fb * (np.ones((self.n_res, 1)) * y + self.rng.randn(self.n_res, 1) * self.scale_feedback_noise)\n fb = fb.sum(axis=1).reshape(-1,1)\n\n #create noise term\n noise = self.rng.randn(self.n_res, 1) * self.scale_noise\n\n # Reservoir update equation if no input\n if self.n_in == 0:\n # x_new = np.dot(self.w_res, self.x) + self.w_bias + np.dot(self.w_fb, y)\n # Tracer()()\n x_new = np.dot(self.w_res*self.p_connect_res, self.x) + self.w_bias + fb*self.p_connect_fb + noise\n\n # Reservoir update equation if input\n else:\n x_new = np.dot(self.w_res*self.p_connect_res, self.x) + self.w_bias + fb*self.p_connect_fb + np.dot(self.w_in, u) + noise\n # leakage\n\n self.x = (1 - self.leakage) * self.x + self.leakage * self.tran_fct(x_new)\n\n return", "def uxv_cart(u, v):\n w_x = (u[1]*v[2] - v[1]*u[2])\n w_y = (-u[0]*v[2] + u[2]*v[0])\n w_z = (u[0]*v[1] - u[1]*v[0])\n return np.array([w_x, w_y, w_z])", "def least_squares_jacobian(x, u, y):\n J = np.empty((u.size, x.size))\n den = u ** 2 + x[2] * u + x[3]\n num = u ** 2 + x[1] * u\n J[:, 0] = num / den\n J[:, 1] = x[0] * u / den\n J[:, 2] = -x[0] * num * u / den ** 2\n J[:, 3] = -x[0] * num / den ** 2\n return J", "def covarMatrix(x):\n return np.matrix(x - np.mean(x, axis=0)[np.newaxis, :]).T * np.matrix(x - np.mean(x, axis=0)[np.newaxis, :])", "def U(i, g, X) :\n d_i = sum(g[i]) # degree of i\n\n direct_u = sum([g[i, j] * u(i, j, X) for j in range(n)])\n\n mutual_u = sum([g[i, j] * g[j, i] * u(i, j, X) for j in range(n)])\n\n indirect_u = 0\n for j in range(n) :\n for k in range(n) :\n if k == i or k == j :\n continue\n else :\n indirect_u += g[i, j] * g[j, k] * u(i, k, X)\n\n return direct_u + gamma * mutual_u + delta * indirect_u - d_i ** alpha * c", "def solve_U(U, b):\n m, k = b.shape\n x = np.zeros((m,k))\n x[-1,:] = b[-1,:] / U[-1,-1]\n for i 
in range(m-2,-1,-1):\n x[i,:] = (b[i,:] - U[i, i+1:]@x[i+1:,:]) / U[i,i]\n return x", "def _kernel1d(self, u):\n # Normalize Lanczos to unit sum over kernel elements\n k = np.sinc(u) * np.sinc(u/self.order)\n return k / np.sum(k,axis=1)[:,np.newaxis]", "def LinearSystem(self):\n # assembly matrix of linear system\n # to solve u(t) based on u(t-1) and u(t-2)\n # the matrix includes all future values of u\n # in the entire grid, so size is the number of cells\n # start with zeros that is also the boundary condition u(t)=0\n self.mUt = np.zeros([self.Nz*self.Nx, self.Nz*self.Nx])\n\n # assembly linear system, the linear system\n # ignores external part of the grid = locked boundary\n # ln go through all the cells in the grid Ut\n # each cell gives one equation (line)\n for Ln in range(0, self.Nz*self.Nx, 1):\n # 1.0*u(x-1,z) + Gamma(x,z)*u(x,z) + 1.0*u(x+1,z) + 1.0*u(x,z-1) + 1.0*u(x,z+1)\n # turn the indices to the one of original matrix\n i = Ln%self.Nx\n k = Ln/self.Nx\n\n self.mUt[Ln][Ln] = self.Gamma(k, i)\n #is this right?\n if(i-1 >= 0): # u(x-1,z) inside grid in I\n self.mUt[Ln][Ln-1] = 1.0\n if(i+1 < self.Nx): # u(x+1,z) inside grid in I\n self.mUt[Ln][Ln+1] = 1.0\n if(k-1 >= 0): #u(x,z-1)\n self.mUt[Ln][Ln-self.Nx]= 1.0\n if(k+1 < self.Nz): #u(x,z+1)\n self.mUt[Ln][Ln+self.Nx]= 1.0\n\n return self.mUt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in the input U and the corresponding output X, as well as dt, and returns lists that contain the coefficient matrices for the quadratic expansion of the cost function (l(x,u)) for each timestep in range(len(Time)-1).
def return_quadratic_cost_function_expansion_variables(self):
    # returns a list of length len(Time)-1, each element with shape (1,1), where n is the number of states.
    l = list(
        map(
            lambda x,u: u.T * self.R * u * self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    # returns a list of length len(Time)-1, each element with shape (n,1), where n is the number of states.
    lx = list(
        map(
            lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    # returns a list of length len(Time)-1, each element with shape (m,1), where n is the number of states.
    lu = list(
        map(
            lambda x,u: self.R * u * self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    # returns a list of length len(Time)-1, each element with shape (m,n), where m is the number of inputs and n is the number of states.
    lux = list(
        map(
            lambda x,u: np.matrix(np.zeros((1,2)))*self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    # returns a list of length len(Time)-1, each element with shape (n,m), where n is the number of states and m is the number of inputs.
    lxu = list(
        map(
            lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    # returns a list of length len(Time)-1, each element with shape (m,m), where m is the number of inputs.
    luu = list(
        map(
            lambda x,u: self.R*self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    # returns a list of length len(Time)-1, each element with shape (n,n), where n is the number of states.
    lxx = list(
        map(
            lambda x,u: np.matrix(np.zeros((2,2)))*self.dt,
            self.X[:,1:].T,
            self.U.T
        )
    )

    return(l,lx,lu,lux,lxu,luu,lxx)
[ "def test_quadratic_cost_function_expansion_variables(\n self,l,\n lx,lu,\n lux,lxu,\n luu,lxx):\n\n Time = self.return_time_array()\n\n # l should be a list of length len(Time)-1, with each element with shape (1,1), where n is the number of states.\n assert len(l)==len(Time)-1, \"l has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(l)) + \".\"\n assert all([el.shape==(1,1) for el in l]), \"Elements of l have incorrect shape. Should be of length (1,1). Check l function.\"\n\n # lx should be a list of length len(Time)-1, with each element with shape (n,1), where n is the number of states.\n assert len(lx)==len(Time)-1, \"lx has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lx)) + \".\"\n assert all([el.shape==(2,1) for el in lx]), \"Elements of lx have incorrect shape. Should be of length (2,1). Check lx function.\"\n\n # lu should be a list of length len(Time)-1, with each element with shape (m,1), where n is the number of states.\n assert len(lu)==len(Time)-1, \"lu has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lu)) + \".\"\n assert all([el.shape==(1,1) for el in lu]), \"Elements of lu have incorrect shape. Should be of length (1,1). Check lu function.\"\n\n # lux should be a list of length len(Time)-1, with each element with shape (m,n), where m is the number of inputs and n is the number of states.\n assert len(lux)==len(Time)-1, \"lux has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lux)) + \".\"\n assert all([el.shape==(1,2) for el in lux]), \"Elements of lux have incorrect shape. Should be of length (1,1). Check lux function.\"\n\n # lxu should be a list of length len(Time)-1, with each element with shape (n,m), where n is the number of states and m is the number of inputs.\n assert len(lxu)==len(Time)-1, \"lxu has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lxu)) + \".\"\n assert all([el.shape==(2,1) for el in lxu]), \"Elements of lxu have incorrect shape. Should be of length (2,1). Check lxu function.\"\n\n # luu should be a list of length len(Time)-1, with each element with shape (m,m), where m is the number of inputs.\n assert len(luu)==len(Time)-1, \"luu has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(luu)) + \".\"\n assert all([el.shape==(1,1) for el in luu]), \"Elements of luu have incorrect shape. Should be of length (1,1). Check luu function.\"\n\n # lxx should be a list of length len(Time)-1, with each element with shape (n,n), where n is the number of states.\n assert len(lxx)==len(Time)-1, \"lxx has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lxx)) + \".\"\n assert all([el.shape==(2,2) for el in lxx]), \"Elements of lxx have incorrect shape. Should be of length (2,2). 
Check lxx function.\"", "def __call__(self,X,t):\n xvals = X[:3]-self.locs\n rvals = numpy.sqrt( (xvals**2).sum(1) )\n \n dVdt = sum([ self.halos[i].accel(rvals[i])*xvals[i]/rvals[i] \\\n for i in range(self.N) ])\n return numpy.concatenate([X[3:] * 1E3 * yr/kpc,\n dVdt])", "def _time_derivative(self,xi):\n return np.dot(sym.Jacobian(xi,self.q_o),self.v_o)+np.dot(sym.Jacobian(xi,self.q_m),self.u_m)", "def V(X,w,t):\r\n results = []\r\n amplitudes = []\r\n phases = []\r\n for x in X:\r\n results.append((x)*(e**(1j*w*t)))\r\n amplitudes.append(abs(x))\r\n phases.append(phase((x)*(e**(1j*w*t))))\r\n return [results,amplitudes,phases]", "def quadratic_cost_approximation_l1(xu: np.ndarray, t: np.ndarray, w: np.ndarray, alpha: float) -> QuadraticCosts:\n dXU = xu.shape[-1]\n\n abs_squared = (t - xu) ** 2 + alpha\n abs_d = np.sqrt(abs_squared)\n abs_d_cubed = abs_squared * abs_d\n\n # C = diag(alpha * w / )\n C = np.einsum(\"...i,ij->...ij\", alpha * w / abs_d_cubed, np.eye(dXU))\n c = w * ((xu - t) / abs_d - alpha * xu / abs_d_cubed)\n cc = np.sum(w * (abs_d + xu * (t - xu) / abs_d + alpha * xu**2 / abs_d_cubed / 2), axis=-1)\n\n return QuadraticCosts(C, c, cc)", "def return_Phi(X,U,dt):\n assert (str(type(X)) in [\"<class 'numpy.ndarray'>\"]\n and np.shape(X)==(2,)), \"Error with the type and shape of X [\"+ return_Phi.__name__+\"()].\"\n assert (str(type(U)) in [\"<class 'numpy.ndarray'>\"]\n and np.shape(U)==(2,)), \"Error with the type and shape of U [\"+ return_Phi.__name__+\"()].\"\n\n # Removed the U split into two scalars because U is already a scalar.\n\n h1 = np.array([h,0])\n h2 = np.array([0,h])\n\n # Build the dFx matrix\n\n dFx = np.zeros((2,2))\n\n # dFx[0,0] = 0 # dF1/dx1⋅dx1 = (F1(X,U)-F1(X-h1,U))/h = 0\n dFx[0,1] = 1 # dF1/dx4⋅dx4 = (F1(X,U)-F1(X-h2,U))/h = 1\n\n # F2 is the angular acceleration.\n dFx[1,0] = (F2(X,U)-F2(X-h1,U))/h\n dFx[1,1] = (F2(X,U)-F2(X-h2,U))/h\n\n Phi = np.matrix(np.eye(2) + dFx*dt)\n assert np.shape(Phi)==(2,2) \\\n and str(type(Phi))==\"<class 'numpy.matrixlib.defmatrix.matrix'>\", \\\n \"Phi must be a (2,2) numpy matrix. 
Not \" + str(type(Phi)) + \" of shape \" + str(np.shape(Phi)) + \".\"\n return(Phi)", "def least_squares_jacobian(x, u, y):\n J = np.empty((u.size, x.size))\n den = u ** 2 + x[2] * u + x[3]\n num = u ** 2 + x[1] * u\n J[:, 0] = num / den\n J[:, 1] = x[0] * u / den\n J[:, 2] = -x[0] * num * u / den ** 2\n J[:, 3] = -x[0] * num / den ** 2\n return J", "def least_squares_lti_sys_id(x, u, x_next):\n n = x.shape[0]\n m = u.shape[0]\n l = x.shape[1]\n if x_next.shape != (n, l):\n raise Exception('x_next must be (n, l)')\n if x.shape != (n, l):\n raise Exception('x must be (n, l)')\n if u.shape != (m, l):\n raise Exception('u must be (m, l)')\n\n obs_vec = x_next.T.flatten()\n obs_vec = np.reshape(obs_vec, (-1, 1))\n\n mat = np.zeros((n*l, n*n + n*m))\n\n count = 0\n for i in range(l):\n for j in range(n):\n idx1 = j*n\n idx2 = (j+1)*n\n mat[count, idx1:idx2] = x[:, i]\n idx1 = n*n + j*m\n idx2 = n*n + (j+1)*m\n mat[count, idx1:idx2] = u[:, i]\n count += 1\n\n params, _, _, _ = np.linalg.lstsq(mat, obs_vec)\n\n A_est = np.reshape(params[:n*n], (n, n))\n B_est = np.reshape(params[n*n:], (n, m))\n\n return A_est, B_est", "def dU_dx(U,z):\n\treturn [U[1], (g*V*(p_atm-p_He)-(m_b+m_p)*g+(1/2)*p_atm*U[1]**2*c_d*S)/(m_He+m_b+m_p)]", "def LinearSystem(self):\n # assembly matrix of linear system\n # to solve u(t) based on u(t-1) and u(t-2)\n # the matrix includes all future values of u\n # in the entire grid, so size is the number of cells\n # start with zeros that is also the boundary condition u(t)=0\n self.mUt = np.zeros([self.Nz*self.Nx, self.Nz*self.Nx])\n\n # assembly linear system, the linear system\n # ignores external part of the grid = locked boundary\n # ln go through all the cells in the grid Ut\n # each cell gives one equation (line)\n for Ln in range(0, self.Nz*self.Nx, 1):\n # 1.0*u(x-1,z) + Gamma(x,z)*u(x,z) + 1.0*u(x+1,z) + 1.0*u(x,z-1) + 1.0*u(x,z+1)\n # turn the indices to the one of original matrix\n i = Ln%self.Nx\n k = Ln/self.Nx\n\n self.mUt[Ln][Ln] = self.Gamma(k, i)\n #is this right?\n if(i-1 >= 0): # u(x-1,z) inside grid in I\n self.mUt[Ln][Ln-1] = 1.0\n if(i+1 < self.Nx): # u(x+1,z) inside grid in I\n self.mUt[Ln][Ln+1] = 1.0\n if(k-1 >= 0): #u(x,z-1)\n self.mUt[Ln][Ln-self.Nx]= 1.0\n if(k+1 < self.Nz): #u(x,z+1)\n self.mUt[Ln][Ln+self.Nx]= 1.0\n\n return self.mUt", "def solver(I, w, dt, T, V, f):\n dt = float(dt)\n Nt = int(round(T/dt)) # 100000\n u = np.zeros(Nt+1)\n t = np.linspace(0, Nt*dt, Nt+1)\n\n u[0] = I\n u[1] = u[0] + dt*V + 0.5*(f(t[0]) - w**2*u[0])*dt**2#compute first step by 1'st order difference\n for n in range(1, Nt):\n u[n+1] = (f(t[n])-w**2*u[n])*dt**2 + 2*u[n]-u[n-1]\n return u, t", "def solver(I, V, f, w, dt, T):\n dt = float(dt)\n Nt = int(round(T/dt))\n u = np.zeros(Nt+1)\n t = np.linspace(0, Nt*dt, Nt+1)\n\n u[0] = I\n u[1] = u[0] - 0.5*dt**2*w**2*u[0] + dt*V + 0.5*dt**2*f(t[0])\n for n in range(1,Nt):\n u[n+1] = dt**2*f(t[n]) + 2*u[n] - u[n-1] - dt**2*w**2*u[n]\n return u,t", "def cost_function(x, svh, svv, theta, gamma, prior_mean, prior_unc, unc=0.8):\n # Fit to the observations\n cost1, dcost1 = cost_obs(x, svh, svv, theta, unc=unc)\n # Fit to the prior\n cost2, dcost2 = cost_prior(x, svh, svv, theta, prior_mean, prior_unc)\n # Smooth evolution of LAI\n n_obs = len(svv)\n lai = x[(6 + n_obs) :]\n cost3, dcost3 = cost_smooth(lai, gamma)\n tmp = np.zeros_like(dcost1)\n tmp[(7 + n_obs) : -1] = dcost3\n return cost1 + cost2 + cost3, dcost1 + dcost2 + tmp", "def calculate_Ct(self, time):\n\n # define the method to calculate C(t)\n ct_list = self.delta**2 * 
np.exp(-time / self.tau)\n\n t_vs_Ct = np.column_stack((time, ct_list))\n\n return t_vs_Ct", "def cost_obs(x, svh, svv, theta, unc=0.5):\n n_obs = svh.shape[0]\n A_vv, B_vv, R_vv, A_vh, B_vh, R_vh = x[:6]\n vsm = x[6 : (6 + n_obs)]\n lai = x[(6 + n_obs) :]\n sigma_vv, dvv = wcm(A_vv, lai, B_vv, lai, vsm, R_vv,\n pol=\"VV\", theta=theta)\n sigma_vh, dvh = wcm(A_vh, lai, B_vh, lai, vsm, R_vh,\n pol=\"HV\", theta=theta)\n diff_vv = svv - sigma_vv\n diff_vh = svh - sigma_vh\n cost = 0.5 * (diff_vv ** 2 + diff_vh ** 2) / (unc ** 2)\n jac = np.concatenate(\n [\n np.array(\n [\n np.sum(dvv[0] * diff_vv), # A_vv\n np.sum(dvv[2] * diff_vv), # B_vv\n np.sum(dvv[5] * diff_vv), # R_vv\n np.sum(dvh[0] * diff_vh), # A_vh\n np.sum(dvh[2] * diff_vh), # B_vh\n np.sum(dvh[5] * diff_vh), # R_vh\n ]\n ),\n dvv[4] * diff_vv + dvh[4] * diff_vh, # vsm\n (dvv[1] + dvv[3]) * diff_vv + (dvh[1] + dvh[3]) * diff_vh, # LAI\n ]\n )\n return cost.sum(), -jac / (unc ** 2)", "def get_cost_updates(self,corruption_level,learning_rate):\n \n tilde_x=self.get_corrupted_input(self.x, corruption_level)\n y=self.get_hidden_values(tilde_x)\n z=self.get_reconstructed_input(y)\n # note : we sum over the size of a datapoint; if we are using\n # minibatches, L will be a vector, with one entry per\n # example in minibatch\n L=-T.sum(self.x*T.log(z)+(1-self.x)*T.log(1-z),axis=1)\n # note : L is now a vector, where each element is the\n # cross-entropy cost of the reconstruction of the\n # corresponding example of the minibatch. We need to\n # compute the average of all these to get the cost of\n # the minibatch\n cost=T.mean(L)\n \n # compute the gradients of the cost of the `dA` with respect\n # to its parameters\n gparams = T.grad(cost, self.params)\n \n updates = [\n (param, param - learning_rate * gparam)\n for param, gparam in zip(self.params, gparams)\n ]\n return(cost,updates)", "def optimizePLS(x, t, M, lamb): # 'lambda' is reserved\n import numpy as np\n phi = np.zeros((len(x), M))\n for n in range(len(x)):\n for m in range(M):\n phi[n][m] = x[n] ** m\n prod = np.dot(phi.T, phi)\n I = np.eye(prod.shape[1]) * lamb\n i = np.linalg.inv(prod + I)\n m = np.dot(i, phi.T)\n W_pls = np.dot(m, t)\n return W_pls", "def cost_obs_OLD(x, svh, svv, theta, unc=0.5):\n n_obs = svh.shape[0]\n A_vv, B_vv, C_vv, A_vh, B_vh, C_vh = x[:6]\n vsm = x[6 : (6 + n_obs)]\n lai = x[(6 + n_obs) :]\n sigma_vv, dvv = wcm_jac(A_vv, lai, B_vv, lai, C_vv, vsm, theta=theta)\n sigma_vh, dvh = wcm_jac(A_vh, lai, B_vh, lai, C_vh, vsm, theta=theta)\n diff_vv = svv - sigma_vv\n diff_vh = svh - sigma_vh\n cost = 0.5 * (diff_vv ** 2 + diff_vh ** 2) / (unc ** 2)\n jac = np.concatenate(\n [\n np.array(\n [\n np.sum(dvv[0] * diff_vv), # A_vv\n np.sum(dvv[1] * diff_vv), # B_vv\n np.sum(dvv[2] * diff_vv), # C_vv\n np.sum(dvh[0] * diff_vh), # A_vh\n np.sum(dvh[1] * diff_vh), # B_vh\n np.sum(dvh[2] * diff_vh),\n ]\n ), # C_vh\n dvv[3] * diff_vv + dvh[3] * diff_vh, # vsm\n (dvv[4] + dvv[5]) * diff_vv + (dvh[4] + dvh[5]) * diff_vh, # LAI\n ]\n )\n return cost.sum(), -jac / (unc ** 2)", "def polyTransform(inputData: Union[List, Tuple, np.ndarray], q=2):\n if q is None:\n return inputData\n zInput = []\n for inputPt in inputData:\n newPt = []\n for i in range(q+1):\n for j in range(q+1):\n if i+j <= q:\n newPt.append((inputPt[0]**i) * inputPt[1]**j)\n zInput.append(newPt)\n return zInput" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the lists made by self.return_quadratic_cost_function_expansion_variables().
def test_quadratic_cost_function_expansion_variables(
        self, l,
        lx, lu,
        lux, lxu,
        luu, lxx):

    Time = self.return_time_array()

    # l should be a list of length len(Time)-1, with each element with shape (1,1), where n is the number of states.
    assert len(l)==len(Time)-1, "l has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(l)) + "."
    assert all([el.shape==(1,1) for el in l]), "Elements of l have incorrect shape. Should be of shape (1,1). Check l function."

    # lx should be a list of length len(Time)-1, with each element with shape (n,1), where n is the number of states.
    assert len(lx)==len(Time)-1, "lx has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(lx)) + "."
    assert all([el.shape==(2,1) for el in lx]), "Elements of lx have incorrect shape. Should be of shape (2,1). Check lx function."

    # lu should be a list of length len(Time)-1, with each element with shape (m,1), where m is the number of inputs.
    assert len(lu)==len(Time)-1, "lu has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(lu)) + "."
    assert all([el.shape==(1,1) for el in lu]), "Elements of lu have incorrect shape. Should be of shape (1,1). Check lu function."

    # lux should be a list of length len(Time)-1, with each element with shape (m,n), where m is the number of inputs and n is the number of states.
    assert len(lux)==len(Time)-1, "lux has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(lux)) + "."
    assert all([el.shape==(1,2) for el in lux]), "Elements of lux have incorrect shape. Should be of shape (1,2). Check lux function."

    # lxu should be a list of length len(Time)-1, with each element with shape (n,m), where n is the number of states and m is the number of inputs.
    assert len(lxu)==len(Time)-1, "lxu has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(lxu)) + "."
    assert all([el.shape==(2,1) for el in lxu]), "Elements of lxu have incorrect shape. Should be of shape (2,1). Check lxu function."

    # luu should be a list of length len(Time)-1, with each element with shape (m,m), where m is the number of inputs.
    assert len(luu)==len(Time)-1, "luu has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(luu)) + "."
    assert all([el.shape==(1,1) for el in luu]), "Elements of luu have incorrect shape. Should be of shape (1,1). Check luu function."

    # lxx should be a list of length len(Time)-1, with each element with shape (n,n), where n is the number of states.
    assert len(lxx)==len(Time)-1, "lxx has incorrect length. Should be of length " + str(len(Time)-1) + ", not " + str(len(lxx)) + "."
    assert all([el.shape==(2,2) for el in lxx]), "Elements of lxx have incorrect shape. Should be of shape (2,2). Check lxx function."
[ "def return_quadratic_cost_function_expansion_variables(self):\n # returns a list of length len(Time)-1, each element with shape (1,1), where n is the number of states.\n l = list(\n map(\n lambda x,u: u.T * self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,1), where n is the number of states.\n lx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,1), where n is the number of states.\n lu = list(\n map(\n lambda x,u: self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,n), where m is the number of inputs and n is the number of states.\n lux = list(\n map(\n lambda x,u: np.matrix(np.zeros((1,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,m), where n is the number of states and m is the number of inputs.\n lxu = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,m), where m is the number of inputs.\n luu = list(\n map(\n lambda x,u: self.R*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,n), where n is the number of states.\n lxx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n return(l,lx,lu,lux,lxu,luu,lxx)", "def test_get_vars(self):\r\n size = (5, 4)\r\n x = create_var(size)\r\n y = create_var(size)\r\n A = create_const(np.ones(size), size)\r\n # Expanding dict.\r\n add_expr = sum_expr([x, y, A])\r\n vars_ = get_expr_vars(add_expr)\r\n self.assertItemsEqual(vars_, [(x.data, size), (y.data, size)])", "def test_scipy_eval(self):\n for method in ['2-point',\n '3-point',\n 'cs']:\n hes = Scipy(self.cost_func.problem, self.cost_func.jacobian)\n hes.method = method\n self.cost_func.hessian = hes\n eval_result = self.cost_func.hes_cost(params=self.params)\n self.assertTrue(np.isclose(self.actual, eval_result).all())", "def test_functionals(self):\n for i, f in enumerate(self.get_basis_functions()):\n for j, d in enumerate(self.dofs):\n if i == j:\n assert d.eval(f).expand().simplify() == 1\n else:\n assert d.eval(f).expand().simplify() == 0\n assert d.entity_dim() is not None", "def test_quadratic(self):\n C = wilson.util.smeftutil.wcxf2arrays_symmetrized(wc_quadratic.dict)\n c_old = wilson.match._smeft_old.match_all_array(C, p)\n c_new = wilson.match.smeft_tree.match_all_array(C, p)\n for k in c_old:\n npt.assert_almost_equal(c_old[k], c_new[k], decimal=10,\n err_msg=f\"Failed for {k}\")", "def test_scipy_eval(self):\n for method in ['2-point',\n '3-point',\n 'cs']:\n hes = Scipy(self.cost_func.problem, self.jacobian)\n hes.method = method\n eval_result = hes.eval(params=self.params)\n self.assertTrue(np.isclose(self.actual_hessian, eval_result).all())", "def evaluate(self, variables,functions):\r\n pass", "def test_coefficients(self):\n self.logTestName()\n H, _ = displacement(self.alpha, hbar=self.hbar)\n phi = np.angle(self.alpha)\n\n for term, coeff in H.terms.items():\n self.assertEqual(len(term), 1)\n j = 1-term[0][1]\n expected = (-1)**j * np.exp(1j*phi*(-1)**j)*self.hbar\n self.assertEqual(coeff, 1j*expected)", "def test_getitem_subterm(self):\n self.assertEqual(self.term[(0,)], 
self.g_subterm)\n self.assertEqual(self.term[(0, 0)], self.x)\n self.assertEqual(self.term[(1,)], self.f_subterm)\n self.assertEqual(self.term[(1, 0)], self.y)\n self.assertEqual(self.term[(1, 1)], self.z)", "def cost(user_requirements, proposed_solution):", "def test_df_costs(one, two, g_cost, q_cost):\n est = qml.resource.DoubleFactorization(one, two, chemist_notation=True)\n\n assert np.allclose(est.gates, g_cost)\n assert np.allclose(est.qubits, q_cost)", "def test_independence(self):\n basis = self.get_basis_functions()\n all_terms = set()\n\n try:\n basis[0].as_coefficients_dict()\n scalar = True\n except AttributeError:\n scalar = False\n\n if scalar:\n for f in basis:\n for term in f.as_coefficients_dict():\n all_terms.add(term)\n mat = [[0 for i in all_terms] for j in basis]\n for i, t in enumerate(all_terms):\n for j, f in enumerate(basis):\n fd = f.as_coefficients_dict()\n if t in fd:\n mat[j][i] = fd[t]\n else:\n for f in basis:\n for fi, fpart in enumerate(f):\n for term in fpart.as_coefficients_dict():\n all_terms.add((fi, term))\n mat = [[0 for i in all_terms] for j in basis]\n for i, (fi, t) in enumerate(all_terms):\n for j, f in enumerate(basis):\n fd = f[fi].as_coefficients_dict()\n if t in fd:\n mat[j][i] = fd[t]\n mat = sympy.Matrix(mat)\n\n assert mat.rank() == mat.rows", "def test_equation_rewrite(self):\n variables = {}\n variables['x'] = PysolveVariable('x')\n variables['y'] = PysolveVariable('y')\n self.assertEqual('x - y', _rewrite(variables, {}, 'x - y'))\n self.assertEqual('xx - y', _rewrite(variables, {}, 'xx - y'))\n self.assertEqual('xx - yx', _rewrite(variables, {}, 'xx - yx'))\n self.assertEqual('xx(0) - yx', _rewrite(variables, {}, 'xx(0) - yx'))\n self.assertEqual('_series_acc(x,-1)',\n _rewrite(variables, {}, 'x(-1)'))\n self.assertEqual('_series_acc(x,-t)',\n _rewrite(variables, {}, 'x(-t)'))\n\n parameters = {}\n parameters['a'] = Parameter('a')\n parameters['b'] = Parameter('b')\n self.assertEqual('_series_acc(a,-1)',\n _rewrite({}, parameters, 'a(-1)'))", "def test_exp(self):\r\n for n in [5, 10, 25]:\r\n print n\r\n x = Variable(n)\r\n obj = Minimize(sum_entries(exp(x)))\r\n p = Problem(obj, [sum_entries(x) == 1])\r\n p.solve(solver=SCS, verbose=True)\r\n self.assertItemsAlmostEqual(x.value, n*[1./n])", "def solutions_ok_quadratic(eq):\n s = diop_solve(eq)\n x, y = symbols(\"x, y\", Integer=True)\n ok = True\n\n while len(s) and ok:\n u, v = s.pop()\n\n if simplify(simplify(Subs(eq, (x, y), (u, v)).doit())) != 0:\n ok = False\n return ok", "def test_trainable_coeffs_tf(self, simplify, group):\n coeffs = tf.Variable([-0.05, 0.17], dtype=tf.double)\n param = tf.Variable(1.7, dtype=tf.double)\n\n # differentiating a circuit with measurement expval(H)\n @qml.qnode(dev, interface=\"tf\", diff_method=\"backprop\")\n def circuit(coeffs, param):\n qml.RX(param, wires=0)\n qml.RY(param, wires=0)\n return qml.expval(\n qml.Hamiltonian(\n coeffs,\n [qml.PauliX(0), qml.PauliZ(0)],\n simplify=simplify,\n grouping_type=group,\n )\n )\n\n with tf.GradientTape() as tape:\n res = circuit(coeffs, param)\n grad = tape.gradient(res, [coeffs, param])\n\n # differentiating a cost that combines circuits with\n # measurements expval(Pauli)\n\n # we need to create new tensors here\n coeffs2 = tf.Variable([-0.05, 0.17], dtype=tf.double)\n param2 = tf.Variable(1.7, dtype=tf.double)\n half1 = qml.QNode(circuit1, dev, interface=\"tf\", diff_method=\"backprop\")\n half2 = qml.QNode(circuit2, dev, interface=\"tf\", diff_method=\"backprop\")\n\n def combine(coeffs, param):\n 
return coeffs[0] * half1(param) + coeffs[1] * half2(param)\n\n with tf.GradientTape() as tape2:\n res_expected = combine(coeffs2, param2)\n grad_expected = tape2.gradient(res_expected, [coeffs2, param2])\n\n assert np.allclose(grad[0], grad_expected[0])\n assert np.allclose(grad[1], grad_expected[1])", "def testQuadraticize(self):\n # TODO: use a more complicated custom test Cost that has cross\n # terms in Hessians.\n x = np.array([[1.0], [1.0]])\n u1 = np.array([[1.0], [1.0]])\n u2 = np.array([[1.0], [1.0]])\n\n semi0 = SemiquadraticCost(0, 0.0, True)\n semi1 = SemiquadraticCost(1, 0.0, True)\n cost = PlayerCost()\n cost.add_cost(semi0, \"x\", 1.0)\n cost.add_cost(semi1, \"x\", 2.0)\n cost.add_cost(semi0, 0, 1.0)\n cost.add_cost(semi1, 0, 2.0)\n cost.add_cost(semi0, 1, 1.0)\n cost.add_cost(semi1, 1, 2.0)\n\n # Compute what the cost should be.\n expected_cost = max(x[0, 0], 0.0)**2 + 2.0 * max(x[1, 0], 0.0)**2 + \\\n max(u1[0, 0], 0.0)**2 + 2.0 * max(u1[1, 0], 0.0)**2 + \\\n max(u2[0, 0], 0.0)**2 + 2.0 * max(u2[1, 0], 0.0)**2\n\n # Compute expected gradient and Hessians.\n expected_grad_x = np.array([[2.0], [4.0]])\n expected_hess_x = np.array([[2.0, 0.0], [0.0, 4.0]])\n expected_hess_u1 = expected_hess_x\n expected_hess_u2 = expected_hess_x\n\n # Quadraticize and compare.\n cost, grad_x, hess_x, [hess_u1, hess_u2] = cost.quadraticize(x, [u1, u2])\n self.assertAlmostEqual(cost, expected_cost, delta=SMALL_NUMBER)\n self.assertLess(np.linalg.norm(grad_x - expected_grad_x), SMALL_NUMBER)\n self.assertLess(np.linalg.norm(hess_x - expected_hess_x), SMALL_NUMBER)\n self.assertLess(np.linalg.norm(hess_u1 - expected_hess_u1), SMALL_NUMBER)\n self.assertLess(np.linalg.norm(hess_u2 - expected_hess_u2), SMALL_NUMBER)", "def test_poly_gcd7(self):\n \n result = 0\n\n y = symbol('y')\n p = x - y * z + 1\n q = x - y + z * 3\n\n for j in range(1, max_variables+1):\n for k in range(j+1, 5):\n d = pow(p, j) * pow(q, j)\n f = pow(p, j) * pow(q, k)\n g = pow(p, k) * pow(q, j) \n r = gcd(f, g)\n if not (r-d).expand().is_zero():\n print \"case 7, gcd(\",f,\",\",g,\") = \",r,\" (should be \",d,\")\"\n result += 1\n\n self.assertEqual(result,0)", "def test_qrom_cost(constants, cost_ref, k_ref):\n # pylint: disable=protected-access\n cost, k = qml.resource.DoubleFactorization._qrom_cost(constants)\n\n assert cost == cost_ref\n assert k == k_ref" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load initial data from Cloud Commerce Product Range data.
def create_from_range(cls, data, product_range):
    data["BasePrice"] = None
    data["VatRateID"] = None
    data["WeightGM"] = None
    data["LengthMM"] = None
    data["WidthMM"] = None
    data["HeightMM"] = None
    data["LargeLetterCompatible"] = None
    data["ExternalProductId"] = None
    return cls(data, product_range=product_range)
[ "def product_range(self):\n if self._product_range is None:\n from .functions import get_range\n\n self._product_range = get_range(self.range_id)\n return self._product_range", "def load_product(self, data_path, data, lote):\n\n bulonfer_id = self.env['res.partner'].search(\n [('ref', '=', 'BULONFER')])\n if not bulonfer_id:\n raise Exception('Vendor Bulonfer not found')\n\n next_line = int(self.next_line) if self.next_line else 1\n\n _logger.info('REPLICATION: Load %s products from file '\n '%s' % (lote, data))\n prod_processed = prod_created = barc_changed = barc_created = 0\n with open(data_path + data, 'r') as file_csv:\n reader = csv.reader(file_csv)\n for line in reader:\n # Recorremos el archivo y procesamos los registros que:\n # - tienen el numero de linea >= que next_line\n # - cuando procesamos lote registros terminamos\n if line and reader.line_num >= next_line:\n obj = ProductMapper(line, data_path, bulonfer_id.ref,\n self._productcode)\n stats = obj.execute(self.env)\n\n if 'barc_created' in stats:\n barc_created += 1\n if 'barc_changed' in stats:\n barc_changed += 1\n if 'prod_processed' in stats:\n prod_processed += 1\n if 'prod_created' in stats:\n prod_created += 1\n if reader.line_num - next_line + 1 >= lote:\n self.next_line = next_line\n break\n\n # si terminamos el archivo hay que borrarlo y poner nl=1\n if reader.line_num - next_line + 1 < lote:\n os.remove(data_path + data)\n self.next_line = '1'\n else:\n self.next_line = str(reader.line_num + 1)\n return {'barc_created': barc_created,\n 'barc_changed': barc_changed,\n 'prod_processed': prod_processed,\n 'prod_created': prod_created,\n 'next_line': str(self.next_line)}", "def init():\n products = _load_products_list()\n BASKET.clear()\n PURCHASED.clear()\n AVAILABLE.clear()\n for product in products:\n id = product['id']\n qty = product['in_stock_quantity']\n AVAILABLE[id] = qty\n PRODUCT_DATA[id] = product\n if len(products) != len(AVAILABLE):\n raise RuntimeError('Could not init product database due to duplicate IDs')", "def test_request_returns_product_range(self):\n self.register(json=self.RESPONSE)\n response = self.mock_request(self.RANGE_ID)\n self.assertIsInstance(response, cc_objects.productrange.ProductRange)", "def populate_geoprice_tables(val):\n price_val = format_price(val) \n price = Price(price_val)\n #logger.debug(\"Formatted price info..\")\n try:\n if type(price.product_uuid) is float and np.isnan(price.product_uuid):\n raise Exception(\"Product UUID needs to be generated!\")\n except Exception as e:\n return False\n #logger.info(\"Saving All...\")\n if price.save_all_batch():\n #logger.debug(\"Loaded tables for: {}\".format(val['product_uuid']))\n pass", "def initializeData(self):\n EntityBase.initializeData(self)", "def dataload(self):\n collection_product_price=self.db['price']\n with open('data/data.json') as d:\n price_data = json.load(d)\n\n # Reference: https://stackoverflow.com/questions/44838280/how-to-ignore-duplicate-key-errors-safely-using-insert-many\n try:\n inserted=collection_product_price.insert_many(price_data, ordered = False)\n print(\"{} records inserted\", len(inserted))\n except errors.BulkWriteError as e:\n print(e.details['writeErrors'])", "def init_start_data(self, start_data):\n self.inventory = start_data.load_inventory()\n for i in self.inventory.get_all_items():\n if isinstance(i, SubEntity): i.superentity = self\n meter_amounts = start_data.player_data.meter_amounts\n if \"lantern\" in meter_amounts and self.get_lantern():\n self.get_lantern().oil_meter = 
meter_amounts[\"lantern\"]", "def init_range(init_address):\r\n new_range = init_address.copy()\r\n new_range[\"house_numbers\"] = [new_range.pop(\"house_nbr\")]\r\n new_range[\"points\"] = [new_range.pop(\"shape@\").firstPoint]\r\n return new_range", "def import_prices(self):\n for index, row in self.df.iterrows():\n self.set_mini_bundle_name(row[\"Title\"])\n self.set_issns(row[\"E-ISSN\"])\n self.set_product_id(row[\"Product\"])\n self.in_electronic_price = False\n for region, currency_acronym in self.regions_and_currencies:\n self.set_currency(currency_acronym)\n self.set_country(region)\n column = currency_acronym + \" Price \" + str(self.year)\n self.set_price(row[column])\n media_type = row[\"Product Description\"]\n price_category = row[\"Price Category Description\"]\n self.add_prices(media_type, price_category)\n self.issns = []\n db.session.commit()", "def test_get_product_filters_by_range_vendor_v2(self):\n pass", "def _load_product_data_cis(self, hub_id):\n\n # dump CIS data into cis staging table: stage_dim_product_cis\n self._export_product_data(hub_id=hub_id, cis_source=True)\n\n # Below part is moved from method self._export_product_data. Put it here when all product data are ready.\n # Override nxg items with CIS if there are duplicated items on both NXG and CIS\n sql = \"\"\"\n DELETE FROM {cmnSchema}.STAGE_{itemTable} \n WHERE (vendor_key, retailer_key, item_key) IN\n (SELECT vendor_key, retailer_key, item_key\n FROM {cmnSchema}.STAGE_{itemTable}_CIS);\n \n INSERT INTO {cmnSchema}.STAGE_{itemTable} \n SELECT * FROM {cmnSchema}.STAGE_{itemTable}_CIS;\n \"\"\".format(itemTable=self._dim_product,\n cmnSchema=self._common_schema)\n self._logger.info(sql)\n self._dw.execute(sql)\n\n # updating column OSM_MAJOR_CATEGORY when NXG and CIS sources are all done.\n sql = \"\"\"\n DROP TABLE IF EXISTS TEMP_OSM_SUB_CATEGORY_CONSISTENCY;\n CREATE LOCAL TEMP TABLE IF NOT EXISTS TEMP_OSM_SUB_CATEGORY_CONSISTENCY ON COMMIT PRESERVE ROWS \n AS /*+ DIRECT, LABEL(GX_IRIS_SYNC_DIM_DATA)*/ \n SELECT distinct OSM_SUB_CATEGORY_NO,MAX(OSM_SUB_CATEGORY) as OSM_SUB_CATEGORY \n FROM {self._common_schema}.{self._dim_product} GROUP BY OSM_SUB_CATEGORY_NO;\n\n DROP TABLE IF EXISTS TEMP_OSM_MAJOR_CATEGORY_CONSISTENCY;\n CREATE LOCAL TEMP TABLE IF NOT EXISTS TEMP_OSM_MAJOR_CATEGORY_CONSISTENCY ON COMMIT PRESERVE ROWS \n AS /*+ DIRECT, LABEL(GX_IRIS_SYNC_DIM_DATA)*/ \n SELECT distinct OSM_MAJOR_CATEGORY_NO,MAX(OSM_MAJOR_CATEGORY) as OSM_MAJOR_CATEGORY \n FROM {self._common_schema}.{self._dim_product} GROUP BY OSM_MAJOR_CATEGORY_NO;\n \"\"\".format(self=self)\n self._logger.info(sql)\n self._dw.execute(sql)\n\n sql = (\"\"\"\n SELECT /*+ LABEL(GX_IRIS_SYNC_DIM_DATA)*/ ANALYZE_STATISTICS('{self._common_schema}.STAGE_{self._dim_product}');\n\n INSERT /*+ DIRECT, LABEL(GX_IRIS_SYNC_DIM_DATA)*/ INTO TEMP_OSM_SUB_CATEGORY_CONSISTENCY\n SELECT a.OSM_SUB_CATEGORY_NO, MAX(a.OSM_SUB_CATEGORY) AS OSM_SUB_CATEGORY\n FROM {self._common_schema}.STAGE_{self._dim_product} a\n LEFT JOIN TEMP_OSM_SUB_CATEGORY_CONSISTENCY b on a.OSM_SUB_CATEGORY_NO = b.OSM_SUB_CATEGORY_NO\n WHERE a.OSM_SUB_CATEGORY_NO IS NOT NULL AND a.OSM_SUB_CATEGORY_NO <> '' AND b.OSM_SUB_CATEGORY_NO IS NULL\n GROUP BY a.OSM_SUB_CATEGORY_NO;\n\n UPDATE /*+ LABEL(GX_IRIS_SYNC_DIM_DATA)*/ {self._common_schema}.STAGE_{self._dim_product}\n SET OSM_SUB_CATEGORY = b.OSM_SUB_CATEGORY\n FROM \n (SELECT * FROM TEMP_OSM_SUB_CATEGORY_CONSISTENCY) b\n WHERE {self._common_schema}.STAGE_{self._dim_product}.OSM_SUB_CATEGORY_NO = b.OSM_SUB_CATEGORY_NO;\n /*UPDATE 
{self._common_schema}.STAGE_{self._dim_product}\n SET ITEM_GROUP = B.ITEM_GROUP\n FROM $schemaName.OLAP_ITEM B\n WHERE {self._common_schema}.STAGE_{self._dim_product}.ITEM_KEY = B.ITEM_KEY;*/\n\n INSERT /*+ DIRECT, LABEL(GX_IRIS_SYNC_DIM_DATA)*/ INTO TEMP_OSM_MAJOR_CATEGORY_CONSISTENCY\n SELECT a.OSM_MAJOR_CATEGORY_NO, MAX(a.OSM_MAJOR_CATEGORY) AS OSM_MAJOR_CATEGORY\n FROM {self._common_schema}.STAGE_{self._dim_product} a\n LEFT JOIN TEMP_OSM_MAJOR_CATEGORY_CONSISTENCY b on a.OSM_MAJOR_CATEGORY_NO = b.OSM_MAJOR_CATEGORY_NO\n WHERE a.OSM_MAJOR_CATEGORY_NO IS NOT NULL AND a.OSM_MAJOR_CATEGORY_NO <> '' AND b.OSM_MAJOR_CATEGORY_NO IS NULL\n GROUP BY a.OSM_MAJOR_CATEGORY_NO;\n\n UPDATE /*+ LABEL(GX_IRIS_SYNC_DIM_DATA)*/ {self._common_schema}.STAGE_{self._dim_product}\n SET OSM_MAJOR_CATEGORY = b.OSM_MAJOR_CATEGORY\n FROM \n (SELECT * FROM TEMP_OSM_MAJOR_CATEGORY_CONSISTENCY) b\n WHERE {self._common_schema}.STAGE_{self._dim_product}.OSM_MAJOR_CATEGORY_NO = b.OSM_MAJOR_CATEGORY_NO;\n \"\"\".format(self=self))\n self._logger.info(sql)\n self._dw.execute(sql)", "def pre_process_data(self):\n capacity = self.capacity\n voltage = self.voltage\n len_capacity = self.len_capacity\n # len_voltage = self.len_voltage\n\n f = interp1d(capacity, voltage, kind=self.interpolation_method)\n c1, c2 = index_bounds(capacity)\n self.capacity_preprocessed = np.linspace(c1, c2, len_capacity)\n # capacity_step = (c2-c1)/(len_capacity-1)\n self.voltage_preprocessed = f(self.capacity_preprocessed)\n\n if self.pre_smoothing:\n savgol_filter_window_divisor = np.amin((self.savgol_filter_window_divisor_default, len_capacity / 5))\n savgol_filter_window_length = int(len_capacity / savgol_filter_window_divisor)\n\n if savgol_filter_window_length % 2 == 0:\n savgol_filter_window_length -= 1\n self.voltage_preprocessed = savgol_filter(self.voltage_preprocessed,\n np.amax([3, savgol_filter_window_length]),\n self.savgol_filter_window_order)", "def load_price_data():\r\n return pd.read_csv(\"stock_data.csv\")", "def _load_products_list():\n products_json_path = get_abs_path(PRODUCTS_FILE)\n with open(products_json_path, 'r') as f:\n products_list = json.load(f)\n for product in products_list:\n # split price & ccy:\n product['currency'] = product['price'][-3:]\n product['price'] = float(product['price'][:-3])\n product['price_fmt'] = fmt_price(product['price'])\n # correct case:\n product['brand'] = product['brand'].title()\n # extract features from name:\n name = product['name']\n name_parts = name.replace(';', ',').split(',')\n product['name'] = name_parts[0]\n product['features'] = ', '.join([name.strip() for name in name_parts[1:]])\n return products_list", "def load_iter_data(self):\n if self.iterstepdata.files_available is False:\n empty_data = self.iterstepdata.get_empty_data()\n empty_data['header']['processing_start'] = datetime(1900, 1, 1)\n empty_data['header']['processing_end'] = datetime(1901, 1, 1)\n self.iter_data = empty_data\n else:\n self.iter_data = self.iterstepdata.read_latest_iter_data()", "def load_range(cls, start_date, end_date) -> list:\n if not start_date:\n start_date = \"1900-01-01\"\n if not end_date:\n end_date = \"3000-01-01\"\n\n Activity = cls.alias()\n query = (\n Event.select(\n Activity.name,\n fn.sum(TimeEntry.duration).alias(\"duration\"),\n )\n .join(TimeEntry, on=(Event.id == TimeEntry.event_id))\n .join(Activity, on=(Event.model_id == Activity.id))\n .where(\n (TimeEntry.start_time > start_date)\n & (TimeEntry.end_time < end_date)\n )\n .group_by(Activity.name)\n ).dicts()\n\n return 
[dict(result) for result in query]", "def load_pricepaid(since=0):\n #for the year's data, use: http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-2016.txt\n # for the whole history of sales use: http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv\n \n #Property Type \tD = Detached, S = Semi-Detached, T = Terraced, F = Flats/Maisonettes, O = Other\n\n #reading the whole landregistry for 24 million records from 1995-2016 takes a lot of time, so we\n #produce a subsampled set of just 300,000 purchases, which are returned instead\n \n filename = \"pp-complete.csv\"\n if not os.path.isfile('sampled_pp.csv'): \n if not os.path.isfile(filename): \n os.system('wget http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/'+filename)\n pp = pd.read_csv(filename,header=None,usecols=[1,2,3,4],names=[\"price\", \"date\", \"postcode\", \"type\"])\n pp = pp.ix[random.sample(pp.index, 300000)]\n pp.to_csv('sampled_pp.csv')\n else:\n print(\"Using presampled dataset.\")\n pp = pd.read_csv('sampled_pp.csv')\n \n #add seconds since epoch and year.\n seconds = np.zeros(len(pp))\n years = seconds.copy()\n for i,date in enumerate(pp['date']):\n seconds[i] = int(datetime.strptime(date, '%Y-%m-%d %H:%M').strftime(\"%s\"))\n years[i] = int(datetime.strptime(date, '%Y-%m-%d %H:%M').strftime(\"%Y\"))\n pp['seconds'] = seconds\n pp['years'] = years\n \n pp = pp[pp['years']>since] \n print(\"Loaded property prices.\")\n return pp", "def import_prices(self):\n for index, row in self.df.iterrows():\n self.set_journal_name(row[\"Title\"])\n self.set_issn(row[\"E-ISSN\"])\n self.set_journal()\n self.set_product_id(row[\"Product\"])\n self.in_electronic_price = False\n for region, currency_acronym in self.regions_and_currencies:\n self.set_currency(currency_acronym)\n self.set_country(region)\n column = currency_acronym + \" Price \" + str(self.year)\n self.set_price(row[column])\n media_type = row[\"Product Description\"]\n price_category = row[\"Price Category Description\"]\n self.add_prices(media_type, price_category)\n db.session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of IDs for Bays in which this product is located.
def bays(self):
    if self._bays is None:
        self._bays = [b.id for b in CCAPI.get_bays_for_product(self.id)]
    return self._bays
[ "def get_ids():", "def get_bids(self):\n return Bid_API.Bid().get()", "def id_list(self):\n return numpy.array(self.spiketrains.keys(), int)", "def getAllBinIds( self, minBinId = None, maxBinId = None ):\n keys = list(self.bin2count.keys())\n if not keys: keys = ( 0, )\n return list(range( min( keys ) if minBinId is None else minBinId ,\n max( keys )+1 if maxBinId is None else maxBinId, 1))", "def get_biz_dids(start_did, end_did):\n bdates = pd.bdate_range(did_to_datestr(did=start_did, sep='-'),\n did_to_datestr(did=end_did, sep='-')).strftime('%Y%m%d')\n return [int(date) for date in bdates]", "def get_ids(self):\n links = self.driver.find_elements_by_xpath(self.entity_xpath)\n ids = [link.get_attribute('href').rsplit('=')[1] for link in links]\n if self.entity == 'attendance':\n number_links = self.driver.find_elements_by_xpath(self.number_xpath)\n numbers = [number_link.text for number_link in number_links]\n ids = zip(ids, numbers)\n # href is like parlamento.pt/DeputadoGP/Paginas/Biografia.aspx?BID=3\n return set(ids)", "def get_phonebook_ids(self) -> list[int]:\n return self.fph.phonebook_ids # type: ignore[no-any-return]", "def get_inv_ids(self):\n out = []\n for item in self.inventory:\n out.append(item.base_identifier)\n\n return out", "def get_bouquet_picon_ids(self):\n bq_selected = self._app.check_bouquet_selection()\n if not bq_selected:\n return\n\n model, paths = self._app.bouquets_view.get_selection().get_selected_rows()\n if len(paths) > 1:\n self._app.show_error_message(\"Please, select only one bouquet!\")\n return\n\n fav_bouquet = self._app.current_bouquets[bq_selected]\n services = self._app.current_services\n\n ids = set()\n for s in (services.get(fav_id) for fav_id in fav_bouquet):\n ids.add(s.picon_id)\n ids.add(get_picon_file_name(s.service))\n return ids", "def _get_product_ids(prefix):\n from accelpy._application import Application\n return Application.list(prefix)", "def job_ids(self):\n return [elem[\"id\"] for elem in self.all()]", "def get_ids(self):\n return self.multiengine.get_ids()", "def getIDs(self):\n return (self.getPdbID(), self.getChainID())", "def _get_all_eids(ibs):\n all_eids = ibs.db.get_all_rowids(ENCOUNTER_TABLE)\n return all_eids", "def getAllDropboxIDs(): # @NoSelf", "def get_object_ids(self):\n params = self.default_params.copy()\n params.update(self.ids_params)\n if self.where:\n params.update({\n 'where': ' and '.join(self.where),\n })\n url = self.service_url + urlencode(params)\n logger.debug('Getting object ids with url \"%s\"' % url)\n return json.load(urlopen(url))['objectIds']", "def get_job_ids(self) -> List[str]:\n # we can only query for job ids by jobs states which can change\n # between calls, so order in which job states are processed matters\n ids = defaultdict(int) \n logging.debug(f'Retrieving job IDs from job queue {self.job_queue_name}')\n for status in AWS_BATCH_JOB_STATES:\n batch_of_jobs = self.batch.list_jobs(jobQueue=self.job_queue_name,\n jobStatus=status)\n for j in batch_of_jobs['jobSummaryList']:\n ids[j['jobId']] = 1\n\n while 'nextToken' in batch_of_jobs:\n batch_of_jobs = self.batch.list_jobs(jobQueue=self.job_queue_name,\n jobStatus=status,\n nextToken=batch_of_jobs['nextToken'])\n for j in batch_of_jobs['jobSummaryList']:\n ids[j['jobId']] = 1\n\n logging.debug(f'Retrieved {len(ids.keys())} job IDs')\n return list(ids.keys())", "def _get_ids(self, query):\n return [getattr(elm, 'id') for elm in query]", "def aids(self):\n return self._aids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the product's country of origin ID.
def country_of_origin(self):
    if self._country_of_origin_id is None:
        self._reload()
    return self._country_of_origin_id
[ "def country(self):\n return Country(alpha_2=self.country_code)", "def idToCountry(self, countryId):\n if 'code3' == self.cf_country_print_mode:\n return GeoIP.id_to_country_code3(countryId)\n elif 'name' == self.cf_country_print_mode:\n return GeoIP.id_to_country_name(countryId)\n else: # 'code' (default)\n return GeoIP.id_to_country_code(countryId)", "def country_name(self):\n if self.country:\n if hasattr(self.country, 'common_name'):\n return self.country.common_name\n return self.country.name\n return None", "def wlanGetCountry(self, phy_id):\n test_cmd = COMMAND_GET_COUNTRY\n test_args = {\"phy_id\": phy_id}\n test_id = self.build_id(self.test_counter)\n self.test_counter += 1\n\n return self.send_command(test_id, test_cmd, test_args)", "def get_country_from_nation(self, nation):\n assert nation in self.nationalities_countries, \"Unknown nationality: {}\".format(nation)\n return self.nationalities_countries[nation]", "def _get_country_from_vk_country_id(vk_country_id: int) -> str:\n vk_countries = VkCountry.objects.filter(pk=vk_country_id)\n if not vk_countries.exists():\n raise VkCountryNotFoundError(vk_country_id)\n vk_country: VkCountry = vk_countries.first() # type: ignore\n country: str = vk_country.country\n return country", "def country_name(self, iso_code):\n if len(iso_code) == 2:\n iso_code = countries.get(iso_code).alpha3\n return self.df[self.df.ISO == iso_code].Country.iloc[0]", "def country_code(self, ip_address):\n return self.country(ip_address).get('country_code')", "def get_country(self):\r\n if len(self.user_flag) > 0:\r\n country = self.user_flag[0].get(\"title\", \"\")\r\n else:\r\n country = \"\"\r\n return country", "def countryname(cc):\n for corporation in corporations:\n if cc in corporation:\n return corporation[cc]\n raise LookupError('Unknown country code \"%s\"' % cc)", "def getCountryName(self):\n return self.driver.find_element_by_xpath(\n \"//*[@id=\\\"rhs\\\"]/div[2]/div/div/div/div/div[2]/div[1]/div/div[2]/div/div/span[2]/span\").text", "def country_name(self, ip_address):\n return self.country(ip_address).get('country_name')", "def get_country_name(tag):\n return pyradox.yml.get_localisation(tag, game = 'EU4')", "def contry_code(self):\n return self._data.get('profile', {}).get('countryCode')", "def getCountry(code=None, resource_uri=None):\n country = None\n \n if resource_uri is not None:\n country = loadJson(resource_uri)\n \n elif code is not None:\n countries = getCountries()\n for c in countries:\n if c['code'] == code:\n country = c\n break\n \n return country", "def city_country(city, country):\n return city + \", \" + country", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code", "def get_countries(self):\n return self._make_transferto_request(action=\"pricelist\", info_type=\"countries\")", "def get_user_country(self, code):\r\n tree = self.get_user_profile_tree(code)\r\n scraper = FanfictionScraper()\r\n country = scraper.get_user_country(tree)\r\n return country" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the barcode of the product.
def barcode(self):
    return self._barcode
[ "def barcode(self, barcode):\n CCAPI.set_product_barcode(product_id=self.id, barcode=barcode)", "def _get_products_db_barcode(self):\n\n products = Product.objects.all() # Gets all products from database\n products_barcode = []\n\n for product in products:\n # Gets product barcode\n products_barcode.append(product.barcode)\n\n return products_barcode", "def get_product_by_barcode(self, barcode):\n return _GetProductByBarcode(self).call(barcode)", "def get_barcode_name(self):\n possible_names = [self.name]\n if self.start_sequence:\n possible_names.append(self.start_sequence[0])\n if self.end_sequence:\n possible_names.append(self.end_sequence[0])\n barcode_name = sorted(possible_names, key=lambda x: len(x))[0]\n return barcode_name.replace(' ', '_')", "def product_code(self):\n\n return (self._edid[0x0B] << 8) + self._edid[0x0A]", "def show_one_barcode(_barcode):\n\n # remove space and hyphens\n try:\n barcode = str(_barcode).replace('-', '').replace(' ', '')\n int(barcode)\n except ValueError:\n return _barcode\n\n if len(barcode) > 16:\n # if extra 5 digits remove them (EAN 5)\n first = barcode[:-5]\n if stdean.is_valid(first):\n return '%s %s' % (first, barcode[-5:])\n elif len(barcode) > 13:\n # if extra 2 digits remove them (EAN 2)\n first = barcode[:-2]\n if stdean.is_valid(first):\n return '%s %s' % (first, barcode[-2:])\n\n return barcode", "def barcode():\n\n imagefile = request.files.get('imagefile', None)\n if not imagefile:\n return make_response(\"Missing file parameter\", 400)\n\n filename = secure_filename(imagefile.filename)\n full_path = os.path.join(UPLOAD_FOLDER, filename)\n imagefile.save(full_path)\n\n text = ''\n try:\n # Convert image to text\n text = scan_barcode_image(full_path)\n except:\n return make_response(\"Error processing image\", 500)\n\n \n return jsonify(text)", "def extract_barcode(record, eb):\n seq = record.sequence[eb.start:eb.end]\n qual = record.quality[eb.start:eb.end]\n return (eb.sequence_tag, seq, 'Z'), (eb.quality_tag, qual, 'Z')", "def get_barcode(dev = \"/dev/hidraw0\"):\n hiddev = open(dev, \"rb\")\n \n barcode = ''\n\n continue_looping = True\n\n k = 0\n\n while continue_looping:\n report = hiddev.read(8)\n\n # print \"k value: \", k\n k += 1\n\n for i in report:\n j = ord(i)\n # # print j\n if j == 0:\n # print \"j = \", j\n continue\n\n if j == 0x1E:\n barcode += '1'\n # print \"j = \", j\n continue\n elif j == 0x1F:\n barcode += '2'\n # print \"j = \", j\n continue\n elif j == 0x20:\n barcode += '3'\n # print \"j = \", j\n continue\n elif j == 0x21:\n barcode += '4'\n # print \"j = \", j\n continue\n elif j == 0x22:\n barcode += '5'\n # print \"j = \", j\n continue\n elif j == 0x23:\n barcode += '6'\n # print \"j = \", j\n continue\n elif j == 0x24:\n barcode += '7'\n # print \"j = \", j\n continue\n elif j == 0x25:\n barcode += '8'\n # print \"j = \", j\n continue\n elif j == 0x26:\n barcode += '9'\n # print \"j = \", j\n continue\n elif j == 0x27:\n barcode += '0'\n # print \"j = \", j\n continue\n elif j == 0x28:\n # print \"j = \", j\n # print barcode\n hiddev.close()\n continue_looping = False\n break\n else:\n pass\n # print \"+++ Melon melon melon +++\"\n # print \"j = \", j\n # hiddev.close()\n # continue_looping = False\n # break\n\n return barcode", "def generate(request, code, barcode_type='Standard39', auto_print=True):\n\n from reportlab.graphics.shapes import String\n from reportlab.graphics import renderPDF\n from reportlab.graphics.barcode import createBarcodeDrawing\n from reportlab.pdfbase import pdfdoc\n from 
reportlab.pdfbase import pdfmetrics\n from reportlab.pdfbase.ttfonts import TTFont\n\n response = HttpResponse(mimetype='application/pdf')\n response['Content-Disposition'] = 'inline; filename=%s.pdf' % (code,)\n\n # Config\n import bcp.settings as bcp_settings\n font_size = bcp_settings.FONT_SIZE\n bar_height = bcp_settings.BAR_HEIGHT\n bar_width = bcp_settings.BAR_WIDTH\n font_name = bcp_settings.FONT_NAME\n font_path = bcp_settings.FONT_PATH\n try:\n # If this is extended to different barcode types, then these options will need to be specified differently, eg not all formats support checksum.\n bc = createBarcodeDrawing(barcode_type, barHeight=bar_height, barWidth=bar_width, value=str(code), isoScale=True, quiet=bcp_settings.BAR_QUIET, checksum=bcp_settings.BAR_CHECKSUM,)\n except KeyError, e:\n return HttpResponseBadRequest('Barcode Generation Failed: %s' % (e))\n\n # Register the font\n pdfmetrics.registerFont(TTFont(font_name, font_path))\n\n # Set JS to Autoprint document\n if auto_print:\n pdfdoc.PDFCatalog.OpenAction = '<</S/JavaScript/JS(this.print\\({bUI:true,bSilent:false,bShrinkToFit:true}\\);)>>'\n pdfdoc.PDFInfo.title = code # nicety :)\n\n # Position for our text label\n x = bc.width / 2\n y = - font_size # or (bar_height + font_size) if placing on top\n # The textual barcode\n text = String(x, y, code, textAnchor='middle', fontName=font_name, fontSize=font_size)\n bc.add(text)\n bc = bc.resized() # resize barcode drawing object to accommodate text added\n\n buffer = StringIO() # buffer for the output\n renderPDF.drawToFile(bc, buffer, autoSize=1) # write PDF to buffer\n\n # Get the value of the StringIO buffer and write it to the response.\n pdf = buffer.getvalue()\n buffer.close()\n response.write(pdf)\n\n return response", "def test_barcode(self):\n barcode = Code39Barcode(x_pts=30,\n y_pts=500,\n chars='CODE39TEST')\n self.assertIn('barcode', barcode.required_parts)\n self.assertEqual(\n barcode.ps,\n '30 500 moveto (CODE39TEST) (includecheck includetext)\\n'\n '/code39 /uk.co.terryburton.bwipp findresource exec\\n')", "def get_product(self, barcode, columns=\"*\"):\n try:\n response = requests.get(self.server_url + 'get_product/' + barcode + '/' + columns)\n return response.json()\n except AssertionError as e:\n raise AssertionError(\"Error due to {}\".format(e))", "def product_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_name\")", "def get_barcode_details(self, barcode):\n sql = \"\"\"select create_date_time, status, scan_date,\n sample_postmark_date,\n biomass_remaining, sequencing_status, obsolete\n from barcode\n where barcode = %s\"\"\"\n cursor = self.get_cursor()\n cursor.execute(sql, [barcode])\n col_names = [x[0] for x in cursor.description]\n results = [dict(zip(col_names, row)) for row in cursor.fetchall()]\n cursor.close()\n if results:\n return results[0]\n else:\n return {}", "def get_barcode_details(self):\r\n\r\n # Check if last character is alpga\r\n if self.barcode[0].isalpha():\r\n bstring = self.barcode[1:]\r\n\r\n # Get details - extract portion of barcode\r\n\r\n if bstring[-2:].isalpha(): # If two letters at end\r\n bdate = bstring[-8:-2]\r\n btech = bstring[-10:-8]\r\n bplate = bstring[:-10]\r\n\r\n elif bstring[-1].isalpha(): # If one letter at end\r\n\r\n bdate = bstring[-7:-1]\r\n btech = bstring[-9:-7]\r\n bplate = bstring[:-9]\r\n\r\n else: # If no letters at end\r\n bdate = bstring[-6:]\r\n btech = bstring[-8:-6]\r\n bplate = bstring[:-8]\r\n\r\n bdate = datetime.strptime(bdate, '%d%m%y')\r\n bdate = 
bdate.strftime('%d-%b-%y')\r\n\r\n return btech, bdate, bplate", "def main(zipcode):\n print_barcode(zipcode)\n #print_barcode(\"36924\")\n #print_barcode(\"2c34a\")\n #print_barcode(\"123456\")", "def test_find_by_barcode_positive(self):\n\n user = self.client.users.create({})\n\n card_request = {\n \"card_product_token\": self.card_product.token,\n \"user_token\": user.token\n }\n\n card = self.client.cards.create(card_request)\n\n card_found = self.client.cards.find_by_barcode(card.barcode)\n\n self.assertEqual(card_found.barcode, card.barcode,\n 'Incorrect card returned by find')", "def test_barcode(self):\n barcode = Code93Barcode(x_pts=300,\n y_pts=50,\n chars='CODE93TEST')\n self.assertIn('barcode', barcode.required_parts)\n self.assertEqual(\n barcode.ps,\n '300 50 moveto (CODE93TEST) (includecheck includetext)\\n'\n '/code93 /uk.co.terryburton.bwipp findresource exec\\n')", "def return_barcode(self, return_barcode):\n\n self._return_barcode = return_barcode", "def read_barcode():\n print 'Scan barcode now!'\n line = sys.stdin.readline().strip()\n os.system('clear')\n out = int(line)\n return out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the barcode for the product.
def barcode(self, barcode):
    CCAPI.set_product_barcode(product_id=self.id, barcode=barcode)
[ "def set_barcode(self, barcode):\n\n self.barcode = barcode\n self.format_barcode()", "def barcode(self, barcode):\n if barcode is None:\n raise ValueError(\"Invalid value for `barcode`, must not be `None`\") # noqa: E501\n\n self._barcode = barcode", "def __init__(self, barcode_data):\n\n self.data = barcode_data", "def setBarcodeProjType(self, project, barcode):\n sql = \"\"\"update project_barcode set project_id =\n (select project_id from project where project = %s)\n where barcode = %s\"\"\"\n result = self.get_cursor()\n cursor = self.get_cursor()\n cursor.execute(sql, [project, barcode])\n self.connection.commit()\n cursor.close()", "def return_barcode(self, return_barcode):\n\n self._return_barcode = return_barcode", "def __update_barcode(self,payload):\n data = struct.unpack(\"%sH\" % 2, payload)\n if data[0] != 0xFFFF:\n self.__barcode = data[0]\n else:\n self.__barcode = None\n \n if data[1] != 0xFFFF:\n self.__color = data[1]\n else:\n self.__color = None", "def book_code(self, book_code):\n\n self._book_code = book_code", "def test_barcode(self):\n barcode = Code39Barcode(x_pts=30,\n y_pts=500,\n chars='CODE39TEST')\n self.assertIn('barcode', barcode.required_parts)\n self.assertEqual(\n barcode.ps,\n '30 500 moveto (CODE39TEST) (includecheck includetext)\\n'\n '/code39 /uk.co.terryburton.bwipp findresource exec\\n')", "def setCode(self, c):\n\t\t\n\t\tself.code = c", "def set_code(self, code):\n self._code = code", "def test_barcode(self):\n barcode = Code93Barcode(x_pts=300,\n y_pts=50,\n chars='CODE93TEST')\n self.assertIn('barcode', barcode.required_parts)\n self.assertEqual(\n barcode.ps,\n '300 50 moveto (CODE93TEST) (includecheck includetext)\\n'\n '/code93 /uk.co.terryburton.bwipp findresource exec\\n')", "def set_product_id(self, **kwargs):\n if self.is_quicklook():\n self._product_id = f'{self._obs_id}.quicklook'\n else:\n self._product_id = f'{self._obs_id}.continuum_imaging'", "def set_code(self, key, value):\n self._code[key] = value", "def code_no(self, code_no):\n\n self._code_no = code_no", "def product_code_collect(self, product_code_collect):\n\n self._product_code_collect = product_code_collect", "def validate_barcode(self, barcode):\n # Ignore empty barcode values\n if not barcode or barcode.strip() == '':\n return None\n\n barcode_hash = hash_barcode(barcode)\n\n if stock.models.StockItem.lookup_barcode(barcode_hash) is not None:\n raise ValidationError(_('Barcode is already in use'))\n\n return barcode", "def set_isbn(self):\n if \"isbn\" in self.libris_raw.keys():\n if isinstance(self.libris_raw[\"isbn\"], str):\n self.isbn = [self.libris_raw[\"isbn\"]]\n else:\n self.isbn = self.libris_raw[\"isbn\"]", "def scanner_manufacturer(self, scanner_manufacturer):\n self._scanner_manufacturer = scanner_manufacturer", "def product_class(self, product_class):\n\n self._product_class = product_class" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the description of the product.
def description(self):
    if self._description is None:
        self._description = CCAPI.get_product(self.id).description
    return self._description
[ "def getDescription(self):\n\n prod = self.productClass()\n\n if prod: result = prod.description\n else : result = None\n\n return result", "def v_product_item_description(self) -> str:\n return self._v_product_item_description", "def description(self, value):\n if value is None or value == \"\":\n value = self.name\n CCAPI.set_product_description(product_ids=[self.id], description=value)\n self._description = value", "def description(self):\n return self._book_dict[\"description\"]", "def _description_string(self) -> str:", "def device_description(self):\n return self.call_action(\"DeviceInfo1\", \"GetInfo\")[\"NewDescription\"]", "def description(self):\n if self.type_name is not None:\n return f\"{self.type_name} var{self.variant} rev{self.revision}\"\n else:\n return \"proprietary\"", "def print_desc(self):\n print(self.description)\n return", "def product_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_name\")", "def description(self):\n return type_get_description(self)", "def spn_description(self) -> str:\n return self._spn_description", "def setDescription(self, description):\n\n prod = self.productClass()\n\n if prod:\n prod.description = description", "def get_descripcion(self):\n return self.descripcion", "def __str__(self):\n return \"Product:\\n\" + '\\n'.join(\"%s : %r\" % (key2, str(val2)) for (key2, val2)\n in self.__get_dictionary().items()) + \"\\n\"", "def get_commodity_description(self):\n return str(self.gui.txt_commodity_description.text())", "def description_detaillee(self):\n return self.__description_detaillee", "def installable_description(self):", "def get_description(cls):\n if cls.__doc__ is None:\n return \"\"\n return cls.__doc__.strip().split(\"\\n\", 1)[0]", "def description_html(self):\n return self.description" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the description of the product.
def description(self, value):
    if value is None or value == "":
        value = self.name
    CCAPI.set_product_description(product_ids=[self.id], description=value)
    self._description = value
[ "def setDescription(self, description):\n\n prod = self.productClass()\n\n if prod:\n prod.description = description", "def set_description(description):", "async def set_description(self, description: str):\n self.preview_embed.description = description", "def set_description(self, description):\n if not isinstance(description, str):\n raise ValueError(\"Description must be a string.\")\n try:\n self._set_config_value(\n _SERVICE_INFO_SECTION_NAME, \"Description\", description\n )\n except Exception as e:\n logger.error(f\"Unable to set description: {e}\")", "def edit_description(self, new_desciption):\n self.desciption = new_desciption", "def set_descripcion(self, descripcion):\n self.descripcion = descripcion", "def update_prod_description(self, prod_id):\n if self.cursor:\n self.cursor.execute(\"\"\"UPDATE products SET \n prod_description = %s where prod_id = %s\"\"\",\n (self.data[\"prod_description\"], prod_id), )", "def set_description(self, room_description):\n self.description = room_description", "def set_description(module):\n name = module.attributes['name']\n value = module.attributes['description']\n module.node.api('interfaces').set_description(name, value)", "def set_desc(self, channel, description):\n channel.db.desc = description", "def _set_description(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"description must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"description\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/local-routing', defining_module='openconfig-local-routing', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__description = t\n if hasattr(self, '_set'):\n self._set()", "def set_description(self):\n if \"description\" not in self.data:\n logger.debug(\"Adding empty descriptions to root\")\n self.data[\"description\"] = \"\"", "async def description(self, ctx, *, description):\n author = ctx.author\n if len(str(description)) > 250:\n await ctx.send(\"Descriptions are limited to 250 characters :no_entry:\")\n return\n self.settingss[str(author.id)][\"DESCRIPTION\"] = description\n dataIO.save_json(self.JSON, self.settingss)\n await ctx.send(\"Your description has been set it'll now be on your profile\")", "def set_commodity_description(self, value):\n self.gui.txt_commodity_description.clear()\n self.gui.txt_commodity_description.setText(value)", "def v_product_item_description(self, v_product_item_description: str):\n\n self._v_product_item_description = v_product_item_description", "def price_description(self, price_description):\n\n self._price_description = price_description", "def add_description(self, description: str):\n self.response[DESCRIPTION_KEY] = description\n return self", "def _set_description(self, revision_range=None):\n if self.options.guess_description and not self.options.description:\n self.options.description = self.extract_description(revision_range)", "def description(self):\n if self._description is None:\n 
self._description = CCAPI.get_product(self.id).description\n return self._description" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the handling time for the product.
def handling_time(self): return self._handling_time
[ "def handling_time(self, handling_time):\n CCAPI.set_product_handling_time(product_id=self.id, handling_time=handling_time)\n self._handling_time = handling_time", "def get_time(self):\n return self.event_time", "def get_physical_time():\n return datetime.now().timestamp()", "def _get_time(self) -> datetime:\n return datetime.fromtimestamp(int(self._raw_meta['time']) / 1e3)", "def get_time(self): # TEST\n return self._game.get_time()", "def get_time_info(self):\n return self._time_info", "def release_time(self) -> str:\n return pulumi.get(self, \"release_time\")", "def time_info(self):\n return self._time_info", "def get_alarm(self):\n return self.alarm_time", "def get_time(self):\n return self.modelcache.get_model(self.current_model).time - self.zerotime", "def get_order_cooked_time(self):\n return self.cooked_at_time", "def get_time(self):\r\n return float(self._cur_time)", "def getLeaseTime(self):\n d = self.do_getattrdict([], [FATTR4_LEASE_TIME])\n return d[FATTR4_LEASE_TIME]", "def time():\n return datetime.datetime.now()", "def event_received_time(self):\n return self._event_received_time", "def _workflow_time(self):\n return self.__time", "def active_time(self):\n return self.details.get('active_time', 0)", "def get_delivery_time():\n try:\n address = get_address(current_user.address_id)\n address_string = get_address_string(address)\n delivery_time = get_travel_time(address_string)\n delivery_time += get_prep_time()\n return delivery_time\n except AttributeError:\n return \"Error getting user address\"", "def get_current_time(self):\n return datetime.datetime.now().strftime(\"%H:%M:%S\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the handling time for the product.
def handling_time(self, handling_time):
    CCAPI.set_product_handling_time(product_id=self.id, handling_time=handling_time)
    self._handling_time = handling_time
[ "def set_alarm(self, target_time: datetime.time):\n self.time = target_time.replace(second=0, microsecond=0)\n # print the time\n print(\"Alarm set for {}:{}\".format(self.time.hour, self.time.minute))", "def handling_time(self):\n return self._handling_time", "def set_pick_up_time(self, pick_up_time):\n self.pick_up_time = pick_up_time", "def setTime(self, t: 'SbTime') -> \"void\":\n return _coin.SoEvent_setTime(self, t)", "def on_action_set_time(self, content):\n self.set_time(as_qtime(content['time']))", "def set_time(self, enable=True):\r\n if enable:\r\n self.time = datetime.now\r\n else:\r\n self.time = None", "def setTimeFromNow(self, reltime: 'SbTime') -> \"void\":\n return _coin.SoAlarmSensor_setTimeFromNow(self, reltime)", "def setTime(self, abstime: 'SbTime') -> \"void\":\n return _coin.SoAlarmSensor_setTime(self, abstime)", "def set_max_time(self, time):\n raise NotImplementedError", "def setEditTime(self,when):\n self.editTime = when\n if not self.joinTime:\n self.setJoinTime( when )", "def set_indication_time(self, stage):\n self._time_indications[stage].append(datetime.now())", "def set_exposure_time(self, exposure_time):\n self.exposure_time = utils.get_quantity_value(exposure_time, u.second)", "def __call__(self):\n if \"expiration_date\" not in self.entity.cw_edited:\n delay = self._cw.vreg.config[\"default_expiration_delay\"]\n self.entity.cw_edited[\"expiration_date\"] = (\n datetime.date.today() + datetime.timedelta(delay))", "def setJoinTime(self,when):\n if not when:\n return\n self.joinTime = when", "def setValue(self, *args) -> \"void\":\n return _coin.SoMFTime_setValue(self, *args)", "def set_time(self, value):\n self._alive_time = value # No signal emitting since we call toggle_value after this", "def set_min_time(self, time):\n raise NotImplementedError", "def set_exp_start_time(self, time: str) -> None:\n self._logger.debug(\"running\")\n self._start_time_val.setText(time)\n self._logger.debug(\"done\")", "def time_t(self, time_t: int):\n\n self._time_t = time_t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the product's name.
def name(self, name):
    CCAPI.set_product_name(name=name, product_ids=[self.id])
    self._name = name
    self.full_name = None
[ "def set_name(self, new_name):\n self.name = new_name", "def update_prod_name(self, prod_id):\n if self.cursor:\n self.cursor.execute(\"\"\"UPDATE products SET \n prod_name = %s where prod_id = %s\"\"\",\n (self.data[\"prod_name\"], prod_id), )", "def v_product_item_name(self, v_product_item_name: str):\n\n self._v_product_item_name = v_product_item_name", "def set_Name(self, value):\n self.devName = value", "def setName(self, name):\n self.setAttribute('NAME', name)", "def set_name(self, name):\n self.recipe_proto[\"name\"] = name", "def set_name_item(self, item_name):\n self.name_item = item_name", "def product_name(self, value):\n if not str(value).isnumeric():\n self.__product_name = value\n else:\n raise Exception(\"Product name cannot be a number\")", "def set_name(self, new_name):\n\n self.img.attrib['Name'] = new_name", "def metal_name(self, name):\n self._name = name", "def _set_name(self):\n\n try:\n name = ('%s serial %s' % (self._protocol.name, self.device_type)).title()\n except Exception:\n name = 'Unknown Serial Device'\n self.device_name = name", "def change_name(self, name):\n self._player_name = name", "def product_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_name\")", "def get_product_name(self, product):\n product_name = product.get('product_name_fr')\n if product_name is None:\n product_name = product['product_name']\n return product_name", "def product_name(self) -> str:\n return self.driver.find_element(*self.PRODUCT_TITLE_LOC).text", "def setName(self, newname: 'SbName') -> \"void\":\n return _coin.SoBase_setName(self, newname)", "def set_player_name(self, player):\r\n self.__name = player", "def get_product_name():\n return \"SmartAlpha\"", "def convert_product_name(self, string_value, string_id):\n #Set correct product name\n if string_id == PRODUCT_NAME_ID:\n #Remove quotes for the begin and end of the string if they exists\n if string_value[0] == \"\\\"\" and string_value[len(string_value)-1] == \"\\\"\":\n self.productName = string_value[1:-1]\n else:\n self.productName = string_value\n else:\n if self.productName == \"\":\n raise Exception(\"Product name is not set. It should be first item in localization xml\")\n if self.productName != PRODUCT_NAME_DEFAULT_VALUE:\n #Default product name has been changed. Change that also from this string if it exists\n string_value = string_value.replace(PRODUCT_NAME_DEFAULT_VALUE, self.productName)\n return string_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Product Options of the product.
def options(self):
    if self._options is None:
        self._options = productoptions.VariationOptions(self, self.product_range)
    return self._options
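The property lazily builds and caches a VariationOptions wrapper on first access. A hedged usage sketch (`product` is hypothetical; only item assignment on the options object appears elsewhere in this code, so reading by key is an assumption):

    options = product.options             # first access constructs VariationOptions
    product.options["Supplier"] = "Acme"  # item assignment, as used by the supplier setter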
[ "def options(self):\n pclass_options = self.get_product_class().options.all()\n return set(pclass_options) or set(self.product_options.all())", "def getOptions(self,productTypeId):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/catalog/admin/attributedefinition/producttypes/{productTypeId}/Options\", \"GET\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"productTypeId\", productTypeId);\r\n\t\tself.client.withResourceUrl(url).execute();\r\n\t\treturn self.client.result();", "def options(self):\n return self.data['options']", "def load_by_product(self, product):\n try:\n option = Option.objects.filter(product=product)\n except Option.DoesNotExist:\n option = None\n\n return option", "def options(self) -> List:\n return self._options", "def getOption(self,productCode, attributeFQN, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/catalog/admin/products/{productCode}/Options/{attributeFQN}?responseFields={responseFields}\", \"GET\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"attributeFQN\", attributeFQN);\r\n\t\turl.formatUrl(\"productCode\", productCode);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).execute();\r\n\t\treturn self.client.result();", "def getOption(self,productTypeId, attributeFQN, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/catalog/admin/attributedefinition/producttypes/{productTypeId}/Options/{attributeFQN}?responseFields={responseFields}\", \"GET\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"attributeFQN\", attributeFQN);\r\n\t\turl.formatUrl(\"productTypeId\", productTypeId);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\tself.client.withResourceUrl(url).execute();\r\n\t\treturn self.client.result();", "def getProductContext(self):\n prod = self.productClass()\n if prod:\n prodcontext = self.primaryAq()\n return prodcontext.zenPropertyItems()\n return []", "def options(self):\n return list(self._options.values())", "def options(self, attribute, store_view=None):\n return self.call('catalog_product_attribute.options',\n [attribute, store_view])", "def get_options(self) -> Dict:\n\n center = max(self.center.get(), 1)\n linewidth= max(self.linewidth.get(), 1)\n power = max(self.power.get(), 1)\n\n out = {'power': power, 'linewidth': linewidth, 'center': center}\n return out", "def get_options():\n cursor = db.get_cursor()\n cursor.execute(SELECT_OPTIONS)\n options = cursor.fetchall()\n options = list(options)\n return options", "def options(self) -> List[OptionInfo]:\n return []", "def _get_productPreferences(self) -> \"adsk::core::Ptr< adsk::core::ProductPreferencesCollection >\" :\n return _core.Preferences__get_productPreferences(self)", "def get_options(self):\n\n data = self.__get_predefined_portfolio('opciones')\n df = pd.DataFrame(data['Result']['Stocks']) if data['Result'] and data['Result']['Stocks'] else pd.DataFrame()\n\n return self.process_options(df)", "def get_options(request, category, item):\n logging.info('views.get_options')\n return get_options_json(category, item)", "def feature_options(self) -> Dict:\n return self._feature_options", "def _show_available_products():\n return {\n \"prd001\": {\n \"description\": \"60-inch TV stand\",\n \"product_type\": \"livingroom\",\n \"quantity_available\": \"3\",\n },\n \"prd003\": {\n \"description\": \"Acacia kitchen table\",\n \"product_type\": \"kitchen\",\n \"quantity_available\": \"7\",\n },\n \"prd004\": {\n \"description\": \"Queen bed\",\n \"product_type\": \"bedroom\",\n 
\"quantity_available\": \"10\",\n },\n \"prd005\": {\n \"description\": \"Reading lamp\",\n \"product_type\": \"bedroom\",\n \"quantity_available\": \"20\",\n },\n \"prd006\": {\n \"description\": \"Portable heater\",\n \"product_type\": \"bathroom\",\n \"quantity_available\": \"14\",\n },\n \"prd008\": {\n \"description\": \"Smart microwave\",\n \"product_type\": \"kitchen\",\n \"quantity_available\": \"30\",\n },\n \"prd010\": {\n \"description\": \"60-inch TV\",\n \"product_type\": \"livingroom\",\n \"quantity_available\": \"3\",\n },\n }", "def _get_select_opts(self):\n provs = self.mp_controls.get_value(self._COMP_PATH)\n self.prov_settings_map = _get_map(provs)\n existing_provs = list(provs.keys())\n return [(val, idx) for idx, val in enumerate(sorted(existing_provs))]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the base price for the product.
def price(self, price):
    CCAPI.set_product_base_price(product_id=self.id, price=price)
    self._price = price
[ "def _set_base_price(self, price=None, region=\"us\"):\n\n if price is None or price == \"\" or price == self.R_NOT_RELATIVE:\n self._base_price = None\n\n elif price == self.R_RETAIL_PRICE:\n if region == \"uk\":\n self._base_price = self._si.original_price_uk\n else:\n self._base_price = self._si.original_price_us\n elif price == self.R_START_PRICE:\n self._base_price = self._get_price_from_date(self._base_date)\n elif price == self.R_END_PRICE:\n self._base_price = self._get_price_from_date(self._base_date)\n else:\n self.price = syt.int_zero(price)\n\n if self._base_price is None and not (self._report_options[\"Base Price\"] == self.R_NOT_RELATIVE or self._report_options[\"Base Price\"] == None):\n raise ValueError(\"No Price to evaluate from [Base Price = {} | Report Type = {}]\".format(price, self._report_options[\"Base Price\"]))", "def _get_base_price(self) -> int:\n pass", "def setBase(self):\n self.base = self.rp[0]*pow(10, self.rp[1])", "def base(self, base):\n self._base = float(base)", "def get_item_base_price(self, item):\n return item.price", "def base(self, base):\n self.set_base(base)", "def get_slot_base_price(self, slot):\n return self.types[slot].price", "def set_buy_price(self, buy_price: float) -> None:\n self.buy_price = buy_price", "def set_priced_current_price_and_period(self, price):\n self.currentPeriod = {\n 'date_utc': None,\n 'open': price,\n 'close': price,\n 'high': price,\n 'low': price\n }\n self.currentPrice = price", "def setPriceDozen(self,price):\n self.priceDozen=float(price)", "def compute_set_product_price(self):\n self.ensure_one()\n phantom_boms = self.bom_ids.filtered(lambda b: b.type == \"phantom\")\n\n if not phantom_boms:\n raise UserError(\n _(\n \"No phantom BoM found for product %s. Please create\"\n \" a phantom BoM to compute the price of the set product.\"\n % self.name\n )\n )\n\n products_2compute = self.product_variant_ids\n date_now = fields.Datetime.now()\n dummy_so = self.env[\"sale.order\"].create(\n {\n \"name\": \"Phantom Bom Price Compute: %s, %s\"\n % (self.id, date_now.strftime(\"%d-%m-%Y\")),\n \"partner_id\": 12515, # Ahmet Altınışık test\n \"partner_invoice_id\": 12515,\n \"partner_shipping_id\": 12515,\n \"pricelist_id\": 136, # USD pricelist\n \"warehouse_id\": 1,\n \"company_id\": 1,\n \"currency_id\": 2, # USD\n \"date_order\": fields.Datetime.now(),\n }\n )\n for product in products_2compute:\n bom = self.env[\"mrp.bom\"].sudo()._bom_find(product=product)\n if not bom.type == \"phantom\":\n continue\n # Create a new sale order line\n dummy_sol = self.env[\"sale.order.line\"].create(\n {\n \"order_id\": dummy_so.id,\n \"product_id\": product.id,\n \"product_uom_qty\": 1,\n \"product_uom\": product.uom_id.id,\n \"price_unit\": product.v_fiyat_dolar,\n }\n )\n # Explode the phantom bom\n dummy_sol.explode_set_contents()\n # Compute the price\n dummy_so.recalculate_prices()\n # Update the product price\n _logger.info(\n \"Updating product price for product %s: %s -> %s\"\n % (product.display_name, product.v_fiyat_dolar, dummy_so.amount_untaxed)\n )\n product.v_fiyat_dolar = dummy_so.amount_untaxed\n # Clear sale order lines\n dummy_so.order_line.unlink()\n # Clear the dummy sale order\n dummy_so.unlink()\n self.env.cr.commit()\n return True", "def unpad_price(self, price: str, base_id: int, quote_id: int) -> Decimal:\n return Decimal(price) * Decimal(f\"1e{self._digits[base_id] - self._digits[quote_id]}\")", "def set_sell_price(self, sell_price: float) -> None:\n self.sell_price = sell_price", "def 
market_dirty_price(self, value: float):\n self._market_dirty_price = value", "def set_is_base_currency(self, is_base_currency):\n self.is_base_currency = is_base_currency", "def get_base_price(self):\n # in progress\n # day = datetime.date.weekday()\n # print day\n # time = datetime.time()\n # print time\n base_price = random.randint(5, 9)\n\n return base_price", "def _adjust_price(self):\n\n # Go through each topping and add the money amount for topping\n topping_additional_money = 0\n for topping in self._toppings:\n topping_additional_money += topping.getPrice()\n\n self._price = self._base_price + topping_additional_money", "def test_set_price(self):\n\n test_price = 100.0\n test_quantity = 1\n\n # Grab the first part\n p = Part.list(self.api)[0]\n\n # Grab all internal prices for the part\n ip = InternalPrice.list(self.api, part=p.pk)\n\n # Delete any existsing prices\n for price in ip:\n self.assertEqual(type(price), InternalPrice)\n price.delete()\n\n # Ensure that no part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 0)\n\n # Set the internal price\n p.setInternalPrice(test_quantity, test_price)\n\n # Ensure that the part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 1)\n \n # Grab the internal price\n ip = ip[0]\n\n self.assertEqual(ip.quantity, test_quantity)\n self.assertEqual(ip.part, p.pk)\n ip_price_clean = float(ip.price)\n self.assertEqual(ip_price_clean, test_price)", "def set_base(self, base, copy=True):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the Product Range to which this product belongs.
def product_range(self):
    if self._product_range is None:
        from .functions import get_range
        self._product_range = get_range(self.range_id)
    return self._product_range
[ "def __get_range(self):\n return self.high - self.low", "def get_gridrange(self, start, end):\n return self._get_range(start, end, \"gridrange\")", "def price_range(self) -> Optional[pulumi.Input['GoogleCloudRecommendationengineV1beta1ProductCatalogItemPriceRangeArgs']]:\n return pulumi.get(self, \"price_range\")", "def get_range(self):\n if self.get_type() in [int, float]:\n values = [lv[\"value\"] for lv in self.line_value]\n return [min(values), max(values)]", "def energy_range(self):\n energy = self._energy_axis.edges\n e_min, e_max = energy[:-1], energy[1:]\n\n if self.mask_safe is not None:\n if self.mask_safe.any():\n e_min = e_min[self.mask_safe]\n e_max = e_max[self.mask_safe]\n else:\n return None, None\n\n return u.Quantity([e_min.min(), e_max.max()])", "def GetRange(self, component=0):\n array = self.FieldData.GetFieldData().GetArrayInformation(self.Name)\n range = array.GetComponentRange(component)\n return (range[0], range[1])", "def Range(begin: PrimExpr, end: PrimExpr) -> ir.Range: # pylint: disable=invalid-name\n return ir.Range(begin, end)", "def range(self) -> xr.DataArray:\n return self.max_val - self.min_val", "def getRangeInContext(self): #$NON-NLS-1$\r", "def get_q_range(self, q_min: float = None, q_max: float = None):\n q_min_idx = self._get_closest_index(q_min, self.q)\n q_max_idx = self._get_closest_index(q_max, self.q)\n return self.q[q_min_idx:q_max_idx]", "def _get_area_range(self):\n return self.__area_range", "def get_range(self) -> str:\n pass", "def price_ranges(self):\n inner_steps = sorted(self.price_classes.viewkeys()) or list()\n all_steps = [-float('inf')] + inner_steps + [float('inf')]\n return zip(all_steps[:-1], all_steps[1:])", "def get_ranges(self):\r\n pass", "def get_weight_range(self):\n return self._mins[1], self._maxs[1]", "def get_range(self, model, key, min, max):\n if key not in model.schema.props:\n raise RuntimeError(f\"{key} is not a part of {model.name}'s schema\")\n if not model.schema.props[key].index_key:\n return self.get_item_from_index_set(model, key, min, max)\n else:\n result = []\n for obj in self.storage.get_keys_in_model(model):\n obj_val = getattr(obj, key)\n if obj_val >= min and obj_val <= max:\n result.append(obj)\n return result", "def find_product_group(self):\r\n self.prod_group = ''\r\n range = SystemState.get_import_data(self, 'product_range.csv', [])\r\n for product in range:\r\n if self.name == product['prod_name']:\r\n self.prod_group = product['prod_group']\r\n return self.prod_group", "def x_axis_range(self) -> Tuple[float, float]:\n return self.GetXAxisRange()", "def control_range(self):\n return self.__control_range" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the current stock level for the product.
def stock_level(self): return self._stock_level
[ "def in_stock(self):\n return self.product.in_stock", "def get_inventory(self):\n return self.inventory_level", "def get_level(self):\r\n return self.__level", "def stock_level(self, new_stock_level):\n CCAPI.update_product_stock_level(\n product_id=self.id,\n new_stock_level=new_stock_level,\n old_stock_level=self._stock_level,\n )\n self._stock_level = new_stock_level", "def get_level():\n return LEVEL", "def __call__(self):\n return self._get_current_level()", "def excess_stock(self, product):\n return max(int(self.inventory[product][0] - self.goal[product]), 0)", "def available_stock(self):\n return self.total_stock - self.unreturned_stock", "def stock_state(self) -> Optional[pulumi.Input['GoogleCloudRecommendationengineV1beta1ProductCatalogItemStockState']]:\n return pulumi.get(self, \"stock_state\")", "def volume_level(self):\n if self._player_volume is None:\n return None\n return self._player_volume / 100", "def get_nutriscore(self, product):\n return product.get('nutrition_grade_fr')", "def level(self) -> FlowLevel:\n return FlowLevel.from_str(self.get_setting(\"power.inputs.level\"))", "def get_level(self, level_num=None):\n if level_num is None:\n level_num = self.current_level\n return self.levels_config['Level %s' % level_num]", "def getStockItem(self):\n\n return None", "def read_level(self) -> float:\n reply = self._command_reply(0x80000010, 0, 4)\n\n return struct.unpack('<f', reply)[0]", "def get_price_levels(self):\n querystring = {}\n price_levels = self.request(action='price_level', **querystring)\n return price_levels if 'PriceLevel' in price_levels else None", "def get_loraPower():\n\t\tcommand = \"get_config=pwr_level\"\n\t\treturn (str(uart_tx(command)).split(\"OK\")[1].split(\"\\\\\")[0])", "def get_level(self):\n\n # make a reading of the sensor\n level = self.level_sensor.get_distance()\n # make sure the measurement is valid\n if level:\n return level, level < self.level_warning, level < self.level_alarm\n return None, None, None", "def get_initial_volume(self, level):\r\n\r\n volume = self.initial_level * self.total_volume / 100\r\n return volume", "def getVolume(self):\n if (self.stockinformation.retrieved > datetime.datetime.now()-datetime.timedelta(seconds=10)):\n return self.stockinformation.volume\n else:\n stock_info = self.stockinformation\n stock_info.setData()\n stock_info.save()\n return self.stockinformation.volume" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the stock level of the product.
def stock_level(self, new_stock_level):
    CCAPI.update_product_stock_level(
        product_id=self.id,
        new_stock_level=new_stock_level,
        old_stock_level=self._stock_level,
    )
    self._stock_level = new_stock_level
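A usage sketch under the assumption that the matching getter simply returns the cached level (as in the previous row): because the setter sends both the old and new values to CCAPI, stock adjustments are a read-modify-write on the property.

    product.stock_level = product.stock_level + 5  # book five units into stock
    product.stock_level = 0                        # mark the product out of stock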
[ "def _update_stock(self, stock):\n from MercadoLibre.services.MeradoLibreService import MercadoLibreService\n MercadoLibreService().update_stock(stock)", "def update_stockcounter(self, stock):\n\n bg = stock.get_mw_price()\n self.update_portfolio()\n stock.counter = int(float(self.buyingpower / bg / stock.tradeshares))\n print \" --- Updated Net Worth: %s | Buying Power: %s ---\" % (self.networth, self.buyingpower)", "def update(self, product, data):\n return bool(\n self.call(\n 'cataloginventory_stock_item.update',\n [product, data]\n )\n )", "def increment_stock(self, q):\n self.__stock += q", "def update_stock(self, index: int, stock: Tuple[str, str, float, str]) -> None:\n self.stocks[index] = stock", "def update_fuel_level(self, new_level):\n if new_level <= self.fuel_capacity:\n self.fuel_level = new_level\n print(f\"You have {new_level} gallon(s) of fuel left in your tank.\")\n else:\n print(\"The tank won't hold that much.\")", "def edit_stocks(self, **stocks):\n prev_stocks = copy.copy(self.stocks)\n for type_ in Machine.StocksType:\n try:\n new_val = stocks[type_]\n except KeyError:\n pass\n else:\n if self.stocks[type_] < new_val <= self.max_stocks[type_]:\n self.stocks[type_] = new_val\n self._log.append(StockLog(prev_stocks, self.stocks))", "def RaiseLevelChangeEvent(self, level):\n\n if self.change_level_event:\n event = _PySlipEvent(_myEVT_PYSLIP_LEVEL, self.GetId())\n event.level = level\n self.GetEventHandler().ProcessEvent(event)", "def change_product_qty(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n\n inventory_obj = self.pool.get('stock.inventory')\n inventory_line_obj = self.pool.get('stock.inventory.line')\n\n for data in self.browse(cr, uid, ids, context=context):\n if data.new_quantity < 0:\n raise osv.except_osv(_('Warning!'), _('Quantity cannot be negative.'))\n ctx = context.copy()\n ctx['location'] = data.location_id.id\n ctx['lot_id'] = data.lot_id.id\n inventory_id = inventory_obj.create(cr, uid, {\n 'name': _('INV: %s') % tools.ustr(data.product_id.name),\n 'product_id': data.product_id.id,\n 'location_id': data.location_id.id,\n 'lot_id': data.lot_id.id}, context=context)\n product = data.product_id.with_context(location=data.location_id.id)\n th_qty = product.qty_available\n line_data = {\n 'inventory_id': inventory_id,\n 'product_qty': data.new_quantity,\n 'location_id': data.location_id.id,\n 'product_id': data.product_id.id,\n 'product_uom_id': data.product_id.uom_id.id,\n 'theoretical_qty': th_qty,\n 'prod_lot_id': data.lot_id.id,\n 'wheel_type': data.wheel_type,\n }\n inventory_line_obj.create(cr , uid, line_data, context=context)\n inventory_obj.action_done(cr, uid, [inventory_id], context=context)\n return {}", "def refreshStock(self, level : int = -1):\n self.shipsStock.clear()\n self.weaponsStock.clear()\n self.modulesStock.clear()\n self.turretsStock.clear()\n # self.currentTechLevel = random.randint(cfg.minTechLevel, cfg.maxTechLevel)\n if level == -1:\n self.currentTechLevel = gameMaths.pickRandomShopTL()\n else:\n if level not in range(cfg.minTechLevel, cfg.maxTechLevel + 1):\n raise ValueError(\"Attempted to refresh a shop at tech level \" + str(level) + \". 
must be within the range \" \\\n + str(cfg.minTechLevel) + \" to \" + str(cfg.maxTechLevel))\n self.currentTechLevel = level\n\n for i in range(self.maxShips):\n tlShipKeys = bbData.shipKeysByTL[gameMaths.pickRandomItemTL(self.currentTechLevel) - 1]\n if len(tlShipKeys) != 0:\n self.shipsStock.addItem(shipItem.Ship.fromDict(bbData.builtInShipData[random.choice(tlShipKeys)]))\n\n for i in range(self.maxModules):\n itemTL = gameMaths.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.moduleObjsByTL[itemTL - 1]) != 0:\n self.modulesStock.addItem(random.choice(bbData.moduleObjsByTL[itemTL - 1]))\n\n for i in range(self.maxWeapons):\n itemTL = gameMaths.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.weaponObjsByTL[itemTL - 1]) != 0:\n self.weaponsStock.addItem(random.choice(bbData.weaponObjsByTL[itemTL - 1]))\n\n # if random.randint(1, 100) <= cfg.turretSpawnProbability:\n for i in range(self.maxTurrets):\n itemTL = gameMaths.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.turretObjsByTL[itemTL - 1]) != 0:\n self.turretsStock.addItem(random.choice(bbData.turretObjsByTL[itemTL - 1]))", "def test_update_level(self):\n pass", "def updateInventory(order_food, stock):\n stock[7]=int(stock[7])-order_food[\"nBurgers\"]\n stock[8]=int(stock[8])-order_food[\"nLettuce\"]\n stock[9]=int(stock[9])-order_food[\"nTomato\"]\n stock[10]=int(stock[10])-order_food[\"nVeggie\"]\n stock[11]=int(stock[11])-order_food[\"nBacon\"]", "def updateFuel(self, wlevel, flevel, slevel):\n self.wlevel = wlevel\n self.flevel = flevel\n self.slevel = slevel", "def set_new_level(self, level):\r\n\r\n self.property_set(\"level\",\r\n Sample(0, int(level), unit=\"%\"))", "def set_volume_level(self, volume):\n self.soco.volume = str(int(volume * 100))", "def update_power(self):\n self.stop_threads()\n self.bulb.set_power(self.powervar.get())", "def test_update_depends_stock(self):\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Bar'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)", "def update_level(self, level):\n # crazy high intial values so that the new value is always lower\n num_events = 100_000\n newIdx = 100_000\n for (plotobj, plotopts) in zip(self.plotobjs, self.plotopts):\n nn, _newIdx = plotobj.plot(self.index, getNumEvents=True, **plotopts)\n num_events = min(num_events, nn)\n newIdx = min(newIdx, _newIdx)\n self.numEvents = num_events\n self.currentIndex.setText(str(newIdx))\n self.updateIndex()", "def set_volume_level(self, volume: float) -> None:\n self.send_command([\"mixer\", \"volume\", volume * 100])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the pending stock level of the product.
def get_pending_stock(self): return CCAPI.get_pending_stock(self.id)
[ "def in_stock(self):\n return self.product.in_stock", "def available_stock(self):\n return self.total_stock - self.unreturned_stock", "def excess_stock(self, product):\n return max(int(self.inventory[product][0] - self.goal[product]), 0)", "def stock_state(self) -> Optional[pulumi.Input['GoogleCloudRecommendationengineV1beta1ProductCatalogItemStockState']]:\n return pulumi.get(self, \"stock_state\")", "def getStockItem(self):\n\n return None", "def stock_level(self, new_stock_level):\n CCAPI.update_product_stock_level(\n product_id=self.id,\n new_stock_level=new_stock_level,\n old_stock_level=self._stock_level,\n )\n self._stock_level = new_stock_level", "def get_inventory(self):\n return self.inventory_level", "def get_product_status(self, product):\n self.wait_until_dashboard_displayed()\n status_locators = {}\n if product == Products.SPEND:\n status_locators[ProductApplicationStatus.IN_PROGRESS] = \\\n BaseElement(self.driver, locators.CONTINUE_SPEND_SAVE_APPLICATION_BUTTON)\n status_locators[ProductApplicationStatus.COMPLETED] = \\\n BaseElement(self.driver, locators.VIEW_SPEND_ACCOUNT_BUTTON)\n status_locators[ProductApplicationStatus.PENDING] = \\\n BaseElement(self.driver, locators.SPEND_SAVE_TELL_YOUR_FRIENDS_BUTTON)\n elif product == Products.SAVE:\n status_locators[ProductApplicationStatus.IN_PROGRESS] = \\\n BaseElement(self.driver, locators.CONTINUE_SAVE_APPLICATION_BUTTON)\n status_locators[ProductApplicationStatus.COMPLETED] = \\\n BaseElement(self.driver, locators.VIEW_SAVE_ACCOUNT_BUTTON)\n status_locators[ProductApplicationStatus.PENDING] = \\\n BaseElement(self.driver, locators.SAVE_TELL_YOUR_FRIENDS_BUTTON)\n elif product == Products.REDWOOD:\n status_locators[ProductApplicationStatus.IN_PROGRESS] = \\\n BaseElement(self.driver, locators.CONTINUE_REDWOOD_APPLICATION_BUTTON)\n status_locators[ProductApplicationStatus.COMPLETED] = \\\n BaseElement(self.driver, locators.VIEW_REDWOOD_ACCOUNT_BUTTON)\n status_locators[ProductApplicationStatus.PENDING] = \\\n BaseElement(self.driver, locators.REDWOOD_TELL_YOUR_FRIENDS_BUTTON)\n elif product == Products.FLAGSHIP:\n status_locators[ProductApplicationStatus.IN_PROGRESS] = \\\n BaseElement(self.driver, locators.CONTINUE_FLAGSHIP_APPLICATION_BUTTON)\n status_locators[ProductApplicationStatus.COMPLETED] = \\\n BaseElement(self.driver, locators.VIEW_FLAGSHIP_ACCOUNT_BUTTON)\n status_locators[ProductApplicationStatus.PENDING] = \\\n BaseElement(self.driver, locators.FLAGSHIP_TELL_YOUR_FRIENDS_BUTTON)\n else:\n return ProductApplicationStatus.DOES_NOT_EXIST\n\n # Based on product given check which, if any, status that product has\n if status_locators[ProductApplicationStatus.IN_PROGRESS].displayed():\n return ProductApplicationStatus.IN_PROGRESS\n elif status_locators[ProductApplicationStatus.PENDING].displayed():\n return ProductApplicationStatus.PENDING\n elif status_locators[ProductApplicationStatus.COMPLETED].displayed():\n return ProductApplicationStatus.COMPLETED\n else:\n return ProductApplicationStatus.DOES_NOT_EXIST", "def approved_patches_compliance_level(self) -> str:\n return pulumi.get(self, \"approved_patches_compliance_level\")", "def get_nutriscore(self, product):\n return product.get('nutrition_grade_fr')", "def remaining_stock(self):\n stockin_sum = StockIn.objects.filter(\n stock=self)\n\n if stockin_sum.exists():\n stockin_sum = stockin_sum.aggregate(Sum('new_stock'))\n in_sum = stockin_sum.get('new_stock__sum') or 0\n else:\n in_sum = 0\n\n stockout_sum = StockOut.objects.filter(stock=self)\n\n if 
stockout_sum.exists():\n stockout_sum = stockout_sum.aggregate(Sum('stock_out'))\n out_sum = stockout_sum.get('stock_out__sum') or 0\n else:\n out_sum = 0\n\n return in_sum - out_sum", "def get_nutritional_mark_for_100g(self, product):\n fat_100g = product['nutriments'].get('fat_100g')\n saturated_fat_100g = product['nutriments'].get('saturated-fat_100g')\n sugars_100g = product['nutriments'].get('sugars_100g')\n salt_100g = product['nutriments'].get('salt_100g')\n fat_level = product['nutrient_levels'].get('fat')\n saturated_fat_level = product['nutrient_levels'].get('saturated-fat')\n sugars_level = product['nutrient_levels'].get('sugars')\n salt_level = product['nutrient_levels'].get('salt')\n\n if fat_100g is None or fat_level is None:\n return None\n elif saturated_fat_100g is None or saturated_fat_level is None:\n return None\n elif sugars_100g is None or sugars_level is None:\n return None\n elif salt_100g is None or salt_level is None:\n return None\n else:\n return[fat_100g,\n fat_level,\n saturated_fat_100g,\n saturated_fat_level,\n sugars_100g,\n sugars_level,\n salt_100g,\n salt_level]", "def get_level(self):\r\n return self.__level", "def get_level():\n return LEVEL", "def get_product_balance(self, product):\n if product == Products.SPEND:\n product_balance = BaseElement(self.driver, locators.ASPIRATION_SPEND_BALANCE) \n elif product == Products.SAVE:\n product_balance = BaseElement(self.driver, locators.ASPIRATION_SAVE_BALANCE)\n elif product == Products.REDWOOD:\n product_balance = BaseElement(self.driver, locators.ASPIRATION_REDWOOD_BALANCE)\n # elif product == Products.FLAGSHIP:\n # product_balance = BaseElement(self.driver, locators.ASPIRATION_FLAGSHIP_BALANCE)\n else:\n raise ValueError(f\"'{product}' is not a valid product\") \n return utils.decimal_from_string(product_balance.get_text())", "def approved_patches_compliance_level(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"approved_patches_compliance_level\")", "def _quantity_on_hand_alert(self, selection: UncertainDemand) -> str:\n\n half_safety_stock = float(selection.safety_stock) * 0.5\n two_thirds_safety_stock = float(selection.safety_stock) * 0.75\n if selection.reorder_level > selection.quantity_on_hand > selection.safety_stock:\n traffic_light = 'amber'\n elif half_safety_stock > selection.quantity_on_hand > two_thirds_safety_stock:\n traffic_light = 'red'\n elif selection.quantity_on_hand < two_thirds_safety_stock:\n traffic_light = 'white'\n else:\n traffic_light = 'green'\n\n return traffic_light", "def check_stock(self):\n quantity = int(self.quantityEdit.text())\n \n if len(self.item) > 0 and not self.stock_item:#item pd.Series() is set and not adding stock\n if quantity > self.item.loc['stock']:\n self.show_not_enough_stock_message(quantity)", "def battery_level(self):\n return 100" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the supplier of the product. Remove all Factory Links and create a new Factory Link to the Factory named factory_name. Set Product Option Supplier to factory name.
def supplier(self, factory_name):
    if not isinstance(factory_name, Factory):
        factories = CCAPI.get_factories()
        if factory_name in factories.names:
            factory = factories.names[factory_name]
        else:
            raise exceptions.FactoryDoesNotExist(factory_name)
    self._update_product_factory_link(factory.id)
    self.options["Supplier"] = factory.name
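A hypothetical usage sketch (the `product` variable and factory name are invented): assigning a plain string looks the factory up by name via CCAPI and re-links the product, raising FactoryDoesNotExist for an unknown name.

    try:
        product.supplier = "Acme Widgets Ltd"  # relinks the product to that factory
    except exceptions.FactoryDoesNotExist:
        print("No factory with that name exists in Cloud Commerce")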
[ "def test_update_supplier_with_no_name(self):\n test_supplier = self._create_suppliers(1)[0]\n test_supplier.name = None\n resp = self.app.put('/suppliers/{}'.format(test_supplier.id),\n json=test_supplier.serialize(), content_type='application/json')\n self.assertEqual(resp.status_code, HTTP_400_BAD_REQUEST)", "def on_change_supplier(self, cr, uid, ids, supplier):\n res = {}\n quote_obj = self.pool.get('transportation.quotes')\n if supplier:\n for quote in self.browse(cr, uid, ids):\n quote_ids = quote_obj.search(cr, uid, [('transportation_id', '=', quote.transportation_id.id)])\n for created_quote in quote_ids:\n if created_quote != quote.id:\n quote = quote_obj.browse(cr, uid, created_quote)\n if quote.supplier_id.id == supplier: #Check if this supplier already selected by an other quote\n res = {'value': { 'supplier_id':'', }}\n raise osv.except_osv(('Duplicated Supplier !'), ('This Supplier is already chosen for another Quote \\n Please .. Chose another supplier ..'))\n else:\n vat = self.pool.get('res.partner').browse(cr, uid, supplier).vat_subjected\n res = {'value': { 'supplier_vat':vat, }}\n return res", "def get_supplier_product(self, cr, uid, supplier_id, product_id, context=None):\n\t\tsupplier_product_name=\"\"\n\t\tif isinstance(product_id,(int,long)):\n\t\t\tproduct_id = self.pool.get(\"product.product\").browse(cr, uid, product_id, context=context)\n\t\tfor supplier_info in product_id.seller_ids:\n\t\t\tif supplier_info and supplier_info.name.id == supplier_id:\n\t\t\t\tsupplier_product_name += (supplier_info.product_code and '[%s]'%(supplier_info.product_code,) or '')\n\t\t\t\tsupplier_product_name += supplier_info.product_name\n\t\treturn supplier_product_name", "def SupplierEdit(request, supplier_id=None):\n return _PersonEdit(request, supplier_id, models.Supplier, forms.SupplierForm,\n 'supplier', 'Supplier')", "def test_create_supplier_with_no_name(self):\n new_supplier = SupplierFactory()\n new_supplier.name = None\n resp = self.app.post('/suppliers', json=new_supplier.serialize(),\n content_type='application/json')\n self.assertEqual(resp.status_code, HTTP_400_BAD_REQUEST)", "def reconfigure_product(self):\n\n cfg_steps = self.product_id.product_tmpl_id.config_step_line_ids\n active_step = str(cfg_steps[0].id) if cfg_steps else 'configure'\n\n wizard_obj = self.env['product.configurator']\n wizard = wizard_obj.create({\n 'product_id': self.product_id.id,\n 'state': active_step\n })\n\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'product.configurator',\n 'name': \"Configure Product\",\n 'view_mode': 'form',\n 'context': dict(\n self.env.context,\n wizard_id=wizard.id,\n ),\n 'target': 'new',\n 'res_id': wizard.id,\n }", "def setup(self, manager):\n self.product_manager = manager", "def set_factory(self, name, factory):\n self.factories[name] = factory", "def set_supply(self, s, i=None):\n if i is None:\n api.set_supplies(s)\n else:\n api.set_supply(i, s)", "def create_and_set_product_for_suggestion(apps, schema_editor):\n\n order_model = apps.get_model(\"orders\", \"order\")\n product_model = apps.get_model(\"orders\", \"product\")\n\n for order in order_model.objects.all():\n if order.product_suggestion:\n product = product_model.objects.create(name=order.product_suggestion)\n order.product = product\n order.product_suggestion = \"\"\n order.save()", "def set_commercial_on_forecourt(self, enabled=True, reefer=True):\n self.log.debug(f\"Attempting to set commercial in {enabled} in forecourt installation\")\n fc_config = {\n \"Dispensers\" : {\n 
\"Commercial Diesel\": enabled, #Transponder is now Commercial Check\n \"Reefer\": reefer\n }\n }\n \n FC = forecourt_installation.ForecourtInstallation()\n self.mws.click(\"Set Up\")\n\n # Set Commercial Diesel feature to the crind Configured as \"Gilbarco\"\n FC.change(\"Gilbarco\", \"Dispensers\", fc_config.get(\"Dispensers\"))\n\n self.mws.click_toolbar(\"Save\")\n self.mws.click_toolbar(\"Save\")", "def getSupplierPart(self):\n return inventree.company.SupplierPart(self._api, self.part)", "def product_salesperson1(self, product_salesperson1):\n\n self._product_salesperson1 = product_salesperson1", "def write_taxes_setting(self, vals):\n if vals.get('fiscal_classification_id', False):\n # update or replace 'taxes_id' and 'supplier_taxes_id'\n classification = self.env[\n 'account.product.fiscal.classification'].browse(\n vals['fiscal_classification_id'])\n tax_vals = {\n 'supplier_taxes_id': [[6, 0, [\n x.id for x in classification.purchase_tax_ids]]],\n 'taxes_id': [[6, 0, [\n x.id for x in classification.sale_tax_ids]]],\n }\n super(ProductProduct, self.sudo()).write(tax_vals)\n elif 'supplier_taxes_id' in vals.keys() or 'taxes_id' in vals.keys():\n # product template Single update mode\n fc_obj = self.env['account.product.fiscal.classification']\n if len(self) != 1:\n raise ValidationError(\n _(\"You cannot change Taxes for many Products.\"))\n purchase_tax_ids = [x.id for x in self.sudo().supplier_taxes_id]\n sale_tax_ids = [x.id for x in self.sudo().taxes_id]\n fc_id = fc_obj.find_or_create(\n self.company_id.id, sale_tax_ids, purchase_tax_ids)\n super(ProductProduct, self.sudo()).write(\n {'fiscal_classification_id': fc_id})", "def assign_sku(sender, **kwargs):\n product = kwargs.get('instance')\n\n if not product.sku:\n slug = slugify(product.name).replace('-', '').upper()[:3]\n product.sku = '%s%s%s%s' % (\n product.category_id, slug, product.user_id, timezone.now().strftime('%s')\n )", "def create_manufacturer(part_id, input_name, company_name):\n\n Company = apps.get_model('company', 'company')\n\n manufacturer = Company.objects.create(\n name=company_name,\n description=company_name,\n is_manufacturer=True\n )\n\n # Map both names to the same company\n links[input_name] = manufacturer.pk\n links[company_name] = manufacturer.pk\n\n companies[company_name] = manufacturer.pk\n\n print(\" - Part[{pk}]: Created new manufacturer: '{name}'\".format(pk=part_id, name=company_name))\n\n # Update SupplierPart object in the database\n cursor.execute(f\"update part_supplierpart set manufacturer_id={manufacturer.pk} where id={part_id};\")", "def set_main_product(self, option):\n (Select(self.driver.find_element(*ProjectFormLoc.FIELD_MAIN_PRODUCT)).\n select_by_visible_text(option))", "def resolve_supplier_meta(self, info, **kwargs):\n return self.get_supplier_meta", "def product_class(self, product_class):\n\n self._product_class = product_class" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Block for upsampling, concat and conv in UNet
def __init__(self, in_ch, out_ch, dropout_rate=0, learnable_upsample=True):
    super(UpBlock, self).__init__()
    # TODO: would be a nice idea if the upsampling could be learned too,
    if learnable_upsample:
        self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
    else:
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    conv_block = DoubleConv
    self.conv = conv_block(in_ch, out_ch)
    self.dropout = nn.Dropout2d(p=dropout_rate, inplace=True)
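The row above only shows the constructor; a forward pass is needed to see the "upsample, concat, conv" flow the query describes. The sketch below is an assumption modelled on the standard U-Net decoder step, not taken from this source (the padding to reconcile odd spatial sizes is a common addition):

    import torch
    import torch.nn.functional as F

    def forward(self, x1, x2):
        x1 = self.up(x1)                  # upsample decoder features
        diff_y = x2.size(2) - x1.size(2)  # pad so shapes match the skip tensor
        diff_x = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,
                        diff_y // 2, diff_y - diff_y // 2])
        x = torch.cat([x2, x1], dim=1)    # concatenate skip connection on channels
        return self.dropout(self.conv(x))  # double conv followed by dropout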
[ "def upconv_block(\n inputs: tf.keras.layers.Layer, skip: tf.keras.layers.Layer\n) -> tf.keras.layers.Layer:\n x = inputs\n x = tf.keras.layers.UpSampling2D()(x)\n x = tf.keras.layers.Concatenate()([skip, x])\n\n return x", "def upsample(self, img, result=...) -> result:\n ...", "def initialize(self):\n\n gen_dim = self.gen_dim\n self.aug_net = conditional_aug()\n\n self.down_sampler = nn.Sequential(\n conv3(3, gen_dim),\n nn.ReLU(inplace = True),\n nn.Conv2d(gen_dim, gen_dim * 2, 4, stride = 2, padding = 1, bias = False),\n nn.BatchNorm2d(gen_dim*2),\n nn.ReLU(inplace = True),\n nn.Conv2d(gen_dim*2, gen_dim * 4, 4, stride = 2, padding = 1, bias = False),\n nn.BatchNorm2d(gen_dim * 4),\n nn.ReLU(inplace = True)\n )\n\n self.combiner = nn.Sequential(\n conv3(self.con_dim + gen_dim * 4, gen_dim * 4),\n nn.BatchNorm2d(gen_dim * 4),\n nn.ReLU(inplace = True)\n )\n\n self.residual = self.build_res_block(Res_block, gen_dim * 4)\n self.upsample1 = up_sampling(gen_dim * 4, gen_dim * 2)\n self.upsample2 = up_sampling(gen_dim * 2, gen_dim)\n \"\"\"\n ***************************************************************\n The stage 2 processing is on 256 x 256 images which is not possible\n without powerful GPU. So I modified the network from original version\n by working and outputting 64 x 64 images. Use the commented section\n to use 64 x 64 images in stage 2.\n ****************************************************************\n \"\"\"\n self.upsample3 = up_sampling(gen_dim, gen_dim//2) #self.upsample3 = up_sampling(gen_dim, gen_dim//2, scale = 1)\n self.upsample4 = up_sampling(gen_dim//2, gen_dim//4) #self.upsample4 = up_sampling(gen_dim, gen_dim//2, scale = 1)\n\n self.generated = nn.Sequential(\n conv3(gen_dim//4, 3),\n nn.Tanh()\n )", "def __init__(self, num_channels_in, num_channels_out, filter_size, params):\n super(DownsamplingBlock, self).__init__()\n\n self.conv = nn.Conv1d(num_channels_in, num_channels_out, filter_size, stride=2, padding=filter_size//2)\n self.norm = nn.BatchNorm1d(num_channels_out)\n self.relu = nn.LeakyReLU(params.relu_slope)", "def upsample_block(incoming_layer,\n skip_input_layer,\n num_filters,\n kernel_size=4,\n dropout_rate=0):\n upsample_layer = UpSampling2D(size=2)(incoming_layer)\n upsample_layer = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=1,\n padding='same',\n activation='relu')(upsample_layer)\n if dropout_rate:\n upsample_layer = Dropout(dropout_rate)(upsample_layer)\n upsample_layer = InstanceNormalization()(upsample_layer)\n upsample_layer = Concatenate()([upsample_layer, skip_input_layer])\n return upsample_layer", "def downsample(inputs):", "def upsample(layer):\n\treturn UpSampling2D(size=(2,2))(layer)", "def _make_upsampler(self, layers):\n layer = []\n for i in range(layers - 1):\n layer.insert(0, nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) )\n \n return nn.ModuleList(layer)", "def upsampleMultioutput(self, img, imgs_new, scale_factors, node_names) -> None:\n ...", "def __init__(self, size, mode='bilinear', align_corners=True):\n super(Upsample, self).__init__()\n self.output_shape = size\n self.upsampler = nn.Upsample(size=size, mode=mode, align_corners=align_corners)", "def upsample_conv_block(self, input_dim, output_dim, transpose_kernel=4, transpose_stride=2, transpose_pad=1):\n deconv1 = nn.ConvTranspose2d(input_dim, output_dim, kernel_size=transpose_kernel, stride=transpose_stride, padding=transpose_pad)\n bn1 = nn.BatchNorm2d(output_dim)\n relu1 = nn.ReLU()\n conv2 = nn.Conv2d(output_dim, output_dim, 
kernel_size=3, padding=1)\n bn2 = nn.BatchNorm2d(output_dim)\n relu2 = nn.ReLU()\n block = nn.Sequential(deconv1, bn1, relu1, conv2, bn2, relu2)\n return block", "def initialize_model(h,w,c,needs_scaling=True,dropout=0.1,summary=False):\n\n #Set up input layer dimensions\n img_shape = (h,w,c)\n input_layer = Input(shape=img_shape)\n #Adds divide by 255 if user specifies\n if needs_scaling:\n scaled_input = Lambda(lambda x: x / 255)(input_layer)\n else:\n scaled_input = input_layer\n\n #Array to keep track of conv layers for up-sampling section\n down_convs = []\n\n #Initialize conv window size and input\n pool_layer = scaled_input\n conv_window = 16\n #Iteratively generate down-sampling layers\n for i in range(5):\n #First conv stage with previous output (input on first iteration)\n c = Conv2D(conv_window,(3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pool_layer)\n #Dropout for overfitting proteciton\n c = Dropout(dropout)(c)\n #Second conv stage\n c = Conv2D(conv_window,(3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c)\n #Pool\n pool_layer = MaxPooling2D((2,2))(c)\n #Store conv layer reference for up-sampling\n down_convs.append(c)\n #Double convolution window size\n conv_window*=2\n \n #Initialize the conv window size and input\n conv_window/=2\n prev = down_convs[-1]\n #Iteratively generate up-sampling layers\n for i in range(4):\n #Half the conv window size\n conv_window/=2\n conv_window = int(conv_window)\n #Up sampling layer\n u = Conv2DTranspose(conv_window, (2,2), strides=(2,2), padding='same')(prev)\n #Combine prev conv layer and up-sample layer\n u = concatenate([u,down_convs[-(i+2)]])\n #First conv stage\n c = Conv2D(conv_window, (3,3), activation='relu', kernel_initializer='he_normal', padding='same')(u)\n #Dropout for overfitting proteciton\n c = Dropout(dropout)(c)\n #Second conv stage\n c = Conv2D(conv_window, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c)\n #Keep track of previous output\n prev = c\n\n output_layer = Conv2D(1, (1, 1), activation='sigmoid') (prev)\n \n #Compile model\n model = Model(inputs=[input_layer], outputs=[output_layer])\n model.compile(optimizer='adam', loss='binary_crossentropy')\n \n #Print summary if requested\n if summary:\n model.summary()\n\n return model", "def prepare_uniq(self):\n # collect layers\n self.layers_list = self.get_layers_list()\n\n # set full precision to all layers\n for layer in self.layers_list:\n layer.__param_bitwidth__ = 32\n layer.__act_bitwidth__ = 32\n\n # collect layers by steps\n self.layers_steps = self.get_layers_steps(self.layers_list)\n # TODO: merge downsample with its conv to single step???\n\n # remove edge layers if we don't quantize edges\n if not self.quant_edges:\n self.layers_steps = self.layers_steps[1:-1]\n\n # set number of train steps\n self.step = len(self.layers_steps)\n\n # collect activations layers\n self.act_list = []\n for step in self.layers_steps:\n if len(step) > 1:\n for ind in range(len(step) - 1):\n if (not isinstance(step[ind], ActQuant)) and (isinstance(step[ind + 1], ActQuant)):\n self.act_list.append(step[ind])\n # TODO: act_bitwidth is only if we have ReLU ??? 
it must go together ???\n\n if self.act_noise:\n for layer in self.layers_steps[0]: # Turn on noise for first stage\n if isinstance(layer, ActQuant):\n layer.noise_during_training = True\n\n if (self.quant is False) or (len(self.bitwidth) == 0):\n self.bitwidth = [32] * len(self.layers_steps)\n\n if (self.act_quant is False) or (len(self.act_bitwidth) == 0):\n self.act_bitwidth = [32] * len(self.act_list)\n\n # set qunatization bitwidth for layers we want to quantize\n for index, step in enumerate(self.layers_steps):\n for layer in step:\n layer.__param_bitwidth__ = self.bitwidth[index]\n layer.__act_bitwidth__ = 32\n\n # set qunatization bitwidth for activations we want to quantize\n for index, layer in enumerate(self.act_list):\n layer.__act_bitwidth__ = self.act_bitwidth[index]", "def __init__(self, input_dim, skip_ch_dim, gated_act_dim, cond_dim,\n dilation_size, cnn_kernel_size=2, causal=True):\n super(WaveNetBlock_v2, self).__init__()\n\n #####\n # configurations\n #####\n # input tensor: (batchsize, length, self.input_dim)\n self.input_dim = input_dim\n # tensor sent to next WaveNetBlock, same shape as input\n self.res_ch_dim = input_dim\n # \n self.skip_ch_dim = skip_ch_dim\n self.gated_act_dim = gated_act_dim\n self.cond_dim = cond_dim\n self.dilation_size = dilation_size\n self.conv_kernel_s = cnn_kernel_size\n\n ######\n # layers\n ######\n # dilated convolution\n tmp_layer = nii_nn.Conv1dForARModel(\n self.input_dim, self.gated_act_dim * 2, self.dilation_size,\n self.conv_kernel_s, tanh=False, causal = causal)\n self.l_conv1d = torch.nn.utils.weight_norm(tmp_layer, name='weight') \n \n # condition feature transform\n tmp_layer = torch_nn.Linear(self.cond_dim, self.gated_act_dim*2)\n self.l_cond_trans = torch.nn.utils.weight_norm(tmp_layer, name='weight')\n \n # transformation after gated act\n tmp_layer = torch_nn.Linear(self.gated_act_dim, self.res_ch_dim)\n self.l_res_trans = torch.nn.utils.weight_norm(tmp_layer, name='weight') \n \n # transformation for skip channels\n #tmp_layer = torch_nn.Linear(self.res_ch_dim, self.skip_ch_dim)\n tmp_layer = torch_nn.Linear(self.gated_act_dim, self.skip_ch_dim)\n self.l_skip_trans = torch.nn.utils.weight_norm(tmp_layer, name='weight')\n \n return", "def _upsample(self, x: torch.Tensor, target_shape: torch.Size, in_branch_idx: int, out_branch_idx: int) -> torch.Tensor:\n # Upscale input tensor(s) `x` using (bi/tri)linear interpolation (or 'nearest' interpolation for tensor with 4D or more features maps)\n x = interpolate(x, out_spatial_shape=target_shape[self.channel_dim+1:], align_corners=self.align_corners)\n # Apply 1x1 convolution in order to obtain the target channel/feature-maps count\n if self.reuse_scaling_convs:\n return self.upscaling_1x1_convs[(x.shape[self.channel_dim], target_shape[self.channel_dim])](x)\n else:\n return self.upscaling_1x1_convs[out_branch_idx][in_branch_idx](x)", "def __init__(self, layer_ind):\r\n super(NearestUpsampleBlock, self).__init__()\r\n self.layer_ind = layer_ind\r\n self.block_name = 'nearest_upsample'\r\n return", "def Unet(cascades, \r\n shape, \r\n filters=24, \r\n filters_out=3, \r\n pool_size=2, \r\n final_activation='relu', \r\n batch_normalization=False,\r\n data_format='channels_last'):\r\n skipcons = []\r\n bn_axis = -1\r\n if data_format != 'channels_last':\r\n bn_axis = 1\r\n\r\n l = [tkl.Input(shape=shape)]\r\n for c in range(cascades):\r\n l.append(tkl.Conv2D(filters=filters*2**c, kernel_size=3, padding='same', activation='relu', data_format=data_format)(l[-1]))\r\n if 
batch_normalization:\r\n l.append(tkl.BatchNormalization(axis=bn_axis)(l[-1]))\r\n l.append(tkl.Conv2D(filters=filters*2**c, kernel_size=3, padding='same', activation='relu', data_format=data_format)(l[-1]))\r\n skipcons.append(len(l)-1)\r\n l.append(tkl.MaxPool2D(pool_size=pool_size, strides=pool_size, data_format=data_format)(l[-1]))\r\n \r\n l.append(tkl.Conv2D(filters=filters*2**cascades, kernel_size=3, padding='same', activation='relu', data_format=data_format)(l[-1])) \r\n l.append(tkl.Conv2D(filters=filters*2**cascades, kernel_size=3, padding='same', activation='relu', data_format=data_format)(l[-1])) \r\n\r\n for c in range(cascades-1,-1,-1):\r\n l.append(tkl.Conv2DTranspose(filters=filters*2**c, kernel_size=pool_size, strides=pool_size, activation='relu', data_format=data_format)(l[-1]))\r\n l.append(tkl.Concatenate()([l[skipcons.pop()], l[-1]])) \r\n l.append(tkl.Conv2D(filters=filters*2**c, kernel_size=3, padding='same', activation='relu', data_format=data_format)(l[-1])) \r\n if batch_normalization:\r\n l.append(tkl.BatchNormalization(axis=bn_axis)(l[-1]))\r\n l.append(tkl.Conv2D(filters=filters*2**c, kernel_size=3, padding='same', activation='relu', data_format=data_format)(l[-1])) \r\n \r\n l.append(tkl.Conv2D(filters=filters_out, kernel_size=3, padding='same', activation=final_activation, data_format=data_format)(l[-1])) \r\n \r\n model = tk.Model(inputs=l[0], outputs=l[-1], name=f\"U-net_{cascades}cas{filters}fil{filters_out}out\")\r\n return model", "def get_unet_dilated(self):\n\t\tinputs = Input((self.img_rows, self.img_cols, self.num_channels))\n\n\t\tconv1 = Conv2D(32, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(1, 1))(inputs)\n\t\tconv1 = Conv2D(32, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(2, 2))(conv1)\n\t\tpool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n\t\tconv2 = Conv2D(64, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(1, 1))(pool1)\n\t\tconv2 = Conv2D(64, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(2, 2))(conv2)\n\t\tpool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n\t\tconv3 = Conv2D(128, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(1, 1))(pool2)\n\t\tconv3 = Conv2D(128, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(2, 2))(conv3)\n\t\tpool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n\t\tconv4 = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(1, 1))(pool3)\n\t\tconv4 = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(2, 2))(conv4)\n\t\tpool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n\t\tconv5 = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(1, 1))(pool4)\n\t\tconv5 = Conv2D(512, (3, 3), padding=\"same\", activation=\"relu\", dilation_rate=(2, 2))(conv5)\n\n\t\tup6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3) # concat_axis=3 for Tensorflow vs 1 for theano\n\t\tconv6 = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\")(up6)\n\t\tconv6 = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\")(conv6)\n\n\t\tup7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3)\n\t\tconv7 = Conv2D(128, (3, 3), padding=\"same\", activation=\"relu\")(up7)\n\t\tconv7 = Conv2D(128, (3, 3), padding=\"same\", activation=\"relu\")(conv7)\n\n\t\tup8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3)\n\t\tconv8 = Conv2D(64, (3, 3), padding=\"same\", activation=\"relu\")(up8)\n\t\tconv8 = Conv2D(64, (3, 3), padding=\"same\", 
activation=\"relu\")(conv8)\n\n\t\tup9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3)\n\t\tconv9 = Conv2D(32, (3, 3), padding=\"same\", activation=\"relu\")(up9)\n\t\tconv9 = Conv2D(32, (3, 3), padding=\"same\", activation=\"relu\")(conv9)\n\n\t\tconv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n\t\tmodel = Model(inputs=[inputs], outputs=[conv10])\n\t\treturn model", "def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetGenerator, self).__init__()\n # construct unet structure\n # REW: 先求最底层的跨层连接,再逐渐往上层去 大神级代码啊!\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer\n for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Searches the src directory for all 'slugs' that should be translated by looking for matches of the pattern t("string")
def find_translation_slugs():
    slugs = {}
    for (dirpath, _, filenames) in walk(SRC_DIR):
        for filename in filenames:
            if not filename.endswith(".py"):
                continue
            with open(join(dirpath, filename), "r") as src_file:
                contents = src_file.read()
                for match in re.findall(r"[^A-Za-z0-9]t\(\s*\"(.+?)\"\s*\)", contents):
                    slugs[match] = True
    return slugs
[ "def _get_all_source_strings(resources, *args, **kwargs):\r\n return Translation.objects.source_strings(resources)", "def find_strings(self) -> List[LocalizedString]:\n raise NotImplementedError()", "def test_translate_locations(self):\n # Check that translatables can be loaded from the dialog directory\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-dialog/'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n # Check that translatables can be loaded from locale folder\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n\n # Check loading in a non-en-us language\n s = SimpleSkill1()\n s.config_core['lang'] = 'de-de'\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])\n vals = s.translate_namedvalues('named_things')\n self.assertEqual(vals['blau'], '2')\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Aber setzen sie sich herr test framework'])\n\n # Check fallback to english\n lst = s.translate_list('not_in_german')\n self.assertEqual(lst, ['not', 'in', 'German'])\n\n # Restore lang to en-us\n s.config_core['lang'] = 'en-us'", "def add_from_strings(self, strings):\r\n for j in strings:\r\n # Check SE existence\r\n try:\r\n se = SourceEntity.objects.get(\r\n string = j.source_entity, context = j.context or \"None\",\r\n resource = self.resource\r\n )\r\n except SourceEntity.DoesNotExist:\r\n logger.warning(\r\n \"Source entity %s does not exist\" % j.source_entity\r\n )\r\n continue\r\n Suggestion.objects.get_or_create(\r\n string = j.translation, source_entity = se,\r\n language = self.language\r\n )", "def test_localized_bundle_languages(self):\n strings = dotstrings.load_all_strings(self.bundle_path)\n self.assertEqual(sorted(strings.languages()), [\"en\", \"fr\"])", "def _get_reviewed_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.reviewed_source_strings(resources, language)", "def translateStrings(language_code):\n from translate_admin import translateAdminStrings\n from translate_frontend import translateFrontendStrings\n from translate_help import translateHelpStrings\n from translate_login import translateLoginStrings\n\n translateAdminStrings(language_code)\n translateFrontendStrings(language_code)\n translateHelpStrings(language_code)\n translateLoginStrings(language_code)", "def _get_untranslated_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.untranslated_source_strings(resources, language)", "def _get_user_filtered_source_strings(resources, users, language, *args, **kwargs):\r\n return Translation.objects.user_translated_strings(resources, language, users)", "def source_strings(self, resources):\r\n source_language = 
get_source_language(resources)\r\n return self.filter(\r\n resource__in=resources, language=source_language, rule=5\r\n )", "def test_localized_bundle_tables_for_language(self):\n strings = dotstrings.load_all_strings(self.bundle_path)\n self.assertEqual(sorted(strings.tables_for_language(\"en\")), [\"One\", \"Two\"])\n self.assertEqual(sorted(strings.tables_for_language(\"fr\")), [\"One\", \"Two\"])", "def translated_source_strings(self, resources, language):\r\n source_language = get_source_language(resources)\r\n translated_se_ids = frozenset(self.filter(\r\n resource__in=resources, language=language, rule=5\r\n ).values_list('source_entity_id', flat=True))\r\n # Add resource_id as well to reduce the search space\r\n # by taking advantage of the indexes in resource and language\r\n return self.filter(\r\n resource__in=resources,\r\n source_entity__id__in=translated_se_ids,\r\n language=source_language, rule=5\r\n )", "def _get_unreviewed_source_strings(resources, language, *args, **kwargs):\r\n return Translation.objects.unreviewed_source_strings(resources, language)", "def load_resource_strings():\n # resource strings\n g.userResourceStrings = UsersResourceString\n g.authResourceStrings = AuthResourceStrings\n g.generalResourceStrings = GeneralResourceStrings\n g.organizationResourceStrings = OrganizationResourceStrings\n g.actionResourceStrings = ActionResourceStrings\n g.menuResourceStrings = MenuResourceStrings\n g.dropDownLists = DropdownLists\n\n # absolute file location for font file for pdf documents. It will be used in CSS rules.\n g.pdfFontFileAbsolutePath = os.path.abspath('./static/fonts/GFSDidot-Regular.ttf')\n\n # absolute folder location for uploaded images\n g.imageFolderAbsolutePath = os.path.abspath('./uploads/')", "def _get_source_strings(self):\r\n return SourceEntity.objects.filter(\r\n resource=self.resource\r\n ).values_list(\r\n 'id', 'string_hash', 'pluralized'\r\n ).order_by()", "def _find_subs(project, sample=None):\n name_patt = \"{}*.sub\".format(\"*\" + sample.name if sample else \"\")\n return glob.glob(os.path.join(project.submission_folder, name_patt))", "def transcripts_filenames(slug):\n return glob(join(transcripts_dir(slug), '*.json'))", "def those_with_translation(translated):\n for x in translated:\n if x[1]:\n yield x", "def _add_translation_string(self, *args, **kwargs):\r\n self.stringset.add(GenericTranslation(*args, **kwargs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates all translation files, checking for missing and unnecessary translations
def validate_translation_files():
    passed = True
    slugs = find_translation_slugs()
    translation_filenames = [
        f
        for f in listdir(TRANSLATION_FILES_DIR)
        if isfile(join(TRANSLATION_FILES_DIR, f))
    ]
    for translation_filename in translation_filenames:
        print("Validating %s..." % translation_filename)
        valid = True
        with open(
            join(TRANSLATION_FILES_DIR, translation_filename), "r"
        ) as translation_file:
            translations = load_translations(translation_file)
            for slug in slugs:
                if slug not in translations or translations[slug] == "":
                    print('Missing translation for "%s"' % slug)
                    valid = False
            for translation_slug in translations:
                if translation_slug not in slugs:
                    print('Unnecessary translation for "%s"' % translation_slug)
                    valid = False
        if valid:
            print("OK")
        passed = passed and valid
    if not passed:
        sys.exit(1)
[ "def check_properties_files():\n for lang_code in LANG_CODES:\n print \"======================\"\n print lang_code\n print \"======================\"\n translationPropertiesFile = get_properties_file_path(lang_code)\n englishPropertiesFile = get_properties_file_path(None)\n translationRows = get_rows_from_language_file(translationPropertiesFile)\n englishRows = get_rows_from_language_file(englishPropertiesFile)\n\n num_error_1 = 0\n num_error_2 = 0\n num_error_3 = 0\n for row in translationRows.values():\n if row.hash_ in englishRows:\n englishRow = englishRows[row.hash_]\n else:\n print \"ERROR: no row in English file to match translation row \" + row.hash_\n continue\n if row.full_string is None or len(row.full_string) == 0:\n # (1)\n print \"WARNING: no translation while processing \" + \": \" + englishRow.key\n num_error_1 += 1\n if row.full_string == englishRow.full_string and not englishRow.full_string.startswith(\"*T\") and not englishRow.full_string.upper() == \"OKs\":\n # (2)\n print \"WARNING: row has not been translated: \" + englishRow.key + \": \" + englishRow.full_string\n num_error_2 += 1\n for englishRowHash in englishRows:\n if englishRowHash not in translationRows:\n print \"ERROR: no translation found for row: \" + englishRows[englishRowHash].key\n num_error_3 += 1\n print \"======================\"\n print lang_code\n print \"No translation: \" + str(num_error_1)\n print \"Not translated: \" + str(num_error_2)\n print \"No translation for: \" + str(num_error_3)", "def make_check_templates():\n for lang_code in LANG_CODES:\n print \"======================\"\n print lang_code\n print \"======================\"\n translationPropertiesFile = get_properties_file_path(lang_code)\n englishPropertiesFile = get_properties_file_path(None)\n translationRows = get_rows_from_language_file(translationPropertiesFile)\n englishRows = get_rows_from_language_file(englishPropertiesFile)\n for englishRow in englishRows.values():\n if englishRow.hash_ in translationRows:\n englishRow.translation = translationRows[englishRow.hash_].full_string\n\n pathTemplateXLS = os.path.join(TEMPLATES_PATH, \"LanguageData_\" + ALIASES[lang_code] + \".xls\")\n make_template_file_from_delta_rows(englishRows.values(), pathTemplateXLS, lang_code, \"15/Mar/2015\")", "def run_validation_on_specific_files(self):\n files_validation_result = set()\n\n for path in self.file_path.split(','):\n error_ignore_list = self.get_error_ignore_list(get_pack_name(path))\n\n if os.path.isfile(path):\n click.secho('\\n================= Validating file =================', fg=\"bright_cyan\")\n files_validation_result.add(self.run_validations_on_file(path, error_ignore_list))\n\n else:\n path = path.rstrip('/')\n dir_name = os.path.basename(path)\n if dir_name in CONTENT_ENTITIES_DIRS:\n click.secho(f'\\n================= Validating content directory {path} =================',\n fg=\"bright_cyan\")\n files_validation_result.add(self.run_validation_on_content_entities(path, error_ignore_list))\n else:\n if os.path.basename(os.path.dirname(path)) == PACKS_DIR:\n click.secho(f'\\n================= Validating pack {path} =================',\n fg=\"bright_cyan\")\n files_validation_result.add(self.run_validations_on_pack(path))\n\n else:\n click.secho(f'\\n================= Validating package {path} =================',\n fg=\"bright_cyan\")\n files_validation_result.add(self.run_validation_on_package(path, error_ignore_list))\n\n return all(files_validation_result)", "def test_app_locales(self):\n filenames = list(gen_filenames())\n 
self.assertIn(os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),\n filenames)", "def custom_process_locale_dir(self, locale_dir, files):\n build_files = []\n for translatable in files:\n if self.verbosity > 1:\n self.stdout.write('processing file %s in %s\\n' % (\n translatable.file, translatable.dirpath\n ))\n if self.domain != 'djangular':\n continue\n build_file = self.build_file_class(self, self.domain, translatable)\n try:\n build_file.preprocess()\n except UnicodeDecodeError as e:\n self.stdout.write(\n 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (\n translatable.file, translatable.dirpath, e,\n )\n )\n continue\n build_files.append(build_file)\n\n if self.domain == 'djangular':\n # self.domain = 'django'\n args = [\n 'xgettext',\n '-d', self.domain,\n '--language=Python',\n '--keyword=gettext_noop',\n '--keyword=gettext_lazy',\n '--keyword=ngettext_lazy:1,2',\n '--keyword=ugettext_noop',\n '--keyword=ugettext_lazy',\n '--keyword=ungettext_lazy:1,2',\n '--keyword=pgettext:1c,2',\n '--keyword=npgettext:1c,2,3',\n '--keyword=pgettext_lazy:1c,2',\n '--keyword=npgettext_lazy:1c,2,3',\n '--output=-',\n ]\n else:\n return\n\n input_files = [bf.work_path for bf in build_files]\n with NamedTemporaryFile(mode='w+') as input_files_list:\n input_files_list.write('\\n'.join(input_files))\n input_files_list.flush()\n args.extend(['--files-from', input_files_list.name])\n args.extend(self.xgettext_options)\n msgs, errors, status = popen_wrapper(args)\n\n if errors:\n if status != STATUS_OK:\n for build_file in build_files:\n build_file.cleanup()\n raise CommandError(\n 'errors happened while running xgettext on %s\\n%s' %\n ('\\n'.join(input_files), errors)\n )\n elif self.verbosity > 0:\n # Print warnings\n self.stdout.write(errors)\n\n if msgs:\n if locale_dir is NO_LOCALE_DIR:\n file_path = os.path.normpath(build_files[0].path)\n raise CommandError(\n 'Unable to find a locale path to store translations for '\n 'file %s' % file_path\n )\n for build_file in build_files:\n msgs = build_file.postprocess_messages(msgs)\n potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))\n write_pot_file(potfile, msgs)\n\n self.domain = 'djangular'\n\n for build_file in build_files:\n build_file.cleanup()", "def test_django_locales(self):\n filenames = list(gen_filenames())\n locales = []\n\n basedir = os.path.join(os.path.dirname(conf.__file__), 'locale')\n for dirpath, dirnames, locale_filenames in os.walk(basedir):\n for filename in locale_filenames:\n if filename.endswith('.mo'):\n locales.append(os.path.join(dirpath, filename))\n\n self.assertTrue(len(locales) > 10) # assume a few available locales\n for filename in locales:\n self.assertIn(filename, filenames)", "def _check_file(self, path: str) -> List[str]:\n errors = []\n filename = self._check_row(path)\n\n if filename:\n matches = self._format_matches(filename)\n errors.append(\n f'{path}: Filename contains bad language: {matches}'\n )\n\n try:\n file_errors = self._check_file_content(path)\n if file_errors:\n errors += file_errors\n except UnicodeDecodeError as e:\n errors.append(f'{path}: File couldn\\'t have been opened: {e}')\n\n return errors", "def test_translate_locations(self):\n # Check that translatables can be loaded from the dialog directory\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-dialog/'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, 
dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n # Check that translatables can be loaded from locale folder\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n\n # Check loading in a non-en-us language\n s = SimpleSkill1()\n s.config_core['lang'] = 'de-de'\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])\n vals = s.translate_namedvalues('named_things')\n self.assertEqual(vals['blau'], '2')\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Aber setzen sie sich herr test framework'])\n\n # Check fallback to english\n lst = s.translate_list('not_in_german')\n self.assertEqual(lst, ['not', 'in', 'German'])\n\n # Restore lang to en-us\n s.config_core['lang'] = 'en-us'", "def check_contents(self, files):\n\n for uri in files:\n if not os.path.exists(os.path.join(self.uri, uri)):\n return '\"%s\" must exist in \"%s\"' % (uri, self.uri)", "def clean(self):\n super().clean()\n\n # Trigger combined instance validation\n master = self.instance\n stashed = get_cached_translation(master)\n\n for form in self.forms:\n set_cached_translation(master, form.instance)\n exclusions = form._get_validation_exclusions()\n # fields from the shared model should not be validated\n if type(exclusions) == set:\n exclusions.union(f.name for f in master._meta.fields)\n else:\n exclusions.extend(f.name for f in master._meta.fields)\n try:\n master.clean()\n except ValidationError as e:\n form._update_errors(e)\n\n set_cached_translation(master, stashed)\n\n # Validate that at least one translation exists\n forms_to_delete = self.deleted_forms\n provided = [form for form in self.forms\n if (getattr(form.instance, 'pk', None) is not None or\n form.has_changed())\n and not form in forms_to_delete]\n if len(provided) < 1:\n raise ValidationError(_('At least one translation must be provided'),\n code='notranslation')", "def _lint_files(clang_format, files):\n clang_format = ClangFormat(clang_format, _get_build_dir())\n\n lint_clean = parallel.parallel_process([os.path.abspath(f) for f in files], clang_format.lint)\n\n if not lint_clean:\n print(\"ERROR: Source code does not match required source formatting style\")\n sys.exit(1)", "def page_templates_loading_check(app_configs, **kwargs):\n errors = []\n\n for page_template in settings.get_page_templates():\n try:\n loader.get_template(page_template[0])\n except template.TemplateDoesNotExist:\n errors.append(checks.Warning(\n 'Django cannot find template %s' % page_template[0],\n obj=page_template, id='pages.W001'))\n\n return errors", "def test_convertible_substitles_from_pressurcooker(pressurcooker_test_files):\n for fixture in pressurcooker_test_files:\n localpath = fixture[\"localpath\"]\n assert os.path.exists(localpath), \"Error mising local test file \" + localpath\n subtitle_file = SubtitleFile(localpath, language=fixture[\"language\"])\n filename = subtitle_file.process_file()\n assert filename, 
\"converted filename must exist\"\n assert filename.endswith(\".vtt\"), \"converted filename must have .vtt extension\"\n storage_path = config.get_storage_path(filename)\n with open(storage_path, encoding=\"utf-8\") as converted_vtt:\n filecontents = converted_vtt.read()\n assert (\n fixture[\"check_words\"] in filecontents\n ), \"missing check_words in converted subs\"", "def clean(self):\n if any(self.errors):\n return\n\n languages = []\n proficiencies = []\n language_duplicates = False\n\n for form in self.forms:\n if form.cleaned_data:\n print(form.cleaned_data)\n if form.cleaned_data['language'] in languages:\n language_duplicates = True\n languages.append(form.cleaned_data['language'])\n\n proficiencies.append(form.cleaned_data['proficiency'])\n\n if language_duplicates:\n raise forms.ValidationError(\n 'You may not list the same language twice.',\n code='duplicate_languages'\n )\n\n if 'NA' not in proficiencies:\n raise forms.ValidationError(\n 'You must choose \\'Native Speaker\\' for at least one language.',\n code='no_native_language'\n )\n\n if len(languages) < 2:\n raise forms.ValidationError(\n 'You must enter at least one language that you are learning.',\n code='no_foreign_language'\n )", "def _handle_templates(self, all_templates):\n\n errors = []\n try:\n # Make sure there is a template entry in zabbix\n for template in set(all_templates):\n self.zbxapi.ensure_template_exists(template)\n\n # Reason: disable pylint broad-except because we want to process as much as possible\n # Status: permanently disabled\n # pylint: disable=broad-except\n except Exception as error:\n self.logger.error(\"Failed creating templates: %s\", error.message)\n errors.append(error)\n\n return errors", "def _check_compilation_problems(translation_unit):\n if translation_unit.diagnostics:\n for diagnostic in translation_unit.diagnostics:\n if diagnostic.severity >= clang.Diagnostic.Error:\n logging.warning(diagnostic.spelling)", "def check_files_exist(self):\n\n files_fail = [\n 'Dockerfile',\n 'environment.yml',\n 'data',\n 'scripts'\n ]\n files_warn = [\n \n ]\n\n for files in files_fail:\n if not os.path.isfile(self.pf(files)):\n self.failed.append((1, 'File {} not found.'.format(files)))\n else:\n self.passed.append((1, 'File {} found.'.format(files)))\n\n for files in files_warn:\n if not os.path.isdir(self.pf(files)):\n self.warned.append((1, 'Dir {} not found.'.format(files)))\n else:\n self.passed.append((1, 'Dir {} found.'.format(files)))\n\n if os.path.isfile(self.pf('environment.yml')):\n self.load_environment_config()", "def alert_if_lang_matches(glob):\n\tverbose = False\n\tprinted_count = 0\n\tfor file in filtered_descendants(glob):\n\t\thas_match = False\n\t\ttry:\n\t\t\twith open(file, 'r', encoding='utf8') as contents:\n\t\t\t\tif check_match(file, contents.read()):\n\t\t\t\t\tprinted_count += 1\n\t\texcept:\n\t\t\tif verbose:\n\t\t\t\tprint(\"skipping {}\".format(file))\n\n\treturn printed_count", "def bake_translations():\n translation_table = {}\n translation_filenames = [\n f\n for f in listdir(TRANSLATION_FILES_DIR)\n if isfile(join(TRANSLATION_FILES_DIR, f))\n ]\n for translation_filename in translation_filenames:\n with open(\n join(TRANSLATION_FILES_DIR, translation_filename), \"r\"\n ) as translation_file:\n translations = json.load(translation_file)\n lookup = {}\n for slug, translation in list(translations.items()):\n lookup[binascii.crc32(slug.encode(\"utf-8\"))] = translation\n translation_table[basename(translation_filename).split(\".\")[0]] = lookup\n\n with 
open(join(SRC_DIR, \"krux\", \"translations.py\"), \"w\") as translations:\n translations.write(\n \"\"\"# The MIT License (MIT)\n\n# Copyright (c) 2021-2022 Krux contributors\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\\n\"\"\"\n )\n translations.write(\"# pylint: disable=C0301\\n\")\n translations.write(\"translation_table = \")\n translations.write(repr(translation_table))\n translations.write(\"\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bakes all translations into a translations.py file inside the krux namespace
def bake_translations():
    translation_table = {}
    translation_filenames = [
        f
        for f in listdir(TRANSLATION_FILES_DIR)
        if isfile(join(TRANSLATION_FILES_DIR, f))
    ]
    for translation_filename in translation_filenames:
        with open(
            join(TRANSLATION_FILES_DIR, translation_filename), "r"
        ) as translation_file:
            translations = json.load(translation_file)
            lookup = {}
            for slug, translation in list(translations.items()):
                lookup[binascii.crc32(slug.encode("utf-8"))] = translation
            translation_table[basename(translation_filename).split(".")[0]] = lookup

    with open(join(SRC_DIR, "krux", "translations.py"), "w") as translations:
        translations.write(
            """# The MIT License (MIT)

# Copyright (c) 2021-2022 Krux contributors

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.\n"""
        )
        translations.write("# pylint: disable=C0301\n")
        translations.write("translation_table = ")
        translations.write(repr(translation_table))
        translations.write("\n")
[ "def test_translate_locations(self):\n # Check that translatables can be loaded from the dialog directory\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-dialog/'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n # Check that translatables can be loaded from locale folder\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n\n # Check loading in a non-en-us language\n s = SimpleSkill1()\n s.config_core['lang'] = 'de-de'\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])\n vals = s.translate_namedvalues('named_things')\n self.assertEqual(vals['blau'], '2')\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Aber setzen sie sich herr test framework'])\n\n # Check fallback to english\n lst = s.translate_list('not_in_german')\n self.assertEqual(lst, ['not', 'in', 'German'])\n\n # Restore lang to en-us\n s.config_core['lang'] = 'en-us'", "def update_locales(ctx):\r\n with ctx.lcd(os.path.join(settings.SRC_DIR, 'locale')):\r\n ctx.local('svn up')\r\n ctx.local('./compile-mo.sh .')", "def makemessages():\n for languagecode in LANGUAGE_CODES:\n _manage('makemessages -l {} -i \"static/*\" -i \"libs/*\"'.format(languagecode))", "def custom_process_locale_dir(self, locale_dir, files):\n build_files = []\n for translatable in files:\n if self.verbosity > 1:\n self.stdout.write('processing file %s in %s\\n' % (\n translatable.file, translatable.dirpath\n ))\n if self.domain != 'djangular':\n continue\n build_file = self.build_file_class(self, self.domain, translatable)\n try:\n build_file.preprocess()\n except UnicodeDecodeError as e:\n self.stdout.write(\n 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (\n translatable.file, translatable.dirpath, e,\n )\n )\n continue\n build_files.append(build_file)\n\n if self.domain == 'djangular':\n # self.domain = 'django'\n args = [\n 'xgettext',\n '-d', self.domain,\n '--language=Python',\n '--keyword=gettext_noop',\n '--keyword=gettext_lazy',\n '--keyword=ngettext_lazy:1,2',\n '--keyword=ugettext_noop',\n '--keyword=ugettext_lazy',\n '--keyword=ungettext_lazy:1,2',\n '--keyword=pgettext:1c,2',\n '--keyword=npgettext:1c,2,3',\n '--keyword=pgettext_lazy:1c,2',\n '--keyword=npgettext_lazy:1c,2,3',\n '--output=-',\n ]\n else:\n return\n\n input_files = [bf.work_path for bf in build_files]\n with NamedTemporaryFile(mode='w+') as input_files_list:\n input_files_list.write('\\n'.join(input_files))\n input_files_list.flush()\n args.extend(['--files-from', input_files_list.name])\n args.extend(self.xgettext_options)\n msgs, errors, status = popen_wrapper(args)\n\n if errors:\n if status != STATUS_OK:\n for build_file in build_files:\n build_file.cleanup()\n raise CommandError(\n 'errors happened 
while running xgettext on %s\\n%s' %\n ('\\n'.join(input_files), errors)\n )\n elif self.verbosity > 0:\n # Print warnings\n self.stdout.write(errors)\n\n if msgs:\n if locale_dir is NO_LOCALE_DIR:\n file_path = os.path.normpath(build_files[0].path)\n raise CommandError(\n 'Unable to find a locale path to store translations for '\n 'file %s' % file_path\n )\n for build_file in build_files:\n msgs = build_file.postprocess_messages(msgs)\n potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))\n write_pot_file(potfile, msgs)\n\n self.domain = 'djangular'\n\n for build_file in build_files:\n build_file.cleanup()", "def create_translation_file(locale):\n translations = {}\n slugs = find_translation_slugs()\n for slug in slugs:\n translations[slug.replace(\"\\\\n\", \"\\n\")] = \"\"\n with open(join(TRANSLATION_FILES_DIR, \"%s.json\" % locale), \"w\") as translation_file:\n translation_file.write(\n json.dumps(translations, sort_keys=True, indent=4, ensure_ascii=False)\n )", "def langkit_main(langkit_root, files=[]):\n dirs = [os.path.join('contrib', 'python'),\n os.path.join('contrib', 'lkt'),\n os.path.join('langkit'),\n os.path.join('manage.py'),\n os.path.join('scripts'),\n os.path.join('setup.py'),\n os.path.join('testsuite'),\n os.path.join('utils')]\n excludes = ['__pycache__',\n os.path.join('contrib', 'python', 'build'),\n os.path.join('contrib', 'lkt', 'build'),\n os.path.join('langkit', 'support', 'obj'),\n os.path.join('langkit', 'dsl_unparse.py'),\n 'out',\n os.path.join('stylechecks', 'tests.py'),\n os.path.join('testsuite', 'python_support', 'expect.py'),\n os.path.join('testsuite', 'python_support', 'quotemeta.py'),\n os.path.join('testsuite', 'out')]\n main(langkit_root, files, dirs, excludes)", "def generate_translations(existing_codelist_filename, output_filename, lang):\n parser = etree.XMLParser(remove_blank_text=True)\n codelist_xml_file = open(existing_codelist_filename)\n codelist_xml = etree.parse(codelist_xml_file, parser)\n\n wb = xlwt.Workbook()\n sheet = wb.add_sheet('Sheet 1')\n sheet.write(0,0,'code')\n sheet.write(0,1,'name')\n sheet.write(0,2,'description')\n for i, code in enumerate(codelist_xml.xpath(\"/codelist/codelist-items/codelist-item\")):\n sheet.write(i+1, 0, get_text(code.find('code')))\n sheet.write(i+1, 1, get_text(code.find('name/narrative[@xml:lang=\"{}\"]'.format(lang), namespaces=nsmap)))\n sheet.write(i+1, 2, get_text(code.find('description/narrative[@xml:lang=\"{}\"]'.format(lang), namespaces=nsmap)))\n wb.save(output_filename, \"utf-8\")", "def setupTranslator(app):\n try:\n locale.setlocale(locale.LC_ALL, '')\n except locale.Error:\n pass\n global lang\n lang = os.environ.get('LC_MESSAGES', '')\n if not lang:\n lang = os.environ.get('LANG', '')\n if not lang:\n try:\n lang = locale.getdefaultlocale()[0]\n except ValueError:\n pass\n if not lang:\n lang = ''\n numTranslators = 0\n if lang and lang[:2] not in ['C', 'en']:\n numTranslators += loadTranslator('qt_{0}'.format(lang), app)\n numTranslators += loadTranslator('convertall_{0}'.format(lang), app)\n\n def translate(text, comment=''):\n \"\"\"Translation function that sets context to calling module's\n filename.\n \"\"\"\n try:\n frame = sys._getframe(1)\n fileName = frame.f_code.co_filename\n finally:\n del frame\n context = os.path.basename(os.path.splitext(fileName)[0])\n return QCoreApplication.translate(context, text, comment)\n\n def markNoTranslate(text, comment=''):\n return text\n\n if numTranslators:\n builtins._ = translate\n else:\n builtins._ = markNoTranslate", "def 
translation_file_changed(sender, file_path, **kwargs):\n if file_path.suffix == \".mo\":\n import gettext\n\n from django.utils.translation import trans_real\n\n gettext._translations = {}\n trans_real._translations = {}\n trans_real._default = None\n trans_real._active = Local()\n return True", "def setup_i18n(self):\n locale_name = self.locale\n\n root = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n os.pardir,\n os.pardir))\n translation_path = os.path.join(\n root,\n 'i18n',\n 'inasafe_' + str(locale_name) + '.qm')\n if os.path.exists(translation_path):\n self.translator = QTranslator()\n result = self.translator.load(translation_path)\n LOGGER.debug('Switched locale to %s' % translation_path)\n if not result:\n message = 'Failed to load translation for %s' % locale_name\n LOGGER.exception(message)\n raise TranslationLoadError(message)\n # noinspection PyTypeChecker, PyCallByClass, PyArgumentList\n QCoreApplication.installTranslator(self.translator)\n else:\n if locale_name != 'en':\n message = 'No translation exists for %s' % locale_name\n LOGGER.exception(message)", "def translateStrings(language_code):\n from translate_admin import translateAdminStrings\n from translate_frontend import translateFrontendStrings\n from translate_help import translateHelpStrings\n from translate_login import translateLoginStrings\n\n translateAdminStrings(language_code)\n translateFrontendStrings(language_code)\n translateHelpStrings(language_code)\n translateLoginStrings(language_code)", "def export(self):\n print_log(\"Exporting translations\")\n\n buffer = open(\"gtfs/translations.txt\", mode=\"w\", encoding=\"utf8\", newline=\"\")\n writer = csv.DictWriter(buffer, GTFS_HEADERS[\"translations.txt\"], extrasaction=\"ignore\")\n writer.writeheader()\n\n for ja_string, en_string in self.strings.items():\n writer.writerow({\"trans_id\": ja_string, \"lang\": \"ja\", \"translation\": ja_string})\n writer.writerow({\"trans_id\": ja_string, \"lang\": \"en\", \"translation\": en_string})\n\n buffer.close()", "def gettext_variables(some_string, lang=\"de\"):\r\n\r\n some_string = str(some_string)\r\n\r\n trans_file = os.path.join(django_settings.STATIC_ROOT, f'personal_translation_{lang}.pickle')\r\n\r\n if os.path.exists(trans_file):\r\n with open(trans_file, 'rb') as handle:\r\n trans_dict = pickle.load(handle)\r\n else:\r\n trans_dict = {}\r\n\r\n if some_string is not None:\r\n if some_string not in trans_dict:\r\n trans_dict[some_string] = \"\"\r\n\r\n with open(trans_file, 'wb') as handle:\r\n pickle.dump(trans_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)", "def merge_strings():\n\n ap = renpy.arguments.ArgumentParser(description=\"Merges translated strings with the game script.\")\n ap.add_argument(\"language\", help=\"The language to merge translated strings to.\")\n ap.add_argument(\"source\", help=\"The json file to take translated strings from.\")\n ap.add_argument(\"--reverse\", action=\"store_true\", help=\"Reverses the languages in the json file.\")\n ap.add_argument(\"--replace\", action=\"store_true\", help=\"Replaces non-trivial translations.\")\n\n args = ap.parse_args()\n\n language = args.language\n\n if language == 'None':\n language = None\n\n if language not in renpy.game.script.translator.strings: # @UndefinedVariable\n raise Exception(\"Language %r does not have any translations.\" % language)\n\n with io.open(args.source, \"r\", encoding=\"utf-8\") as f:\n data = json.loads(f.read())\n\n if args.reverse:\n new_data = { }\n\n for k, v in data.items():\n new_data[v] 
= k\n\n data = new_data\n\n st = renpy.game.script.translator.strings[language] # @UndefinedVariable\n\n renpy.config.clear_lines = False\n\n for k, v in st.translations.items():\n\n trivial = (not v) or (k == v)\n\n if (not trivial) and (not args.replace):\n continue\n\n if k not in data:\n continue\n\n if k not in st.translation_loc:\n continue\n\n new = data[k]\n quoted = renpy.translation.quote_unicode(new)\n code = u'new \"{}\"'.format(quoted)\n\n filename, linenumber = st.translation_loc[k]\n renpy.scriptedit.insert_line_before(code, filename, linenumber)\n renpy.scriptedit.remove_line(filename, linenumber + 1)\n\n return False", "def translate_file(self, fname):\n po = polib.pofile(fname)\n\n # FIXME - This might be a bit goofy\n po.metadata['Language'] = \",\".join(self.pipeline_spec)\n po.metadata['Plural-Forms'] = 'nplurals=2; plural= n != 1'\n po.metadata['Content-Type'] = 'text/plain; charset=UTF-8'\n count = 0\n for entry in po:\n if entry.msgid_plural:\n entry.msgstr_plural[0] = self.translate_string(\n entry.msgid)\n entry.msgstr_plural[1] = self.translate_string(\n entry.msgid_plural)\n else:\n entry.msgstr = self.translate_string(entry.msgid)\n\n if 'fuzzy' in entry.flags:\n entry.flags.remove('fuzzy') # clear the fuzzy flag\n count += 1\n\n po.save()\n return '{0}: Translated {1} messages.'.format(fname, count)", "def get_translation():\n with open(os.path.join(os.getcwd(), \"flask_app\", 'translation.json')) as json_file:\n dct = json.load(json_file)\n return dct", "def translations(context: Context, pull=False, push=False):\n if not (pull or push):\n raise TaskError('Specify whether to push or pull translations')\n if pull:\n context.shell('tx', 'pull')\n make_messages(context, javascript=False)\n make_messages(context, javascript=True)\n if push:\n context.shell('tx', 'push', '--source', '--no-interactive')", "def i18n():\n\n T = current.T\n scripts = ['''i18n.sSortAscending=\"%s\"''' % T(\"activate to sort column ascending\"),\n '''i18n.sSortDescending=\"%s\"''' % T(\"activate to sort column descending\"),\n '''i18n.sFirst=\"%s\"''' % T(\"First\"),\n '''i18n.sLast=\"%s\"''' % T(\"Last\"),\n '''i18n.sNext=\"%s\"''' % T(\"Next\"),\n '''i18n.sPrevious=\"%s\"''' % T(\"Previous\"),\n '''i18n.sEmptyTable=\"%s\"''' % T(\"No data available in table\"),\n '''i18n.sInfo=\"%s\"''' % T(\"Showing _START_ to _END_ of _TOTAL_ entries\"),\n '''i18n.sInfoEmpty=\"%s\"''' % T(\"Showing 0 to 0 of 0 entries\"),\n '''i18n.sInfoFiltered=\"%s\"''' % T(\"(filtered from _MAX_ total entries)\"),\n '''i18n.sInfoThousands=\"%s\"''' % current.deployment_settings.get_L10n_thousands_separator(),\n '''i18n.sLengthMenu=\"%s\"''' % T(\"Show _MENU_ entries\"),\n '''i18n.sLoadingRecords=\"%s\"''' % T(\"Loading\"),\n '''i18n.sProcessing=\"%s\"''' % T(\"Processing\"),\n '''i18n.sSearch=\"%s\"''' % T(\"Search\"),\n '''i18n.sZeroRecords=\"%s\"''' % T(\"No matching records found\"),\n ]\n script = \"\\n\".join(scripts)\n\n return script", "def _load_translation(self, filename):\n with open(filename) as tfile:\n translations = json.loads(tfile.read())\n for key, value in translations.items():\n if isinstance(value, str):\n self._session.add(Translation(string_id=key, value=value, lang='english'))\n self._session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new translation file for the given locale with stubbed-out translations
def create_translation_file(locale):
    translations = {}
    slugs = find_translation_slugs()
    for slug in slugs:
        translations[slug.replace("\\n", "\n")] = ""
    with open(join(TRANSLATION_FILES_DIR, "%s.json" % locale), "w") as translation_file:
        translation_file.write(
            json.dumps(translations, sort_keys=True, indent=4, ensure_ascii=False)
        )
[ "def bake_translations():\n translation_table = {}\n translation_filenames = [\n f\n for f in listdir(TRANSLATION_FILES_DIR)\n if isfile(join(TRANSLATION_FILES_DIR, f))\n ]\n for translation_filename in translation_filenames:\n with open(\n join(TRANSLATION_FILES_DIR, translation_filename), \"r\"\n ) as translation_file:\n translations = json.load(translation_file)\n lookup = {}\n for slug, translation in list(translations.items()):\n lookup[binascii.crc32(slug.encode(\"utf-8\"))] = translation\n translation_table[basename(translation_filename).split(\".\")[0]] = lookup\n\n with open(join(SRC_DIR, \"krux\", \"translations.py\"), \"w\") as translations:\n translations.write(\n \"\"\"# The MIT License (MIT)\n\n# Copyright (c) 2021-2022 Krux contributors\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\\n\"\"\"\n )\n translations.write(\"# pylint: disable=C0301\\n\")\n translations.write(\"translation_table = \")\n translations.write(repr(translation_table))\n translations.write(\"\\n\")", "def create_documents(self, locale):\n en = settings.WIKI_DEFAULT_LANGUAGE\n en_content = \"This article is in English\"\n trans_content = \"This article is translated into %slocale\" % locale\n # Create an English article and a translation for the locale\n en_doc = DocumentFactory(locale=en)\n ApprovedRevisionFactory(\n document=en_doc, content=en_content, is_ready_for_localization=True\n )\n trans_doc = DocumentFactory(parent=en_doc, locale=locale)\n # Create a new revision of the localized document\n trans_rev = ApprovedRevisionFactory(document=trans_doc, content=trans_content)\n # Make the created revision the current one for the localized document\n trans_doc.current_revision = trans_rev\n trans_doc.save()\n # Return both the English version and the localized version of the document\n return en_doc, trans_doc", "def _write_new_messages(po_file_path, trans_writer, meta_writer,\n msgids, msgstrs, languages):\n po_filename = os.path.basename(po_file_path)\n po_file = polib.pofile(po_file_path)\n\n new_trans = 0\n for entry in po_file:\n if entry.msgid not in msgids:\n new_trans += 1\n trans = [po_filename, entry.tcomment, entry.msgid, entry.msgstr]\n for lang in languages[1:]:\n trans.append(msgstrs[lang].get(entry.msgid, ''))\n\n meta = dict(entry.__dict__)\n meta.pop('msgid', None)\n meta.pop('msgstr', None)\n meta.pop('tcomment', None)\n\n trans_writer.writerow(trans)\n meta_writer.writerow([str(meta)])\n\n return new_trans", "def new_language(context, lang_code, app):\n\timport 
frappe.translate\n\n\tif not context[\"sites\"]:\n\t\traise Exception(\"--site is required\")\n\n\t# init site\n\tfrappe.connect(site=context[\"sites\"][0])\n\tfrappe.translate.write_translations_file(app, lang_code)\n\n\tprint(\n\t\t\"File created at ./apps/{app}/{app}/translations/{lang_code}.csv\".format(\n\t\t\tapp=app, lang_code=lang_code\n\t\t)\n\t)\n\tprint(\n\t\t\"You will need to add the language in frappe/geo/languages.json, if you haven't done it already.\"\n\t)", "def _prepare_polib_files(files_dict, filename, languages,\n locale_root, po_files_path, header):\n files_dict[filename] = {}\n for lang in languages:\n file_path = os.path.join(locale_root, lang, po_files_path)\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n if header is not None:\n _write_header(os.path.join(file_path, filename), lang, header)\n\n files_dict[filename][lang] = polib.pofile(\n os.path.join(file_path, filename), encoding=\"UTF-8\")", "def update_locales(ctx):\r\n with ctx.lcd(os.path.join(settings.SRC_DIR, 'locale')):\r\n ctx.local('svn up')\r\n ctx.local('./compile-mo.sh .')", "def export(self):\n print_log(\"Exporting translations\")\n\n buffer = open(\"gtfs/translations.txt\", mode=\"w\", encoding=\"utf8\", newline=\"\")\n writer = csv.DictWriter(buffer, GTFS_HEADERS[\"translations.txt\"], extrasaction=\"ignore\")\n writer.writeheader()\n\n for ja_string, en_string in self.strings.items():\n writer.writerow({\"trans_id\": ja_string, \"lang\": \"ja\", \"translation\": ja_string})\n writer.writerow({\"trans_id\": ja_string, \"lang\": \"en\", \"translation\": en_string})\n\n buffer.close()", "def make_new_language_properties_file(lang_code):\n newPropertiesFilePath = get_properties_file_path(lang_code)\n engPropertiesFilePath = get_properties_file_path(None)\n shutil.copy(engPropertiesFilePath, newPropertiesFilePath)", "def generate_translations(existing_codelist_filename, output_filename, lang):\n parser = etree.XMLParser(remove_blank_text=True)\n codelist_xml_file = open(existing_codelist_filename)\n codelist_xml = etree.parse(codelist_xml_file, parser)\n\n wb = xlwt.Workbook()\n sheet = wb.add_sheet('Sheet 1')\n sheet.write(0,0,'code')\n sheet.write(0,1,'name')\n sheet.write(0,2,'description')\n for i, code in enumerate(codelist_xml.xpath(\"/codelist/codelist-items/codelist-item\")):\n sheet.write(i+1, 0, get_text(code.find('code')))\n sheet.write(i+1, 1, get_text(code.find('name/narrative[@xml:lang=\"{}\"]'.format(lang), namespaces=nsmap)))\n sheet.write(i+1, 2, get_text(code.find('description/narrative[@xml:lang=\"{}\"]'.format(lang), namespaces=nsmap)))\n wb.save(output_filename, \"utf-8\")", "def _test_po_compile(self, handler):\r\n source_compiled_file = os.path.join(os.path.dirname(__file__),\r\n 'en_compiled.po')\r\n trans_compiled_file = os.path.join(os.path.dirname(__file__),\r\n 'ar_compiled.po')\r\n trans_compiled_file_reviewed = os.path.join(os.path.dirname(__file__),\r\n 'ar_compiled_for_review.po')\r\n handler.bind_resource(self.resource)\r\n handler.set_language(Language.objects.get(code='en_US'))\r\n compiled_template = handler.compile()\r\n f = open(source_compiled_file, 'r')\r\n expected_compiled_template = f.read()\r\n f.close()\r\n po = polib.pofile(compiled_template)\r\n epo = polib.pofile(expected_compiled_template)\r\n po.metadata['PO-Revision-Date'] = epo.metadata['PO-Revision-Date']\r\n po.metadata['Last-Translator'] = epo.metadata['Last-Translator']\r\n compiled_template = str(po)\r\n self.assertEqual(compiled_template,\r\n 
expected_compiled_template)\r\n\r\n handler.set_language(self.language_ar)\r\n compiled_template = handler.compile()\r\n f = open(trans_compiled_file, 'r')\r\n expected_compiled_template = f.read()\r\n f.close()\r\n po = polib.pofile(compiled_template)\r\n epo = polib.pofile(expected_compiled_template)\r\n po.metadata['PO-Revision-Date'] = epo.metadata['PO-Revision-Date']\r\n po.metadata['Last-Translator'] = epo.metadata['Last-Translator']\r\n compiled_template = str(po)\r\n self.assertEqual(compiled_template,\r\n expected_compiled_template)\r\n\r\n handler.set_language(self.language_ar)\r\n compiled_template = handler.compile(mode=Mode.REVIEWED)\r\n f = open(trans_compiled_file_reviewed, 'r')\r\n expected_compiled_template = f.read()\r\n f.close()\r\n po = polib.pofile(compiled_template)\r\n epo = polib.pofile(expected_compiled_template)\r\n po.metadata['PO-Revision-Date'] = epo.metadata['PO-Revision-Date']\r\n po.metadata['Last-Translator'] = epo.metadata['Last-Translator']\r\n compiled_template = str(po)\r\n self.assertEqual(compiled_template,\r\n expected_compiled_template)", "def csv_to_po(trans_csv_path, meta_csv_path, locale_root,\n po_files_path, header=None):\n pattern = \"^\\w+.*po$\"\n for root, dirs, files in os.walk(locale_root):\n for f in filter(lambda x: re.match(pattern, x), files):\n os.remove(os.path.join(root, f))\n\n # read title row and prepare descriptors for po files in each lang\n trans_reader = UnicodeReader(trans_csv_path)\n meta_reader = UnicodeReader(meta_csv_path)\n try:\n title_row = trans_reader.next()\n except StopIteration:\n # empty file\n return\n\n trans_languages = _prepare_locale_dirs(title_row[3:], locale_root)\n\n po_files = {}\n\n meta_reader.next()\n # go through every row in downloaded csv file\n for trans_row, meta_row in izip_longest(trans_reader, meta_reader):\n filename = trans_row[0].rstrip()\n metadata = meta_row[0].rstrip() if meta_row else METADATA_EMPTY\n comment = trans_row[1]\n msgid = trans_row[2]\n\n if filename not in po_files:\n _prepare_polib_files(po_files, filename, trans_languages,\n locale_root, po_files_path, header)\n\n _write_entries(po_files[filename], trans_languages, msgid,\n trans_row[3:], metadata, comment)\n for filename in po_files:\n for lang in po_files[filename]:\n po_files[filename][lang].save()\n\n trans_reader.close()\n meta_reader.close()", "def test_get_translation_file(self):\r\n self.test_resource_edit()\r\n url = reverse('download_for_translation', args=[self.project.slug, self.resource.slug, self.language.code])\r\n resp = self.client['maintainer'].post(url)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertTrue('project1_resource1_pt_BR.po' in resp['Content-Disposition'])", "def manually_translate_file(filename, save=False):\n\n print(\"Add manual translations to '{f}'\".format(f=filename))\n print(\"For each missing translation:\")\n print(\"a) Directly enter a new tranlation in the target language\")\n print(\"b) Leave empty to skip\")\n \n input(\"Press <ENTER> to continue\")\n print(\"\")\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n out = []\n\n # Context data\n source_line = ''\n msgid = ''\n\n for num, line in enumerate(lines):\n # Keep track of context data BEFORE an empty msgstr object\n line = line.strip()\n\n if line.startswith(\"#: \"):\n source_line = line.replace(\"#: \", \"\")\n\n elif line.startswith(\"msgid \"):\n msgid = line.replace(\"msgid \", \"\")\n\n if line.strip() == 'msgstr \"\"':\n # We have found an empty translation!\n\n if msgid and len(msgid) > 0 
and not msgid == '\"\"':\n print(\"Source:\", source_line)\n print(\"Enter translation for {t}\".format(t=msgid))\n\n translation = str(input(\">\"))\n\n if translation and len(translation) > 0:\n # Update the line with the new translation\n line = 'msgstr \"{msg}\"'.format(msg=translation)\n\n out.append(line + \"\\r\\n\")\n\n if save:\n with open(filename, 'w') as output_file:\n output_file.writelines(out)\n\n print(\"Translation done: written to\", filename)\n print(\"Run 'make translate' to rebuild translation data\")", "def po_to_csv_merge(languages, locale_root, po_files_path,\n local_trans_csv, local_meta_csv,\n gdocs_trans_csv, gdocs_meta_csv):\n msgids = []\n\n trans_reader = UnicodeReader(gdocs_trans_csv)\n meta_reader = UnicodeReader(gdocs_meta_csv)\n\n try:\n trans_title = trans_reader.next()\n meta_title = meta_reader.next()\n except StopIteration:\n trans_title = ['file', 'comment', 'msgid']\n trans_title += map(lambda s: s + ':msgstr', languages)\n meta_title = ['metadata']\n\n trans_writer, meta_writer = _get_new_csv_writers(\n trans_title, meta_title, local_trans_csv, local_meta_csv)\n\n for trans_row, meta_row in izip_longest(trans_reader, meta_reader):\n msgids.append(trans_row[2])\n trans_writer.writerow(trans_row)\n meta_writer.writerow(meta_row if meta_row else [METADATA_EMPTY])\n\n trans_reader.close()\n meta_reader.close()\n\n po_files = _get_all_po_filenames(locale_root, languages[0], po_files_path)\n\n new_trans = False\n for po_filename in po_files:\n new_msgstrs = {}\n for lang in languages[1:]:\n po_file_path = os.path.join(locale_root, lang,\n po_files_path, po_filename)\n if not os.path.exists(po_file_path):\n open(po_file_path, 'a').close()\n new_msgstrs[lang] = _get_new_msgstrs(po_file_path, msgids)\n\n if len(new_msgstrs[languages[1]].keys()) > 0:\n new_trans = True\n po_file_path = os.path.join(locale_root, languages[0],\n po_files_path, po_filename)\n _write_new_messages(po_file_path, trans_writer, meta_writer,\n msgids, new_msgstrs, languages)\n\n trans_writer.close()\n meta_writer.close()\n\n return new_trans", "def custom_process_locale_dir(self, locale_dir, files):\n build_files = []\n for translatable in files:\n if self.verbosity > 1:\n self.stdout.write('processing file %s in %s\\n' % (\n translatable.file, translatable.dirpath\n ))\n if self.domain != 'djangular':\n continue\n build_file = self.build_file_class(self, self.domain, translatable)\n try:\n build_file.preprocess()\n except UnicodeDecodeError as e:\n self.stdout.write(\n 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (\n translatable.file, translatable.dirpath, e,\n )\n )\n continue\n build_files.append(build_file)\n\n if self.domain == 'djangular':\n # self.domain = 'django'\n args = [\n 'xgettext',\n '-d', self.domain,\n '--language=Python',\n '--keyword=gettext_noop',\n '--keyword=gettext_lazy',\n '--keyword=ngettext_lazy:1,2',\n '--keyword=ugettext_noop',\n '--keyword=ugettext_lazy',\n '--keyword=ungettext_lazy:1,2',\n '--keyword=pgettext:1c,2',\n '--keyword=npgettext:1c,2,3',\n '--keyword=pgettext_lazy:1c,2',\n '--keyword=npgettext_lazy:1c,2,3',\n '--output=-',\n ]\n else:\n return\n\n input_files = [bf.work_path for bf in build_files]\n with NamedTemporaryFile(mode='w+') as input_files_list:\n input_files_list.write('\\n'.join(input_files))\n input_files_list.flush()\n args.extend(['--files-from', input_files_list.name])\n args.extend(self.xgettext_options)\n msgs, errors, status = popen_wrapper(args)\n\n if errors:\n if status != STATUS_OK:\n for build_file in 
build_files:\n build_file.cleanup()\n raise CommandError(\n 'errors happened while running xgettext on %s\\n%s' %\n ('\\n'.join(input_files), errors)\n )\n elif self.verbosity > 0:\n # Print warnings\n self.stdout.write(errors)\n\n if msgs:\n if locale_dir is NO_LOCALE_DIR:\n file_path = os.path.normpath(build_files[0].path)\n raise CommandError(\n 'Unable to find a locale path to store translations for '\n 'file %s' % file_path\n )\n for build_file in build_files:\n msgs = build_file.postprocess_messages(msgs)\n potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))\n write_pot_file(potfile, msgs)\n\n self.domain = 'djangular'\n\n for build_file in build_files:\n build_file.cleanup()", "def makemessages():\n for languagecode in LANGUAGE_CODES:\n _manage('makemessages -l {} -i \"static/*\" -i \"libs/*\"'.format(languagecode))", "def test_translate_locations(self):\n # Check that translatables can be loaded from the dialog directory\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-dialog/'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n # Check that translatables can be loaded from locale folder\n s = SimpleSkill1()\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertTrue(isinstance(lst, list))\n vals = s.translate_namedvalues('named_things')\n self.assertTrue(isinstance(vals, dict))\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Oh look it\\'s my favourite test framework'])\n\n # Check loading in a non-en-us language\n s = SimpleSkill1()\n s.config_core['lang'] = 'de-de'\n s.root_dir = abspath(join(dirname(__file__),\n 'translate', 'in-locale'))\n lst = s.translate_list('good_things')\n self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])\n vals = s.translate_namedvalues('named_things')\n self.assertEqual(vals['blau'], '2')\n template = s.translate_template('test',\n data={'thing': 'test framework'})\n self.assertEqual(template,\n ['Aber setzen sie sich herr test framework'])\n\n # Check fallback to english\n lst = s.translate_list('not_in_german')\n self.assertEqual(lst, ['not', 'in', 'German'])\n\n # Restore lang to en-us\n s.config_core['lang'] = 'en-us'", "def create_template():\n\n try:\n cwd = os.getcwd()\n with open(os.path.join(cwd, 'example-email.txt'), 'wb') as my_file:\n my_file.write('Dear ${FULL_NAME},\\n\\nThis is an example message. '\n 'The placeholders would be replaced with names from the class list provided. '\n '\\n\\nYou can run vt-student-mailer in test mode for a demonstration! '\n 'Use the -x flag with -m example-email.txt and -s followed by the name of '\n 'a CSV file with student information from Hokie Spa. A sample email substituting the '\n 'placeholders with student infromation from the first line of the CSV file will be printed. '\n 'Use -h or --help for more usage information.'\n '\\n\\nThanks for reading, ${FIRST_NAME}!\\n\\n'\n 'All the best,\\n\\n'\n '-Foo')\n\n except Exception, e:\n print '[-] Error: Could not create file in current directory. Please retry. 
Trace:'\n print str(e)\n print '[-] -h or --help for usage information'\n exit(1)", "def test_newLocaleCustomDomain(self):\n call_command(self.cmd_name, interactive=False,\n locale=self.new_locale, domain=self.good_domain,\n no_empty=True)\n\n po_file_path = os.path.join(self.locale_path, self.new_locale,\n \"LC_MESSAGES\", \"{0}.po\".format(self.good_domain))\n\n # the new .po file exists\n self.assertTrue(os.path.exists(po_file_path))\n\n # remove .po\n rmtree(os.path.join(self.locale_path, self.new_locale))\n\n # remove nls directory\n rmtree(os.path.join(settings.ROOT, \"static\", \"js\", self.good_domain,\n \"nls\", self.new_locale))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sorts and prettyprints all translation files
def prettify_translation_files(): translation_filenames = [ f for f in listdir(TRANSLATION_FILES_DIR) if isfile(join(TRANSLATION_FILES_DIR, f)) ] for translation_filename in translation_filenames: translations = {} with open( join(TRANSLATION_FILES_DIR, translation_filename), "r" ) as translation_file: translations = json.load(translation_file) with open( join(TRANSLATION_FILES_DIR, translation_filename), "w" ) as translation_file: translation_file.write( json.dumps(translations, sort_keys=True, indent=4, ensure_ascii=False) )
[ "def main():\n file = open_file()\n word_list = format_file(file)\n new_list = add_to_list(word_list)\n sorted_list = msort(new_list)\n print_words(sorted_list)", "def sort(self):\n self.treeview.delete(*self.treeview.get_children())\n output_root = self.output_path.get() + '/'\n os.makedirs(output_root, exist_ok=True)\n\n # iterate over every file in table\n for song in self.book.book:\n song_data = song.data()\n filename = self.format.get() + '.mp3'\n filename = ''.join(['' if ch in '\\\\\"<>%:?*|' else ch for ch in filename])\n filename = re.sub(r' *([/]) *', r'\\1', filename)\n # exclude windows name incompatibile characters\n song_data[1:] = [''.join(['' if ch in '\\\\/\"<>%:?*|' else ch for ch in item]) for item in song_data[1:]]\n # replace keywords in control string with real value\n if 'artist' in filename:\n filename = filename.replace('artist', song_data[1])\n if 'title' in filename:\n filename = filename.replace('title', song_data[2])\n if 'album' in filename:\n filename = filename.replace('album', song_data[3])\n if 'number' in filename:\n filename = filename.replace('number', song_data[4])\n if 'year' in filename:\n filename = filename.replace('year', song_data[5])\n if '/' in filename:\n folders = filename.rsplit('/', 1)[0]\n os.makedirs(output_root + folders, exist_ok=True)\n\n # copy or move file\n if self.copy_move.get() == 'copy':\n shutil.copy(song_data[0], output_root + filename)\n else:\n shutil.move(song_data[0], output_root + filename)\n\n # change Book paths to new files and rewrite ID3 if edited\n song.change_file(output_root + filename)\n if song.edit_flag:\n song.id3_write()\n self.book = songbook.Book() # create clean book instance", "def sort_files(files):\n ct_idle = ''\n ct_model = ''\n ct_motion = ''\n ct_project = ''\n ct_script = ''\n for file in files:\n file_type = file.name.split('.')[1]\n if file_type == 'uctidle':\n ct_idle = file\n elif file_type == 'uctmodel':\n ct_model = file\n elif file_type == 'uctmotion':\n ct_motion = file\n elif file_type == 'uctproject':\n ct_project = file\n elif file_type == 'uctscript':\n ct_script = file\n return ct_idle, ct_model, ct_motion, ct_project, ct_script", "def sort(settings):\n\tfilter = settings.format(settings.content)\n\tfilter.sort()\n\tsettings.content = filter.content", "def sort_imports(c):\n if git_dirty(c):\n print(\"Repository is dirty! 
Commit changes.\")\n sys.exit(1)\n cmd = [\"isort\", \"--recursive\", \"--atomic\", \".\"]\n with cd(PROJECT_ROOT):\n c.run(\" \".join(cmd))", "def sort_subject_list() -> None:\n with open(\"resources/subject_list.txt\", \"r+\") as outfile:\n lines = outfile.readlines()\n lines.sort()", "def bake_translations():\n translation_table = {}\n translation_filenames = [\n f\n for f in listdir(TRANSLATION_FILES_DIR)\n if isfile(join(TRANSLATION_FILES_DIR, f))\n ]\n for translation_filename in translation_filenames:\n with open(\n join(TRANSLATION_FILES_DIR, translation_filename), \"r\"\n ) as translation_file:\n translations = json.load(translation_file)\n lookup = {}\n for slug, translation in list(translations.items()):\n lookup[binascii.crc32(slug.encode(\"utf-8\"))] = translation\n translation_table[basename(translation_filename).split(\".\")[0]] = lookup\n\n with open(join(SRC_DIR, \"krux\", \"translations.py\"), \"w\") as translations:\n translations.write(\n \"\"\"# The MIT License (MIT)\n\n# Copyright (c) 2021-2022 Krux contributors\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\\n\"\"\"\n )\n translations.write(\"# pylint: disable=C0301\\n\")\n translations.write(\"translation_table = \")\n translations.write(repr(translation_table))\n translations.write(\"\\n\")", "def get_sorted_export_files():\n ...", "def sortFiles(self):\n from WMCore.Algorithms.TreeSort import TreeSort\n name = lambda x: x['LFN']\n parents = lambda x: x.parentLFNs()\n return TreeSort(name, parents, self.files).sort()", "def encrypted_files_asc():\n return sorted(\n glob.glob(\"./Moje dokumenty/*.txt\"),\n key = lambda f: os.path.getmtime(f))", "def main():\n print(\"Starting directory is: {}\".format(os.getcwd()))\n\n # Change to desired directory\n os.chdir('FilesToSort')\n print(\"Changed directory is: {}\".format(os.getcwd()))\n\n # Create empty dictionary for file extensions and associated category\n file_extension_mapped_to_category = {}\n\n for filename in os.listdir('.'):\n # Ignore directories, just process files\n if os.path.isdir(filename):\n continue\n\n file_extension = filename.split('.')[-1]\n\n if file_extension not in file_extension_mapped_to_category:\n category = input(\"What category would you like to sort {} files into? 
\".format(file_extension))\n file_extension_mapped_to_category[file_extension] = category\n\n try:\n os.mkdir(category)\n print(\"New folder: \", category)\n except FileExistsError:\n print(\"Folder already exists:\", category)\n pass\n\n print(\"Moving {} to {}/{}\".format(filename, category, filename))\n # print(\"Moving {} to {}/{}\".format(filename, file_extension_mapped_to_category[file_extension], filename))\n shutil.move(filename, \"{}/{}\".format(category, filename))\n\n # Print a list of all files in current directory\n print(\"Files in {}:\\n{}\\n\".format(os.getcwd(), os.listdir('.')))", "def _orderDirectory(self, contents):\r\n order = list(contents.keys())\r\n order.sort()\r\n return order", "def custom_process_locale_dir(self, locale_dir, files):\n build_files = []\n for translatable in files:\n if self.verbosity > 1:\n self.stdout.write('processing file %s in %s\\n' % (\n translatable.file, translatable.dirpath\n ))\n if self.domain != 'djangular':\n continue\n build_file = self.build_file_class(self, self.domain, translatable)\n try:\n build_file.preprocess()\n except UnicodeDecodeError as e:\n self.stdout.write(\n 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (\n translatable.file, translatable.dirpath, e,\n )\n )\n continue\n build_files.append(build_file)\n\n if self.domain == 'djangular':\n # self.domain = 'django'\n args = [\n 'xgettext',\n '-d', self.domain,\n '--language=Python',\n '--keyword=gettext_noop',\n '--keyword=gettext_lazy',\n '--keyword=ngettext_lazy:1,2',\n '--keyword=ugettext_noop',\n '--keyword=ugettext_lazy',\n '--keyword=ungettext_lazy:1,2',\n '--keyword=pgettext:1c,2',\n '--keyword=npgettext:1c,2,3',\n '--keyword=pgettext_lazy:1c,2',\n '--keyword=npgettext_lazy:1c,2,3',\n '--output=-',\n ]\n else:\n return\n\n input_files = [bf.work_path for bf in build_files]\n with NamedTemporaryFile(mode='w+') as input_files_list:\n input_files_list.write('\\n'.join(input_files))\n input_files_list.flush()\n args.extend(['--files-from', input_files_list.name])\n args.extend(self.xgettext_options)\n msgs, errors, status = popen_wrapper(args)\n\n if errors:\n if status != STATUS_OK:\n for build_file in build_files:\n build_file.cleanup()\n raise CommandError(\n 'errors happened while running xgettext on %s\\n%s' %\n ('\\n'.join(input_files), errors)\n )\n elif self.verbosity > 0:\n # Print warnings\n self.stdout.write(errors)\n\n if msgs:\n if locale_dir is NO_LOCALE_DIR:\n file_path = os.path.normpath(build_files[0].path)\n raise CommandError(\n 'Unable to find a locale path to store translations for '\n 'file %s' % file_path\n )\n for build_file in build_files:\n msgs = build_file.postprocess_messages(msgs)\n potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))\n write_pot_file(potfile, msgs)\n\n self.domain = 'djangular'\n\n for build_file in build_files:\n build_file.cleanup()", "def sort_file(file_path, encoding=ENCODING):\n file_path = os.path.normpath(file_path)\n input_file = open_fr(file_path, encoding)\n lines = [line.strip().replace('\\x00', '') for line in input_file]\n input_file.close()\n outfile = open_fw(file_path, encoding)\n lines.sort()\n for line in lines:\n outfile.write(line + \"\\n\")\n outfile.close()\n return file_path", "def test_file_default_sort(self):\n self.file_sort_common(sort_method=None)", "def sortCaseInsensitive():\n pass", "def render_files(self):\n if self.keep_dir_structure:\n print \"keeping directory structure\"\n self.render_content_recursive(self.input_path, self.output_path)\n return\n ## Else 
recurse into directory, render files one by one\n files = ls_recursive(self.input_path)\n for f in files:\n filename = get_filename_from_pathname(f)\n outpath = os.path.join(self.output_path, self.get_output_filename(filename))\n if not self.tword or self.tword in f.split('.'):\n print \" rendering: %s\" % (f,)\n self.render_content(f, outpath)\n else:\n if self.copy_not_matching:\n print \" copying: %s\" % (outpath,)\n self.simple_copy(f, outpath)\n else:\n print \" ignoring: %s\" % (f,)", "def axt_sort(self):\n self.update_log(\"Sorting the .axt file(s)\", \"10\",\n datetime.datetime.now())\n # Call to axtSort from kentUtils\n call([\"/hps/nobackup/goldman/conor/1k_genomes/template_switching_sm/tools/axtSort\",\n self.unsorted_axt_file, self.axt_file],\n stdout=self.FNULL, stderr=STDOUT)", "def sortFiles( self, root, extension ):\n date_file_list = []\n files = []\n for folder in glob.glob( root ):\n #print \"folder =\", folder\n \n # sort only files with the given extension. '*' for all files\n for file in glob.glob( folder + '/*.' + extension ):\n # retrieves the stats for the current file as a tuple\n # (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime)\n # the tuple element mtime at index 8 is the last-modified-date\n stats = os.stat( file )\n # create tuple (year yyyy, month(1-12), day(1-31), hour(0-23), minute(0-59), second(0-59),\n # weekday(0-6, 0 is monday), Julian day(1-366), daylight flag(-1,0 or 1)) from seconds since epoch\n # note: this tuple can be sorted properly by date and time\n lastmod_date = time.localtime( stats[8] )\n # create list of tuples ready for sorting by date\n date_file_tuple = lastmod_date, file\n \n # do not include zero size files\n fileSize = stats [stat.ST_SIZE]\n #if fileSize > 0:\n #date_file_list.append( date_file_tuple )\n date_file_list.append( date_file_tuple )\n \n date_file_list.sort() #oldest modification date first\n #date_file_list.reverse() # newest mod date now first\n\n #print \"%-40s %s\" % ( \"filename:\", \"last modified:\" )\n for file in date_file_list:\n #\n # extract just the filename\n #\n folder, file_name = os.path.split( file[1] )\n #\n # convert date tuple to MM/DD/YYYY HH:MM:SS format\n #\n #file_date = time.strftime( \"%m/%d/%y %H:%M:%S\", file[0] )\n files.append( file_name )\n\n return files" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the class using parameters, e.g. which data from SNLI should be stored. SNLI also needs buckets; these are initialized by default if not specified.
def __init__(self,label_dict, data_params=None, bucket_params=None, embeddings=None): super(SNLIData, self).__init__('SNLI', embeddings) # Default parameters to be called from SNLI if data_params is None or len(data_params) == 0: self.data_params = { "annotator_labels" : False, "captionID" : False, "gold_label" : True, "paidID" : False, "sentence1" : True, "sentence1_binary_parse" : False, "sentence1_parse" : False, "sentence2" : True, "sentence2_binary_parse" : False, "sentence2_parse" : False} else: self.data_params = data_params # label dict e.g. {'neutral':0, 'entailment': 1} self.label_dict = label_dict # Default buckets if bucket_params is None: self.bucket_params = [ [10,10], [10,20], [15,10], [15,20], [20,10], [20,20], [30,10], [30,20], [40,20], [100,100] ] else: self.bucket_params = bucket_params
[ "def __init__(self):\n # Empty subsystem data\n self._subsystems = [] # Remember order\n self._subsysdict = {} # Convenient access (want a sorteddict)\n\n # Initialise own parameters, variables with no kwargs\n super(SODENetwork, self).__init__()", "def __init__(self, init_size=16, elements=None):\n self.buckets = [LinkedList() for i in range(init_size)]\n self.size = 0 # Number of key-value entries\n if elements is not None:\n for item in elements:\n self.set(item)", "def __init__(self):\n\n self.clusterTableManager = ClusterTableManager()\n self.docManager = DocManager()\n self.processedClusterStore = ProcessedClusterStore()", "def initialize(self, runInfo, inputs, initDict):\n self._initializeLSpp(runInfo, inputs, initDict)\n self._initializeLSppROM(self.inputs[self.indexes])", "def __init__(self, smiles):\n self.search_smiles = smiles\n self.pubchem_compound_id = self._get_pubchem_cid()\n if self.pubchem_compound_id:\n self.canonical_smiles, self.isomeric_smiles, self.iupac = self._get_pubchem_smiles()\n self.depositor_synonyms = self._get_pubchem_synonyms()\n self.vendors = self._get_pubchem_vendors()\n self.patents = self._get_pubchem_patents()\n self.articles = self._get_pubchem_articles()\n else:\n self.canonical_smiles = None\n self.isomeric_smiles = None\n self.iupac = None\n self.depositor_synonyms = None\n self.vendors = None\n self.patents = None\n self.articles = None", "def __init__(self, user_name, user_email, user_password):\n self.user_name = user_name\n self.user_email = user_email\n self.user_password = user_password\n self.bucket_lists = {}", "def __init__(self, snips=None):\n if not snips:\n snips = SnipsConfig()\n self.snips = snips\n\n self._connect()\n self.initialize()\n self._start()", "def __init__(self):\n\n self.clusters = [ ]", "def __init__(self, *args):\n this = _coin.new_SbStorage(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, num_hash=5, buckets=272):\n self.num_hash = num_hash\n self.buckets = buckets\n self.table = np.zeros([num_hash, buckets])", "def __init__ (self):\n # Create a connection to S3\n self.handle = self.connect()", "def __init__(self,\n pools: List['LoadBalancerPool']) -> None:\n self.pools = pools", "def __init__(self, states, params, rxnstring=None):\n\n self.states = states # State management\n self.params = params # Parameter management\n self.reactants = []\n self.products = []\n self.activators = []\n self.inhibitors = []\n self.mark = '--'\n\n if rxnstring: self.read_rxn_str(rxnstring)", "def __init__(self):\n self._strains: list[Strain] = []\n # dict of strain name (id and alias) to primary strain object\n self._strain_dict_name: dict[str, Strain] = {}", "def __init__(self, dataset, minibatch, num_workers, size, rank):\n if dataset not in datasets_list:\n print(\"Existing datasets are: \", datasets_list)\n raise\n self.dataset = datasets_list.index(dataset)\n self.batch = minibatch * num_workers\n self.num_workers = num_workers\n self.num_ps = size - num_workers\n self.rank = rank", "def __init__(self,fn=None, db = None, z = None):\n # Read the data from pre-computed file.\n if fn==None: \n if db is None:\n db = \"../data/RunPB/\" # The directory where the data live.\n if z is None:\n z = 2\n self.pktable=np.loadtxt(db +\"PTdata/ps00_hh_RunPB_46_z%03d.dat\"%(z*100))\n else:\n self.pktable=np.loadtxt(fn)\n\n #", "def __init__(self, ints, floats, parameters):\n self.data = {0: ints, 1: floats}\n self.parameters = parameters", "def __init__(self):\n self.segments = 
[]\n self._create_init_snakes()", "def __init__(self, *args):\n _snap.TSStr_swiginit(self,_snap.new_TSStr(*args))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores the data points in the designated buckets and stores metadata. A bucket is defined by the maximum length of sentence1 and sentence2, respectively.
def bucketize_data(self, data_set, initialize): PAD_position = self.embeddings.get_pad_pos(initialize=True) bucket_name = data_set + "_buckets" if bucket_name in self.data_sets: return None # dictionary in which the data of the different buckets will be stored bucketized = {} # define metadata for each bucket for b1, b2 in self.bucket_params: bucketized[str(b1) + '_' + str(b2)] = {} # list of data points bucketized[str(b1) + '_' + str(b2)]['data'] = [] # max lengths of sentence1 and sentence2 respectively bucketized[str(b1) + '_' + str(b2)]['buckets'] = [b1, b2] # nr of data points in the bucket (will be counted up) bucketized[str(b1) + '_' + str(b2)]['length'] = 0 # position of sampled data (will be shuffled first and then iteratively retrieved) bucketized[str(b1) + '_' + str(b2)]['position'] = 0 # retrieve defined data_set ('train', 'test', 'dev') data = self.data_sets[data_set] # loop through elements of data set, store the data point in the corresponding bucket and count up the length for elem in data: len1 = elem['sentence1_length'] len2 = elem['sentence2_length'] for b1, b2 in self.bucket_params: if len1 <= b1 and len2 <= b2: elem['sentence1_positions'] = pad_positions(elem['sentence1_positions'], PAD_position, b1) elem['sentence2_positions'] = pad_positions(elem['sentence2_positions'], PAD_position, b2) bucketized[str(b1) + '_' + str(b2)]['data'].append(elem) bucketized[str(b1) + '_' + str(b2)]['length'] += 1 break # store the bucketized data in the class dictionary self.data_sets[bucket_name] = bucketized
[ "def perform_bucketing(opt, labeled_pair_list):\n # Obtain sentence lengths\n sentence_pair_lens = [(len(pair[0].split()), len(pair[1].split())) for pair in labeled_pair_list[0]]\n\n # Calculate bucket size\n buckets = [[0, 0] for _ in range(opt.num_buckets)]\n avg_bucket = len(labeled_pair_list[0]) // opt.num_buckets\n max_lens = [max(pair[0], pair[1]) for pair in sentence_pair_lens]\n len_counts = [(sent_len, max_lens.count(sent_len)) for sent_len in set(max_lens)]\n len_counts.sort(key=lambda x: x[0])\n\n bucket_pointer = 0\n len_pointer = 0\n\n while bucket_pointer < opt.num_buckets and len_pointer < len(len_counts):\n target_bucket = buckets[bucket_pointer]\n # Set lower limit on the bucket's lengths\n target_bucket[0] = len_counts[len_pointer][0]\n bucket_load = 0\n while True:\n try:\n len_count_pair = len_counts[len_pointer]\n deficit = avg_bucket - bucket_load\n surplus = (bucket_load + len_count_pair[1]) - avg_bucket\n if deficit >= surplus or bucket_pointer == opt.num_buckets - 1:\n bucket_load += len_count_pair[1]\n # Update upper limit on the bucket's lengths\n target_bucket[1] = len_count_pair[0]\n len_pointer += 1\n else:\n bucket_pointer += 1\n break\n except IndexError:\n break\n\n # Populate buckets\n bucketed = [([], []) for _ in range(opt.num_buckets)]\n for k in range(len(labeled_pair_list[0])):\n pair_len = max(sentence_pair_lens[k][0], sentence_pair_lens[k][1])\n for l in range(len(buckets)):\n if buckets[l][0] <= pair_len <= buckets[l][1]:\n bucketed[l][0].append(labeled_pair_list[0][k])\n bucketed[l][1].append(labeled_pair_list[1][k])\n return buckets, bucketed", "def runAnalysis(self, data , secondinterval, bucketCount):\n try:\n dayInterval = (secondinterval/float(3600*24))\n min_,max_ = min(data),max(data)\n\n #Truncate data if threshold of 27.5mV or -2.5mV is crossed\n while max_ > 27.5 or min_ < -2.5:\n print 'Data truncated at out of bounds'\n mid = int(len(data)/2)\n ind = data.index(max_ if max_ > 27.5 else min_)\n if ind > mid:\n data = data[:ind]\n else:\n data = data[ind + 1:]\n min_,max_ = min(data),max(data)\n \n \n count = len(data)\n bucketBounds = self.createBuckets(min_,max_,bucketCount)\n buckets = [0 for _ in xrange(bucketCount)]\n \n\n #Bucket Quartiles are divided such that the first and last bucket both have round(bucketCount / 4) buckets. 
eg:\n #For 11 buckets : Q1 = [0,1,2], Q23 = [3,4,5,6,7], Q4 = [8,9,10]\n #For 12 buckets : Q1 = [0,1,2], Q23 = [3,4,5,6,7,8], Q4 = [9,10,11]\n #For 13 buckets : Q1 = [0,1,2], Q23 = [3,4,5,6,7,8,9], Q4 = [10,11,12]\n #For 14 buckets : Q1 = [0,1,2,3], Q23 = [4,5,6,7,8,9], Q4 = [10,11,12,13]\n Q1Ind = range(int(round(bucketCount / 4.0)))\n Q4Ind = range(bucketCount-int(round(bucketCount / 4.0)),bucketCount)\n Q23Ind = range(Q1Ind[-1]+1,Q4Ind[0])\n\n Q = dict([('Q1',0),('Q23',0),('Q4',0)])\n\n Q4increment = [0 for _ in data]\n Q4time = [0 for _ in data]\n\n avg = 0\n for ind,value in enumerate(data):\n avg += value\n bucket = self.checkBucket(value,bucketBounds)\n buckets[bucket] += 1\n Q4 = 0\n if bucket in Q1Ind:\n Q['Q1'] += 1\n elif bucket in Q23Ind:\n Q['Q23'] += 1\n elif bucket in Q4Ind:\n Q['Q4'] += 1\n Q4 = dayInterval\n Q4increment[ind] = Q['Q4']/float(ind+1)\n Q4time[ind] = Q4time[ind-1]+Q4 if ind != 0 else Q4\n\n avg = avg/float(count)\n days = [x*dayInterval for x in range(0,len(data))]\n for key in Q.keys():Q[key] = Q[key]/float(count)\n for ind in range(len(buckets)):buckets[ind] = buckets[ind]/float(count)\n return days,count,buckets,Q,Q4increment,Q4time,min_,max_,avg\n \n except Exception as e:\n print e\n print \"Analysis of TOW sensor failed, check inputs for proper formatting.\"\n return False", "def load_prepare_data(input_dim, batch_size, reading_dir, char_vector, bucket_size):\n\n h, w, c = input_dim\n\n data_buckets = {}\n bucket_weights = {}\n number_samples = 0\n paths = [x[0] for x in os.walk('/content/scrabble-gan/res/data/lamo/words-Reading/')]\n # (1) read buckets into memory\n for path in paths:\n\n imgs = []\n labels = []\n\n reading_dir_bucket = path + '/'\n file_list = os.listdir(reading_dir_bucket)\n file_list = [fi for fi in file_list if fi.endswith(\".txt\")]\n\n for file in file_list:\n with open(reading_dir_bucket + file, 'r', encoding='utf8') as f:\n x = f.readline()\n for char in x:\n try:\n y = char_vector.index(char)\n except:\n print(char)\n # 'auto' -> [0, 20, 19, 14]\n label = [char_vector.index(char) for char in x]\n img = cv2.imread(os.path.join(reading_dir_bucket, os.path.splitext(file)[0] + '.png'), 0)\n imgs.append(img)\n labels.append(label)\n number_samples += 1\n \n print(path.split('/')[-1])\n data_buckets[path.split('/')[-1]] = (imgs, labels)\n\n # (2) compute bucket_weights\n for i in range(1, bucket_size + 1, 1):\n bucket_weights[i] = len(data_buckets[i][1]) / number_samples\n\n # (3) create python generator\n while True:\n # select random bucket (follow transcription length distribution)\n random_bucket_idx = np.random.choice(bucket_size, 1, p=[value for value in bucket_weights.values()]) + 1\n random_bucket_idx = int(random_bucket_idx[0])\n\n image_batch = []\n label_batch = []\n\n for i in range(batch_size):\n # retrieve random samples from bucket of size batch_size\n sample_idx = random.randint(0, len(data_buckets[random_bucket_idx][1]) - 1)\n image_batch.append(data_buckets[random_bucket_idx][0][sample_idx])\n label_batch.append(data_buckets[random_bucket_idx][1][sample_idx])\n\n # convert to numpy array\n image_batch = np.array(image_batch).astype('float32')\n label_batch = np.array(label_batch).astype(np.int32)\n\n # normalize images to [-1, 1]\n image_batch = image_batch.reshape(-1, h, int((h / 2) * random_bucket_idx), c)\n image_batch = (image_batch - 127.5) / 127.5\n\n yield (image_batch, label_batch)", "def add_stats(self, data):\n for i in range(1, self.maxk+1):\n self.add_kgrams(data, i)", "def apply_cutoffs(self):\n cur_pos 
= 0\n self.cutoff_groups = {}\n #without both of these, can't make groups:\n if self.cutoffs and self.cutoff_tags:\n cutoffs = self.cutoffs.split(',')\n cutoff_tags = self.cutoff_tags.split(',')\n for index, item in enumerate(cutoffs):\n tag = cutoff_tags[index]\n self.cutoff_groups[tag] = self.contents[cur_pos:int(item)]\n cur_pos = int(item)\n #print self.cutoff_groups", "def aggregate_prototype(self, key, values):\n # clustID, [(docIDx,wordsListx)] -> s'han unit tots els value que tenen el mateix clust/proto\n wordsInCluster = {}\n documentsInCluster = []\n totalDocumentsInCluster = 0\n\n # Calcular frequencia de cada paraula i nombre total de documents associats a un cluster\n for pair in values:\n totalDocumentsInCluster += 1\n documentsInCluster.append(pair[0])\n for word in pair[1]:\n if not word in wordsInCluster:\n wordsInCluster[word] = 1\n else:\n wordsInCluster[word] += 1\n\n # Generar llista amb les paraules i el seu pes\n wordsWithWeight = [] \n for word, freq in wordsInCluster.items():\n weight = float(freq/totalDocumentsInCluster)\n wordsWithWeight.append((word,weight))\n \n \n # Ordenar llista alfabeticament perque el dict no esta ordenat\n # Funcio lambda que donat un element retorna el primer element\n takeFirst = lambda pair: pair[0]\n wordsWithWeight = sorted(wordsWithWeight, key= takeFirst)\n documentsInCluster = sorted(documentsInCluster)\n\n # key/clustID, \n yield key, (documentsInCluster,wordsWithWeight)", "def divide_into_buckets(filename, bucketName, separator, classColumn):\n\n # Number of buckets\n numberOfBuckets = 10\n data = {}\n\n # Read in the data and divide by category\n with open(filename) as f:\n lines = f.readlines()\n for line in lines:\n if separator != '\\t':\n line = line.replace(separator, '\\t')\n # Get the category\n category = line.split('\\t')[classColumn]\n data.setdefault(category, [])\n data[category].append(line)\n # Initialize the buckets\n buckets = []\n for i in range(numberOfBuckets):\n print \"Creating %d \" % i\n buckets.append([])\n # Based on category put data in a bucket\n for k in data.keys():\n print data[k]\n random.shuffle(data[k])\n print data[k]\n bNum = 0\n # Divide into buckets\n for item in data[k]:\n buckets[bNum].append(item)\n bNum = (bNum + 1) % numberOfBuckets\n # Write to a file\n for j in range(numberOfBuckets):\n f = open(\"data/%s-%03i\" % (bucketName, j+1), 'w')\n for item in buckets[j]:\n f.write(item)\n f.close()", "def add_datapoint(self, value, sample):\n if value < self.buckets[0]['low']:\n bucket = {\n 'ff': 1,\n 'vv': np.power(value, 2),\n 'vf': value,\n 'v': [value, value, 1],\n 'low': value,\n 'high': self.buckets[0]['low'],\n 'frequency': 1,\n 'size': value + 1 - self.buckets[self.numbuckets - 1]['high']\n }\n self.mergesmallest(sample)\n self.buckets.append(bucket) # borrow one bucket\n #print \"new bucket: \" + str(bucket['low']) + \", \" + str(bucket['high']) + \", \" + str(len(self.buckets))\n elif value > self.buckets[self.numbuckets - 1]['high']:\n bucket = {\n 'ff': 1,\n 'vv': np.power(value, 2),\n 'vf': value,\n 'v': [value, value, 1],\n 'low': self.buckets[self.numbuckets - 1]['high'],\n 'high': value + 1,\n 'frequency': 1,\n 'size': value + 1 - self.buckets[self.numbuckets - 1]['high']\n }\n self.mergesmallest(sample)\n self.buckets.append(bucket)\n #print \"new bucket: \" + str(bucket['low']) + \", \" + str(bucket['high']) + \", \" + str(len(self.buckets))\n else:\n for i in range(0, self.numbuckets):\n if value >= self.buckets[i]['low'] and value < self.buckets[i]['high']:\n 
self.buckets[i]['frequency'] += 1", "def __init__(self, n, ngram_counts, vocab, unk=False):\n\n self.n = n\n\n self.vocab = vocab\n\n self.V = len(vocab)\n\n self.ngram_counts = ngram_counts\n\n # YOUR CODE HERE\n # START BY MAKING THE RIGHT COUNTS FOR THIS PARTICULAR self.n\n # for unigrams, we only need total word count\n if n == 1:\n self.total_count = sum(self.ngram_counts.values())\n # for bigrams, we need total count wrt each word. In our language, it is history count.\n elif n == 2:\n self.history_count = Counter()\n for k, v in self.ngram_counts.items():\n self.history_count[k[0]] = self.history_count[k[0]] + v\n # since we only count for the first word in the tuple, we will always\n # miss counting </s>. However, since the frequency of </s> is the same\n # as the frequency of <s>, we can simply assign it equal to it.\n self.history_count['</s>'] = self.history_count['<s>']", "def build_dictionary_ngrams(training_datasets): \n word_counter_unigrams = collections.Counter()\n word_counter_bigrams = collections.Counter()\n word_counter_trigrams = collections.Counter()\n for i, dataset in enumerate(training_datasets):\n for example in dataset:\n sent1_tokenized = tokenize(example['sentence1_binary_parse'])\n sent2_tokenized = tokenize(example['sentence2_binary_parse'])\n bigrams1 = nltk.bigrams(sent1_tokenized)\n bigrams2 = nltk.bigrams(sent2_tokenized)\n trigrams1 = nltk.trigrams(sent1_tokenized)\n trigrams2 = nltk.trigrams(sent2_tokenized)\n word_counter_bigrams.update(bigrams1)\n word_counter_bigrams.update(bigrams2)\n word_counter_trigrams.update(trigrams1)\n word_counter_trigrams.update(trigrams2)\n word_counter_unigrams.update(sent1_tokenized)\n word_counter_unigrams.update(sent2_tokenized)\n \n vocabulary_uni = set([word for word in word_counter_unigrams])\n vocabulary_uni = list(vocabulary_uni)\n vocabulary_uni = [PADDING, UNKNOWN] + vocabulary_uni \n word_indices_uni = dict(zip(vocabulary_uni, range(len(vocabulary_uni))))\n \n vocabulary_bi = set([word for word in word_counter_bigrams])\n vocabulary_bi = list(vocabulary_bi)\n vocabulary_bi = [PADDING, UNKNOWN] + vocabulary_bi \n word_indices_bi = dict(zip(vocabulary_bi, range(len(vocabulary_bi))))\n \n vocabulary_tri = set([word for word in word_counter_trigrams])\n vocabulary_tri = list(vocabulary_tri)\n vocabulary_tri = [PADDING, UNKNOWN] + vocabulary_tri \n word_indices_tri = dict(zip(vocabulary_tri, range(len(vocabulary_tri))))\n\n return word_indices_uni, word_indices_bi, word_indices_tri", "def create_dataset():\n opt = Opt.get_instance()\n\n opt.bins = [i for i in range(10, opt.max_len + 1)]\n\n if opt.dataset is not None and os.path.exists(opt.dataset):\n print('loading saved dataset...')\n with open(opt.dataset, 'rb') as f:\n opt.src_bins = pickle.load(f)\n opt.trg_bins = pickle.load(f)\n\n print({s: len(opt.src_bins[s]) for s in opt.bins})\n return\n\n print('reading datasets')\n with open(opt.src_data_path, 'r', encoding='utf-8') as f:\n opt.src_data = f.read().split('\\n')\n with open(opt.trg_data_path, 'r', encoding='utf-8') as f:\n opt.trg_data = f.read().split('\\n')\n\n opt.src_bins = {i: [] for i in opt.bins}\n opt.trg_bins = {i: [] for i in opt.bins}\n\n print('tokenizing and bining...')\n for i in tnrange(len(opt.src_data)):\n src = opt.src_data[i]\n trg = opt.trg_data[i]\n # for i, (src, trg) in enumerate(zip(opt.src_data, opt.trg_data)):\n src = opt.src_processor.encode(src)\n trg = [opt.trg_bos] + opt.trg_processor.encode(trg) + [opt.trg_eos]\n opt.src_data[i] = 0\n opt.trg_data[i] = 0\n\n lsrc = 
len(src)\n ltrg = len(trg)\n if lsrc > opt.max_len or ltrg > opt.max_len:\n continue\n\n for v in opt.bins:\n if lsrc <= v and ltrg <= v:\n for _ in range(lsrc, v):\n src.append(opt.src_pad)\n for _ in range(ltrg, v):\n trg.append(opt.trg_pad)\n\n opt.src_bins[v].append(src)\n opt.trg_bins[v].append(trg)\n break\n\n if opt.dataset is not None:\n with open(opt.dataset, 'wb') as f:\n pickle.dump(opt.src_bins, f)\n pickle.dump(opt.trg_bins, f)\n\n temp = {s: len(opt.src_bins[s]) for s in opt.bins}\n opt.train_len = sum([temp[v] for v in opt.bins])\n print(temp)", "def __create_tags(self, bucket):\n tags = []\n sentence_length = 0\n for line in bucket:\n line = line.strip()\n tokens = line.split()\n assert (len(tokens) == 4)\n sentence_length += 1\n tags.append(self.__tag_vec(tokens[3], self.config.class_size)) # tag one-hot\n if sentence_length == self.max_sentence_length: break\n # padding with 0s\n for _ in range(self.max_sentence_length - sentence_length):\n tags.append(np.array([0] * self.config.class_size))\n return tags", "def _parse_buckets(self, d: ByteString) -> None:\n bin_size = self.__BIN_STRUCT.size\n self._cuckoo_capacity = (len(bytes(d)) - bin_size) // bin_size // self.bucket_size\n start = 0\n end = bin_size\n self._buckets = []\n for i in range(self.capacity):\n self.buckets.append([])\n for _ in range(self.bucket_size):\n finger, count = self.__BIN_STRUCT.unpack(bytes(d[start:end]))\n if finger > 0:\n ccb = CountingCuckooBin(finger, count)\n self.buckets[i].append(ccb)\n self._inserted_elements += count\n self.__unique_elements += 1\n start = end\n end += bin_size", "def get_length_bucket( msg_length ):\n if msg_length < 20:\n return \"short\"\n elif msg_length < 80:\n return \"medium\"\n else:\n return \"long\"", "def arrangeBuckets(self, counter, areas, bucketarea, sample, N):\n boundaries = sorted(bucketarea.items(), key=operator.itemgetter(1))\n low = self.min\n values = bucketarea.values()\n values = list(itertools.chain(*values))\n values = sorted(values)\n for i in range(0, len(values)):\n self.buckets[i]['low'] = low\n highindex = values[i]\n self.buckets[i]['high'] = sample[highindex]\n self.buckets[i]['size'] = sample[highindex] - low\n if sample[highindex] == self.buckets[i]['low']:\n self.buckets[i]['high'] = sample[highindex + 1]\n self.buckets[i]['size'] = sample[highindex + 1] - low\n if low == self.min:\n self.buckets[i]['frequency'] = counter[sample[0]] * N / len(sample) * 2\n else:\n self.buckets[i]['frequency'] = counter[low] * N / len(sample) * 2\n low = self.buckets[i]['high']\n self.buckets[self.numbuckets - 1]['high'] = self.max + 1\n self.buckets[self.numbuckets - 1]['low'] = self.buckets[self.numbuckets - 2]['high']\n self.buckets[self.numbuckets - 1]['frequency'] = counter[self.buckets[self.numbuckets - 1]['low']] * N / len(sample) * 2\n self.buckets[self.numbuckets - 1]['size'] = self.buckets[self.numbuckets - 1]['high'] - self.buckets[self.numbuckets - 1]['low']\n f = 0\n for i in range(len(self.buckets)):\n f += self.buckets[i]['frequency']\n #assert np.isclose(f, N)", "def __init__(self, num_hash=5, buckets=272):\n self.num_hash = num_hash\n self.buckets = buckets\n self.table = np.zeros([num_hash, buckets])", "def __init__(self, num_buckets = 5):\n self.map = DoubleLinkedList()\n for i in range(0, num_buckets):\n self.map.push(DoubleLinkedList())", "def add_datapoint(self, value):\n if value < self.buckets[0]['low']:\n self.buckets[0]['low'] = value\n self.buckets[0]['frequency'] += 1\n self.buckets[0]['size'] = self.buckets[0]['high'] - 
value\n elif value > self.buckets[self.numbuckets - 1]['high']:\n self.buckets[self.numbuckets - 1]['high'] = value + 1\n self.buckets[self.numbuckets - 1]['frequency'] += 1\n self.buckets[self.numbuckets - 1]['size'] = value + 1 - self.buckets[self.numbuckets - 1]['low']\n else:\n for i in range(0, self.numbuckets):\n if value >= self.buckets[i]['low'] and value < self.buckets[i]['high']:\n self.buckets[i]['frequency'] += 1", "def compare_this_other(this_strnd, oth_strnd, oth_strnd_anot, cutoff):\n\n p = Plotter()\n\n (this_sizes, this_dists) = this_strnd\n (other_sizes, other_dists) = oth_strnd\n (annot_other_sizes, annot_other_dists) = oth_strnd_anot\n\n # These are all dictionaries. Compare the (normalized) distribution of\n # lenghts from all of them\n #sizes = {'this': this_sizes, 'other': other_sizes,\n #'annot_other':annot_other_sizes}\n sizes = {'Opposite strand': this_sizes, 'Annotated strand': other_sizes}\n\n distances = {'this': this_dists, 'other': other_dists,\n 'annot_other':annot_other_dists}\n\n ## Get all dists, irrespective of cluster size\n merged_dists = {}\n for (dist_name, dist_dict) in distances.items():\n merged_dists[dist_name] = sum(dist_dict.itervalues(), [])\n\n #p.distance_histogram(merged_dists)\n\n ## Create a zero-array for the max sizes\n all_sizes = {}\n for (size_name, size_dict) in sizes.items():\n this_size = np.zeros(cutoff)\n for (size, size_count) in size_dict.iteritems():\n if size < cutoff:\n this_size[size-1] = size_count\n if size >= cutoff:\n this_size[-1] += size_count\n\n all_sizes[size_name] = this_size\n\n p.cluster_size_distribution(all_sizes, cutoff)\n\n debug()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modify user input. Make sure the user's email is all lowercase. Create a slug for the user.
def save(self, *args, **kwargs): self.email = self.email.lower() self.slug = slugify(self.username, allow_unicode=True) super().save(*args, **kwargs)
[ "def test_new_user_email_normalize(self):\n email = 'test1@gmail.com'\n user = get_user_model().objects.create_user(email, 'test123')\n\n self.assertEqual(user.email, email.lower())", "def test_create_user_email_normalized(self):\n email = 'test1@ASDSS.com'\n user = sample_user(email)\n self.assertEqual(email.lower(), user.email)", "def clean_username_(self):\n submitted_username = self.cleaned_data.get('username', None)\n if submitted_username:\n self.cleaned_data['username'] = submitted_username.lower()\n return profanity_clean_field(self, 'username')", "def generate_username(\n email: typing.Optional[str],\n given_name: typing.Optional[str],\n family_name: typing.Optional[str],\n) -> str:\n\n def check_name(name):\n username = slugify(name)\n if name and User.objects.filter(username=username).count() == 0:\n return username\n else:\n return None\n\n email_name = check_name(email.split(\"@\")[0]) if email else None\n if email_name:\n return email_name\n\n given_slug = check_name(given_name) if given_name else None\n if given_slug:\n return given_slug\n\n name_slug = (\n check_name(given_name + \" \" + family_name)\n if given_name and family_name\n else None\n )\n if name_slug:\n return name_slug\n\n email_slug = check_name(email) if email else None\n if email_slug:\n return email_slug\n\n base_name = email_name if email_name else \"user\"\n existing = User.objects.filter(username__startswith=base_name + \"-\").count()\n return \"{}-{}\".format(base_name, existing + 1)", "def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.title + '-' +\n uuid.uuid4().hex[:6])\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = re.sub('[^\\w]+', '-', self.title.lower())\n\n ret = super(Post, self).save(*args, **kwargs)\n\n return ret", "def clean_username(self):\n if not alnum_re.search(self.cleaned_data['username']):\n raise forms.ValidationError('Usernames can only contain letters, numbers and underscores')\n try:\n user = User.objects.get(username__exact=self.cleaned_data['username'])\n except User.DoesNotExist:\n return self.cleaned_data['username']\n raise forms.ValidationError('This username is already taken. 
Please choose another.')", "def clean_email(sender, instance, *args, **kwargs):\n if isinstance(instance.email, (str, unicode)):\n instance.email = instance.email.lower().strip()", "def save(self):\n if not self.slug:\n self.slug = slugify(self.name) \n super(Location, self).save()", "def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = self._get_unique_slug()\n super().save()", "def normalize_username(name):\n underscores = re.sub(r'\\s', '_', name)\n single_space = re.sub(r'_+', ' ', underscores)\n trimmed = single_space.strip()\n first = trimmed[0:1]\n rest = trimmed[1:]\n return first.upper() + rest", "def save(self, **kwargs):\n self.slug = self._make_unique_slug(**kwargs)\n super(Submission,self).save(**kwargs)", "def _process_user_data(guid,\n email,\n first_name,\n middle_initial,\n last_name,\n email_validated,\n is_nyc_employee,\n has_nyc_account,\n active,\n terms_of_use_accepted,\n is_anonymous_requester):\n mailbox, _ = email.split('@')\n\n if first_name is None:\n first_name = mailbox\n\n user = Users.query.filter_by(guid=guid).first()\n if user is None:\n user = find_user_by_email(email)\n\n # update or create user\n if user is not None:\n _update_user_data(\n user,\n guid,\n email,\n first_name,\n middle_initial,\n last_name,\n email_validated,\n is_nyc_employee,\n has_nyc_account,\n active,\n terms_of_use_accepted,\n is_anonymous_requester\n )\n else:\n user = Users(\n guid=guid,\n email=email,\n first_name=first_name,\n middle_initial=middle_initial,\n last_name=last_name,\n email_validated=email_validated,\n is_nyc_employee=is_nyc_employee,\n has_nyc_account=has_nyc_account,\n active=active,\n terms_of_use_accepted=terms_of_use_accepted,\n is_anonymous_requester=is_anonymous_requester\n )\n create_object(user)\n\n return user", "def set_slug(sender, instance, *args, **kwargs):\n if instance.title and not instance.slug:\n slug = slugify(instance.title)\n\n while Book.objects.filter(slug=slug).exists():\n slug = slugify(\n '{}-{}'.format(instance.title, str(uuid.uuid4())[:8])\n )\n instance.slug = slug", "def clean_username(self):\n data = self.cleaned_data['username']\n return create_user_token(data)", "def on_model_change(self, form, model, is_created):\n model['url_slug'] = slugify(model['title'])", "def _make_slug(title):\n if title in constants.FORBIDDEN_SLUGS or _four_digit(title):\n title += constants.SLUG_MODIFIER\n return slugify(title)", "def make_url_friendly(input_str):\n if input_str is None:\n return None\n return re.sub(r'[\\W\\\\/_]+', '-', remove_accents(input_str)).lower()", "def edit_name(request):\n # Validate the edit form and save the new first and last name\n if request.method == 'POST':\n edit_user_form = EditNameForm(request.POST)\n if edit_user_form.is_valid():\n d = edit_user_form.cleaned_data\n request.user.first_name = d['first_name']\n request.user.last_name = d['last_name']\n request.user.save()\n return redirect('/account/')\n\n return render(request, 'edit_name.html')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transmit a single temperature to heatseeknyc.com.
def transmit_temperature(temperature): common.add_temperature(temperature) reading = dict(sensor_name=temperature['cell_id'], temp=temperature['temperature'], humidity=temperature['humidity'], time=temperature['hub_time'].timestamp(), verification='c0ffee') logging.info('POSTing {}...'.format(reading)) response = requests.post("{}/readings.json".format(os.environ['RELAY_HEATSEEK_APP']), json=dict(reading=reading)) if response.status_code != requests.codes.ok: logging.error('request %s got %s response %s', response.request.body, response.status_code, response.text) return response
[ "def sendMQTTData(temperature, humidity):\n timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(time.time()))\n payload = (\"\"\"\n {\n \"deviceID\" : \"WeatherMap\",\n \"Data\" :{\n \"Temperature\" : {\n \"data\": \"%s\",\n \"unit\" : \"C\"\n },\n \"Humidity\" : {\n \"data\" : \"%s\",\n \"unit\" : \"%%\"\n },\n \"Timestamp\" : \"%s\"\n }\n }\n \"\"\"%(temperature, humidity, timestamp))\n client.publish(\"/RSU/remote/WeatherMap/json\", payload, 1)\n\n f = open(\"Receive/Weather.txt\", \"a+\")\n f.write(payload + \"\\n\")\n f.close()", "def temperature(self):\n noun = 'DEV:T' + str(self.temperature_channel) + ':TEMP:SIG:TEMP'\n command = 'READ:' + noun + '\\r\\n'\n response = self.query_and_receive(command)\n\n return self.extract_value(response, noun, 'K')", "def __sync_temperature(self) -> None:\n if self.__peer is not None:\n try:\n self.__peer.send_command(\n MicrobitTemperatureCommand(temperature=self.__temperature)\n )\n except CommunicationClosedError:\n self.__peer = None", "def temperature(self):\n\t\t# Poll status for TDR to be set.\n\t\twhile self._device.readU8(MPL3115A2._MPL3115A2_REGISTER_STATUS) & MPL3115A2._MPL3115A2_REGISTER_STATUS_TDR == 0:\n\t\t\ttime.sleep(0.01)\n\t\t# Read 2 bytes of data from temp register.\n\t\ttemp_msb = self._device.readU8(MPL3115A2._MPL3115A2_REGISTER_TEMP_MSB)\n\t\ttemp_lsb = self._device.readU8(MPL3115A2._MPL3115A2_REGISTER_TEMP_LSB)\n\t\ttemp = (temp_msb << 8) | temp_lsb\n\t\ttemp >>= 4\n\t\tif temp & 0x800:\n\t\t\ttemp |= 0xF000\n\t\t# Scale down to degrees Celsius.\n\t\treturn temp / 16.0", "async def set_temperature(self, temp: str):\n set_t = await super().set_temperature(temp)\n return await self.hw_device.set_temperature(self.channel, set_t)", "def set_temperature(self, celsius):\n return None", "def read_temperature1(self):\n self._sensor = subscribe.simple(\"Sensors/Room1/Temperature\", hostname=\"192.168.145.127\")\n return self._sensor.payload", "def temperature(self) -> TemperatureData:\n pass", "def read_temperature_data(self):\n\n current_temp = serialChiller.readCoolantTemperature(serialPort = self.ser)\n self.temperature_data.append(current_temp)", "def _get_publish_temperature(self, event_data=None):\n # Open and read the Linux system file that contains the temperature of\n # interest\n self._temperature_file = open(self._temperature_file_path, 'r')\n self._file_contents = self._temperature_file.read()\n self._temperature_file.close()\n # Make sure the value is a float64\n self._temperature = numpy.float64(self._file_contents)\n # Convert measurement to degrees Celsius\n self._temperature = self._temperature / self._temperature_multiplier\n if self._temperature_in_C == False:\n self._temperature = (self._temperature - 32.0) * 5.0 / 9.0\n # Convert measurement to Kelvin\n self._temperature = self._temperature + 273.15\n # Create message\n temperature_msg = sensor_msgs.msg.Temperature()\n temperature_msg.temperature = self._temperature\n temperature_msg.variance = 0.0 # unknown variance\n temperature_msg.header.stamp = rospy.Time.now()\n # Publish message\n self._temperature_publisher.publish(temperature_msg)", "def single_temperature(self, c, channel):\n dev = self.selectedDevice(c)\n return (dev.getSingleTemp(channel), dev.readings[channel][1])", "def read_temperature(self):\n self._humidity, self._celsius = Adafruit_DHT.read_retry(DHT_TYPE, DHT_PIN)\n\n if self._humidity is not None and self._celsius is not None:\n return self._celsius\n else:\n return 0", "def temperature(self, value: Quantity):\r\n 
self._temperature = value", "def get_temperature(self):\n self.get_reading()\n if self.raw_temperature < 0x3FFF:\n temperature = self.set_precision((self.raw_temperature * 165.0 / 2**14) - 40.0)\n return (temperature, 'temp', 'c')\n else:\n raise ValueError(\"Temperature value out of range (RawValue=0x%04X Max:0x3FFF)\" % raw_t)", "def temp(self):\n (status, temp_celcius) = self.__device.temperature()\n st = \"%d°C\" % temp_celcius\n return st", "def setHeatTemp(self, temp):\r\n self.heat.sensors[0].setTemp(temp)", "def setCoolerTemp(self):\n\n msg = PyQt5.QtWidgets.QMessageBox\n actValue = self.app.imaging.data.get('CCD_TEMPERATURE.CCD_TEMPERATURE_VALUE')\n\n if actValue is None:\n msg.critical(self,\n 'Error Message',\n 'Value cannot be set when not connected !')\n return False\n dlg = PyQt5.QtWidgets.QInputDialog()\n value, ok = dlg.getInt(self,\n 'Set cooler temperature',\n 'Value (-20..+20):',\n actValue,\n -20,\n 20,\n 1,\n )\n\n if not ok:\n return False\n\n self.app.imaging.sendCoolerTemp(temperature=value)\n\n return True", "def temperature(self, temperature):\n self.transite_light_state(color_temp=temperature)", "def target_temperature(self):\n return self._thermostat_temp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continually transmit temperatures from the database to heatseeknyc.com.
def transmit(): database = common.get_db() while True: with database: fetch_after = datetime.datetime.now() - datetime.timedelta(days=365) cursor = database.cursor() cursor.execute('select temperatures.id, cell_id, adc, temperature, hub_time, version, humidity' ' from temperatures left join cells on cells.id=cell_id' ' where relay and relayed_time is null and time > %s', (fetch_after.strftime('%Y-%m-%d'),)) temperatures = cursor.fetchall() if temperatures: logging.info('%s unrelayed temperatures', len(temperatures)) unknown_cell_ids = set() for temperature in temperatures: cell_id = temperature['cell_id'] if cell_id not in unknown_cell_ids: response = transmit_temperature(temperature) if response.status_code == requests.codes.ok: with database: database.cursor().execute('update temperatures set relayed_time = now()' ' where id=%(id)s', temperature) elif response.status_code == requests.codes.not_found: # give up on this cell's readings for this batch, since it will continue to 404 logging.info("404 for cell %s", cell_id) unknown_cell_ids.add(cell_id) elif response.status_code == requests.codes.bad_request: if "No user associated with that sensor" in response.text: # give up on this cell's readings for this batch, since it will continue to 400 logging.info("no user assocated with cell %s", cell_id) unknown_cell_ids.add(cell_id) time.sleep(1) time.sleep(1) # Notify deadmansnitch that the script is still running properly if os.environ.get('BATCH_WORKER_SNITCH_ID'): requests.get("https://nosnch.in/{}".format(os.environ["BATCH_WORKER_SNITCH_ID"]))
[ "def transmit_temperature(temperature):\n common.add_temperature(temperature)\n reading = dict(sensor_name=temperature['cell_id'],\n temp=temperature['temperature'],\n humidity=temperature['humidity'],\n time=temperature['hub_time'].timestamp(),\n verification='c0ffee')\n logging.info('POSTing {}...'.format(reading))\n response = requests.post(\"{}/readings.json\".format(os.environ['RELAY_HEATSEEK_APP']),\n json=dict(reading=reading))\n if response.status_code != requests.codes.ok:\n logging.error('request %s got %s response %s',\n response.request.body, response.status_code, response.text)\n return response", "def get_temp_wu():\n\n try:\n conn = sqlite3.connect(dbname)\n curs = conn.cursor()\n query = \"SELECT baudrate, port, id, active FROM sensors WHERE id like 'W_'\"\n\n curs.execute(query)\n rows = curs.fetchall()\n\n #print(rows)\n\n conn.close()\n\n if rows != None:\n for row in rows[:]:\n WUKEY = row[1]\n STATION = row[0]\n\n if int(row[3]) > 0:\n try:\n url = \"http://api.wunderground.com/api/{0}/conditions/q/{1}.json\".format(WUKEY, STATION)\n r = requests.get(url)\n data = r.json()\n log_temperature({'temperature': data['current_observation']['temp_c'], 'id': row[2]})\n except Exception as e:\n raise\n \n except Exception as e:\n text_file = open(\"debug.txt\", \"a+\")\n text_file.write(\"{0} ERROR:\\n{1}\\n\".format(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()), str(e)))\n text_file.close()", "def __sync_temperature(self) -> None:\n if self.__peer is not None:\n try:\n self.__peer.send_command(\n MicrobitTemperatureCommand(temperature=self.__temperature)\n )\n except CommunicationClosedError:\n self.__peer = None", "def yeast_temp(self, request_number):\n tmp = self.read_temp(request_number)\n # noinspection PyRedundantParentheses\n while (tmp > 80 or tmp < 60):\n try:\n print(\"\\t\\b***Temperature of yeast is out of range.***\")\n print(\" ***Bring another yeast and measure temperature again.*** \\n\")\n time.sleep(sleep_time * 3)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Temperature of yeast is not in range.\",\n datetime.datetime.now(),\n \"fail\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Temperature of yeast is not in range.\")\n time.sleep(sleep_time)\n except Exception as e:\n print(e)\n tmp = self.read_temp(request_number)\n try:\n print(\" Temperature of yeast is in range and ready to use.\\n\")\n time.sleep(sleep_time * 2)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Temperature of yeast measured.\", datetime.datetime.now(),\n \"pass\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Temperature of yeast measured\")\n time.sleep(sleep_time * 2)\n except Exception as e:\n\n # Exception: checks if measurement has failed\n print(e)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", 
\\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Failed to measure temperature of yeast\",\n datetime.datetime.now(), \"fail\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Failed to measure temperature of yeast\")\n time.sleep(sleep_time)", "def insert_dummy_temperatures(number_of_readings_per_probe=1000): \n readings = []\n probe_ids = hardware.temperature_probes.probe_ids \n for probe_number in range(0, number_of_readings_per_probe): \n for probe_id in probe_ids: \n readings.append(get_temperature_for_probe(probe_id, probe_number))\n database.db_adapter.store_temperatures(readings) \n database.db_adapter.db.commit()\n print 'Added ' + str(number_of_readings_per_probe * len(probe_ids)) + ' test temperature readings.'", "def process_temperature():\n \n \"\"\"for mutliple Sensors\"\"\"\n\n for SENSOR in W1ThermSensor.get_available_sensors():\n\tlogging.info(\"Sensor %s has temperature %.2f\" % (SENSOR.id, SENSOR.get_temperature()))\n \tG.labels(\"%s\" % SENSOR.id).set(\"%.2f\" % SENSOR.get_temperature())", "def run(self):\n time.sleep(5)\n while(1):\n time.sleep(5)\n temperature = SensorData_Object.getTemperature()\n self.temp_value.set_value(temperature) # Publish Temperature Sensor Data\n \n humidity = SensorData_Object.getHumidity()\n self.hum_value.set_value(humidity) # Publish Humidity Sensor Data\n \n flux = SensorData_Object.getMagFlux()\n self.flux_value.set_value(flux) # Publish MagneticFlux Data\n \n corona_level = SensorData_Object.getCorona()\n self.corona_level.set_value(corona_level) # Publish Corona Level Data\n \n Resistence = SensorData_Object.getResistence()\n self.resistance.set_value(Resistence) # Publish Resistence Data\n \n logging.info(\"All Data Published to OPC Server\")", "def log_temperature(temp):\n \n conn = sqlite3.connect(dbname)\n curs = conn.cursor()\n\n curs.execute(\"INSERT INTO temps values(datetime('now', 'localtime'), '{0}', '{1}' )\".format(temp['temperature'], temp['id']))\n\n conn.commit()\n conn.close()", "def _write_temperatures(self, timestamp, temperatures):\n\n influx_client = InfluxDBClient(url=f'{self._hostname}:{self._port}', token=self._apikey, org=self._org)\n write_api = influx_client.write_api(write_options=SYNCHRONOUS)\n\n data = []\n for temperature in temperatures:\n\n record_actual, record_target, record_delta = _get_zone_measurements(timestamp, temperature.zone, temperature.actual, temperature.target, self._logger)\n\n if record_actual:\n data.append(record_actual)\n if record_target:\n data.append(record_target)\n if record_delta:\n data.append(record_delta)\n\n try:\n if self._simulation is False:\n self._logger.debug('Writing all zone measurements to influx...')\n write_api.write(bucket=self._bucket, record=data)\n except Exception as e:\n if hasattr(e, 'response'):\n if e.response.status == 401:\n self._logger.exception(f'Insufficient write permissions to Bucket: \"{self._bucket}\" - aborting write\\nError:{e}')\n else:\n self._logger.exception(f'Error Writing to {self._bucket} at {self._hostname}:{self._port} - aborting write.\\nResponse: {e.body.json()}\\nError:{e}')\n else:\n self._logger.exception(f'Error Writing to {self._bucket} at {self._hostname}:{self._port} - aborting write\\nError:{e}')", "def 
read_temperature_data(self):\n\n current_temp = serialChiller.readCoolantTemperature(serialPort = self.ser)\n self.temperature_data.append(current_temp)", "def sendMQTTData(temperature, humidity):\n timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%SZ\", time.gmtime(time.time()))\n payload = (\"\"\"\n {\n \"deviceID\" : \"WeatherMap\",\n \"Data\" :{\n \"Temperature\" : {\n \"data\": \"%s\",\n \"unit\" : \"C\"\n },\n \"Humidity\" : {\n \"data\" : \"%s\",\n \"unit\" : \"%%\"\n },\n \"Timestamp\" : \"%s\"\n }\n }\n \"\"\"%(temperature, humidity, timestamp))\n client.publish(\"/RSU/remote/WeatherMap/json\", payload, 1)\n\n f = open(\"Receive/Weather.txt\", \"a+\")\n f.write(payload + \"\\n\")\n f.close()", "def read_temp(self, request_number):\n try:\n\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Waiting to measure temperature of yeast\",\n datetime.datetime.now(), \"pass\")\n print(log.generate_log())\n time.sleep(sleep_time)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Waiting to measure temperature of yeast\")\n time.sleep(sleep_time)\n temperature = random.randrange(55, 85, 1)\n input(\"\\033[1m 2. Press Enter to measure temperature of yeast: \\033[0m\\n\")\n time.sleep(sleep_time * 3)\n print('\\t\\t\\tTemp = \\033[1m{0:0.0f}*\\033[0;0m F'.format(temperature))\n time.sleep(sleep_time * 2)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Temperature of yeast received\", datetime.datetime.now(), \"pass\")\n print(log.generate_log())\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Temperature of yeast received\")\n time.sleep(sleep_time * 2)\n return temperature\n except Exception as e:\n # Exception: checks if measurement has failed\n print(e)\n status_log = \"{\\\"batch_id\\\":\\\"\" + request_number + \"\\\", \\\"brew_batch_stage\\\":\\\"Prep\\\", \\\"log\\\":\\\"Temperature\\\"}\"\n sn_log = ServiceNowLog()\n ServiceNowLog.create_new_log(sn_log, status_log)\n self.log_no = self.log_no + 1\n log = Log(self.log_no, \"Prep.Temperature\", \"Failed to check temperature of yeast\", datetime.datetime.now(),\n \"fail\")\n print(log.generate_log())\n time.sleep(sleep_time * 2)\n ml = MongoLogging.MongoLogging()\n MongoLogging.MongoLogging.MongoLog(ml, request_number, \"Prep.Temperature\",\n \"Fail to check temperature of yeast\")\n time.sleep(sleep_time * 2)", "def send_to_influxdb_cloud(sensor_reading):\n\n token = app.config[\"INFLUXDB_TOKEN\"];\n organization = app.config[\"INFLUXDB_ORGANIZATION\"];\n bucket = app.config[\"INFLUXDB_BUCKET\"];\n\n headers = { \"content-type\": \"text/plain\", \"Authorization\": \"Token {token}\".format(token=token) }\n\n field_value = (\"temp=\" if sensor_reading.value_type == \"temperature\" else \"hum=\") + str(sensor_reading.value)\n\n request_data = \"{value_type},device_id={device_id},host={host},sensor_id={sensor_id},sensor_serial={sensor_serial},location={location} {field_value} {unix_timestamp}\" \\\n 
.format(value_type = sensor_reading.value_type,\n device_id = sensor_reading.device_id,\n host = sensor_reading.host,\n sensor_id = sensor_reading.sensor_id,\n sensor_serial = sensor_reading.sensor_serial_no,\n location = sensor_reading.location.replace(\" \", \"\\ \").replace(\"=\", \"\\=\").replace(\",\", \"\\,\"),\n field_value = field_value,\n unix_timestamp = sensor_reading.unix_timestamp)\n response = requests.post(\"https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/write?org={0}&bucket={1}&precision=s\".format(organization, bucket), data = request_data, headers = headers)", "def read_temp(self):\n\n \"\"\"\n read outdoor-air-temperature (variable v00104) / Aussenluft\n \"\"\"\n debug(\"Reads the sensor for the outdoor-air-temperature...\")\n self.modbusclient.write_multiple_registers(0, str2duohex(\"v00104\"))\n outTemp = duohex2str(self.modbusclient.read_holdingregisters(0, 8))[7:]\n\n \"\"\"\n read supplied-air-temperature (variable v00105) / Zuluft\n \"\"\"\n debug(\"Reads the sensor for the supplied-air-temperature...\")\n self.modbusclient.write_multiple_registers(0, str2duohex(\"v00105\"))\n suppTemp = duohex2str(self.modbusclient.read_holdingregisters(0, 8))[7:]\n\n \"\"\"\n read exhaust-air-temperature (variable v00106) / Fortluft\n \"\"\"\n debug(\"Reads the sensor for the exhaust-air-temperature...\")\n self.modbusclient.write_multiple_registers(0, str2duohex(\"v00106\"))\n exhaustTemp = duohex2str(self.modbusclient.read_holdingregisters(0, 8))[7:]\n\n \"\"\"\n read extract-air-temperature (variable v00107) / Abluft\n \"\"\"\n debug(\"Reads the sensor for the extract-air-temperature...\")\n self.modbusclient.write_multiple_registers(0, str2duohex(\"v00107\"))\n extractTemp = duohex2str(self.modbusclient.read_holdingregisters(0, 8))[7:]\n\n info(\"Successfully read all temperature sensors!\")\n return float(outTemp), float(suppTemp), float(exhaustTemp), float(extractTemp)", "def thingspeak_post(metrics_time_utc, humidity, temperature, logger=None, thingspeak_options=None):\n\n if not logger: logger = logging.getLogger(__name__)\n\n thingspeak_strf_utcnowtime = metrics_time_utc.strftime('%Y-%m-%dT%H:%M:%SZ')\n thingspeak_url= f\"{thingspeak_options['THINGSPEAK_BASEURL']}&field1={str(round(float(temperature),1))}&field2={str(round(float(humidity),1))}&created_at={thingspeak_strf_utcnowtime}\"\n\n # Post data to ThingSpeak. 
Retry upon failure with exponential back off delays.\n trynum = 0\n sleep_time = 3\n post_success = None\n tpost_elapsed = None\n total_process_time = None\n tprocess0 = time.perf_counter()\n while not post_success and trynum < thingspeak_options['THINGSPEAK_POST_ERR_RETRIES']:\n trynum += 1\n if trynum >= 2:\n logger.warning(f\"Try num.: {trynum}\")\n sleep_time = max(3, (min((2**trynum), thingspeak_options['THINGSPEAK_POST_MAXIMUM_BACKOFF_TIME']) - round(int(tpost_elapsed or 0))))\n logger.debug(f\"Sleeping for {sleep_time} seconds\")\n time.sleep(sleep_time)\n\n tpost_elapsed = None\n logger.debug(\"Sending metrics to ThingSpeak...\")\n tpost0 = time.perf_counter()\n try:\n urlpost = urllib.request.urlopen(thingspeak_url)\n post_success = True\n except (urllib.error.URLError, urllib.error.HTTPError) as err:\n post_success = False\n # We are substiting the url if it's included in the error msg since it contains the API key.\n logger.error(f\"urllib.error: {str(err).replace(thingspeak_options['THINGSPEAK_BASEURL'], '<HIDDEN_URL>')}\")\n finally:\n tpost_elapsed = time.perf_counter() - tpost0\n try: urlpost.close()\n except NameError: pass\n\n logger.debug(f\"ThingSpeak post elapsed time: {tpost_elapsed:.4f} seconds\")\n\n total_process_time = time.perf_counter() - tprocess0\n logger.debug(f\"Total ThingSpeak post Process time: {total_process_time:.4f} seconds - Tries: {trynum}\")\n\n if post_success:\n logger.info(f\"Success sending metrics to ThingSpeak: Temperature: {temperature}, Humidity: {humidity} at: {thingspeak_strf_utcnowtime} (UTC)\")\n else: logger.error(\"Could not post metrics to ThingSpeak. Aborting...\")\n\n return post_success", "def _calculate_temperature(self, outside_temp):\n for data in self.inside_sensors.values():\n last_inside_temp = data['temperature']\n temp_delta = (outside_temp - last_inside_temp) \\\n * (1 - self.house_isolation_factor)\n\n new_inside_temp = last_inside_temp \\\n + self.timeframe \\\n * (temp_delta + self.devices_settings['heating_lvl']\n - self.devices_settings['cooling_lvl']) / 5\n\n data['temperature_delta'] = new_inside_temp - last_inside_temp\n data['temperature'] = truncate(new_inside_temp, -20, 40)", "def generate_temperature(self):\n day = self.__iday\n # daily min. and max. air temperatures (deg. C):\n if self.__is_rain:\n txxm = self.__g['txm1']\n txxs = self.__g['txs1']\n else:\n txxm = self.__g['txm']\n txxs = self.__g['txs']\n # random error generator:\n e = [0.0, 0.0, 0.0]\n for k in range(3):\n v = 3.0\n while math.fabs(v) > 2.5:\n rn1 = SimWeather.rnd()\n rn2 = SimWeather.rnd()\n v = math.sqrt(-2 * math.log(rn1)) * math.cos(2 * math.pi * rn2)\n e[k] = v\n r = [0.0, 0.0, 0.0]\n rr = [0.0, 0.0, 0.0]\n for i in range(3):\n for j in range(3):\n r[i] += SimWeather.__b[i][j] * e[j]\n rr[i] += SimWeather.__a[i][j] * self.__xim1[j]\n x = [0.0, 0.0, 0.0]\n for k in range(3):\n self.__xim1[k] = x[k] = r[k] + rr[k]\n self.table['tmax'][day] = x[0] * txxs + txxm\n self.table['tmin'][day] = x[1] * self.__g['tns'] + self.__g['tnm']\n if self.table['tmin'][day] > self.table['tmax'][day]:\n tmm = self.table['tmax'][day]\n self.table['tmax'][day] = self.table['tmin'][day]\n self.table['tmin'][day] = tmm", "def temperature(self) -> TemperatureData:\n pass", "def register_temp(request,t_zone):\n is_now = datetime.now()\n v_register = Register()\n v_register_temp = v_register.reg_temperature(t_zone)\n html = \"<html><body>It is now %s. And temperature is %s</body></html>\" % (is_now, v_register_temp)\n return HttpResponse(html)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asynchronous coroutine to fetch the HostedNumberOrderInstance
async def fetch_async(self) -> "HostedNumberOrderInstance": return await self._proxy.fetch_async()
[ "async def fetch_async(self) -> \"BuildInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"FactorInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"AccountInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"StepContextInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"InteractionChannelInstance\":\n return await self._proxy.fetch_async()", "async def list_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[HostedNumberOrderInstance]:\n return [\n record\n async for record in await self.stream_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n limit=limit,\n page_size=page_size,\n )\n ]", "async def fetch_async(self) -> BuildInstance:\n \n payload = await self._version.fetch_async(method='GET', uri=self._uri, )\n\n return BuildInstance(\n self._version,\n payload,\n service_sid=self._solution['service_sid'],\n sid=self._solution['sid'],\n \n )", "async def stream_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def _process_order_queue(self):\n\n def order_accepted(info):\n accepted_order = hlp.json_to_order(info)\n accepted_order.ref = order.ref\n self.order_accepted(accepted_order)\n\n def order_rejected(info):\n self.order_rejected(info, order)\n\n # TODO: Candidate for modularisation and code extraction\n while not self.stop:\n if self.is_session_active():\n while not self._outgoing_order_queue.empty():\n order = self._outgoing_order_queue.get_nowait()\n order_dict = {\"type\": order.type.name, \"side\": order.side.name, \"price\": order.price,\n \"units\": order.units, \"market\": order.market, \"marketId\": order.market_id}\n\n if order.type == OrderType.CANCEL:\n order_dict[\"supplier\"] = order.id\n order_dict[\"original\"] = order.id\n\n self.debug(\"Order Queued: {}\".format(self._outgoing_order_count))\n await Request(\"/orders\", order_accepted, error_callback_func=order_rejected,\n request_method=RequestMethod.POST, data=order_dict).perform()\n self.debug(\" Order Sent: {}\".format(self._outgoing_order_count))\n\n self._outgoing_order_count[order.market_id] -= 1\n # task = self._loop.create_task(self._rest_post_data(cons.API_ROOT + \"/orders/\", order_dict, order_accepted, order_rejected))\n # asyncio.gather(task)\n else:\n if self._outgoing_order_queue.qsize() > 
0:\n self.warning(\"I cannot send orders to an inactive session.\")\n await asyncio.sleep(cons.MONITOR_ORDER_BOOK_DELAY)", "async def fetch_async(self) -> FactorInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return FactorInstance(\n self._version,\n payload,\n service_sid=self._solution[\"service_sid\"],\n identity=self._solution[\"identity\"],\n sid=self._solution[\"sid\"],\n )", "def get_next_order(self, device):\n # Set up the device specific query\n device_query = (\n \"((task_1_device = '\" + device + \"' and task_1_status = 'open') OR \"\n \"(task_2_device = '\" + device + \"' and task_2_status = 'open') OR \"\n \"(task_3_device = '\" + device + \"' and task_3_status = 'open') OR \"\n \"(task_4_device = '\" + device + \"' and task_4_status = 'open') OR \"\n \"(task_5_device = '\" + device + \"' and task_5_status = 'open'))\"\n )\n\n # 1) Check for an order with status active\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'active' AND \"\n + device_query\n + \")\"\n )\n # myresult = self.query_db_as_json(query, return_one=True)\n myresult = self.query_db_as_json(query)\n if myresult:\n return myresult\n\n # 2) Check for an order with priority high and which is scheduled\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'scheduled' AND \"\n \"priority = 'high' AND \" + device_query + \")\"\n )\n myresult = self.query_db_as_json(query)\n # myresult = self.query_db_as_json(query, return_one=True)\n if myresult:\n return myresult\n\n # 3) Check for an order which start on time and time has expired\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'scheduled' AND \"\n \"start_type = 'time' AND start_time = 0 AND \" + device_query + \")\"\n )\n myresult = self.query_db_as_json(query)\n # myresult = self.query_db_as_json(query, return_one=True)\n if myresult:\n return myresult\n\n # 4) Check for a scheduled order with the lowest id number\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'scheduled' AND \"\n + device_query\n + \")\"\n )\n myresult = self.query_db_as_json(query)\n # myresult = self.query_db_as_json(query, return_one=True)\n if myresult:\n return myresult\n else:\n return \"no next order\"", "async def fetch_async(self) -> AccountInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return AccountInstance(\n self._version,\n payload,\n sid=self._solution[\"sid\"],\n )", "def get_object(self):\n return get_object_or_404(Order, number=self.kwargs['order_number'])", "def work(self):\n while True:\n value = self.urls_queue.get()\n\n self.get_product_info(value) # to call get_products method which collects all products urls\n self.urls_queue.task_done()", "async def fetch_async(self) -> InteractionChannelInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return InteractionChannelInstance(\n self._version,\n payload,\n interaction_sid=self._solution[\"interaction_sid\"],\n sid=self._solution[\"sid\"],\n )", "def fetch_next(self):\n if self.rate_limit['remaining'] <= 0:\n print('Rate Limit exhausted. 
Waiting until', self.rate_limit['reset_date'], 'seconds left:', self.rate_limit['time_left'])\n interval = self.rate_limit['time_left']\n else:\n priority, q_insertion_num, github_path = self.queue.get()\n\n # Spawn a thread to download the GitHub data for the item and store it in the database\n self.Downloader(self, github_path, priority).start()\n\n # set timer for getting the next task.\n # keep q_insertion_num the same to keep sort order\n next_task = self.queue.get()\n next_priority = next_task[0]\n self.queue.put(next_task)\n\n if next_priority == self.priority_uncached:\n interval = self.interval_uncached\n elif next_priority == self.priority_user_requested:\n interval = self.interval_user_requested\n else:\n interval = self.interval_normal\n\n self.fetch_timer = DaemonTimer(interval, self.fetch_next)\n self.fetch_timer.start()", "async def acquire(self):\n await self.nonce_lock.acquire()", "async def get_payment_order_async(\n payment_order_no: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetPaymentOrder.create(\n payment_order_no=payment_order_no,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def _create_future(self):\n return asyncio.Future(loop=self.loop)", "def get_order(self):\n url = self._get_link(\"order\")\n if url:\n return self.client.orders.from_url(url)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asynchronous coroutine to fetch the HostedNumberOrderInstance
async def fetch_async(self) -> HostedNumberOrderInstance: payload = await self._version.fetch_async( method="GET", uri=self._uri, ) return HostedNumberOrderInstance( self._version, payload, sid=self._solution["sid"], )
[ "async def fetch_async(self) -> \"BuildInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"FactorInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"AccountInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"StepContextInstance\":\n return await self._proxy.fetch_async()", "async def fetch_async(self) -> \"InteractionChannelInstance\":\n return await self._proxy.fetch_async()", "async def list_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[HostedNumberOrderInstance]:\n return [\n record\n async for record in await self.stream_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n limit=limit,\n page_size=page_size,\n )\n ]", "async def fetch_async(self) -> BuildInstance:\n \n payload = await self._version.fetch_async(method='GET', uri=self._uri, )\n\n return BuildInstance(\n self._version,\n payload,\n service_sid=self._solution['service_sid'],\n sid=self._solution['sid'],\n \n )", "async def stream_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def _process_order_queue(self):\n\n def order_accepted(info):\n accepted_order = hlp.json_to_order(info)\n accepted_order.ref = order.ref\n self.order_accepted(accepted_order)\n\n def order_rejected(info):\n self.order_rejected(info, order)\n\n # TODO: Candidate for modularisation and code extraction\n while not self.stop:\n if self.is_session_active():\n while not self._outgoing_order_queue.empty():\n order = self._outgoing_order_queue.get_nowait()\n order_dict = {\"type\": order.type.name, \"side\": order.side.name, \"price\": order.price,\n \"units\": order.units, \"market\": order.market, \"marketId\": order.market_id}\n\n if order.type == OrderType.CANCEL:\n order_dict[\"supplier\"] = order.id\n order_dict[\"original\"] = order.id\n\n self.debug(\"Order Queued: {}\".format(self._outgoing_order_count))\n await Request(\"/orders\", order_accepted, error_callback_func=order_rejected,\n request_method=RequestMethod.POST, data=order_dict).perform()\n self.debug(\" Order Sent: {}\".format(self._outgoing_order_count))\n\n self._outgoing_order_count[order.market_id] -= 1\n # task = self._loop.create_task(self._rest_post_data(cons.API_ROOT + \"/orders/\", order_dict, order_accepted, order_rejected))\n # asyncio.gather(task)\n else:\n if self._outgoing_order_queue.qsize() > 
0:\n self.warning(\"I cannot send orders to an inactive session.\")\n await asyncio.sleep(cons.MONITOR_ORDER_BOOK_DELAY)", "async def fetch_async(self) -> FactorInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return FactorInstance(\n self._version,\n payload,\n service_sid=self._solution[\"service_sid\"],\n identity=self._solution[\"identity\"],\n sid=self._solution[\"sid\"],\n )", "def get_next_order(self, device):\n # Set up the device specific query\n device_query = (\n \"((task_1_device = '\" + device + \"' and task_1_status = 'open') OR \"\n \"(task_2_device = '\" + device + \"' and task_2_status = 'open') OR \"\n \"(task_3_device = '\" + device + \"' and task_3_status = 'open') OR \"\n \"(task_4_device = '\" + device + \"' and task_4_status = 'open') OR \"\n \"(task_5_device = '\" + device + \"' and task_5_status = 'open'))\"\n )\n\n # 1) Check for an order with status active\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'active' AND \"\n + device_query\n + \")\"\n )\n # myresult = self.query_db_as_json(query, return_one=True)\n myresult = self.query_db_as_json(query)\n if myresult:\n return myresult\n\n # 2) Check for an order with priority high and which is scheduled\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'scheduled' AND \"\n \"priority = 'high' AND \" + device_query + \")\"\n )\n myresult = self.query_db_as_json(query)\n # myresult = self.query_db_as_json(query, return_one=True)\n if myresult:\n return myresult\n\n # 3) Check for an order which start on time and time has expired\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'scheduled' AND \"\n \"start_type = 'time' AND start_time = 0 AND \" + device_query + \")\"\n )\n myresult = self.query_db_as_json(query)\n # myresult = self.query_db_as_json(query, return_one=True)\n if myresult:\n return myresult\n\n # 4) Check for a scheduled order with the lowest id number\n query = (\n \"SELECT * FROM orders WHERE id = (SELECT min(id) FROM orders WHERE status = 'scheduled' AND \"\n + device_query\n + \")\"\n )\n myresult = self.query_db_as_json(query)\n # myresult = self.query_db_as_json(query, return_one=True)\n if myresult:\n return myresult\n else:\n return \"no next order\"", "async def fetch_async(self) -> AccountInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return AccountInstance(\n self._version,\n payload,\n sid=self._solution[\"sid\"],\n )", "def get_object(self):\n return get_object_or_404(Order, number=self.kwargs['order_number'])", "def work(self):\n while True:\n value = self.urls_queue.get()\n\n self.get_product_info(value) # to call get_products method which collects all products urls\n self.urls_queue.task_done()", "async def fetch_async(self) -> InteractionChannelInstance:\n\n payload = await self._version.fetch_async(\n method=\"GET\",\n uri=self._uri,\n )\n\n return InteractionChannelInstance(\n self._version,\n payload,\n interaction_sid=self._solution[\"interaction_sid\"],\n sid=self._solution[\"sid\"],\n )", "def fetch_next(self):\n if self.rate_limit['remaining'] <= 0:\n print('Rate Limit exhausted. 
Waiting until', self.rate_limit['reset_date'], 'seconds left:', self.rate_limit['time_left'])\n interval = self.rate_limit['time_left']\n else:\n priority, q_insertion_num, github_path = self.queue.get()\n\n # Spawn a thread to download the GitHub data for the item and store it in the database\n self.Downloader(self, github_path, priority).start()\n\n # set timer for getting the next task.\n # keep q_insertion_num the same to keep sort order\n next_task = self.queue.get()\n next_priority = next_task[0]\n self.queue.put(next_task)\n\n if next_priority == self.priority_uncached:\n interval = self.interval_uncached\n elif next_priority == self.priority_user_requested:\n interval = self.interval_user_requested\n else:\n interval = self.interval_normal\n\n self.fetch_timer = DaemonTimer(interval, self.fetch_next)\n self.fetch_timer.start()", "async def acquire(self):\n await self.nonce_lock.acquire()", "async def get_payment_order_async(\n payment_order_no: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetPaymentOrder.create(\n payment_order_no=payment_order_no,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def _create_future(self):\n return asyncio.Future(loop=self.loop)", "def get_order(self):\n url = self._get_link(\"order\")\n if url:\n return self.client.orders.from_url(url)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an instance of HostedNumberOrderInstance
def get_instance(self, payload: Dict[str, Any]) -> HostedNumberOrderInstance: return HostedNumberOrderInstance(self._version, payload)
[ "def __new__(cls, *args, **kwargs):\n return BuiltInClass.get_instance(cls, 'FIXNUM', *args)", "def __new__(cls, *args, **kwargs):\n return BuiltInClass.get_instance(cls, 'NUMBER', True)", "def shopify_create_order_queue(self, instance, created_by=\"import\"):\n order_queue_vals = {\n \"shopify_instance_id\": instance and instance.id or False,\n \"created_by\": created_by\n }\n\n return self.env[\"shopify.order.data.queue.ept\"].create(order_queue_vals)", "def __init_platform_e_commerce_agents(self, num_platform_e_commerce_agents):\n for i in range(num_platform_e_commerce_agents):\n unique_id = \"platform_e_commerce_\" + i\n technical_cost = 200\n subsidy_cost = 80\n platform_e_commerce_agent = PlatformECommerceAgent(unique_id, self, technical_cost, subsidy_cost)\n self.platform_e_commerce_schedule.add(platform_e_commerce_agent)", "def create_order(self, order):\n return self.post(cc_urls['order'], {'order': json.dumps(order)})", "def __init__(self, process_order, name=None, max_threads=1,\n sort_orders=False, unique_orders=False):\n self._name = name\n self._process_order = process_order\n self._max_threads = max_threads\n self._sort_orders = sort_orders\n self._unique_orders = unique_orders\n # If True, worker threads must stop immediately.\n self._stop_immediately = False\n # If True, worker threads must stop when queue is empty.\n self._stop_if_no_orders = False\n self._threads = []\n # List of orders waiting for processing.\n self._waiting_orders = []\n # List of orders currently being processed.\n self._processing_orders = []\n self._condition = threading.Condition()", "def __init__(self, ttl, order, data=None, msg_id=None):\n self.id = msg_id or str(uuid.uuid4())\n self.ttl = ttl\n self.order = order.upper()\n self.data = data", "def __init__(self, initial_money=0, symbols=[], order_list=[]):\n \n #create the symbols list, adding 'Cash'\n self.symbols = list(symbols)\n if 'Cash' not in self.symbols:\n self.symbols.insert(0, 'Cash')\n \n #creates the dict where store the participation of the portfolio\n self.amount = dict()\n for s in self.symbols:\n self.amount[s] = 0\n self.amount['Cash'] = initial_money\n \n #initializes with the order lists\n for order in order_list:\n self.execute_order(date=order['date'], symbol=order['symbol'], is_buy=order['is_buy'],\n amount=order['amount'], p_output=True)", "def get_all_instances(cls, orders_input) -> Dict[str, Any]:\n # Collect all model keys from input\n identifiers = ModelIdentifiers()\n for order in orders_input:\n identifiers.user_ids.keys.append(order[\"user\"].get(\"id\"))\n identifiers.user_emails.keys.append(order[\"user\"].get(\"email\"))\n identifiers.user_external_references.keys.append(\n order[\"user\"].get(\"external_reference\")\n )\n identifiers.channel_slugs.keys.append(order.get(\"channel\"))\n identifiers.voucher_codes.keys.append(order.get(\"voucher\"))\n identifiers.order_external_references.keys.append(\n order.get(\"external_reference\")\n )\n if delivery_method := order.get(\"delivery_method\"):\n identifiers.warehouse_ids.keys.append(\n delivery_method.get(\"warehouse_id\")\n )\n identifiers.shipping_method_ids.keys.append(\n delivery_method.get(\"shipping_method_id\")\n )\n identifiers.tax_class_ids.keys.append(\n delivery_method.get(\"shipping_tax_class_id\")\n )\n notes = order.get(\"notes\") or []\n for note in notes:\n identifiers.user_ids.keys.append(note.get(\"user_id\"))\n identifiers.user_emails.keys.append(note.get(\"user_email\"))\n identifiers.user_external_references.keys.append(\n 
note.get(\"user_external_reference\")\n )\n identifiers.app_ids.keys.append(note.get(\"app_id\"))\n order_lines = order.get(\"lines\") or []\n for order_line in order_lines:\n identifiers.variant_ids.keys.append(order_line.get(\"variant_id\"))\n identifiers.variant_skus.keys.append(order_line.get(\"variant_sku\"))\n identifiers.variant_external_references.keys.append(\n order_line.get(\"variant_external_reference\")\n )\n identifiers.warehouse_ids.keys.append(order_line.get(\"warehouse\"))\n identifiers.tax_class_ids.keys.append(order_line.get(\"tax_class_id\"))\n fulfillments = order.get(\"fulfillments\") or []\n for fulfillment in fulfillments:\n for line in fulfillment.get(\"lines\") or []:\n identifiers.variant_ids.keys.append(line.get(\"variant_id\"))\n identifiers.variant_skus.keys.append(line.get(\"variant_sku\"))\n identifiers.variant_external_references.keys.append(\n line.get(\"variant_external_reference\")\n )\n identifiers.warehouse_ids.keys.append(line.get(\"warehouse\"))\n gift_cards = order.get(\"gift_cards\") or []\n for gift_card_code in gift_cards:\n identifiers.gift_card_codes.keys.append(gift_card_code)\n\n # Convert global ids to model ids and get rid of Nones\n for field in dataclass_fields(identifiers):\n identifier = getattr(identifiers, field.name)\n model, keys = identifier.model, identifier.keys\n keys = [key for key in keys if key is not None]\n setattr(identifier, \"keys\", keys)\n if \"_ids\" in field.name:\n model_ids = []\n for global_id in keys:\n try:\n _, id = from_global_id_or_error(\n str(global_id), model, raise_error=True\n )\n model_ids.append(id)\n except GraphQLError:\n pass\n setattr(identifier, \"keys\", model_ids)\n\n # Make DB calls\n users = User.objects.filter(\n Q(pk__in=identifiers.user_ids.keys)\n | Q(email__in=identifiers.user_emails.keys)\n | Q(external_reference__in=identifiers.user_external_references.keys)\n )\n variants = ProductVariant.objects.filter(\n Q(pk__in=identifiers.variant_ids.keys)\n | Q(sku__in=identifiers.variant_skus.keys)\n | Q(external_reference__in=identifiers.variant_external_references.keys)\n )\n channels = Channel.objects.filter(slug__in=identifiers.channel_slugs.keys)\n vouchers = Voucher.objects.filter(code__in=identifiers.voucher_codes.keys)\n warehouses = Warehouse.objects.filter(pk__in=identifiers.warehouse_ids.keys)\n shipping_methods = ShippingMethod.objects.filter(\n pk__in=identifiers.shipping_method_ids.keys\n )\n tax_classes = TaxClass.objects.filter(pk__in=identifiers.tax_class_ids.keys)\n apps = App.objects.filter(pk__in=identifiers.app_ids.keys)\n gift_cards = GiftCard.objects.filter(code__in=identifiers.gift_card_codes.keys)\n orders = Order.objects.filter(\n external_reference__in=identifiers.order_external_references.keys\n )\n\n # Create dictionary\n object_storage: Dict[str, Any] = {}\n for user in users:\n object_storage[f\"User.id.{user.id}\"] = user\n object_storage[f\"User.email.{user.email}\"] = user\n if user.external_reference:\n object_storage[\n f\"User.external_reference.{user.external_reference}\"\n ] = user\n\n for variant in variants:\n object_storage[f\"ProductVariant.id.{variant.id}\"] = variant\n if variant.sku:\n object_storage[f\"ProductVariant.id.{variant.sku}\"] = variant\n if variant.external_reference:\n object_storage[\n f\"ProductVariant.external_reference.{variant.external_reference}\"\n ] = variant\n\n for channel in channels:\n object_storage[f\"Channel.slug.{channel.slug}\"] = channel\n\n for voucher in vouchers:\n object_storage[f\"Voucher.code.{voucher.code}\"] = 
voucher\n\n for gift_card in gift_cards:\n object_storage[f\"GiftCard.code.{gift_card.code}\"] = gift_card\n\n for order in orders:\n object_storage[\n f\"Order.external_reference.{order.external_reference}\"\n ] = order\n\n for object in [*warehouses, *shipping_methods, *tax_classes, *apps]:\n object_storage[f\"{object.__class__.__name__}.id.{object.pk}\"] = object\n\n return object_storage", "def _build_tree(numbers: List[int]) -> Node:\n tree = Node()\n tree.load(numbers)\n return tree", "def from_number(cls, value):\n return cls(None, value, None)", "def get_object(self):\n return get_object_or_404(Order, number=self.kwargs['order_number'])", "def tst_ordbook(request):\n num = request.param.get('num')\n if not num or num == 0:\n return OrderBook()\n orders = [Order(i+1, choice([OrderType.ASK, OrderType.BID]), randint(10, 100), randint(1, 100))\n for i in range(num)]\n return OrderBook(*orders)", "def _create_instances(self):\n #initialize the module\n _instance = self._module()\n self._instance_list = [_instance]", "def allocate_hosts(AutoPlacement=None, ClientToken=None, InstanceType=None, Quantity=None, AvailabilityZone=None):\n pass", "def create_billing(_order_id):\n return {\n 'entity_id': str(uuid.uuid4()),\n 'order_id': _order_id,\n 'done': time.time()\n }", "def spawn_process(self):\n env = {\n self.QUANTUM_NETWORK_ID_KEY: self.network.id,\n self.QUANTUM_RELAY_SOCKET_PATH_KEY:\n self.conf.dhcp_lease_relay_socket\n }\n\n cmd = [\n 'dnsmasq',\n '--no-hosts',\n '--no-resolv',\n '--strict-order',\n '--bind-interfaces',\n '--interface=%s' % self.interface_name,\n '--except-interface=lo',\n '--domain=%s' % self.conf.dhcp_domain,\n '--pid-file=%s' % self.get_conf_file_name('pid',\n ensure_conf_dir=True),\n #TODO (mark): calculate value from cidr (defaults to 150)\n #'--dhcp-lease-max=%s' % ?,\n '--dhcp-hostsfile=%s' % self._output_hosts_file(),\n '--dhcp-optsfile=%s' % self._output_opts_file(),\n '--dhcp-script=%s' % self._lease_relay_script_path(),\n '--leasefile-ro',\n ]\n\n for i, subnet in enumerate(self.network.subnets):\n # if a subnet is specified to have dhcp disabled\n if not subnet.enable_dhcp:\n continue\n if subnet.ip_version == 4:\n mode = 'static'\n else:\n # TODO (mark): how do we indicate other options\n # ra-only, slaac, ra-nameservers, and ra-stateless.\n mode = 'static'\n cmd.append('--dhcp-range=set:%s,%s,%s,%ss' %\n (self._TAG_PREFIX % i,\n netaddr.IPNetwork(subnet.cidr).network,\n mode,\n self.conf.dhcp_lease_time))\n\n if self.conf.dnsmasq_config_file:\n cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)\n if self.conf.dnsmasq_dns_server:\n cmd.append('--server=%s' % self.conf.dnsmasq_dns_server)\n\n if self.namespace:\n ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)\n ip_wrapper.netns.execute(cmd, addl_env=env)\n else:\n # For normal sudo prepend the env vars before command\n cmd = ['%s=%s' % pair for pair in env.items()] + cmd\n utils.execute(cmd, self.root_helper)", "async def list_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[HostedNumberOrderInstance]:\n return [\n record\n async for record in await self.stream_async(\n status=status,\n phone_number=phone_number,\n 
incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n limit=limit,\n page_size=page_size,\n )\n ]", "def make_order(self, message, state=None):\n resource_id = message['payload']['router']['id']\n resource_name = message['payload']['router']['name']\n user_id = None\n project_id = message['payload']['router']['tenant_id']\n\n order = Order(resource_id=resource_id,\n resource_name=resource_name,\n type=const.RESOURCE_ROUTER,\n status=state if state else const.STATE_RUNNING,\n user_id=user_id,\n project_id=project_id)\n return order", "def new_instance():\n\tmas = __empty_instance()\n\tm.set_env(mas, None)\n\tm.set_pop(mas, None)\n\tm.set_cell_rules(mas, [])\n\tm.set_agent_rules(mas, [])\n\tset_walker_rules(mas, [])\n\tm.set_ending_condition(mas, m.DEFAULT_ENDING_CONDITION)\n\tm.set_max_cycle(mas, 0)\n\tm.set_cycle(mas, 0)\n\treturn mas" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Streams HostedNumberOrderInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient.
def stream( self, status: Union["HostedNumberOrderInstance.Status", object] = values.unset, phone_number: Union[str, object] = values.unset, incoming_phone_number_sid: Union[str, object] = values.unset, friendly_name: Union[str, object] = values.unset, unique_name: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> Iterator[HostedNumberOrderInstance]: limits = self._version.read_limits(limit, page_size) page = self.page( status=status, phone_number=phone_number, incoming_phone_number_sid=incoming_phone_number_sid, friendly_name=friendly_name, unique_name=unique_name, page_size=limits["page_size"], ) return self._version.stream(page, limits["limit"])
[ "async def stream_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def list_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[HostedNumberOrderInstance]:\n return [\n record\n async for record in await self.stream_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n limit=limit,\n page_size=page_size,\n )\n ]", "def stream(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[BuildInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n page_size=limits['page_size']\n )\n\n return self._version.stream(page, limits['limit'])", "async def stream_async(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[BuildInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n page_size=limits['page_size']\n )\n\n return self._version.stream_async(page, limits['limit'])", "def chunk(self, limit, reverse=False):\n gen = reversed(self) if reverse else self\n interval = limit\n\n instances = []\n for count, p in enumerate(gen):\n if count >= limit:\n yield instances\n limit += interval\n instances = []\n instances.append(p)\n yield instances", "async def list_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[FactorInstance]:\n return [\n record\n async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )\n ]", "def stream(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[FactorInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(page_size=limits[\"page_size\"])\n\n return self._version.stream(page, limits[\"limit\"])", "async def list_async(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[BuildInstance]:\n return [record async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )]", "async def stream_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[FactorInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(page_size=limits[\"page_size\"])\n\n return self._version.stream_async(page, limits[\"limit\"])", "def stream(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: 
Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[AccountInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n friendly_name=friendly_name, status=status, page_size=limits[\"page_size\"]\n )\n\n return self._version.stream(page, limits[\"limit\"])", "def perf_archive_get_instances_iter_next(self, tag, maximum):\n return self.request( \"perf-archive-get-instances-iter-next\", {\n 'tag': tag,\n 'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],\n }, {\n 'generation': [ int, False ],\n 'records': [ int, False ],\n 'archive-records': [ ArchiveRecord, True ],\n } )", "async def stream_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[AccountInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n friendly_name=friendly_name, status=status, page_size=limits[\"page_size\"]\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "def get_all_ids_as_generator(self, **options) -> Iterator[dict]:\n offset = 0\n finished = False\n while not finished:\n input_lines = []\n batch = self._call(\n \"objects/tickets/paged\",\n method=\"GET\",\n doseq=True,\n params={\"offset\": offset},\n **options\n )\n for line in batch[\"objects\"]:\n input_lines.append({\"id\": f\"{line['objectId']}\"})\n offset = batch[\"offset\"]\n\n finished = not batch[\"hasMore\"]\n\n yield from input_lines", "def stream(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[InteractionChannelInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(page_size=limits[\"page_size\"])\n\n return self._version.stream(page, limits[\"limit\"])", "async def list_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[AccountInstance]:\n return [\n record\n async for record in await self.stream_async(\n friendly_name=friendly_name,\n status=status,\n limit=limit,\n page_size=page_size,\n )\n ]", "def list_instances(self):\n resp = self.session.get(\"{0}/v2/resource_instances\".format(self.endpoint_url))\n resp.raise_for_status()\n\n while True:\n for res in resp.json()[\"resources\"]:\n yield res\n\n next_url = resp.json().get(\"next_url\")\n if not next_url:\n break\n\n resp = self.session.get(\"{0}{1}\".format(self.endpoint_url, next_url))\n resp.raise_for_status()", "async def stream_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[InteractionChannelInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(page_size=limits[\"page_size\"])\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def list_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[InteractionChannelInstance]:\n return [\n record\n async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )\n ]", "def list(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[BuildInstance]:\n return list(self.stream(\n limit=limit,\n page_size=page_size,\n ))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asynchronously streams HostedNumberOrderInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient.
async def stream_async( self, status: Union["HostedNumberOrderInstance.Status", object] = values.unset, phone_number: Union[str, object] = values.unset, incoming_phone_number_sid: Union[str, object] = values.unset, friendly_name: Union[str, object] = values.unset, unique_name: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> AsyncIterator[HostedNumberOrderInstance]: limits = self._version.read_limits(limit, page_size) page = await self.page_async( status=status, phone_number=phone_number, incoming_phone_number_sid=incoming_phone_number_sid, friendly_name=friendly_name, unique_name=unique_name, page_size=limits["page_size"], ) return self._version.stream_async(page, limits["limit"])
[ "async def list_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[HostedNumberOrderInstance]:\n return [\n record\n async for record in await self.stream_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n limit=limit,\n page_size=page_size,\n )\n ]", "async def stream_async(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[BuildInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n page_size=limits['page_size']\n )\n\n return self._version.stream_async(page, limits['limit'])", "def stream(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream(page, limits[\"limit\"])", "async def list_async(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[BuildInstance]:\n return [record async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )]", "async def list_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[FactorInstance]:\n return [\n record\n async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )\n ]", "async def stream_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[FactorInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(page_size=limits[\"page_size\"])\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def stream_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[AccountInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n friendly_name=friendly_name, status=status, page_size=limits[\"page_size\"]\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def list_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[InteractionChannelInstance]:\n return [\n record\n async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )\n ]", "async def list_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> 
List[AccountInstance]:\n return [\n record\n async for record in await self.stream_async(\n friendly_name=friendly_name,\n status=status,\n limit=limit,\n page_size=page_size,\n )\n ]", "async def stream_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[InteractionChannelInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(page_size=limits[\"page_size\"])\n\n return self._version.stream_async(page, limits[\"limit\"])", "def stream(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[BuildInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n page_size=limits['page_size']\n )\n\n return self._version.stream(page, limits['limit'])", "def chunk(self, limit, reverse=False):\n gen = reversed(self) if reverse else self\n interval = limit\n\n instances = []\n for count, p in enumerate(gen):\n if count >= limit:\n yield instances\n limit += interval\n instances = []\n instances.append(p)\n yield instances", "def get_all_ids_as_generator(self, **options) -> Iterator[dict]:\n offset = 0\n finished = False\n while not finished:\n input_lines = []\n batch = self._call(\n \"objects/tickets/paged\",\n method=\"GET\",\n doseq=True,\n params={\"offset\": offset},\n **options\n )\n for line in batch[\"objects\"]:\n input_lines.append({\"id\": f\"{line['objectId']}\"})\n offset = batch[\"offset\"]\n\n finished = not batch[\"hasMore\"]\n\n yield from input_lines", "def perf_archive_get_instances_iter_next(self, tag, maximum):\n return self.request( \"perf-archive-get-instances-iter-next\", {\n 'tag': tag,\n 'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],\n }, {\n 'generation': [ int, False ],\n 'records': [ int, False ],\n 'archive-records': [ ArchiveRecord, True ],\n } )", "def stream(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[AccountInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n friendly_name=friendly_name, status=status, page_size=limits[\"page_size\"]\n )\n\n return self._version.stream(page, limits[\"limit\"])", "def list_instances(self):\n resp = self.session.get(\"{0}/v2/resource_instances\".format(self.endpoint_url))\n resp.raise_for_status()\n\n while True:\n for res in resp.json()[\"resources\"]:\n yield res\n\n next_url = resp.json().get(\"next_url\")\n if not next_url:\n break\n\n resp = self.session.get(\"{0}{1}\".format(self.endpoint_url, next_url))\n resp.raise_for_status()", "def stream(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[FactorInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(page_size=limits[\"page_size\"])\n\n return self._version.stream(page, limits[\"limit\"])", "def get_buffers_from_origin(self, origin):\n g = origin.provider.paginate(origin.uri)\n iterations = max(1, origin.iterations)\n\n # Generator can raise StopIteration before iterations is reached.\n # We use a for loop instead of a comprehension expression to catch\n # gracefully this situation.\n tasks = []\n for i in range(iterations):\n try:\n uri = next(g)\n tasks.append(self.get_buffer_from_uri(origin, uri))\n except StopIteration:\n msg = (\"{provider} has stopped the pagination after \"\n \"iteration #{index}\")\n msg = 
msg.format(provider=origin.provider, index=i)\n self.logger.warning(msg)\n break\n\n ret = yield from asyncio.gather(*tasks)\n return ret", "def getInstances(self):\n\n environmentId = self.getEnviroment()\n authorizationToken = self._getToken()\n\n url = \"https://\" + environmentId + \".env.timeseries.azure.com/timeseries/instances/\"\n \n querystring = self._getQueryString()\n payload = \"\"\n \n headers = {\n 'x-ms-client-application-name': self._applicationName,\n 'Authorization': authorizationToken,\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\"\n }\n \n response = requests.request(\"GET\", url, data=payload, headers=headers, params=querystring)\n if response.text:\n jsonResponse = json.loads(response.text)\n \n result = jsonResponse\n \n while len(jsonResponse['instances'])>999 and 'continuationToken' in list(jsonResponse.keys()):\n headers = {\n 'x-ms-client-application-name': self._applicationName,\n 'Authorization': authorizationToken,\n 'x-ms-continuation' : jsonResponse['continuationToken'],\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\"\n }\n response = requests.request(\"GET\", url, data=payload, headers=headers, params=querystring)\n if response.text:\n jsonResponse = json.loads(response.text)\n \n result['instances'].extend(jsonResponse['instances'])\n \n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asynchronously lists HostedNumberOrderInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning.
async def list_async( self, status: Union["HostedNumberOrderInstance.Status", object] = values.unset, phone_number: Union[str, object] = values.unset, incoming_phone_number_sid: Union[str, object] = values.unset, friendly_name: Union[str, object] = values.unset, unique_name: Union[str, object] = values.unset, limit: Optional[int] = None, page_size: Optional[int] = None, ) -> List[HostedNumberOrderInstance]: return [ record async for record in await self.stream_async( status=status, phone_number=phone_number, incoming_phone_number_sid=incoming_phone_number_sid, friendly_name=friendly_name, unique_name=unique_name, limit=limit, page_size=page_size, ) ]
[ "async def list_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[FactorInstance]:\n return [\n record\n async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )\n ]", "async def list_async(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[BuildInstance]:\n return [record async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )]", "async def list_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[InteractionChannelInstance]:\n return [\n record\n async for record in await self.stream_async(\n limit=limit,\n page_size=page_size,\n )\n ]", "async def list_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[AccountInstance]:\n return [\n record\n async for record in await self.stream_async(\n friendly_name=friendly_name,\n status=status,\n limit=limit,\n page_size=page_size,\n )\n ]", "def list(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[BuildInstance]:\n return list(self.stream(\n limit=limit,\n page_size=page_size,\n ))", "def list(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[FactorInstance]:\n return list(\n self.stream(\n limit=limit,\n page_size=page_size,\n )\n )", "def fetch_all(self, limit=15):\n records = []\n limit = self.db.llen(self.redis_key)\n for item in self.db.lrange(self.redis_key, 0, limit-1):\n record_obj = json.loads(item.decode('utf-8'))\n records.append(record_obj)\n \n return records", "async def stream_async(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[BuildInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n page_size=limits['page_size']\n )\n\n return self._version.stream_async(page, limits['limit'])", "def chunk(self, limit, reverse=False):\n gen = reversed(self) if reverse else self\n interval = limit\n\n instances = []\n for count, p in enumerate(gen):\n if count >= limit:\n yield instances\n limit += interval\n instances = []\n instances.append(p)\n yield instances", "async def stream_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def stream_async(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[FactorInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(page_size=limits[\"page_size\"])\n\n return self._version.stream_async(page, limits[\"limit\"])", "async def stream_async(\n self,\n friendly_name: Union[str, object] = 
values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[AccountInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n friendly_name=friendly_name, status=status, page_size=limits[\"page_size\"]\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "def list(\n self,\n friendly_name: Union[str, object] = values.unset,\n status: Union[\"AccountInstance.Status\", object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[AccountInstance]:\n return list(\n self.stream(\n friendly_name=friendly_name,\n status=status,\n limit=limit,\n page_size=page_size,\n )\n )", "def list_records(self, domain, limit=None, offset=None):\r\n return domain.list_records(limit=limit, offset=offset)", "def getInstances(self):\n\n environmentId = self.getEnviroment()\n authorizationToken = self._getToken()\n\n url = \"https://\" + environmentId + \".env.timeseries.azure.com/timeseries/instances/\"\n \n querystring = self._getQueryString()\n payload = \"\"\n \n headers = {\n 'x-ms-client-application-name': self._applicationName,\n 'Authorization': authorizationToken,\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\"\n }\n \n response = requests.request(\"GET\", url, data=payload, headers=headers, params=querystring)\n if response.text:\n jsonResponse = json.loads(response.text)\n \n result = jsonResponse\n \n while len(jsonResponse['instances'])>999 and 'continuationToken' in list(jsonResponse.keys()):\n headers = {\n 'x-ms-client-application-name': self._applicationName,\n 'Authorization': authorizationToken,\n 'x-ms-continuation' : jsonResponse['continuationToken'],\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\"\n }\n response = requests.request(\"GET\", url, data=payload, headers=headers, params=querystring)\n if response.text:\n jsonResponse = json.loads(response.text)\n \n result['instances'].extend(jsonResponse['instances'])\n \n return result", "def list(\n self,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[InteractionChannelInstance]:\n return list(\n self.stream(\n limit=limit,\n page_size=page_size,\n )\n )", "def stream(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream(page, limits[\"limit\"])", "def stream(self, \n \n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[BuildInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n page_size=limits['page_size']\n )\n\n return self._version.stream(page, limits['limit'])", "async def list_companies(limit: int = 1000):\n # TODO: remove to_list & add skip and list\n # 
https://pymongo.readthedocs.io/en/3.11.0/api/pymongo/collection.html#pymongo.collection.Collection.find\n company_ls = await company_db.collection.find().to_list(limit)\n return company_ls" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
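The query/document pair above follows the common paginator pattern in generated REST clients: the eager list_async() simply drains the lazy stream_async() into memory. A minimal, self-contained sketch of that relationship, where the names and the fake in-memory stream are illustrative assumptions standing in for the real API calls:

```python
import asyncio
from typing import AsyncIterator, List, Optional


async def fake_record_stream(page_size: int, limit: Optional[int]) -> AsyncIterator[dict]:
    """Stand-in for stream_async(): lazily yields records, stopping at `limit`."""
    fetched = 0
    total_available = 5  # pretend the API only has five records in total
    while fetched < total_available and (limit is None or fetched < limit):
        # A real client would await the next page from the API here.
        yield {"index": fetched, "page_size": page_size}
        fetched += 1


async def list_all(limit: Optional[int] = None, page_size: int = 50) -> List[dict]:
    """Eager counterpart of the stream: drains everything into memory first."""
    return [record async for record in fake_record_stream(page_size, limit)]


print(asyncio.run(list_all(limit=3)))  # three records, all loaded before returning
```

The only behavioural difference is when records are fetched: the stream yields them page by page on demand, while the list variant forces every record up to `limit` to be materialised before the caller sees anything.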
Retrieve a specific page of HostedNumberOrderInstance records from the API. The request is executed immediately.
def get_page(self, target_url: str) -> HostedNumberOrderPage: response = self._version.domain.twilio.request("GET", target_url) return HostedNumberOrderPage(self._version, response)
[ "def get_object(self):\n return get_object_or_404(Order, number=self.kwargs['order_number'])", "def stream(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> Iterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = self.page(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream(page, limits[\"limit\"])", "def get_orders(self,\r\n account_number,\r\n page=None,\r\n limit=None,\r\n filter=None):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/accounts/{account_number}/orders\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"account_number\": account_number\r\n })\r\n\r\n # Process optional query parameters\r\n query_parameters = {\r\n \"page\": page,\r\n \"limit\": limit,\r\n \"filter\": filter\r\n }\r\n \r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\",\r\n \"X-Auth-Token\": Configuration.x_auth_token,\r\n \"X-Auth-Token\": Configuration.x_auth_token\r\n }\r\n\r\n # Prepare the API call.\r\n http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)\r\n\r\n # Invoke the API call to fetch the response.\r\n response = self.http_client.execute_as_string(http_request);\r\n\r\n # Endpoint error handling using HTTP status codes.\r\n if response.status_code == 401:\r\n raise APIException(\"You are not authenticated\", 401, response.raw_body)\r\n elif response.status_code == 403:\r\n raise APIException(\"This action needs a valid WSSE header\", 403, response.raw_body)\r\n elif response.status_code == 404:\r\n raise APIException(\"Resource not found\", 404, response.raw_body)\r\n\r\n # Global error handling using HTTP status codes.\r\n self.validate_response(response) \r\n\r\n return response.raw_body", "async def list_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> List[HostedNumberOrderInstance]:\n return [\n record\n async for record in await self.stream_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n limit=limit,\n page_size=page_size,\n )\n ]", "async def stream_async(\n self,\n status: Union[\"HostedNumberOrderInstance.Status\", object] = values.unset,\n phone_number: Union[str, object] = values.unset,\n incoming_phone_number_sid: Union[str, object] = values.unset,\n friendly_name: Union[str, object] = values.unset,\n unique_name: 
Union[str, object] = values.unset,\n limit: Optional[int] = None,\n page_size: Optional[int] = None,\n ) -> AsyncIterator[HostedNumberOrderInstance]:\n limits = self._version.read_limits(limit, page_size)\n page = await self.page_async(\n status=status,\n phone_number=phone_number,\n incoming_phone_number_sid=incoming_phone_number_sid,\n friendly_name=friendly_name,\n unique_name=unique_name,\n page_size=limits[\"page_size\"],\n )\n\n return self._version.stream_async(page, limits[\"limit\"])", "def _fetch_page(self, **params):\n r = requests.get(self.url, params=params)\n if not r.ok:\n raise Exception(r.text)\n return json.loads(r.text.strip('()'))", "def getPage(self, pageNum):\n pass", "def _paginated(self, instances, deep):\r\n if isinstance(instances, list):\r\n num_results = len(instances)\r\n else:\r\n num_results = count(self.session, instances)\r\n results_per_page = self._compute_results_per_page()\r\n if results_per_page > 0:\r\n # get the page number (first page is page 1)\r\n page_num = int(request.args.get('page', 1))\r\n start = (page_num - 1) * results_per_page\r\n end = min(num_results, start + results_per_page)\r\n total_pages = int(math.ceil(num_results / results_per_page))\r\n else:\r\n page_num = 1\r\n start = 0\r\n end = num_results\r\n total_pages = 1\r\n objects = [to_dict(x, deep, exclude=self.exclude_columns,\r\n exclude_relations=self.exclude_relations,\r\n include=self.include_columns,\r\n include_relations=self.include_relations,\r\n include_methods=self.include_methods)\r\n for x in instances[start:end]]\r\n return dict(page=page_num, objects=objects, total_pages=total_pages,\r\n num_results=num_results)", "def _paginated(self, instances, deep):\n if isinstance(instances, list):\n num_results = len(instances)\n else:\n num_results = count(self.session, instances)\n results_per_page = self._compute_results_per_page()\n if results_per_page > 0:\n # get the page number (first page is page 1)\n page_num = int(request.args.get('page', 1))\n start = (page_num - 1) * results_per_page\n end = min(num_results, start + results_per_page)\n total_pages = int(math.ceil(num_results / results_per_page))\n else:\n page_num = 1\n start = 0\n end = num_results\n total_pages = 1\n objects = [to_dict(x, deep, exclude=self.exclude_columns,\n exclude_relations=self.exclude_relations,\n include=self.include_columns,\n include_relations=self.include_relations,\n include_methods=self.include_methods)\n for x in instances[start:end]]\n return dict(page=page_num, objects=objects, total_pages=total_pages,\n num_results=num_results)", "def get_orders(client, sheet_id):\r\n return client.get(f'/api/orders/sheet/{sheet_id}')", "def get_order(self,\r\n account_number,\r\n order_id):\r\n # The base uri for api requests\r\n query_builder = Configuration.BASE_URI\r\n \r\n # Prepare query string for API call\r\n query_builder += \"/accounts/{account_number}/orders/{order_id}\"\r\n\r\n # Process optional template parameters\r\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \r\n \"account_number\": account_number,\r\n \"order_id\": order_id\r\n })\r\n \r\n # Validate and preprocess url\r\n query_url = APIHelper.clean_url(query_builder)\r\n\r\n # Prepare headers\r\n headers = {\r\n \"user-agent\": \"APIMATIC 2.0\",\r\n \"accept\": \"application/json\",\r\n \"X-Auth-Token\": Configuration.x_auth_token,\r\n \"X-Auth-Token\": Configuration.x_auth_token\r\n }\r\n\r\n # Prepare the API call.\r\n http_request = self.http_client.get(query_url, headers=headers)\r\n\r\n # 
Invoke the API call to fetch the response.\r\n response = self.http_client.execute_as_string(http_request);\r\n\r\n # Endpoint error handling using HTTP status codes.\r\n if response.status_code == 401:\r\n raise APIException(\"You are not authenticated\", 401, response.raw_body)\r\n elif response.status_code == 403:\r\n raise APIException(\"This action needs a valid WSSE header\", 403, response.raw_body)\r\n elif response.status_code == 404:\r\n raise APIException(\"Resource not found\", 404, response.raw_body)\r\n\r\n # Global error handling using HTTP status codes.\r\n self.validate_response(response) \r\n\r\n return response.raw_body", "def invoice_by_number(self, context, params):\n\n accesstoken = util.get_xero_client(context[\"headers\"])\n headers = {\n \"Accept\" : \"application/json\",\n \"xero-tenant-id\" : params.get(\"organization_id\"),\n \"Authorization\" : f\"Bearer {accesstoken}\"\n }\n number = params.get('number')\n response = requests.request(\"GET\", f'{self.base_url}Invoices?InvoiceNumbers={number}', headers=headers).text\n response = json.loads(response)\n\n data = XeroInvoice(\n organization_id= params.get(\"organization_id\"),\n item_code= response[\"Invoices\"][0][\"LineItems\"][0][\"Item\"][\"Code\"],\n invoice_id= response[\"Invoices\"][0][\"InvoiceID\"],\n description= response[\"Invoices\"][0][\"LineItems\"][0][\"Description\"],\n name= response[\"Invoices\"][0][\"Contact\"][\"Name\"],\n status= response[\"Invoices\"][0][\"Status\"],\n currency= response[\"Invoices\"][0][\"CurrencyCode\"], \n creation_date= response[\"Invoices\"][0][\"DateString\"],\n due_date= response[\"Invoices\"][0][\"DueDateString\"],\n branding_theme= response[\"Invoices\"][0][\"BrandingThemeID\"],\n number= response[\"Invoices\"][0][\"InvoiceNumber\"],\n reference= response[\"Invoices\"][0][\"Reference\"],\n line_items_type= response[\"Invoices\"][0][\"LineAmountTypes\"],\n quantity= response[\"Invoices\"][0][\"LineItems\"][0][\"Quantity\"],\n unit_price= response[\"Invoices\"][0][\"LineItems\"][0][\"UnitAmount\"],\n discount= response[\"Invoices\"][0][\"LineItems\"][0][\"DiscountRate\"],\n account= response[\"Invoices\"][0][\"LineItems\"][0][\"AccountCode\"],\n tax_rate= response[\"Invoices\"][0][\"LineItems\"][0][\"TaxAmount\"]\n )\n return data.__dict__", "def get_specific_order_page(browser, src, dst, indx):\n base = \"https://www.amazon.com/gp/your-account/order-history\"\n params = \"/ref=oh_aui_pagination_{_from}_{_to}?ie=UTF8&orderFilter=months-6&search=&startIndex={_start}\".format(_from=src, _to=dst, _start=indx)\n # order_page_url = urljoin(base, params)\n order_page_url = base + params\n return get_html(browser, order_page_url)", "def get_instances(self, params):\n params = transform_params(params)\n\n resp, page = self.request(\"GET\", self.uri, params=params)\n\n if self.key not in page:\n raise TwilioException(\"Key %s not present in response\" % self.key)\n\n return [self.load_instance(ir) for ir in page[self.key]]", "def show_numbers_handler():\n try:\n results = [{'id': n.id,\n 'number': n.number,\n 'timestamp': n.timestamp,\n 'max_range': n.max_range\n } for n in models.Numbers.query.all()]\n result_obj = {'numbers': results}\n return result_obj\n\n except OperationalError as e:\n function_error = \"ERR: A database error occurred. Is it running? 
Details:\\n\\n{}\".format(e)\n db.session.rollback()\n return function_error, 500", "def viewOrderDetails(wbn, context=None):\n # Error handling for authentication failure\n if context.get(\"error\") is True:\n return {\n \"statusCode\": 4001,\n \"statusMessage\": context.get(\"error_response\", \"\")\n }\n client = context.get(\"client\", None)\n find_criteria = dict()\n find_criteria[\"wbn\"] = wbn\n if client:\n find_criteria[\"cl\"] = client\n cols = ['wbn', 'oid', 'pt', 'cty', 'pt', 'nm', 'add', 'ph', 'cs',\n 'em', \"s\", \"cd\"]\n if settings.USE_REPLICA_SET:\n db = get_replica_set()\n if db:\n pkg = db['packages'].find_one(find_criteria, fields=cols)\n else:\n pkg = connection.Package.find_one(find_criteria, cols)\n else:\n pkg = connection.Package.find_one(find_criteria, cols)\n #if no package found return error response\n if not pkg:\n return {\n \"statusCode\": 3042,\n \"statusMessage\": \"Order Number does not match any Package.\"\n }\n p = Package(pkg)\n last_status = p.get(\"cs\", None)\n scan_status = last_status.get(\"ss\", None)\n client_status = PACKAGE_STATUS_MAP.get(scan_status, None)\n #return error if no scan found in system.\n if (not client_status) or (not last_status):\n return {\n \"statusCode\": 3042,\n \"statusMessage\": \"Order has not yet received a scan.\"\n }\n #this condition returns all the order details for a package\n package_statuses = p.get(\"s\", [])\n shipment_details =\\\n {\n 'customerDetails': {'address': (\", \").join(p.add),\n 'contactNo': ', '.join(filter\n (lambda x: x, p.ph)),\n 'email': p.get(\"em\", \"\"),\n 'name': p.nm,\n },\n \"comments\": (\", \").join([\"%s: %s\" % (k.get(\"u\", None),\n k.get(\"sr\", None))\n for k in package_statuses]),\n \"deliveryDate\": last_status.get('ntd',\n last_status['sd']).strftime(\"%d-%m-%Y\") if\n scan_status else \"\",\n \"orderStatus\": client_status[0],\n \"paymentMode\": \"cash\",\n }\n return {\n \"response\": shipment_details,\n \"statusCode\": 200,\n \"statusMessage\": \"Success\"\n }", "def test_pagination(self):\n for i in range(21):\n self.create_report()\n response = self._get(get_kwargs={'page': 2})\n self.assertEquals(response.status_code, 200)\n queryset, form = self._extract(response)\n self.assertEquals(queryset.count(), 21)\n page = response.context['table'].page\n self.assertEquals(page.object_list.data.count(), 1)", "def _request(provider, record_type, params, paging, rate_limit):\n # establish an authenticated session\n session = Client._session(provider)\n url = provider.endpoints[record_type]\n results = []\n first = True\n\n while (first or paging) and url:\n # get the page of data\n if first:\n r = session.get(url, params=params)\n first = False\n else:\n r = session.get(url)\n # bail for non-200 status\n if r.status_code != 200:\n Client._describe(r)\n break\n # check payload for data\n # for vehicles, keep payload regardless as last_updated and ttl info may be useful\n payload = r.json()\n if record_type == VEHICLES or Client._has_data(payload, record_type):\n results.append(payload)\n # check for next page\n url = Client._next_url(payload)\n if url and rate_limit:\n time.sleep(rate_limit)\n\n return results", "def _retrieve_batch_page(self, batch_status, page=1):\n br = self.browser\n params = dict(MANAGE_PARAMS, status=batch_status, page=page)\n url = '%s?%s' % (MANAGE_URL, urlencode(params))\n \n if DEBUG:\n print >>sys.stderr, '*** _retrieve_batch_page(%s, %s)' % (batch_status, page)\n \n response = br.open(url)\n soup = BeautifulSoup(response.read())\n pagination = 
soup.find(attrs={'class': 'pagination'})\n page_links = set( int(a.string) for a in pagination.find_all('a') if a.string.isdigit() ) \\\n if pagination is not None else set()\n \n next_page = page+1 if (page+1) in page_links else None\n \n DIV_ID_PREFIX = 'batch_capsule_'\n batches = []\n for batch_capsule in soup.find_all(id=lambda x: x and x.startswith(DIV_ID_PREFIX)):\n batch_id = int(batch_capsule.attrs['id'][len(DIV_ID_PREFIX):])\n batch_link_tag = batch_capsule.find('a', id='batch_status_%s' % batch_id)\n batch_name = batch_link_tag.string\n tbl = batch_capsule.find(id=\"batch_%s\" % batch_id)\n metadata = [line for line in tbl.text.splitlines() if line.strip()]\n \n batches.append( Batch(batch_id, batch_name, metadata) )\n \n return batches, next_page", "async def main(self):\n\t\tfor i in range(2, self.number_of_requests+2):\n\t\t\turl = self.base_url +f'/?page={i}'\n\t\t\tawait self.make_requests(url)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
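The get_page record above fetches a page by its absolute URL rather than by rebuilding query parameters, which is how such clients follow the server's own next/previous page links. A hypothetical, framework-free sketch of that idea; the use of requests and the example URL are assumptions, not part of the record:

```python
import requests  # assumption: a plain HTTP client stands in for the SDK's transport layer


def get_page(target_url: str) -> dict:
    """Fetch one page of records from an absolute page URL and parse the JSON body."""
    response = requests.get(target_url, timeout=10)
    response.raise_for_status()
    return response.json()


# Typical use: follow the API's own pagination links until they run out.
# page = get_page("https://api.example.com/v2/HostedNumberOrders?PageSize=50")
# while page.get("meta", {}).get("next_page_url"):
#     page = get_page(page["meta"]["next_page_url"])
```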
Forces a Failover for a regional cluster. Failover promotes the HA standby instance in the regional cluster.
def Failover(self, request, global_params=None): config = self.GetMethodConfig('Failover') return self._RunMethod( config, request, global_params=global_params)
[ "def FailoverInstance(opts, args):\n cl = GetClient()\n instance_name = args[0]\n ignore_consistency = opts.ignore_consistency\n force = opts.force\n iallocator = opts.iallocator\n target_node = opts.dst_node\n\n if iallocator and target_node:\n raise errors.OpPrereqError(\"Specify either an iallocator (-I), or a target\"\n \" node (-n) but not both\", errors.ECODE_INVAL)\n\n if not force:\n _EnsureInstancesExist(cl, [instance_name])\n\n usertext = (\"Failover will happen to image %s.\"\n \" This requires a shutdown of the instance. Continue?\" %\n (instance_name,))\n if not AskUser(usertext):\n return 1\n\n if ignore_consistency:\n usertext = (\"To failover instance %s, the source node must be marked\"\n \" offline first. Is this already the case?\") % instance_name\n if not AskUser(usertext):\n return 1\n\n op = opcodes.OpInstanceFailover(instance_name=instance_name,\n ignore_consistency=ignore_consistency,\n shutdown_timeout=opts.shutdown_timeout,\n iallocator=iallocator,\n target_node=target_node,\n ignore_ipolicy=opts.ignore_ipolicy)\n SubmitOrSend(op, opts, cl=cl)\n return 0", "def test_failover(self):\n self._do(self.hdfs_active, 'stop-namenode')\n self.d.sentry.wait_for_messages({\n 'namenode': [\n 'Ready (3 DataNodes, HA degraded down (missing: standby), with automatic fail-over)',\n 'Ready (3 DataNodes, HA degraded active (missing: standby), with automatic fail-over)',\n ]\n }, timeout=1800)\n self._hdfs_read_file()\n self._do(self.hdfs_active, 'start-namenode')\n self.d.sentry.wait_for_messages({\n 'namenode': [\n 'Ready (3 DataNodes, HA active, with automatic fail-over)',\n 'Ready (3 DataNodes, HA standby, with automatic fail-over)',\n ]\n }, timeout=1800)\n (self.hdfs_active, self.hdfs_standby) = (self.hdfs_standby, self.hdfs_active)\n self._hdfs_read_file()", "def _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force, scheduled=None):\n\n dcs = get_dcs(obj, cluster_name)\n cluster = dcs.get_cluster()\n\n if action == 'switchover' and cluster.leader is None:\n raise PatroniCtlException('This cluster has no master')\n\n if master is None:\n if force or action == 'failover':\n master = cluster.leader and cluster.leader.name\n else:\n master = click.prompt('Master', type=str, default=cluster.leader.member.name)\n\n if master is not None and cluster.leader and cluster.leader.member.name != master:\n raise PatroniCtlException('Member {0} is not the leader of cluster {1}'.format(master, cluster_name))\n\n # excluding members with nofailover tag\n candidate_names = [str(m.name) for m in cluster.members if m.name != master and not m.nofailover]\n # We sort the names for consistent output to the client\n candidate_names.sort()\n\n if not candidate_names:\n raise PatroniCtlException('No candidates found to {0} to'.format(action))\n\n if candidate is None and not force:\n candidate = click.prompt('Candidate ' + str(candidate_names), type=str, default='')\n\n if action == 'failover' and not candidate:\n raise PatroniCtlException('Failover could be performed only to a specific candidate')\n\n if candidate == master:\n raise PatroniCtlException(action.title() + ' target and source are the same.')\n\n if candidate and candidate not in candidate_names:\n raise PatroniCtlException('Member {0} does not exist in cluster {1}'.format(candidate, cluster_name))\n\n scheduled_at_str = None\n scheduled_at = None\n\n if action == 'switchover':\n if scheduled is None and not force:\n next_hour = (datetime.datetime.now() + 
datetime.timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M')\n scheduled = click.prompt('When should the switchover take place (e.g. ' + next_hour + ' ) ',\n type=str, default='now')\n\n scheduled_at = parse_scheduled(scheduled)\n if scheduled_at:\n if cluster.is_paused():\n raise PatroniCtlException(\"Can't schedule switchover in the paused state\")\n scheduled_at_str = scheduled_at.isoformat()\n\n failover_value = {'leader': master, 'candidate': candidate, 'scheduled_at': scheduled_at_str}\n\n logging.debug(failover_value)\n\n # By now we have established that the leader exists and the candidate exists\n click.echo('Current cluster topology')\n output_members(dcs.get_cluster(), cluster_name)\n\n if not force:\n demote_msg = ', demoting current master ' + master if master else ''\n if scheduled_at_str:\n if not click.confirm('Are you sure you want to schedule {0} of cluster {1} at {2}{3}?'\n .format(action, cluster_name, scheduled_at_str, demote_msg)):\n raise PatroniCtlException('Aborting scheduled ' + action)\n else:\n if not click.confirm('Are you sure you want to {0} cluster {1}{2}?'\n .format(action, cluster_name, demote_msg)):\n raise PatroniCtlException('Aborting ' + action)\n\n r = None\n try:\n member = cluster.leader.member if cluster.leader else cluster.get_member(candidate, False)\n\n r = request_patroni(member, 'post', action, failover_value)\n\n # probably old patroni, which doesn't support switchover yet\n if r.status == 501 and action == 'switchover' and b'Server does not support this operation' in r.data:\n r = request_patroni(member, 'post', 'failover', failover_value)\n\n if r.status in (200, 202):\n logging.debug(r)\n cluster = dcs.get_cluster()\n logging.debug(cluster)\n click.echo('{0} {1}'.format(timestamp(), r.data.decode('utf-8')))\n else:\n click.echo('{0} failed, details: {1}, {2}'.format(action.title(), r.status, r.data.decode('utf-8')))\n return\n except Exception:\n logging.exception(r)\n logging.warning('Failing over to DCS')\n click.echo('{0} Could not {1} using Patroni api, falling back to DCS'.format(timestamp(), action))\n dcs.manual_failover(master, candidate, scheduled_at=scheduled_at)\n\n output_members(cluster, cluster_name)", "def disable_autofailover(self):\n for server in self.servers:\n rest = RestConnection(server)\n rest.update_autofailover_settings(False, 120)", "def reset_cluster_after_upgrade_neutron_ceph(self):\n self.env.revert_snapshot('upgrade_master_neutron_ceph')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[6:7])\n\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-05': ['compute', 'ceph-osd'],\n 'slave-06': ['compute', 'ceph-osd']\n }, False, True\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-07': ['controller']}\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['ha', 'sanity', 'smoke'],\n should_fail=1)", "def test_fsx_lustre_backup(region, pcluster_config_reader, clusters_factory, os, scheduler):\n mount_dir = \"/fsx_mount_dir\"\n daily_automatic_backup_start_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)\n logging.info(f\"daily_automatic_backup_start_time: {daily_automatic_backup_start_time}\")\n cluster_config = pcluster_config_reader(\n mount_dir=mount_dir, 
daily_automatic_backup_start_time=daily_automatic_backup_start_time.strftime(\"%H:%M\")\n )\n\n # Create a cluster with automatic backup parameters.\n cluster = clusters_factory(cluster_config)\n remote_command_executor = RemoteCommandExecutor(cluster)\n scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)\n fsx_fs_id = get_fsx_fs_id(cluster, region)\n\n # Mount file system\n assert_fsx_lustre_correctly_mounted(remote_command_executor, mount_dir, os, region, fsx_fs_id)\n\n # Create a text file in the mount directory.\n create_backup_test_file(scheduler_commands, remote_command_executor, mount_dir)\n\n # Wait for the creation of automatic backup and assert if it is in available state.\n automatic_backup = monitor_automatic_backup_creation(\n remote_command_executor, fsx_fs_id, region, daily_automatic_backup_start_time\n )\n\n # Create a manual FSx Lustre backup using boto3 client.\n manual_backup = create_manual_fs_backup(remote_command_executor, fsx_fs_id, region)\n\n # Delete original cluster.\n cluster.delete()\n\n # Verify whether automatic backup is also deleted along with the cluster.\n _test_automatic_backup_deletion(remote_command_executor, automatic_backup, region)\n\n # Restore backup into a new cluster\n cluster_config_restore = pcluster_config_reader(\n config_file=\"pcluster_restore_fsx.config.yaml\", mount_dir=mount_dir, fsx_backup_id=manual_backup.get(\"BackupId\")\n )\n\n cluster_restore = clusters_factory(cluster_config_restore)\n remote_command_executor_restore = RemoteCommandExecutor(cluster_restore)\n fsx_fs_id_restore = get_fsx_fs_id(cluster_restore, region)\n\n # Mount the restored file system\n assert_fsx_lustre_correctly_mounted(remote_command_executor_restore, mount_dir, os, region, fsx_fs_id_restore)\n\n # Validate whether text file created in the original file system is present in the restored file system.\n _test_restore_from_backup(remote_command_executor_restore, mount_dir)\n\n # Test deletion of manual backup\n _test_delete_manual_backup(remote_command_executor, manual_backup, region)", "def reset_cluster_after_upgrade_nova_cinder(self):\n self.env.revert_snapshot('upgrade_master_nova_cinder')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:6])\n\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-04': ['controller'],\n 'slave-05': ['controller'],\n 'slave-06': ['compute']\n }\n )\n\n self.fuel_web.run_network_verify(cluster_id)\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n self.fuel_web.run_ostf(cluster_id)\n\n nailgun_controllers = self.fuel_web.\\\n get_nailgun_cluster_nodes_by_roles(cluster_id=cluster_id,\n roles=['controller'])\n devops_controllers = self.fuel_web.\\\n get_devops_nodes_by_nailgun_nodes(nailgun_controllers)\n\n primary_controller = self.fuel_web.get_nailgun_primary_node(\n self.env.d_env.nodes().slaves[0])\n primary_controller.destroy()\n\n wait(lambda: not self.fuel_web.\n get_nailgun_node_by_devops_node(primary_controller)['online'],\n timeout=60 * 10)\n\n # Wait for HA services ready\n self.fuel_web.assert_ha_services_ready(cluster_id)\n\n # Wait until OpenStack services are UP\n self.fuel_web.assert_os_services_ready(cluster_id, should_fail=1)\n\n logger.info(\"Waiting 300 sec before MySQL Galera will up, \"\n \"then run OSTF\")\n\n # Wait until MySQL Galera is UP on online controllers\n self.fuel_web.wait_mysql_galera_is_up(\n [n.name for n in\n 
set(devops_controllers) - {primary_controller}], timeout=300)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'],\n should_fail=1)", "def test_200_forced_cinder_failover(self):\n cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode(\n self.cinder_ceph_app_name)\n if cinder_rbd_mirroring_mode != 'image':\n logging.warning(\n \"Skipping 'test_200_cinder_failover_without_primary_site' \"\n \"since Cinder RBD mirroring mode is {}.\".format(\n cinder_rbd_mirroring_mode))\n return\n\n # Make sure that the Cinder Ceph backend workaround is applied.\n self.apply_cinder_ceph_workaround()\n\n session = openstack.get_overcloud_keystone_session()\n cinder = openstack.get_cinder_session_client(session, version=3)\n openstack.failover_cinder_volume_host(\n cinder=cinder,\n backend_name=self.cinder_ceph_app_name,\n target_backend_id='ceph',\n target_status='disabled',\n target_replication_status='failed-over')\n\n # Check that the Cinder volumes are still available after forced\n # failover.\n for volume in cinder.volumes.list():\n self.assertEqual(volume.status, 'available')", "def upgrade_ha(self):\n # TODO(ddmitriev): change snapshot name to actual when reverting 7.0\n if not self.env.d_env.has_snapshot('deploy_neutron_gre_ha'):\n raise SkipTest()\n\n self.env.revert_snapshot(\"deploy_neutron_gre_ha\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n available_releases_before = self.fuel_web.get_releases_list_for_os(\n release_name=hlp_data.OPENSTACK_RELEASE)\n\n self.env.admin_actions.upgrade_master_node()\n\n self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)\n self.fuel_web.assert_nodes_in_ready_state(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:5])\n self.fuel_web.assert_nailgun_upgrade_migration()\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n\n available_releases_after = self.fuel_web.get_releases_list_for_os(\n release_name=hlp_data.OPENSTACK_RELEASE)\n added_release = [release_id for release_id in available_releases_after\n if release_id not in available_releases_before]\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[5:7])\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=hlp_data.DEPLOYMENT_MODE,\n settings={\n 'net_provider': 'neutron',\n 'net_segment_type': 'vlan'\n },\n release_id=added_release[0]\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-06': ['controller'],\n 'slave-07': ['compute']\n }\n )\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:\n _ip = self.fuel_web.get_nailgun_node_by_name('slave-06')['ip']\n with self.env.d_env.get_ssh_to_remote(_ip) as remote:\n kernel = self.get_slave_kernel(remote)\n logger.debug(\"ubuntu kernel version\"\n \" on new node is {}\".format(kernel))\n self.fuel_web.verify_network(cluster_id)\n\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n self.env.make_snapshot(\"upgrade_ha\")", "def test_load_balancer_failover(self):\n lb_name = data_utils.rand_name(\"lb_member_lb1-failover\")\n lb = self.mem_lb_client.create_loadbalancer(\n name=lb_name, provider=CONF.load_balancer.provider,\n vip_network_id=self.lb_member_vip_net[const.ID])\n self.addClassResourceCleanup(\n self.mem_lb_client.cleanup_loadbalancer,\n lb[const.ID])\n\n lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,\n 
lb[const.ID], const.PROVISIONING_STATUS,\n const.ACTIVE,\n CONF.load_balancer.lb_build_interval,\n CONF.load_balancer.lb_build_timeout)\n\n # Test RBAC not authorized for non-admin role\n if not CONF.load_balancer.RBAC_test_type == const.NONE:\n self.assertRaises(exceptions.Forbidden,\n self.mem_lb_client.failover_loadbalancer,\n lb[const.ID])\n\n # Assert we didn't go into PENDING_*\n lb = self.mem_lb_client.show_loadbalancer(lb[const.ID])\n self.assertEqual(const.ACTIVE, lb[const.PROVISIONING_STATUS])\n\n if CONF.load_balancer.provider in const.AMPHORA_PROVIDERS:\n before_amphorae = self.lb_admin_amphora_client.list_amphorae(\n query_params='{loadbalancer_id}={lb_id}'.format(\n loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))\n\n self.os_roles_lb_admin.loadbalancer_client.failover_loadbalancer(\n lb[const.ID])\n\n lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,\n lb[const.ID], const.PROVISIONING_STATUS,\n const.ACTIVE,\n CONF.load_balancer.lb_build_interval,\n CONF.load_balancer.lb_build_timeout)\n\n if CONF.load_balancer.provider in const.AMPHORA_PROVIDERS:\n after_amphorae = self.lb_admin_amphora_client.list_amphorae(\n query_params='{loadbalancer_id}={lb_id}'.format(\n loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))\n\n # Make sure all of the amphora on the load balancer have\n # failed over\n for amphora in before_amphorae:\n for new_amp in after_amphorae:\n self.assertNotEqual(amphora[const.ID], new_amp[const.ID])\n\n # Attempt to clean up so that one full test run doesn't start 10+\n # amps before the cleanup phase fires\n try:\n self.mem_lb_client.delete_loadbalancer(lb[const.ID])\n\n waiters.wait_for_deleted_status_or_not_found(\n self.mem_lb_client.show_loadbalancer, lb[const.ID],\n const.PROVISIONING_STATUS,\n CONF.load_balancer.lb_build_interval,\n CONF.load_balancer.lb_build_timeout)\n except Exception:\n pass", "def _failover(self):\n\n slap = slapos.slap.slap()\n slap.initializeConnection(self.server_url, self.key_file, self.cert_file)\n\n # partition that will take over.\n cp_winner = slap.registerComputerPartition(computer_guid=self.computer_guid,\n partition_id=self.partition_id)\n # XXX although we can already rename cp_winner, to change its software type we need to\n # get hold of the root cp as well\n\n cp_exporter_ref = self.namebase + '0' # this is ok. 
the boss is always number zero.\n\n # partition to be deactivated\n cp_broken = cp_winner.request(software_release=self.software_release,\n software_type='frozen',\n state='stopped',\n partition_reference=cp_exporter_ref)\n\n broken_new_ref = 'broken-{}'.format(time.strftime(\"%d-%b_%H:%M:%S\", time.gmtime()))\n\n log.debug(\"Renaming {}: {}\".format(cp_broken.getId(), broken_new_ref))\n\n cp_broken.rename(new_name=broken_new_ref)\n\n cp_broken.stopped()\n\n log.debug(\"Renaming {}: {}\".format(cp_winner.getId(), cp_exporter_ref))\n\n # update name (and later, software type) for the partition that will take over\n\n cp_winner.rename(new_name=cp_exporter_ref)\n cp_winner.bang(message='partitions have been renamed!')", "def run(ceph_cluster, **kw):\n\n log.info(\"Deploying stretch cluster with arbiter mon node\")\n log.info(run.__doc__)\n config = kw.get(\"config\")\n cephadm = CephAdmin(cluster=ceph_cluster, **config)\n rados_obj = RadosOrchestrator(node=cephadm)\n mon_obj = MonElectionStrategies(rados_obj=rados_obj)\n client_node = ceph_cluster.get_nodes(role=\"client\")[0]\n\n site1_name = config[\"site1\"][\"name\"]\n site2_name = config[\"site2\"][\"name\"]\n\n # disabling automatic crush update\n cmd = \"ceph config set osd osd_crush_update_on_start false\"\n cephadm.shell([cmd])\n\n # Sleeping for 2 seconds after map update.\n time.sleep(2)\n\n # Setting the election strategy to connectivity mode\n if not mon_obj.set_election_strategy(mode=\"connectivity\"):\n log.error(\"could not set election strategy to connectivity mode\")\n return 1\n\n # Sleeping for 2 seconds after strategy update.\n time.sleep(2)\n\n # Checking updated election strategy in mon map\n strategy = mon_obj.get_election_strategy()\n if strategy != 3:\n log.error(\n f\"cluster created election strategy other than connectivity, i.e {strategy}\"\n )\n return 1\n log.info(\"Enabled connectivity mode on the cluster\")\n\n # Creating new datacenter crush objects and moving under root/default\n for name in [site1_name, site2_name]:\n cmd = f\"ceph osd crush add-bucket {name} datacenter\"\n rados_obj.run_ceph_command(cmd)\n time.sleep(2)\n move_crush_item(cephadm, crush_obj=name, name=\"root\", value=\"default\")\n time.sleep(2)\n\n # Moving all the OSD and Mon daemons into respective sites\n sites = [\"site1\", \"site2\", \"site3\"]\n mon_hosts = mon_obj.get_mon_quorum().keys()\n osd_hosts = mon_obj.get_osd_hosts()\n log.debug(f\"Mon hosts defined: {mon_hosts}\")\n log.debug(f\"OSD hosts defined: {osd_hosts}\")\n\n def search(hosts, pattern):\n for daemon in hosts:\n if re.search(daemon, pattern):\n return daemon\n\n def mon_set_location(pattern, crush_name):\n daemon = search(mon_hosts, pattern)\n if daemon:\n _cmd = f\"ceph mon set_location {daemon} datacenter={crush_name}\"\n cephadm.shell([_cmd])\n log.info(\n f\"Set location for mon.{daemon} onto site {crush_name}\\n\"\n \"sleeping for 5 seconds\"\n )\n time.sleep(5)\n\n def osd_set_location(pattern, crush_name):\n daemon = search(osd_hosts, pattern)\n if daemon:\n move_crush_item(\n node=cephadm,\n crush_obj=daemon,\n name=\"datacenter\",\n value=crush_name,\n )\n log.info(\n f\"Set location for OSD {daemon} onto site {crush_name}\\n\"\n \"sleeping for 5 seconds\"\n )\n time.sleep(5)\n\n for site in sites:\n # Collecting hosts from each site and setting locations accordingly\n site_details = config[site]\n _crush_name = site_details[\"name\"]\n\n for item in site_details[\"hosts\"]:\n mon_set_location(item, _crush_name)\n osd_set_location(item, _crush_name)\n\n 
log.info(\"Moved all the hosts into respective sites\")\n\n stretch_rule_name = config.get(\"stretch_rule_name\", \"stretch_rule\")\n if not setup_crush_rule(\n node=client_node,\n rule_name=stretch_rule_name,\n site1=site1_name,\n site2=site2_name,\n ):\n log.error(\"Failed to Add crush rules in the crush map\")\n return 1\n\n # Sleeping for 5 sec for the strategy to be active\n time.sleep(5)\n\n # Enabling the stretch cluster mode\n tiebreaker_node = search(mon_hosts, config[\"site3\"][\"hosts\"][0])\n log.info(f\"tiebreaker node provided: {tiebreaker_node}\")\n cmd = (\n f\"ceph mon enable_stretch_mode {tiebreaker_node} {stretch_rule_name} datacenter\"\n )\n try:\n cephadm.shell([cmd])\n except Exception as err:\n log.error(\n f\"Error while enabling stretch rule on the datacenter. Command : {cmd}\"\n )\n log.error(err)\n return 1\n time.sleep(2)\n\n # wait for PG's to settle down with new crush rules after deployment of stretch mode\n wait_for_clean_pg_sets(rados_obj)\n\n # Checking if the pools have been updated with the new crush rules\n acting_set = rados_obj.get_pg_acting_set()\n if len(acting_set) != 4:\n log.error(\n f\"There are {len(acting_set)} OSD's in PG. OSDs: {acting_set}. Stretch cluster requires 4\"\n )\n return 1\n log.info(f\"Acting set : {acting_set} Consists of 4 OSD's per PG\")\n log.info(\"Stretch rule with arbiter monitor node set up successfully\")\n return 0", "def test_failover(self):\n pass", "def ha_one_controller_backup_restore(self):\n self.env.revert_snapshot(\"deploy_ha_one_controller_flat\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id),\n 'novaSimpleFlat', 'novaSimpleFlat', 'novaSimpleFlat')\n self.fuel_web.assert_cluster_ready(\n os_conn, smiles_count=6, networks_count=1, timeout=300)\n self.fuel_web.backup_master(self.env.get_admin_remote())\n checkers.backup_check(self.env.get_admin_remote())\n\n self.fuel_web.update_nodes(\n cluster_id, {'slave-03': ['compute']}, True, False)\n\n assert_equal(\n 3, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))\n\n self.fuel_web.restore_master(self.env.get_admin_remote())\n checkers.restore_check_sum(self.env.get_admin_remote())\n self.fuel_web.restore_check_nailgun_api(self.env.get_admin_remote())\n checkers.iptables_check(self.env.get_admin_remote())\n\n assert_equal(\n 2, len(self.fuel_web.client.list_cluster_nodes(cluster_id)))\n\n self.fuel_web.update_nodes(\n cluster_id, {'slave-03': ['compute']}, True, False)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id)\n\n self.env.make_snapshot(\"ha_one_controller_backup_restore\")", "def deploy_ha_after_upgrade(self):\n if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):\n raise SkipTest()\n self.env.revert_snapshot('ceph_ha_one_controller_compact')\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n available_releases_before = self.fuel_web.get_releases_list_for_os(\n release_name=hlp_data.OPENSTACK_RELEASE)\n\n self.env.admin_actions.upgrade_master_node()\n\n self.fuel_web.assert_nodes_in_ready_state(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:3])\n self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)\n self.fuel_web.assert_nailgun_upgrade_migration()\n available_releases_after = self.fuel_web.get_releases_list_for_os(\n release_name=hlp_data.OPENSTACK_RELEASE)\n added_release = [release_id for release_id in 
available_releases_after\n if release_id not in available_releases_before]\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:9])\n segment_type = hlp_data.NEUTRON_SEGMENT['vlan']\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=hlp_data.DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": segment_type\n },\n release_id=added_release[0]\n )\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-04': ['controller'],\n 'slave-05': ['controller'],\n 'slave-06': ['controller'],\n 'slave-07': ['compute'],\n 'slave-08': ['compute'],\n 'slave-09': ['cinder']\n }\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n cluster = self.fuel_web.client.get_cluster(cluster_id)\n assert_equal(str(cluster['net_provider']), 'neutron')\n if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:\n _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']\n with self.env.d_env.get_ssh_to_remote(_ip) as remote:\n kernel = self.get_slave_kernel(remote)\n logger.debug(\"ubuntu kernel version\"\n \" on new node is {}\".format(kernel))\n self.fuel_web.verify_network(cluster_id=cluster_id)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n self.env.make_snapshot(\"deploy_ha_after_upgrade\")", "def test_100_cinder_failover(self):\n cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode(\n self.cinder_ceph_app_name)\n if cinder_rbd_mirroring_mode != 'image':\n logging.warning(\n \"Skipping 'test_100_cinder_failover' since Cinder RBD \"\n \"mirroring mode is {}.\".format(cinder_rbd_mirroring_mode))\n return\n\n session = openstack.get_overcloud_keystone_session()\n cinder = openstack.get_cinder_session_client(session, version=3)\n\n # Check if the Cinder volume host is available with replication\n # enabled.\n host = 'cinder@{}'.format(self.cinder_ceph_app_name)\n svc = cinder.services.list(host=host, binary='cinder-volume')[0]\n self.assertEqual(svc.replication_status, 'enabled')\n self.assertEqual(svc.status, 'enabled')\n\n # Setup the test Cinder volume\n volume = self.setup_test_cinder_volume()\n\n # Check if the volume is properly mirrored\n self.wait_for_mirror_state(\n 'up+replaying',\n check_entries_behind_master=True,\n application_name=self.application_name + self.site_b_app_suffix,\n model_name=self.site_b_model,\n pools=[self.cinder_ceph_app_name])\n\n # Execute the Cinder volume failover\n openstack.failover_cinder_volume_host(\n cinder=cinder,\n backend_name=self.cinder_ceph_app_name,\n target_backend_id='ceph',\n target_status='disabled',\n target_replication_status='failed-over')\n\n # Check if the test volume is still available after failover\n self.assertEqual(cinder.volumes.get(volume.id).status, 'available')", "def load_ceph_ha(self):\n self.check_run(\"load_ceph_ha\")\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1, initialize=True)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE,\n settings={\n 'volumes_ceph': True,\n 'images_ceph': True,\n 'volumes_lvm': False,\n 'osd_pool_size': \"3\"\n }\n )\n self.show_step(2)\n self.show_step(3)\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller', 'ceph-osd'],\n 'slave-02': ['controller', 'ceph-osd'],\n 'slave-03': ['controller', 'ceph-osd'],\n 'slave-04': ['compute'],\n 'slave-05': 
['compute']\n }\n )\n\n self.show_step(4)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf(\n cluster_id=cluster_id)\n self.show_step(5)\n self.env.make_snapshot(\"load_ceph_ha\", is_make=True)", "def upgrade_ha_one_controller(self):\n if not self.env.d_env.has_snapshot('ceph_ha_one_controller_compact'):\n raise SkipTest()\n self.env.revert_snapshot('ceph_ha_one_controller_compact')\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n _ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']\n with self.env.d_env.get_ssh_to_remote(_ip) as remote:\n expected_kernel = self.get_slave_kernel(remote)\n\n self.env.admin_actions.upgrade_master_node()\n\n self.fuel_web.assert_nodes_in_ready_state(cluster_id)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:3])\n self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)\n self.fuel_web.assert_nailgun_upgrade_migration()\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n self.env.bootstrap_nodes(\n self.env.d_env.nodes().slaves[3:4])\n self.fuel_web.update_nodes(\n cluster_id, {'slave-04': ['compute']},\n True, False\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.run_ostf(cluster_id=cluster_id,\n test_sets=['ha', 'smoke', 'sanity'])\n if hlp_data.OPENSTACK_RELEASE_UBUNTU in hlp_data.OPENSTACK_RELEASE:\n _ip = self.fuel_web.get_nailgun_node_by_name('slave-04')['ip']\n with self.env.d_env.get_ssh_to_remote(_ip) as remote:\n kernel = self.get_slave_kernel(remote)\n checkers.check_kernel(kernel, expected_kernel)\n create_diagnostic_snapshot(\n self.env, \"pass\", \"upgrade_ha_one_controller\")\n\n self.env.make_snapshot(\"upgrade_ha_one_controller\")", "def degrade_cluster():\n log_everywhere(resource.nodes, 'Checking current cluster state')\n # All have quorum\n A.volumes.write(direct=1)\n B.volumes.write(direct=1)\n C.volumes.write(direct=1)\n\n log_everywhere(resource.nodes, 'Isolating node B')\n resource.forbidden_patterns.difference_update([\n r'connection:NetworkFailure',\n r'connection:BrokenPipe',\n r'connection:Timeout'\n ])\n\n connections(to_node=B).block()\n\n connections(B).event(r'connection .* connection:(BrokenPipe|NetworkFailure|Timeout)')\n\n A.volumes.write(direct=1)\n C.volumes.write(direct=1)\n log_everywhere([A, C], '* Nodes A & C still have quorum')\n\n expect_no_quorum(B)\n log_everywhere([B], '* Isolated node B lost quorum')", "def cluster_call_failover_timeout(self, cluster_call_failover_timeout):\n\n self._cluster_call_failover_timeout = cluster_call_failover_timeout" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
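The Failover record above shows the typical generated-client dispatch pattern: the public method only looks up a per-RPC configuration and hands it, together with the request, to a shared _RunMethod helper. A toy illustration of that structure, with made-up config values and an echoing _RunMethod in place of real HTTP:

```python
from dataclasses import dataclass


@dataclass
class MethodConfig:
    http_method: str
    path: str


class FakeSqlService:
    """Toy generated-client service: every RPC shares one dispatch helper."""

    _configs = {"Failover": MethodConfig("POST", "/v1/clusters/{cluster}:failover")}

    def GetMethodConfig(self, name: str) -> MethodConfig:
        return self._configs[name]

    def _RunMethod(self, config: MethodConfig, request: dict, global_params=None) -> dict:
        # A real client would serialize the request, issue the HTTP call, and
        # deserialize the response; here we just echo what would be sent.
        return {"method": config.http_method, "path": config.path, "body": request}

    def Failover(self, request: dict, global_params=None) -> dict:
        config = self.GetMethodConfig("Failover")
        return self._RunMethod(config, request, global_params=global_params)


print(FakeSqlService().Failover({"cluster": "prod-primary"}))
```

Keeping all per-method details in configuration and routing every call through one helper is what lets such clients be generated mechanically from an API description.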
View which gets the link for the given shortcut value and redirects to it.
def follow(request, shortcut, params=None): try: link = Link.objects.get(shortcut=shortcut) link.usage_count += 1 link.save() user = request.user if user.is_anonymous(): user = None Click.objects.create( link=link, user=user, useragent=request.META['HTTP_USER_AGENT']) url = link.url if params: url = url + params return HttpResponseRedirect(url) except: values = default_values(request) values["error"] = "This shortcut doesn't yet exit. Create it now!" values["link_form"].initial["shortcut"] = shortcut return index(request, values)
[ "def info(request, shortcut):\n link = get_object_or_404(Link, shortcut=shortcut)\n values = default_values(request)\n values['link'] = link\n return render_to_response(\n 'shortener/link_info.html',\n values,\n context_instance=RequestContext(request))", "def redirect_to_url(request, short_url):\n instance = get_object_or_404(Shorten, id=decode(short_url))\n instance.increase_view_count()\n\n return redirect(instance.long_url)", "def shortcut(request, content_type_id, object_id):\r\n # Look up the object, making sure it's got a get_absolute_url() function.\r\n try:\r\n content_type = ContentType.objects.get(pk=content_type_id)\r\n except (ContentType.DoesNotExist, ValueError):\r\n raise http.Http404(_(\"Content type %(ct_id)s object %(obj_id)s doesn't exist\") %\r\n {'ct_id': content_type_id, 'obj_id': object_id})\r\n \r\n if not content_type.model_class():\r\n raise http.Http404(_(\"Content type %(ct_id)s object has no associated model\") %\r\n {'ct_id': content_type_id})\r\n try:\r\n obj = content_type.get_object_for_this_type(pk=object_id)\r\n except (content_type.model_class().DoesNotExist, ValueError):\r\n raise http.Http404(_(\"Content type %(ct_id)s object %(obj_id)s doesn't exist\") %\r\n {'ct_id': content_type_id, 'obj_id': object_id})\r\n\r\n try:\r\n get_absolute_url = obj.get_absolute_url\r\n except AttributeError:\r\n raise http.Http404(_(\"%(ct_name)s objects don't have a get_absolute_url() method\") %\r\n {'ct_name': content_type.name})\r\n absurl = get_absolute_url()\r\n\r\n # Try to figure out the object's domain, so we can do a cross-site redirect\r\n # if necessary.\r\n\r\n # If the object actually defines a domain, we're done.\r\n if absurl.startswith('http://') or absurl.startswith('https://'):\r\n return http.HttpResponseRedirect(absurl)\r\n\r\n # Otherwise, we need to introspect the object's relationships for a\r\n # relation to the Site object\r\n object_domain = None\r\n\r\n if Site._meta.installed:\r\n opts = obj._meta\r\n\r\n # First, look for an many-to-many relationship to Site.\r\n for field in opts.many_to_many:\r\n if field.rel.to is Site:\r\n try:\r\n # Caveat: In the case of multiple related Sites, this just\r\n # selects the *first* one, which is arbitrary.\r\n object_domain = getattr(obj, field.name).all()[0].domain\r\n except IndexError:\r\n pass\r\n if object_domain is not None:\r\n break\r\n\r\n # Next, look for a many-to-one relationship to Site.\r\n if object_domain is None:\r\n for field in obj._meta.fields:\r\n if field.rel and field.rel.to is Site:\r\n try:\r\n object_domain = getattr(obj, field.name).domain\r\n except Site.DoesNotExist:\r\n pass\r\n if object_domain is not None:\r\n break\r\n\r\n # Fall back to the current site (if possible).\r\n if object_domain is None:\r\n try:\r\n object_domain = get_current_site(request).domain\r\n except Site.DoesNotExist:\r\n pass\r\n\r\n # If all that malarkey found an object domain, use it. 
Otherwise, fall back\r\n # to whatever get_absolute_url() returned.\r\n if object_domain is not None:\r\n protocol = 'https' if request.is_secure() else 'http'\r\n return http.HttpResponseRedirect('%s://%s%s'\r\n % (protocol, object_domain, absurl))\r\n else:\r\n return http.HttpResponseRedirect(absurl)", "def _run_shortcut(self, shortcut):\n _ = shortcut # Keep Pylint happy", "def run_shortcut(self, shortcut_name):\n _ = shortcut_name # Keep Pylint happy", "def canonical_redirect(request, episode_number):\n\n episode = get_object_or_404(Episode, episode_number=episode_number)\n return redirect(episode)", "def redirect_url(id, db):\n\n found = db.execute('''SELECT url FROM ShortUrl WHERE base62 = ?''', (id,))\n url = found.fetchone()\n if url is not None:\n print(url[0])\n redirect(url[0], code=301)\n else:\n print(\"Failed\")\n return HTTPError(status=404)", "def get_show_url(self, name):", "def get_shortcut(self, shortcut_name):\n return self.shortcut_dict[shortcut_name]", "def _open_shortcut_dialog(self, item, column):\r\n if item.childCount():\r\n return\r\n\r\n self.shortcut_dialog.set_shortcut(\r\n QKeySequence(item.text(1)).toString())\r\n self.shortcut_dialog.exec_()", "def open_with_shortcut(self, shortcut_name, targets):\n _ = shortcut_name, targets # Keep Pylint happy", "def redirectUrlShowItem(cat_id, item_name):\n category = session.query(Category).filter_by(id=cat_id).one()\n return redirect(url_for(\n 'showItem', category_name=category.name, item_name=item_name))", "def suggestions_url(self):\n return self.request.link(Search(self.request, None, None), 'suggest')", "def _getUrlNameForRedirect(self):\n return url_names.GCI_PROGRAM_EDIT", "def assign_url(context):", "def webrequest_to_uri_links(view, context, model, p):\n admin_url = app.global_content['options']['admin-url']\n if not model.uri:\n return None\n args = {\n 'search': model.uri.uri\n }\n link_args = urllib.parse.urlencode(args)\n the_link = '<a href=\"/%s/uri?%s\">%s</a>' % (\n admin_url.value,\n link_args,\n model.uri.uri)\n return Markup(the_link)", "def get_document_shortcut(connection, document_id, instance_id, error_msg=None):\n endpoint_url = f'/api/documents/{document_id}/instances/{instance_id}/shortcut'\n return connection.get(connection.base_url + endpoint_url)", "def get_shortcut(shortcut_name):\r\n global SHORTCUTS, CUSTOM_SHORTCUTS\r\n return CUSTOM_SHORTCUTS.get(shortcut_name, SHORTCUTS.get(shortcut_name))", "def redirect_from(short):\n full_url = memcache_client.get(short)\n if not full_url:\n full_url = db.full_url_from_shortened_url(short)\n if full_url:\n memcache_client.set(short, full_url)\n\n if full_url:\n return redirect(full_url)\n return Response(\"Shortened URL not valid\", status.HTTP_400_BAD_REQUEST)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
View which shows information on a particular link
def info(request, shortcut):
    link = get_object_or_404(Link, shortcut=shortcut)
    values = default_values(request)
    values['link'] = link
    return render_to_response(
        'shortener/link_info.html',
        values,
        context_instance=RequestContext(request))
[ "def get_show_url(self, name):", "def Info(request):\n return render_to_response('radabo/info.html', {})", "def get_link_info(link_id: int, db: Session = Depends(get_db)):\n link = db.query(Link).where(Link.id == link_id).first()\n if not link:\n raise HTTPException(404, detail='Link not found.')\n return link", "def detail():\n tool_id = request.args.get(\"tool_id\")\n db = get_db()\n cur = db.execute('select id, title, content, hasargs from entries where id=?', (tool_id,))\n result = cur.fetchone()\n\n return render_template('show_detail.html', tool_id = result[0],\n tool_titile=result[1], tool_des = result[2], tool_hasargs = result[3])", "def links_index(request):\n extra_context = get_extra_context()\n links = Link.objects.all()\n extra_context['links'] = links\n return render_to_response(\"other/links.html\", extra_context,\n context_instance=RequestContext(request))", "def link(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.link\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)", "def synopsis_link(request, synopsis_id, template_name='synopsis/synopsis_link.html'):\n\n synopsis = Synopsis.objects.get(id=synopsis_id)\n\n return render_to_response(template_name, \n dict(synopsis=synopsis),\n context_instance=RequestContext(request))", "def view_link(link_num=1):\n # if the .hn file doesn't exist, then update_links first\n if not os.path.isfile(HN_PATH):\n update_links()\n \n links = load_links()\n\n # check that the given num doesn't exceed the length of the list\n if link_num > len(links):\n print 'there is no HackerNews link associated with ' + str(link_num)\n sys.exit(1)\n\n # access the link at link_num\n num = link_num - 1\n\n print 'hn - opening ' + links[num][0] + \" at \" + links[num][1]\n hn_url = links[num][1]\n\n # open the link\n subprocess.call(['open', hn_url])", "def viewer(request):\n # Note: I'm not using Alpaca.objects.order_by('?')[0] because it's been known\n # to be slow on some databases (MySQL) with a large dataset, so I'm playing\n # it safe and just accessing a random index from .all()\n alpaca = None\n size = Alpaca.objects.count()\n if size > 0:\n i = randint(0, size-1)\n alpaca = Alpaca.objects.all()[i]\n return render_to_response('viewer.html', {'alpaca': alpaca})", "def processLink(link: str) -> str:\n s_time = time()\n res = ydl.extract_info(link, download=False)\n unique_id, title = res['display_id'], res['title']\n print(f\"{unique_id},{title}\")", "def about():\n\n afl_links = AffiliateLinks()\n afl_button = afl_links.afl_button\n #print('afl_links.afl_button[concerning]:', afl_links.afl_button['concerning'])\n\n return render_template(\n 'about.html',\n aboutActive='active',\n afl_button=afl_button,\n )", "def printLink(self, thisLink):\n print '{0}\\t{1}\\t\\t{2}'.format(thisLink['url'], thisLink['title'], thisLink['image'])", "def single_document_details(request, id):\n document = Document.objects.get(id=id)\n return render(request, 'html/detail.html', {'document': document})", "def list(request):\n urls = myURL.objects.order_by('-accessNb')\n return render(request, 'mini_url/list.html', locals())", "def links():\n links_list = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/links.json'))\n return render_template('links.html',title='collegeSmart - Helpful Links',links=links_list)", "def getLinkData(self, link):\n page = connectionChecker(link, self.webName)\n html = BeautifulSoup(page.content, 
features=\"html.parser\")\n for div in html.select('div[class*=\"layout_list_item css_class_479\"]'):\n title = div.find('div', attrs={'class': 'list_item_title_with_brand'}).text\n price = div.find('a', attrs={'class': 'price'}).text\n prodIdWeb = div.get('id')\n self.resultVegList.append(self.getVegDetails(title, price, prodIdWeb, link))", "def download_show(self, url):", "def link(self):\n return self._book_dict[\"link\"]", "def format_link(self, ind):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return true if user is allowed to submit URLs
def is_allowed_to_submit(request):
    return not settings.REQUIRE_LOGIN or request.user.is_authenticated()
[ "def url_allowed(self, url):\n return get_netloc(url) in self.root_hosts", "def can_create_url(self):\n return (self.allowed_postfixes is not None)", "def legal_url(self, url_name):\n if users.is_current_user_admin():\n return True\n if (self.active_permissions_vault == None):\n UserType.prepare_for_use(self)\n return self.active_permissions_vault.legal_url(url_name)", "def authorize(self):\n return True", "def isUserAllowed(self):\n security = getSecurityManager()\n portal = getToolByName(self, 'portal_url').getPortalObject()\n return security.checkPermission(permissions.USE_LINK_MANAGEMENT,\n portal)", "def allowed(self):\n return command_allowed(self.name(), self.user_id())", "def has_permission(self, request, view):\n if view.action == 'create': #creating user. Anyone can register\n return True\n elif request.user.is_superuser: #superusers are allowed free access\n return True\n elif view.action in ['retrieve','update','destroy']:\n # action is GET PUT or DELETE and user is not superuser.\n # PUT and DELETE are relegated to object permissions\n # if GET is access to detail, relegate to object permissions, if GET is access to listing then not allow\n return True \n else: \n return False", "def can_access(self, user):\r\n # Sanity check (this should normally be ensured by the caller).\r\n if user.site_id != self.site_id:\r\n return False\r\n return (self.open_access_granted()\r\n or self.permissions().get(user.id) != None)", "def can_request_assistance(user):\n return _is_in_acl(user, 'authorized')", "def allow(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"allow\")", "def __canVisitSite(self, stringUrl):\r\n # extract the robots.txt url\r\n parsedUrl = urlparse.urlparse(stringUrl)\r\n robotsUrl = urlparse.urlunparse((parsedUrl[0], parsedUrl[1], \"robots.txt\",\r\n parsedUrl[3], parsedUrl[4], parsedUrl[5]))\r\n #logging.debug(\"Robots for [%s] is [%s]\" % (stringUrl, robotsUrl))\r\n\r\n # parse robots.txt\r\n self.robotParser.set_url(robotsUrl)\r\n self.robotParser.read()\r\n\r\n # check permission to access page\r\n return self.robotParser.can_fetch(\"Ugrah/0.1\", stringUrl)", "def access_is_allowed(self, user, project, path):\r\n # most common cases first\r\n if user == project.owner:\r\n return True\r\n if self.team_allowed is None:\r\n return False\r\n if path in self.team_allowed and user in project.team_members:\r\n return True\r\n return False", "def allow_submit_for_review(self, user):\n return all(\n [\n self.author == user,\n self.status == constants.COLLECTING,\n self.moderation_requests.exists(),\n ]\n )", "def registration_allowed(self, request):\n\t\treturn getattr(settings, 'REGISTRATION_OPEN', True)", "def authorized_to_upload(self):\n if self.userobject is None:\n return False\n return self.userobject.may_upload or self.userobject.superuser", "def __can_upload(bill, approver):\n return (approver == bill.assign) and (bill.get_state_id() == STATE_DRAFT)", "def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc", "def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc", "def supports_authorization_rules(self):\n return # boolean", "def authorized_for_webstorage(self):\n if self.userobject is None:\n return False\n return 
self.userobject.may_use_webstorage or self.userobject.superuser" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decides the computation shape based on the split_size.
def ComputationShape(split_size):
  assert (split_size in SUPPORTED_SPLIT_SIZE), ('Model parallelism with %d',
                                                'devices is currently not'
                                                ' supported.' % split_size)
  return SUPPORTED_SPLIT_SIZE[split_size]
[ "def split_shape(self):\n return self.__split_shape", "def calculate_split_by_split_size(self):\n self.set_split_extents_by_split_size()\n return self.calculate_split_from_extents()", "def compute_splits(var_shape, block_size):\n splits = []\n split_sizes = []\n for i, d in enumerate(var_shape):\n if block_size > 0 and d > block_size:\n nsplit = math.ceil(d / block_size)\n sizes = np.ones(nsplit, dtype=np.int32) * block_size\n if d % block_size > 0:\n sizes[-1] = d % block_size\n splits.append((i, tuple(sizes)))\n split_sizes.append(sizes)\n else:\n split_sizes.append(np.array([d], dtype=np.int32))\n return splits, split_sizes", "def calculate_split_parameters(width, height, split_size=128, overlap=40):\n new_shape = []\n images_amount = []\n for idx, x in enumerate([width, height]):\n n = 0\n while True:\n result = split_size + (split_size - overlap) * n\n if (result - x) > 0:\n break\n n += 1\n images_amount.append(n + 1)\n new_shape.append(result)\n\n return new_shape, images_amount", "def splits(self):\n if self.canvas_origin is not None:\n if self.orientation == \"horizontal\":\n content_size = Geometry.IntSize.make(self.canvas_size).height\n else:\n content_size = Geometry.IntSize.make(self.canvas_size).width\n with self.__lock:\n sizings = copy.deepcopy(self.__sizings)\n _, sizes = self.__calculate_layout(self.canvas_size, sizings)\n return [float(size) / content_size for size in sizes]\n return None", "def calculate_split_from_extents(self):\n self.logger.debug(\"self.split_shape=%s\", self.split_shape)\n self.logger.debug(\"self.split_begs=%s\", self.split_begs)\n self.logger.debug(\"self.split_ends=%s\", self.split_ends)\n\n ret = \\\n _np.array(\n [\n tuple(\n [\n slice(\n max([\n self.split_begs[d][idx[d]]\n + self.array_start[d]\n - self.halo[d, 0]\n * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),\n self.tile_beg_min[d]\n ]),\n min([\n self.split_ends[d][idx[d]]\n + self.array_start[d]\n + self.halo[d, 1]\n * (self.split_ends[d][idx[d]] > self.split_begs[d][idx[d]]),\n self.tile_end_max[d]\n ])\n )\n for d in range(len(self.split_shape))\n ]\n )\n for idx in\n _np.array(\n _np.unravel_index(\n _np.arange(0, _np.product(self.split_shape)),\n self.split_shape\n )\n ).T\n ],\n dtype=[(\"%d\" % d, \"object\") for d in range(len(self.split_shape))]\n ).reshape(self.split_shape)\n\n return ret", "def find_splitsize(total_cols, total_rows, col_splits, row_splits):\n divisor = 100 * col_splits\n # Image dimension is rounded up (padded) to nearest 100*col_spits\n cols_pad = math.ceil(float(total_cols) / divisor) * divisor\n rows_pad = math.ceil(float(total_rows) / divisor) * divisor\n # Number of columns and rows in each split\n split_cols = int(cols_pad / col_splits)\n split_rows = int(rows_pad / row_splits)\n\n return split_cols, split_rows", "def get_n_splits(self):\r\n return self.n_splits", "def _chooseChunkshape(self, blockshape):\n # Choose a chunkshape:\n # - same time dimension as blockshape\n # - same channel dimension as blockshape\n # - aim for roughly 100k (for decent compression/decompression times)\n # - aim for roughly the same ratio of xyz sizes as the blockshape\n\n # Start with a copy of blockshape\n axes = self.Input.meta.getTaggedShape().keys()\n taggedBlockshape = collections.OrderedDict( zip(axes, self._blockshape) )\n taggedChunkshape = copy.copy( taggedBlockshape )\n\n dtypeBytes = self._getDtypeBytes(self.Input.meta.dtype)\n\n # How much xyz space can a chunk occupy and still fit within 100k?\n desiredSpace = 100000.0 / dtypeBytes\n for key in 
'tc':\n if key in taggedChunkshape:\n desiredSpace /= taggedChunkshape[key] \n logger.debug(\"desired space: {}\".format( desiredSpace ))\n\n # How big is the blockshape?\n blockshapeSpace = 1.0\n numSpaceAxes = 0.0\n for key in 'xyz':\n if key in taggedBlockshape:\n numSpaceAxes += 1.0\n blockshapeSpace *= taggedBlockshape[key]\n logger.debug(\"blockshape space: {}\".format( blockshapeSpace ))\n \n # Determine factor to shrink each spatial dimension\n factor = blockshapeSpace / float(desiredSpace)\n factor = factor**(1/numSpaceAxes)\n logger.debug(\"factor: {}\".format(factor))\n \n # Adjust by factor\n for key in 'xyz':\n if key in taggedChunkshape:\n taggedChunkshape[key] /= factor\n taggedChunkshape[key] = max(1, taggedChunkshape[key])\n taggedChunkshape[key] = int(taggedChunkshape[key])\n\n chunkshape = taggedChunkshape.values()\n \n # h5py will crash if the chunkshape is larger than the dataset shape.\n chunkshape = numpy.minimum(self._blockshape, chunkshape )\n\n chunkshape = tuple( chunkshape )\n logger.debug(\"Using chunk shape: {}\".format( chunkshape ))\n return chunkshape", "def split_num_slices_per_axis(self):\n return self.__split_num_slices_per_axis", "def splits(cls, tokenizer, r8=False, val_size=0.1):\n (train_docs, test_docs, val_docs), unique_cls = cls.prepare_reuters(r8, val_size)\n\n train_split = cls.get_split(tokenizer, train_docs, unique_cls)\n test_split = cls.get_split(tokenizer, test_docs, unique_cls)\n val_split = cls.get_split(tokenizer, val_docs, unique_cls)\n\n return train_split, test_split, val_split", "def split(\n self, split_func, num_splits, f_args=None, f_kwargs=None, extract_metadata=False\n ):\n f_args = tuple() if f_args is None else f_args\n f_kwargs = {} if f_kwargs is None else f_kwargs\n return self._wrap_partitions(\n self.deploy_splitting_func(\n self.axis,\n split_func,\n f_args,\n f_kwargs,\n num_splits,\n *self.list_of_blocks,\n extract_metadata=extract_metadata,\n ),\n extract_metadata=extract_metadata,\n )", "def _create_split_op(self, op: Op):\n split_name_parts = ['Split_', str(self._split_count)]\n split_name = ''.join(split_name_parts)\n self._split_count += 1\n split_dotted_name_parts = [self._model_name, split_name]\n split_dotted_name = '.'.join(split_dotted_name_parts)\n is_anonymous = True\n split_op = Op(split_name, split_dotted_name, op.output_shape, is_anonymous, 'Split')\n self._ops[split_name] = split_op\n return split_op", "def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape", "def get_split(\n self, split: int = 0\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n assert isinstance(split, int)\n assert split >= 0\n assert split < 10\n x_test = self.x[self.test_mask[:, split], :]\n x_train = self.x[self.train_mask[:, split], :]\n y_test = self.y[self.test_mask[:, split], :]\n y_train = self.y[self.train_mask[:, split], :]\n return x_train, y_train, x_test, y_test", "def _create_split_op(self, op: Op) -> Op:\n split_name_parts = ['Split_', str(self._split_count)]\n split_name = ''.join(split_name_parts)\n self._split_count += 1\n split_dotted_name_parts = [self._model_name, split_name]\n split_dotted_name = '.'.join(split_dotted_name_parts)\n is_anonymous = True\n split_op = Op(name=split_name, dotted_name=split_dotted_name, output_shape=op.output_shape,\n is_anonymous=is_anonymous, op_type='Split', residing_module=None)\n self._ops[split_name] = split_op\n return split_op", "def _split(array, n_splits):\n assert array.ndim == 1\n n_elements = array.shape[0]\n\n remainder = 
n_elements % n_splits\n split_sizes = []\n for i in range(n_splits):\n if i < remainder:\n split_sizes.append(n_elements // n_splits + 1)\n else:\n split_sizes.append(n_elements // n_splits)\n return tf.split(array, split_sizes)", "def split(self, split_sizes: list):\n if not all(isinstance(x, int) for x in split_sizes):\n raise ValueError(\"Value of split_sizes must be a list of integers.\")\n cloudlist = []\n curi = 0\n for i in split_sizes:\n cloudlist.append(self[curi : curi + i])\n curi += i\n return cloudlist", "def state_shape(self, batch_size):\n return ([self.num_layers * self.num_dirs, batch_size, self.num_units],\n [self.num_layers * self.num_dirs, batch_size, self.num_units])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the overall step rate and adds a summary.
def _RecordStepRate(self, current_steps, total_examples):
  self._time_steps.append((time.time(), current_steps, total_examples))
  # Keeps a relative long history to compute a smooth steps/second.
  # Removes duplicate stats for step = 0 to get rid of the warm-up period.
  while (self._time_steps[-1][1] - self._time_steps[0][1] > 10000 or
         (len(self._time_steps) > 1 and
          self._time_steps[0][1] == self._time_steps[1][1])):
    del self._time_steps[0]
  (t0, s0, e0), (t1, s1, e1) = self._time_steps[0], self._time_steps[-1]
  rate = 0.0
  example_rate = 0.0
  if t1 > t0 + 1:
    elapsed_secs = t1 - t0
    rate = (s1 - s0) / elapsed_secs
    example_rate = (e1 - e0) / elapsed_secs
  tf.logging.info('Steps/second: %f, Examples/second: %f', rate, example_rate)
  self._SummarizeValue(current_steps, 'global_step/sec', rate)
  self._SummarizeValue(current_steps, 'examples/sec', example_rate)
  return rate, example_rate
[ "def get_total(self, time_step: int) -> Decimal:\n return self.rate * self.get_rounded_hours(time_step)", "def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None, attns=None):\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats, attns=attns,)", "def step(self) -> float:\n self.step_count += 1\n if self.step_count > self.warmup and self.val < self.final_val:\n self.val += self.step_per_epoch\n self.val = min(self.val, self.final_val)\n return self.val", "def median_progress_rate_speedup(self, prefix):\n total_median_progress_rate_runtime = 0\n runtimes_for_combined_stages = []\n all_start_finish_times = []\n for id, stage in self.stages.iteritems():\n median_rate_runtimes = stage.task_runtimes_with_median_progress_rate()\n if id in self.stages_to_combine:\n runtimes_for_combined_stages.extend(median_rate_runtimes)\n else:\n no_stragglers_runtime, start_finish_times = simulate.simulate(\n median_rate_runtimes, concurrency.get_max_concurrency(stage.tasks))\n start_finish_times_adjusted = [\n (start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \\\n for start, finish in start_finish_times]\n total_median_progress_rate_runtime += no_stragglers_runtime\n all_start_finish_times.append(start_finish_times_adjusted)\n print \"No stragglers runtime: \", no_stragglers_runtime\n print \"MAx concurrency: \", concurrency.get_max_concurrency(stage.tasks)\n\n if len(runtimes_for_combined_stages) > 0:\n no_stragglers_runtime, start_finish_times = simulate.simulate(\n runtimes_for_combined_stages, self.combined_stages_concurrency)\n start_finish_times_adjusted = [\n (start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \\\n for start, finish in start_finish_times]\n total_median_progress_rate_runtime += no_stragglers_runtime\n all_start_finish_times.append(start_finish_times_adjusted)\n\n self.write_simulated_waterfall(all_start_finish_times, \"%s_sim_median_progress_rate\" % prefix)\n return total_median_progress_rate_runtime * 1.0 / self.get_simulated_runtime()", "def summarize(usl_fit):\n print\n print '----- Summary -----'\n print\n print usl_fit.fit_report()", "def print_summary():\n global current_class, statisitcs\n # compute precision, recall, and F-measure\n tp, fp, fn = statisitcs\n precision = tp / (float(tp + fp) or 1e6)\n recall = tp / (float(tp + fn) or 1e6)\n if precision or recall:\n fmeasure = 2 * precision * recall / (precision + recall)\n else:\n fmeasure = 0.0\n # output statistics\n foutput.fprint(\"{:15s}{:7.2f}{:7.2f}{:7.2f}\".format(\n current_class, precision, recall, fmeasure))\n # reset the counters\n current_class = \"\"\n statisitcs = [0, 0, 0]", "def collect_metrics(self, val_env, val_episodes: int):\n total_return = 0.0\n for _ in range(val_episodes):\n time_step = val_env.reset()\n episode_return = 0.0\n\n while not time_step.is_last():\n action_step = self.agent.policy.action(time_step)\n time_step = val_env.step(action_step.action)\n episode_return += time_step.reward\n total_return += episode_return\n\n avg_return = total_return // val_episodes\n\n with self.writer.as_default():\n tf.summary.scalar(\"avg_return\", avg_return.numpy()[0], step=self.global_episode)", "def update_step_and_losses(self):\n self.define_logging() # set losses and loss names\n if self.running_losses is None:\n self.init_losses()\n for loss_name, loss in self.losses.items():\n if loss:\n 
self.running_losses[loss_name] += float(loss)\n self.step += 1\n self.running_loss_step += 1", "def _report_step(self, step, train_stats=None,\n valid_stats=None):\n if self.report_manager is not None:\n return self.report_manager.report_step(0,\n step, train_stats=train_stats,\n valid_stats=valid_stats)", "def _maybe_record_behavior_summaries(self, env_step, worker_name):\n if env_step % self._summary_interval == 0:\n for metric in self._behavior_metrics[worker_name]:\n add_summary(self._train_file_writers[worker_name],\n 'Metrics/' + metric.name, metric.result(), env_step)", "def calc_running_avg_loss(loss, running_avg_loss, step, decay=0.99):\n if running_avg_loss == 0: # on the first iteration just take the loss\n running_avg_loss = loss\n else:\n running_avg_loss = running_avg_loss * decay + (1 - decay) * loss\n #running_avg_loss = min(running_avg_loss, 12) # clip\n #loss_sum = tf.Summary()\n #tag_name = 'running_avg_loss/decay=%f' % (decay)\n #loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)\n #summary_writer.add_summary(loss_sum, step)\n #tf.logging.info('running_avg_loss: %f', running_avg_loss)\n return running_avg_loss", "def record_summary(self, t):\n\n fd = {\n self.avg_reward_placeholder: self.avg_reward,\n self.avg_collsions_placeholder: self.avg_collisions,\n self.avg_distance_placeholder: self.avg_distance,\n #self.eval_reward_placeholder: self.eval_reward,\n }\n summary = self.sess.run(self.merged, feed_dict=fd)\n # tensorboard stuff\n self.file_writer.add_summary(summary, t)", "def finalize_averages(self):\n self.compute_averages()\n self.compute_error()\n self.acceptance_rate = self.acceptance_rate / self.N\n #self.print_averages()", "def show_summary(self):\n length = self.sum_length.first()\n coef = self.sum_coef.first()\n cons = length*coef/100\n print(\"Total length: \" + str(length) +\n \"\\nTotal consumption: \" + str(coef) +\n \"\\nTotal fuel used: \" + str(cons))", "def learning_rate(self, step):\n if self._lr_schedule is not None:\n with fastmath.use_backend(fastmath.Backend.NUMPY):\n return self._lr_schedule(step)\n opt = self._optimizer\n if callable(opt): # when optimizer is a function, like Adam, not Adam()\n opt = opt()\n params = opt._init_opt_params # pylint: disable=protected-access\n return params['learning_rate']", "def _calculate_overall_performance(self):\n return sum(self._episodic_performances) / len(self._episodic_performances)", "def on_statistics(self, step, akku):\n for s in self._states:\n self._stats_step.tstats.append(s.statistics)\n self._stats_accu.accu(self._stats_step)\n self.add_statistics(step, self._stats_step)\n self.add_statistics(akku, self._stats_accu)\n self._stats_step.reset()", "def calculate_summary(self):\n\n # compute point estimates\n self.posterior_mean_voting_prefs[0] = self.sampled_voting_prefs[0].mean()\n self.posterior_mean_voting_prefs[1] = self.sampled_voting_prefs[1].mean()\n\n # compute credible intervals\n percentiles = [2.5, 97.5]\n self.credible_interval_95_mean_voting_prefs[0] = np.percentile(\n self.sampled_voting_prefs[0], percentiles\n )\n self.credible_interval_95_mean_voting_prefs[1] = np.percentile(\n self.sampled_voting_prefs[1], percentiles\n )", "def step_end(self):\n if self.log_time:\n total_time = time.monotonic() - self.start_time\n self.update(total_time=total_time)\n if self.total is not None:\n self.update(eta_time=total_time / (self.cur_step + 1) * (self.total - self.cur_step-1))\n self.cur_step += 1\n for name, metric in self.metrics.items():\n del metric[self.cur_step:]\n while 
len(metric) < self.cur_step:\n metric.append(None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process all per-example tensor outfeed data for a TPU sess.run.
def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
  if not per_example_tensors:
    return tf.no_op()

  tensor_shapes = [
      py_utils.GetShape(per_example_tensors[key])
      for key in sorted(per_example_tensors)
  ]
  tensor_types = [
      tf.as_dtype(per_example_tensors[key].dtype)
      for key in sorted(per_example_tensors)
  ]

  def LoopBody(i, *input_arrays):
    """Process outfeed data for a single TpuTrainStep.

    Args:
      i: current loop index.
      *input_arrays: One tf.TensorArray per outfeed tensor.

    Returns:
      i+1 (new index) plus post-write tf.TensorArray handles.
    """
    # Outfeed ops execute on each JF node, so they must be located on the
    # nodes.
    outfeed_devices = []
    device_assignment = py_utils.GetTpuDeviceAssignment()
    assert device_assignment
    for replica in xrange(device_assignment.num_replicas):
      for core in xrange(device_assignment.num_cores_per_replica):
        with tf.device(device_assignment.host_device(replica, core)):
          outfeed_devices.append(
              tf.contrib.tpu.outfeed_dequeue_tuple(
                  tensor_types,
                  tensor_shapes,
                  device_ordinal=device_assignment.tpu_ordinal(replica, core)))
    offset = i * num_devices
    output_arrays = list(input_arrays)
    # Each output_array holds a different per-example tensor. We get results
    # for each tensor from each TPU for each TpuTrainStep call.
    for j in range(len(output_arrays)):
      for k in range(len(outfeed_devices)):
        output_arrays[j] = output_arrays[j].write(offset + k,
                                                  outfeed_devices[k][j])
    return tuple([i + 1] + output_arrays)

  def LoopCond(i, *output_arrays):
    del output_arrays
    return i < num_loops

  output_arrays = [
      tf.TensorArray(
          tensor_types[i],
          size=num_loops * num_devices,
          element_shape=tensor_shapes[i]) for i in range(len(tensor_shapes))
  ]
  # Loop once for each time that TpuTrainStep runs.
  output_arrays = tf.while_loop(
      LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
  concatenated_arrays = [array.concat() for array in output_arrays]
  return dict(zip(sorted(per_example_tensors), concatenated_arrays))
[ "def produce(self, dataset, batch, output_tensors):\n\n # Fill feed dict\n feed_dict = self.fill_feed_dict(batch)\n\n # Run one step of the model\n output_values = self.sess.run(output_tensors, feed_dict=feed_dict)\n\n return output_values", "def run(\n self,\n fetches: Any,\n feed_dict: Dict[tf.Tensor, np.ndarray] = {},\n tag: Optional[str] = None,\n write_trace: bool = False,\n output_partition_graphs: bool = False,\n ):\n\n sanitized_fetches = self._sanitize_fetches(fetches)\n\n if not __TFE_STATS__ or tag is None:\n fetches_out = super(Session, self).run(\n sanitized_fetches,\n feed_dict=feed_dict\n )\n else:\n session_tag = \"{}{}\".format(tag, _run_counter[tag])\n run_tag = os.path.join(__TENSORBOARD_DIR__, session_tag)\n _run_counter[tag] += 1\n\n writer = tf.summary.FileWriter(run_tag, self.graph)\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE,\n output_partition_graphs=output_partition_graphs)\n run_metadata = tf.RunMetadata()\n\n fetches_out = super(Session, self).run(\n sanitized_fetches,\n feed_dict=feed_dict,\n options=run_options,\n run_metadata=run_metadata\n )\n\n if output_partition_graphs:\n for i, g in enumerate(run_metadata.partition_graphs):\n tf.io.write_graph(\n g,\n logdir=os.path.join(__TENSORBOARD_DIR__, session_tag),\n name='partition{}.pbtxt'.format(i),\n )\n\n writer.add_run_metadata(run_metadata, session_tag)\n writer.close()\n\n if __TFE_TRACE__ or write_trace:\n tracer = timeline.Timeline(run_metadata.step_stats)\n chrome_trace = tracer.generate_chrome_trace_format()\n with open('{}/{}.ctr'.format(__TENSORBOARD_DIR__, session_tag), 'w') as f:\n f.write(chrome_trace)\n\n return fetches_out", "def run(self, data_batch):\n return self.sess.run(self.predictions, feed_dict={self.data_ph:data_batch})", "def get_post_samples(self, obs, sess):\n feed_dictionary = {}\n for mask_i in range(len(self.pi_dropout_mask_phs)):\n feed_dictionary[self.pi_dropout_mask_phs[mask_i]] = np.ones(self.pi_dropout_mask_phs[mask_i].shape.as_list())\n\n # Sample states\n if self.act_dim == 1:\n feed_dictionary[self.x_ph] = np.random.normal(obs,\n self.sample_obs_std,\n size=(self.n_post_action,self.obs_dim))\n else:\n feed_dictionary[self.x_ph] = np.random.multivariate_normal(obs,\n self.sample_obs_std*np.identity(self.obs_dim),\n self.n_post_action)\n # import pdb; pdb.set_trace()\n a_post = sess.run(self.pi, feed_dict=feed_dictionary)\n return a_post", "def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:\n fake_imgs = []\n for pred in data_samples:\n fake_img_ = pred\n # get ema/orig results\n if self.sample_model in fake_img_:\n fake_img_ = fake_img_[self.sample_model]\n # get specific fake_keys\n if (self.fake_key is not None and self.fake_key in fake_img_):\n fake_img_ = fake_img_[self.fake_key]\n else:\n # get img tensor\n fake_img_ = fake_img_['fake_img']\n fake_imgs.append(fake_img_)\n fake_imgs = torch.stack(fake_imgs, dim=0)\n feat = self.extract_features(fake_imgs)\n feat_list = list(torch.split(feat, 1))\n self.fake_results += feat_list", "def every_after_train_step_callback_fn(self, sess):\n pass", "def gather(self, outputs, output_device):\n def _coll_tensor(coll_list, out_tensor):\n out_tensor = gather(outputs=[out_tensor],\n target_device=self.output_device, dim=0)\n coll_list.append(out_tensor)\n\n def _coll_dict(coll_dict, out_dict):\n for k, v in out_dict.items():\n if k not in coll_dict:\n coll_dict[k] = []\n coll_dict[k].append(v)\n\n def _coll_dec_state(coll_dstate, out_dstate):\n for attr_name, attr_val in 
out_dstate.__dict__.items():\n if isinstance(attr_val, Tensor):\n if getattr(coll_dstate, attr_name) is None:\n setattr(coll_dstate, attr_name, [])\n _coll_tensor(getattr(coll_dstate, attr_name), attr_val)\n elif isinstance(attr_val, dict):\n if getattr(coll_dstate, attr_name) is None:\n setattr(coll_dstate, attr_name, dict())\n _coll_dict(getattr(coll_dstate, attr_name), attr_val)\n elif attr_val is None:\n continue\n else:\n raise NotImplementedError\n\n # minor reformatting\n for indx, o in enumerate(outputs):\n if not isinstance(o, (list, tuple)):\n outputs[indx] = [o]\n\n coll_outputs = [None for _ in range(len(outputs[0]))]\n\n # collecting outputs\n for output in outputs:\n for indx, o in enumerate(output):\n # statistics, such as loss, assumed to be of simple types\n if isinstance(o, dict):\n if coll_outputs[indx] is None:\n coll_outputs[indx] = dict()\n _coll_dict(coll_outputs[indx], o)\n # tensors\n elif isinstance(o, Tensor):\n if coll_outputs[indx] is None:\n coll_outputs[indx] = []\n _coll_tensor(coll_outputs[indx], o)\n # decoder state\n elif isinstance(o, DecState):\n if coll_outputs[indx] is None:\n coll_outputs[indx] = DecState()\n coll_dec_state = coll_outputs[indx]\n _coll_dec_state(coll_dec_state, o)\n else:\n raise NotImplementedError\n\n # aggregating outputs\n for indx, o in enumerate(coll_outputs):\n if isinstance(o, list):\n coll_outputs[indx] = T.cat(o)\n elif isinstance(o, dict):\n coll_outputs[indx] = {k: sum(v) / len(v) for k, v in o.items()}\n elif isinstance(o, DecState):\n for attr_name, attr_val in o.__dict__.items():\n if isinstance(attr_val, list):\n setattr(o, attr_name, T.cat(attr_val))\n elif isinstance(attr_val, dict):\n for k, v in attr_val.items():\n attr_val[k] = T.cat(v)\n elif attr_val is None:\n continue\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n if len(coll_outputs) == 1:\n coll_outputs = coll_outputs[0]\n\n return coll_outputs", "def run_eval_step(self, sess, batch):\n feed_dict = self._make_feed_dict(batch)\n to_return = {\n 'summaries': self._summaries,\n 'loss': self._loss,\n 'global_step': self.global_step,\n }\n if self._hps.coverage:\n to_return['coverage_loss'] = self._coverage_loss\n return sess.run(to_return, feed_dict)", "def infer(self):\r\n counter = 0\r\n output = {}\r\n while True:\r\n batch = self._batcher.next_batch() # 1 example repeated across batch\r\n if batch is None: # finished decoding dataset in single_pass mode\r\n print(\"Decoder has finished reading dataset for single_pass.\")\r\n # log original information\r\n with open(os.path.join(self._decode_dir, \"output.json\"), 'w', encoding='utf-8') as w:\r\n json.dump(output, w)\r\n print(\"Output has been saved in %s.\" % self._decode_dir)\r\n\r\n #start evaluation\r\n evaluate.main(self.ckpt_path, FLAGS.log_root, self._decode_dir, FLAGS.mode, FLAGS.multi_label_eval)\r\n return\r\n\r\n background_span = data.show_background_span(batch.original_backgrounds_token[0], batch.original_b_starts[0], batch.original_b_ends[0])\r\n response_span = data.show_background_span(batch.original_responses_token[0], batch.original_r_starts[0], batch.original_r_ends[0])\r\n # Run greed search to get best Hypothesis\r\n best_hyp = greed_search.run_greed_search(self._sess, self._model, self._vocab, batch)\r\n best_hyp.tokens = [token for token in best_hyp.tokens if token not in [None]]\r\n # Extract the output ids from the hypothesis and convert back to words\r\n output_ids = best_hyp.tokens[1:]\r\n decoded_token, highlights_decoded_token, spans = 
data.outputids2words(output_ids, self._vocab, batch.bac_oovs[0], batch.original_backgrounds_token[0])\r\n\r\n if output_ids[-1] == 3:\r\n output_ids_semantic = output_ids[:(len(output_ids)-1)]\r\n else:\r\n output_ids_semantic = output_ids\r\n\r\n ids_for_print = [str(i)for i in output_ids_semantic]\r\n ids_for_print = ' '.join(ids_for_print)\r\n\r\n switch_ref_probs = best_hyp.switch_ref_probs\r\n switch_ref_probs = [str(i) for i in switch_ref_probs]\r\n switch_ref_probs = ' '.join(switch_ref_probs)\r\n\r\n switch_gen_probs = best_hyp.switch_gen_probs\r\n switch_gen_probs = [str(i) for i in switch_gen_probs]\r\n switch_gen_probs = ' '.join(switch_gen_probs)\r\n\r\n switch_gen_pred_probs = best_hyp.switch_gen_pred_probs\r\n switch_gen_pred_probs = [str(i) for i in switch_gen_pred_probs]\r\n switch_gen_pred_probs = ' '.join(switch_gen_pred_probs)\r\n\r\n switch_gen_copy_probs = best_hyp.switch_gen_copy_probs\r\n switch_gen_copy_probs = [str(i) for i in switch_gen_copy_probs]\r\n switch_gen_copy_probs = ' '.join(switch_gen_copy_probs)\r\n\r\n # Remove the [STOP] token from decoded_words, if necessary\r\n try:\r\n fst_stop_idx = decoded_token.index(data.STOP_DECODING) # index of the (first) [STOP] symbol\r\n fst_stop_idx1 = highlights_decoded_token.index(data.STOP_DECODING)\r\n decoded_token = decoded_token[:fst_stop_idx]\r\n highlights_decoded_token = highlights_decoded_token[:fst_stop_idx1]\r\n\r\n if len(decoded_token) == 0:\r\n decoded_token.append(\".\")\r\n\r\n except ValueError:\r\n decoded_token = decoded_token\r\n highlights_decoded_token = highlights_decoded_token\r\n\r\n spans_output = ' '.join(spans)\r\n decoded_output = ' '.join(decoded_token)\r\n highlights_decoded_output = ' '.join(highlights_decoded_token)\r\n\r\n output[batch.original_example_ids[0]] = {\"background\": background_span, \"context\": batch.original_contexts[0], \"highlights_ref_response\": response_span,\r\n \"highlights_inferred_response\": highlights_decoded_output, \"ref_response\": batch.original_responses[0],\r\n \"inferred_response\": decoded_output, \"ref_span\": batch.original_spans[0],\"inferred_spans\": spans_output, \"output_index\": output_ids_semantic,\r\n \"switch_ref_probs\": switch_ref_probs, \"switch_gen_probs\": switch_gen_probs,\r\n \"switch_gen_pred_probs\": switch_gen_pred_probs,\"switch_gen_copy_probs\": switch_gen_copy_probs}\r\n\r\n self.write_for_observation(batch.original_example_ids[0], background_span, batch.original_contexts[0], response_span, highlights_decoded_output, ids_for_print, switch_ref_probs, switch_gen_probs, switch_gen_pred_probs, switch_gen_copy_probs, counter)\r\n counter += 1 # this is how many examples we've decoded\r", "def _process_batch(tensor_dict, sess, batch_index, counters,\n losses_dict=None):\n # print(eval_config)\n try:\n if not losses_dict:\n losses_dict = {}\n result_dict, result_losses_dict = sess.run( [tensor_dict, losses_dict] )\n counters['success'] += 1\n except tf.errors.InvalidArgumentError:\n logging.info( 'Skipping image' )\n counters['skipped'] += 1\n return {}, {}\n with sess.graph.as_default():\n # global_step = tf.train.global_step( sess, 200000)\n global_step = 200000\n if batch_index < eval_config.num_visualizations:\n tag = 'image-{}'.format( batch_index )\n eval_util.visualize_detection_results(\n result_dict,\n tag,\n global_step,\n categories=categories,\n summary_dir=FLAGS.eval_dir,\n export_dir=eval_config.visualization_export_dir,\n show_groundtruth=eval_config.visualize_groundtruth_boxes,\n 
groundtruth_box_visualization_color=eval_config.\n groundtruth_box_visualization_color,\n min_score_thresh=eval_config.min_score_threshold,\n max_num_predictions=eval_config.max_num_boxes_to_visualize,\n skip_scores=eval_config.skip_scores,\n skip_labels=eval_config.skip_labels,\n keep_image_id_for_visualization_export=eval_config.\n keep_image_id_for_visualization_export )\n return result_dict, result_losses_dict", "def trace_tpu(self, graph,\n tensor_fetches,\n op_fetches=None,\n num_replicas=None,\n num_replicas_per_host=None,\n num_hosts=None):\n\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring '\n 'multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n self._device_type = _DEVICE_TYPE_TPU\n self._num_replicas = num_replicas\n self._num_replicas_per_host = num_replicas_per_host\n self._num_hosts = num_hosts\n if self._num_replicas is not None:\n if self._num_replicas_per_host is None:\n self._num_replicas_per_host = 8\n if self._num_hosts is None:\n self._num_hosts = num_replicas // self._num_replicas_per_host + \\\n (num_replicas % self._num_replicas_per_host > 0)\n\n if self._num_replicas_per_host > 8:\n # Checks for the assumption in _generate_flush_cache_op().\n raise RuntimeError('num_replicas_per_host (%d) is '\n 'greater than 8'%self._num_replicas_per_host)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_before_tt.pbtxt')\n with graph.as_default():\n self._add_replica_id_to_graph()\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,\n on_tpu=True)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_after_tt.pbtxt')\n return tensor_fetches", "def write_eval(self, sess, feed_dict, epoch_per):\n if self.eval_write_op != None:\n ret_eval = sess.run(self.eval_write_op, feed_dict=feed_dict)\n if self.eval_write_func != None:\n self.eval_write_func(ret_eval, self.sum_han.output_log, epoch_per)\n else:\n self.sum_han.eval_write_to_log(ret_eval)", "def inference(self, inputs, sess, mode):\n fetches = {}\n if mode == 'depth':\n fetches['depth'] = self.est_depth\n inputs_ph = self.inputs_depth\n if mode == 'egomotion':\n fetches['egomotion'] = self.est_egomotion\n inputs_ph = self.inputs_egomotion\n results = sess.run(fetches, feed_dict={inputs_ph: inputs})\n return results", "def output_thread(out_q, params):\n none_count = 0\n X = []\n Y = []\n while True:\n res = out_q.get()\n if res is None:\n none_count += 1\n else:\n X.append(res[0])\n Y.append(res[1])\n if none_count == params['n_threads']:\n break\n X = np.array(X)\n Y = np.array(Y)\n\n ones = np.sum(Y)\n zeros = np.size(Y) - ones\n total = ones + zeros\n \n print(\"P-phases (zeros):\", zeros, \"(\", 100*zeros/total, \"%)\")\n print(\"S-phases (ones):\", ones, \"(\", 100*ones/total, \"%)\")\n\n np.save(params[\"training_dset_X\"], X)\n np.save(params[\"training_dset_Y\"], Y)\n\n print(\"Saved the synthetic training dataset.\")\n\n return", "def build_outputs(self):\n with tf.variable_scope(\"build_baseline_outputs\"):\n self.semantic_tgt = self.upsample_semantic(self.pred_semantic_logits_tgt)\n self.depth_tgt = self.prepare_depth(self.pred_disp_tgt[0])\n self.disp_tgt = self.prepare_disp(self.pred_disp_tgt[0])", "def enqueue_ops_fn():\n per_host_sharded_inputs = []\n control_deps = []\n for _ in range(self.params[\"replicas_per_worker\"]):\n with 
tf.control_dependencies(control_deps):\n features, labels = iterator.get_next()\n if self.use_spatial_partition:\n self.input_dims_flattener.validate_and_flatten_input_dims(\n features, labels)\n flattened_inputs = self.input_flattener.flatten_features_and_labels(\n features, labels)\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n if self.use_spatial_partition:\n flattened_input_dims = (\n self.input_dims_flattener.flattened_input_dims)\n # pylint: disable=protected-access\n infeed = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]),\n host_id=host_id,\n input_partition_dims=flattened_input_dims,\n device_assignment=self.device_assignment)\n self.infeed_queue.append(infeed)\n return infeed.generate_enqueue_ops(per_host_sharded_inputs)\n\n infeed = tf.contrib.tpu.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n self.infeed_queue.append(infeed)\n return infeed.generate_enqueue_ops(\n per_host_sharded_inputs,\n tpu_ordinal_function=functools.partial(\n runner_utils.tpu_ordinal_fn,\n replicas_per_worker=self.params[\"replicas_per_worker\"]))", "def run(self, input_tvm_ndarrays):\n self._func(*input_tvm_ndarrays, self._adj_row_indices_tvm, self._adj_col_indices_tvm, self.out_tvm)\n return self.out_tvm", "def eval_one_epoch(sess, ops, test_writer,tracks=False,lstm_params=None):\n global EPOCH_CNT\n is_training = False\n log_string(str(datetime.now()))\n log_string('---- EPOCH %03d EVALUATION ----' % (EPOCH_CNT))\n test_idxs = np.arange(0, len(TEST_DATASET))\n num_batches = len(TEST_DATASET) / BATCH_SIZE\n\n # To collect statistics\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n iou2ds_sum = 0\n iou3ds_sum = 0\n iou3d_correct_cnt = 0\n iou3d_correct_cnt_old = 0\n iou3d_correct_cnt_05=0\n # E: This is necessary to collect features of batches before the evaluation\n if tracks:\n for batch_idx in range(int(num_batches)):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx + 1) * BATCH_SIZE\n # E: Get also batch_indices which shows the (world_id,frame_id,track_id) of the objects in the batch\n # E: Batch indices are valid (non-empty) only if the tracks flag is True\n batch_data, batch_label, batch_center, \\\n batch_hclass, batch_hres, \\\n batch_sclass, batch_sres, \\\n batch_rot_angle, batch_one_hot_vec, batch_indices = \\\n get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,\n NUM_POINT, NUM_CHANNEL,tracks=tracks)\n\n # Emec added the feature line\n # E: Get the features at the prev time steps of the objects in the batch\n batch_feat_lstm = get_batch_features(TEST_DATASET.feature_dict,\n batch_wft=batch_indices,tau=lstm_params['tau'],\n feat_len=lstm_params['feat_vec_len'],rev_order=True)\n # E: Get the number of tracks at the tau prev. 
time steps for each object in the batch: How many of the tau-1 frames before the current frames of the objects contain the same object with the same track id \n batch_seq_len = batch_track_num(feature_dict=TEST_DATASET.feature_dict,wfts=batch_indices)\n\n feed_dict = {ops['pointclouds_pl']: batch_data,\n ops['one_hot_vec_pl']: batch_one_hot_vec,\n ops['labels_pl']: batch_label,\n ops['centers_pl']: batch_center,\n ops['heading_class_label_pl']: batch_hclass,\n ops['heading_residual_label_pl']: batch_hres,\n ops['size_class_label_pl']: batch_sclass,\n ops['size_residual_label_pl']: batch_sres,\n ops['is_training_pl']: is_training,\n ops['end_points']['lstm_layer']['feat_input']:batch_feat_lstm,\n ops['end_points']['lstm_layer']['pf_seq_len']:batch_seq_len}\n '''\n summary, step, loss_val, logits_val, iou2ds, iou3ds, box_est_feature_vec = \\\n sess.run([ops['merged'], ops['step'],\n ops['loss'], ops['logits'],\n ops['end_points']['iou2ds'], ops['end_points']['iou3ds'],\n ops['end_points']['box_est_feature_vec']],\n feed_dict=feed_dict)\n '''\n box_est_feature_vec = \\\n sess.run(ops['end_points']['box_est_feature_vec'],\n feed_dict=feed_dict)\n \n update_batch_features(feature_dict=TEST_DATASET.feature_dict,batch_wft=batch_indices,\n batch_feat_vecs=box_est_feature_vec)\n\n # Simple evaluation with batches\n for batch_id in range(int(num_batches)):\n start_idx = batch_id * BATCH_SIZE\n end_idx = (batch_id + 1) * BATCH_SIZE\n # E: Get also batch_indices which shows the (world_id,frame_id,track_id) of the objects in the batch\n # E: Batch indices are valid (non-empty) only if the tracks flag is True\n batch_data, batch_label, batch_center, \\\n batch_hclass, batch_hres, \\\n batch_sclass, batch_sres, \\\n batch_rot_angle, batch_one_hot_vec, batch_indices = \\\n get_batch(TEST_DATASET, test_idxs, start_idx, end_idx,\n NUM_POINT, NUM_CHANNEL,tracks=tracks)\n \n if tracks:\n # Emec added the feature line\n # E: Get the features at the prev time steps of the objects in the batch\n batch_feat_lstm = get_batch_features(TEST_DATASET.feature_dict,\n batch_wft=batch_indices,tau=lstm_params['tau'],\n feat_len=lstm_params['feat_vec_len'],rev_order=True)\n # E: Get the number of tracks at the tau prev. 
time steps for each object in the batch: How many of the tau-1 frames before the current frames of the objects contain the same object with the same track id \n batch_seq_len = batch_track_num(feature_dict=TEST_DATASET.feature_dict,wfts=batch_indices)\n \n feed_dict = {ops['pointclouds_pl']: batch_data,\n ops['one_hot_vec_pl']: batch_one_hot_vec,\n ops['labels_pl']: batch_label,\n ops['centers_pl']: batch_center,\n ops['heading_class_label_pl']: batch_hclass,\n ops['heading_residual_label_pl']: batch_hres,\n ops['size_class_label_pl']: batch_sclass,\n ops['size_residual_label_pl']: batch_sres,\n ops['is_training_pl']: is_training,\n ops['end_points']['lstm_layer']['feat_input']:batch_feat_lstm,\n ops['end_points']['lstm_layer']['pf_seq_len']:batch_seq_len}\n \n summary, step, loss_val, logits_val, iou2ds, iou3ds, box_est_feature_vec = \\\n sess.run([ops['merged'], ops['step'],\n ops['loss'], ops['logits'],\n ops['end_points']['iou2ds'], ops['end_points']['iou3ds'],\n ops['end_points']['box_est_feature_vec']],\n feed_dict=feed_dict)\n \n update_batch_features(feature_dict=TEST_DATASET.feature_dict,batch_wft=batch_indices,\n batch_feat_vecs=box_est_feature_vec)\n else:\n feed_dict = {ops['pointclouds_pl']: batch_data,\n ops['one_hot_vec_pl']: batch_one_hot_vec,\n ops['labels_pl']: batch_label,\n ops['centers_pl']: batch_center,\n ops['heading_class_label_pl']: batch_hclass,\n ops['heading_residual_label_pl']: batch_hres,\n ops['size_class_label_pl']: batch_sclass,\n ops['size_residual_label_pl']: batch_sres,\n ops['is_training_pl']: is_training}\n \n summary, step, loss_val, logits_val, iou2ds, iou3ds = \\\n sess.run([ops['merged'], ops['step'],\n ops['loss'], ops['logits'],\n ops['end_points']['iou2ds'], ops['end_points']['iou3ds']],\n feed_dict=feed_dict)\n test_writer.add_summary(summary, step+batch_id)\n\n preds_val = np.argmax(logits_val, 2)\n correct = np.sum(preds_val == batch_label)\n total_correct += correct\n total_seen += (BATCH_SIZE * NUM_POINT)\n loss_sum += loss_val\n for l in range(NUM_CLASSES):\n total_seen_class[l] += np.sum(batch_label == l)\n total_correct_class[l] += (np.sum((preds_val == l) & (batch_label == l)))\n iou2ds_sum += np.sum(iou2ds)\n iou3ds_sum += np.nansum(iou3ds)\n #IPython.embed()\n iou3d_correct_cnt_old += np.sum(iou3ds >= 0.7)\n #iou3d_correct_cnt += np.sum(iou3ds >= 0.7)\n # class specific IoU-based accuracy calculation\n cl = np.argmax(batch_one_hot_vec,axis=1)\n cl_ids = list(set(cl))\n for _cl in cl_ids:\n cl_iou3ds = iou3ds[np.where(cl==_cl)]\n if _cl == 0:\n iou3d_correct_cnt += np.sum(cl_iou3ds>=0.7)\n else:\n iou3d_correct_cnt += np.sum(cl_iou3ds>=0.5)\n \n iou3d_correct_cnt_05 += np.sum(iou3ds >= 0.5)\n for i in range(BATCH_SIZE):\n segp = preds_val[i, :]\n segl = batch_label[i, :]\n part_ious = [0.0 for _ in range(NUM_CLASSES)]\n for l in range(NUM_CLASSES):\n if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):\n part_ious[l] = 1.0 # class not present\n else:\n part_ious[l] = np.sum((segl == l) & (segp == l)) / \\\n float(np.sum((segl == l) | (segp == l)))\n \n log_string('eval mean loss: %f' % (loss_sum / float(num_batches)))\n log_string('eval segmentation accuracy: %f' % \\\n (total_correct / float(total_seen)))\n log_string('eval segmentation avg class acc: %f' % \\\n (np.mean(np.array(total_correct_class) / \\\n np.array(total_seen_class, dtype=np.float))))\n log_string('eval box IoU (ground/3D): %f / %f' % \\\n (iou2ds_sum / float(num_batches * BATCH_SIZE), iou3ds_sum / \\\n float(num_batches * BATCH_SIZE)))\n log_string('eval box 
estimation accuracy (IoU=0.7&0.5): %f' % \\\n (float(iou3d_correct_cnt) / float(num_batches * BATCH_SIZE)))\n log_string('eval box estimation accuracy (IoU=0.7): %f' % \\\n (float(iou3d_correct_cnt_old) / float(num_batches * BATCH_SIZE)))\n log_string('eval box estimation accuracy (IoU=0.5): %f' % \\\n (float(iou3d_correct_cnt_05) / float(num_batches * BATCH_SIZE)))\n\n EPOCH_CNT += 1\n eval_box_est_acc = float(iou3d_correct_cnt_old) / float(num_batches * BATCH_SIZE)\n return eval_box_est_acc", "def eval_dataset_and_unshard(viewdir_mlp_model, viewdir_mlp_params,\n rgb_features, directions, source_dataset,\n scene_params):\n\n @functools.partial(jax.pmap, in_axes=(0, 0), axis_name=\"batch\")\n def pmap_eval_fn(rgb_and_feature_chunk, direction_chunk):\n \"\"\"We need an inner function as only JAX types can be passed to a pmap.\"\"\"\n residual = model_utils.viewdir_fn(viewdir_mlp_model, viewdir_mlp_params,\n rgb_and_feature_chunk, direction_chunk,\n scene_params)\n output = jnp.minimum(1.0, rgb_and_feature_chunk[Ellipsis, 0:3] + residual)\n return jax.lax.all_gather(output, axis_name=\"batch\")\n\n num_hosts = jax.host_count()\n num_local_devices = jax.local_device_count()\n num_images = source_dataset.camtoworlds.shape[0]\n num_batches = math.ceil(num_images / num_hosts)\n num_batches = num_local_devices * math.ceil(num_batches / num_local_devices)\n\n outputs = []\n for i in range(len(rgb_features)):\n # First, evaluate the loss in parallel across all devices.\n output_batch = pmap_eval_fn(rgb_features[i], directions[i])\n output_batch = np.reshape(\n output_batch[0],\n (num_hosts, num_local_devices, source_dataset.h, source_dataset.w, 3))\n\n # Then, make sure to populate the output array in the same order\n # as the original dataset.\n for j in range(num_local_devices):\n base_index = (i * num_local_devices + j) * num_hosts\n for k in range(num_hosts):\n gathered_dataset_index = base_index + k\n if gathered_dataset_index >= num_images:\n break\n\n outputs.append(np.array(output_batch[k][j]).reshape(\n (source_dataset.h, source_dataset.w, 3)))\n\n return outputs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the checkpoint id for the decoder out file. Finds the checkpoint id in the checkpoint file name and compares to global step. If they diverge, uses the retrieved id and prints a warning.
def _GetCheckpointIdForDecodeOut(checkpoint_path, global_step):
  ckpt_id_from_file = int(re.sub(r'.*ckpt-', '', checkpoint_path))
  tf.logging.info('Loaded checkpoint is at global step: %d', global_step)
  tf.logging.info('Checkpoint path: %s', checkpoint_path)
  tf.logging.info('Checkpoint id according to checkpoint path: %d',
                  ckpt_id_from_file)
  if global_step != ckpt_id_from_file:
    tf.logging.warning(
        'Checkpoint id %d != global step %d. '
        'Will use checkpoint id from checkpoint file for '
        'writing decoder output.', ckpt_id_from_file, global_step)
  return ckpt_id_from_file
[ "def get_checkpoint():\n if FLAGS.checkpoint:\n checkpoint = os.path.join(FLAGS.model_dir, FLAGS.checkpoint)\n else:\n checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)\n # TODO(petershaw): Consider less hacky way to get current step.\n step = None\n if checkpoint is not None:\n step = int(checkpoint.split(\"-\")[-2])\n print(\"Using checkpoint %s at step %s\" % (checkpoint, step))\n return checkpoint, step", "def get_checkpoint(self, sequence_id: int) -> Checkpoint:\n pass", "def _checkpointLabelFromCheckpointDir(checkpointDir):\n assert checkpointDir.endswith(g_defaultCheckpointExtension)\n\n lastSegment = os.path.split(checkpointDir)[1]\n\n checkpointLabel = lastSegment[0:-len(g_defaultCheckpointExtension)]\n\n return checkpointLabel", "def _get_checkpoint_filename(ckpt_dir_or_file):\n if isinstance(ckpt_dir_or_file, os.PathLike):\n ckpt_dir_or_file = os.fspath(ckpt_dir_or_file)\n if gfile.IsDirectory(ckpt_dir_or_file):\n return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)\n return ckpt_dir_or_file", "def get_latest_checkpoint(ckpt_dir):\n\n listfiles = os.listdir(ckpt_dir)\n\n if len(listfiles) == 0:\n return None\n else:\n file_split = listfiles[0].split('_')\n extension = file_split[-1].split('.')[-1]\n\n basename = ''\n for i in range(len(file_split) - 1):\n basename = basename + file_split[i] + '_'\n\n listfiles_step = [int(file.split('_')[-1].split('.')[0]) for file in listfiles]\n listfiles_step = np.array(listfiles_step)\n global_step = listfiles_step.max()\n\n return basename + str(global_step) + '.' + extension", "def DecodeCheckpoint(self, sess, checkpoint_path):\n p = self._model_task.params\n samples_per_summary = p.eval.decoder_samples_per_summary\n if not samples_per_summary:\n samples_per_summary = p.eval.samples_per_summary\n self._LoadCheckpointForEval(sess, checkpoint_path)\n\n global_step = sess.run(py_utils.GetGlobalStep())\n dec_metrics = self._model_task.CreateDecoderMetrics()\n buffered_decode_out = []\n num_examples_metric = dec_metrics['num_samples_in_batch']\n start_time = time.time()\n while num_examples_metric.total_value < samples_per_summary:\n tf.logging.info('Fetching dec_output.')\n fetch_start = time.time()\n run_options = config_pb2.RunOptions(\n report_tensor_allocations_upon_oom=False)\n if self._summary_op is None:\n # No summaries were collected.\n dec_out = sess.run(self._dec_output, options=run_options)\n else:\n dec_out, summary = sess.run([self._dec_output, self._summary_op],\n options=run_options)\n self._summary_writer.add_summary(summary, global_step)\n post_process_start = time.time()\n tf.logging.info(\n 'Done fetching (%f seconds)' % (post_process_start - fetch_start))\n decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)\n if decode_out:\n buffered_decode_out.extend(decode_out)\n tf.logging.info(\n 'Total examples done: %d/%d '\n '(%f seconds decode postprocess)', num_examples_metric.total_value,\n samples_per_summary,\n time.time() - post_process_start)\n\n summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}\n elapsed_secs = time.time() - start_time\n example_rate = num_examples_metric.total_value / elapsed_secs\n summaries['examples/sec'] = metrics.CreateScalarSummary(\n 'examples/sec', example_rate)\n self._WriteSummaries(\n self._summary_writer,\n os.path.basename(self._decoder_dir),\n global_step,\n summaries,\n text_filename=os.path.join(self._decoder_dir,\n 'score-{:08d}.txt'.format(global_step)))\n self._ExportMetrics(\n decode_checkpoint=global_step,\n 
dec_metrics=dec_metrics,\n example_rate=example_rate)\n if buffered_decode_out:\n # global_step and the checkpoint id from the checkpoint file might be\n # different. For consistency of checkpoint filename and decoder_out\n # file, use the checkpoint id as derived from the checkpoint filename.\n checkpoint_id = _GetCheckpointIdForDecodeOut(checkpoint_path, global_step)\n decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)\n self._WriteKeyValuePairs(decode_out_path, buffered_decode_out)\n\n should_stop = global_step >= self.params.train.max_steps\n if self._should_report_metrics:\n trial_should_stop = self._trial.ReportEvalMeasure(\n global_step, dec_metrics, checkpoint_path)\n should_stop = should_stop or trial_should_stop\n return should_stop", "def get_checkpoint_filename(size_of_game):\n\n\tpath = \"neat-checkpoints\"\n\tfilenames = os.listdir(path)\n\n\tfilenames = [name.split(\"-\") for name in filenames]\n\n\tcheck_size = lambda x: x[2] == str(size_of_game) \n\tfilenames = list(filter(check_size, filenames))\n\n\n\tfilenames = [int(name[3]) for name in filenames]\n\n\tname = str(max(filenames))\n\tname = \"neat-checkpoint-\" + str(size_of_game) + \"-\" + name\n\n\treturn path + \"/\" + name", "def _validarCheckpoint(self, num_check):\n dirCheckpoint = os.path.join(os.getcwd(), 'projects/{}/training/'.format(self.nameProject))\n for root, dirs, files in os.walk(dirCheckpoint):\n for file_name in files:\n indexstr = file_name.find('model.ckpt-{}.meta'.format(num_check))\n if not (indexstr.__eq__(-1)): # si es diferente de -1\n print('Si existe {}'.format('model.ckpt-{}.meta'.format(num_check)))\n return 1 # regresamos 1 para informar que si exite\n else:\n b = 0\n return b", "def load_nearest_checkpoint(self, target_step):\n last, last_time = self.checkpoints[-1]\n if last < target_step:\n print('Cannot load checkpoint for target step',str(target_step),'- last checkpoint written was',str(last))\n return None, None, None\n\n ckp_i = -1\n min_sep = 1e32\n for i in range(0, len(self.checkpoints)):\n step_i, step_t = self.checkpoints[i]\n delta = target_step - step_i\n if delta > 0:\n min_sep = min(min_sep, delta)\n ckp_i = i\n elif delta == 0:\n ckp_i = i\n break\n\n if ckp_i == -1:\n print('Failed to locate suitable checkpoint file for target step',str(target_step))\n return None, None, None\n\n step_i, step_t = self.checkpoints[ckp_i]\n v = self.load_solution(step_i)\n \n return v, step_i, step_t", "def get_last_checkpoint():\n logdir = '.'\n logfiles = sorted([f for f in os.listdir(logdir) if f.startswith('checkpoint')])\n checkpoint_path = logfiles[-1]\n return checkpoint_path", "def load(saver,sess):\r\n print(\"\\nReading Checkpoints.....\\n\\n\")\r\n model_dir = \"%s_%s_%s\" % (\"espcn\", FLAGS.image_size,FLAGS.scale)# give the model name by label_size\r\n checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, model_dir)\r\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\r\n \r\n # Check the checkpoint is exist \r\n if ckpt and ckpt.model_checkpoint_path:\r\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\r\n saver.restore(sess, os.path.join(os.getcwd(), ckpt_path))\r\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\r\n else:\r\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def restore_checkpoint(self, notebook_id, checkpoint_id):\n doc = self.collection.find_one( { '_id' : notebook_id })\n if doc:\n if 'ipynb_chkpt' in doc:\n doc['ipynb'] = doc['ipynb_chkpt']\n doc['created'] = doc['chkpt_created']\n id = self.collection.save( doc, manipulate = True, safe=True)\n self.log.debug(\"copying ipynb_chkpt to ipynb for %s\", notebook_id)\n else:\n self.log.debug(\"checkpoint for %s does not exist\" % notebook_id)\n raise web.HTTPError(404,\n u'Notebook checkpoint does not exist: %s' % notebook_id)\n else:\n self.log( \"notebook %s does not exist\" % notebook_id)\n raise web.HTTPError(404,\n u'Notebook %s does not exist' % notebook_id)", "def DecodeLatestCheckpoint(self, last_path=None):\n with tf.container(self._container_id), self._GetSession() as sess:\n # This initializes local tables\n sess.run(self.initialize_tables)\n # This initializes local variables.\n sess.run(self._initialize_local_vars)\n path = tf.train.latest_checkpoint(self._train_dir)\n if not path:\n tf.logging.info('No checkpoint available.')\n return\n elif path == last_path:\n tf.logging.info('Latest checkpoint was already decoded.')\n return\n self.DecodeCheckpoint(sess, path)", "def get_index_of_highest_checkpoint():\n cp_files_highest = glob.glob(os.path.join(HIGHEST_DIR,\n CHECK_POINT_FILE.format('*')))\n if len(cp_files_highest) == 0:\n return 'latest'\n\n index = int(cp_files_highest[0].split('-')[-1].split('.')[0])\n\n # Check if checkpoint files exists in the CHECK_POINT_DIR directory\n check_points = [os.path.basename(f) for f in cp_files_highest]\n cp_files_cpdir = [os.path.join(CHECK_POINT_DIR, f) for f in check_points]\n exists = all([os.path.isfile(f) for f in cp_files_cpdir])\n\n # If it doesn't already exists, copy from HIGHEST_DIR\n if not exists:\n for f in cp_files_highest:\n copy(f, CHECK_POINT_DIR)\n\n return index", "def get_train_step_from_last_restored_checkpoint_path(self) -> Optional[int]:\n return self._train_step_from_last_restored_checkpoint_path", "def _consolidated_checkpoint_file_path(self):\n return os.path.join(\n self.__config.agent_data_path,\n CONSOLIDATED_CHECKPOINTS_FILE_NAME,\n )", "def save(self, checkpoint_dir):\n\n try:\n iteration = self.session.graph.get_tensor_by_name(\"iteration:0\")\n global_step = self.session.run(iteration)\n except KeyError:\n global_step = None\n path = self.saver.save(self.session, os.path.join(checkpoint_dir, \"model.ckpt\"), global_step=global_step)\n return path", "def get_checkpoint_file(self, model_name):\n assert isinstance(model_name, str)\n return os.path.join(\n f\"{self.data_save_dir}/saves/iter_{self.iteration}\",\n model_name\n )", "def get_latest_checkpoint(self, checkpoint_path=\"./model_weights/\"):\n\n checkpoint_filename = os.path.join(checkpoint_path, \"checkpoint\")\n\n if os.path.exists(checkpoint_filename):\n with open(checkpoint_filename) as ckpt_file:\n checkpoints = ckpt_file.read()\n checkpoints=checkpoints.split(\"\\n\")\n latest_checkpoint = checkpoints[0]\n latest_checkpoint = latest_checkpoint.split(\":\")[1]\n latest_checkpoint = latest_checkpoint.strip()\n latest_checkpoint = latest_checkpoint.replace('\"', '')\n if os.path.exists(os.path.join(checkpoint_path,latest_checkpoint+\".tar\")):\n return latest_checkpoint\n else:\n print(\"tar file for latest checkpoint {} not found in {}\".format(latest_checkpoint,checkpoint_path))\n return latest_checkpoint\n else:\n print(\"checkpoint file that contains name of latest checkpoint not found\")\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the path to decode out file.
def GetDecodeOutPath(cls, decoder_dir, checkpoint_id):
  out_dir = cls._GetTtlDir(decoder_dir, duration='7d')
  return os.path.join(out_dir, 'decoder_out_%09d' % checkpoint_id)
[ "def file_path(self) -> str:\n return self.files[self.__main['location']['file']]", "def GetCodegenFile(self):\n\t\tif os.path.isabs(self.FilePath):\n\t\t\tRelativePath = self.FilePath[3:]\n\t\t\treturn \"%s\\\\%s.codegen.inl\" % (ExportPath, RelativePath)\n\t\telse:\n\t\t\treturn \"%s\\\\%s.codegen.inl\" % (ExportPath, self.FilePath)", "def get_real_path(self):\n return os.path.join(self.root.path, self.path, self.filename)", "def path(self):\n\n return inspect.getfile(self)", "def path(self):\n path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',\n self.charset, self.encoding_errors)\n return path.lstrip('/')", "def fetch_output_path(node):\n return node[\"file\"].value()", "def GetOutputFilename(self, fname):\n return os.path.join(self.outdir, fname)", "def get_full_path(self) -> str:\r\n return self.location + \"\\\\\" + self.filename + \".\" + self.ext", "def _create_outfilepath(self, inpath):\n return inpath + '.crypt'", "def get_transcoder_path(name=NEW_TRANSCODER_NAME):\n return os.path.join(TRANSCODER_DIR, name)", "def get_output_dir(self) -> Path:\n return self.output_dir", "def path(self) -> str:\n return os.path.abspath(os.path.join(self.image_directory, self.filename))", "def _get_output_file(self, type_):\n name = self._opts[type_]\n if name == 'NONE' and type_ in self._optional_outputs:\n return name\n name = self._process_output_name(name, type_)\n path = utils.normpath(os.path.join(self['OutputDir'], name), False)\n self._create_output_dir(os.path.dirname(path), type_)\n return path", "def transform_path():\n return str(pathlib.Path(__file__).parent.absolute())", "def get_relative_file_path(self):\n return os.path.join(self.output_dir, self.file_name)", "def getDumpPath(self,ooid):\n path = os.path.join(self.namePath(ooid)[0],ooid+self.fileSuffix)\n self.readableOrThrow(path)\n return path", "def get_full_filepath(test_filename):\n file_path = os.path.dirname(os.path.abspath(__file__))\n return_filepath = os.path.abspath(file_path + \"/responses/\" + test_filename)\n return return_filepath", "def get_document_path(self) -> str:\n return self._parser.file_path", "def output_file(self):\n\n return self._outfile", "def get_output_file_path(self):\n zip_filename = \"%s.%s_%s.wotmod\" % (\n self.author_id, self.mod_id, self.mod_version)\n return os.path.abspath(os.path.join(self.dist_dir, zip_filename))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decodes `samples_per_summary` examples using `checkpoint_path`.
def DecodeCheckpoint(self, sess, checkpoint_path):
  p = self._model_task.params
  samples_per_summary = p.eval.decoder_samples_per_summary
  if not samples_per_summary:
    samples_per_summary = p.eval.samples_per_summary
  self._LoadCheckpointForEval(sess, checkpoint_path)

  global_step = sess.run(py_utils.GetGlobalStep())
  dec_metrics = self._model_task.CreateDecoderMetrics()
  buffered_decode_out = []
  num_examples_metric = dec_metrics['num_samples_in_batch']
  start_time = time.time()
  while num_examples_metric.total_value < samples_per_summary:
    tf.logging.info('Fetching dec_output.')
    fetch_start = time.time()
    run_options = config_pb2.RunOptions(
        report_tensor_allocations_upon_oom=False)
    if self._summary_op is None:
      # No summaries were collected.
      dec_out = sess.run(self._dec_output, options=run_options)
    else:
      dec_out, summary = sess.run([self._dec_output, self._summary_op],
                                  options=run_options)
      self._summary_writer.add_summary(summary, global_step)
    post_process_start = time.time()
    tf.logging.info(
        'Done fetching (%f seconds)' % (post_process_start - fetch_start))
    decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)
    if decode_out:
      buffered_decode_out.extend(decode_out)
    tf.logging.info(
        'Total examples done: %d/%d '
        '(%f seconds decode postprocess)', num_examples_metric.total_value,
        samples_per_summary,
        time.time() - post_process_start)

  summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
  elapsed_secs = time.time() - start_time
  example_rate = num_examples_metric.total_value / elapsed_secs
  summaries['examples/sec'] = metrics.CreateScalarSummary(
      'examples/sec', example_rate)
  self._WriteSummaries(
      self._summary_writer,
      os.path.basename(self._decoder_dir),
      global_step,
      summaries,
      text_filename=os.path.join(self._decoder_dir,
                                 'score-{:08d}.txt'.format(global_step)))
  self._ExportMetrics(
      decode_checkpoint=global_step,
      dec_metrics=dec_metrics,
      example_rate=example_rate)
  if buffered_decode_out:
    # global_step and the checkpoint id from the checkpoint file might be
    # different. For consistency of checkpoint filename and decoder_out
    # file, use the checkpoint id as derived from the checkpoint filename.
    checkpoint_id = _GetCheckpointIdForDecodeOut(checkpoint_path, global_step)
    decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)
    self._WriteKeyValuePairs(decode_out_path, buffered_decode_out)

  should_stop = global_step >= self.params.train.max_steps
  if self._should_report_metrics:
    trial_should_stop = self._trial.ReportEvalMeasure(
        global_step, dec_metrics, checkpoint_path)
    should_stop = should_stop or trial_should_stop
  return should_stop
[ "def load_example_data():\n from pkg_resources import resource_stream\n data = np.load(resource_stream(__name__, 'example_data/CCF1.npy'))\n return data", "def read_make_examples_run_info(path):\n with tf.gfile.GFile(path) as f:\n return text_format.Parse(f.read(), deepvariant_pb2.MakeExamplesRunInfo())", "def load_summary(self):\n summary_path = os.path.join(self.base_dir, 'result_summary.npz')\n if os.path.exists(summary_path):\n summary_results = self.load_phase_result(summary_path)\n self.R.update(summary_results)", "def begin_read_samples(self):", "def test_audio_dataset_is_batch_preprocessed(batch_file_preprocessor):\n batch_file_preprocessor.preprocess()\n assert os.path.isfile(os.path.join(\"savedummy\", \"dummy.npy\"))\n assert os.path.isfile(os.path.join(\"savedummy\", \"dir1\", \"dir2\",\n \"dummy.npy\"))\n shutil.rmtree(\"savedummy\")", "def test_simple_CheckpointDecoder(multisource_with_factors):\n model, multisource, target = multisource_with_factors\n cd = sockeye.checkpoint_decoder.CheckpointDecoder(context=mx.context,\n multisource=multisource,\n references=target,\n model='model')\n # 10 sentence tuples each\n assert len(cd.target_sentences) == 10\n assert len(cd.inputs_sentences) == 10\n # Two sources\n assert len(cd.inputs_sentences[0]) == 2\n # Three factors each (per source)\n assert len(cd.inputs_sentences[0][0]) == 3\n assert len(cd.inputs_sentences[0][1]) == 3\n # First sentence, first source's factors\n assert cd.inputs_sentences[0][0][0].strip() == 'source_0_0-0'\n assert cd.inputs_sentences[0][0][1].strip() == 'source_0_1-0'\n assert cd.inputs_sentences[0][0][2].strip() == 'source_0_2-0'\n # First sentence, second source's factors\n assert cd.inputs_sentences[0][1][0].strip() == 'source_1_0-0'\n assert cd.inputs_sentences[0][1][1].strip() == 'source_1_1-0'\n assert cd.inputs_sentences[0][1][2].strip() == 'source_1_2-0'", "def load(saver,sess):\r\n print(\"\\nReading Checkpoints.....\\n\\n\")\r\n model_dir = \"%s_%s_%s\" % (\"espcn\", FLAGS.image_size,FLAGS.scale)# give the model name by label_size\r\n checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, model_dir)\r\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\r\n \r\n # Check the checkpoint is exist \r\n if ckpt and ckpt.model_checkpoint_path:\r\n ckpt_path = str(ckpt.model_checkpoint_path) # convert the unicode to string\r\n saver.restore(sess, os.path.join(os.getcwd(), ckpt_path))\r\n print(\"\\n Checkpoint Loading Success! %s\\n\\n\"% ckpt_path)\r\n else:\r\n print(\"\\n! 
Checkpoint Loading Failed \\n\\n\")", "def load_summarizer(seq2seq_model_path, text_processor_path):\n #the code from the GitHub team has a LOT of soon to be depricated functions\n #suppress the depricated warnings\n tf.logging.set_verbosity('ERROR')\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"]=\"3\"\n warnings.filterwarnings(\"ignore\")\n\n logging.warning('Loading pre-trained model...')\n # Load model\n seq2seq_Model = load_model(seq2seq_model_path + '/py_func_sum_v9_.epoch16-val2.55276.hdf5')\n\n logging.warning('Loading text processor (encoder)...')\n # Load encoder (code) pre-processor\n num_encoder_tokens, enc_pp = load_text_processor(text_processor_path + '/py_code_proc_v2.dpkl')\n\n logging.warning('Loading text processor (decoder)...')\n # Load decoder (docstrings/comments) pre-processor\n num_decoder_tokens, dec_pp = load_text_processor(text_processor_path + '/py_comment_proc_v2.dpkl')\n\n graph = tf.get_default_graph()\n\n seq2seq_inf = Seq2Seq_Inference(encoder_preprocessor=enc_pp,\n decoder_preprocessor=dec_pp,\n seq2seq_model=seq2seq_Model)\n\n return seq2seq_inf, graph", "def read_in_sequencing_summary(summary_path):\n data = pd.read_csv(summary_path, sep=\"\\t\")\n return data", "def show_samples():\n files = os.listdir(FLAGS.directory)\n for file in files:\n image, label = read_and_decode(tf.train.string_input_producer([os.path.join(FLAGS.directory, file)]),\n (256, 256, 3))\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n tf.train.start_queue_runners(sess=sess)\n\n label_val_1, image_val_1 = sess.run([label, image])\n\n cv2.imshow('s', (image_val_1 + 0.5))\n print(label_val_1)\n cv2.waitKey(1000)", "def load_checkpoints(self, label: str):\n all_checkpoints = os.listdir('checkpoints')\n checkpoint = [x for x in all_checkpoints if label in x]\n if not checkpoint:\n self.logger.error(f\"No checkpoint files exist for given label {label}\")\n quit()\n checkpoint = sorted(checkpoint, key=lambda x: int(x.rstrip('.tar').split('_')[-1]))\n epoch = [int(x.rstrip('.tar').split('_')[-1]) for x in checkpoint]\n checkpoint = [torch.load(f\"checkpoints/{x}\") for x in checkpoint]\n return checkpoint, epoch", "def parser(self, serialized_example):\n features = dict()\n features['sequence_length'] = tf.FixedLenFeature((), tf.int64)\n for example_name, (name, shape) in self.state_like_names_and_shapes.items():\n if example_name == 'images':\n features[name] = tf.VarLenFeature(tf.string)\n else:\n features[name] = tf.VarLenFeature(tf.float32)\n for example_name, (name, shape) in self.action_like_names_and_shapes.items():\n features[name] = tf.VarLenFeature(tf.float32)\n\n features = tf.parse_single_example(serialized_example, features=features)\n\n example_sequence_length = features['sequence_length']\n state_like_seqs = OrderedDict()\n action_like_seqs = OrderedDict()\n for example_name, (name, shape) in self.state_like_names_and_shapes.items():\n if example_name == 'images':\n seq = tf.sparse_tensor_to_dense(features[name], '')\n else:\n seq = tf.sparse_tensor_to_dense(features[name])\n seq = tf.reshape(seq, [example_sequence_length] + list(shape))\n state_like_seqs[example_name] = seq\n for example_name, (name, shape) in self.action_like_names_and_shapes.items():\n seq = tf.sparse_tensor_to_dense(features[name])\n seq = tf.reshape(seq, [example_sequence_length - 1] + list(shape))\n action_like_seqs[example_name] = seq\n\n state_like_seqs, action_like_seqs = \\\n self.slice_sequences(state_like_seqs, action_like_seqs, example_sequence_length)\n\n # decode and 
preprocess images on the sampled slice only\n _, image_shape = self.state_like_names_and_shapes['images']\n state_like_seqs['images'] = self.decode_and_preprocess_images(state_like_seqs['images'], image_shape)\n return state_like_seqs, action_like_seqs", "def loader(training_path, segmented_path, batch_size):", "def _run_and_report_benchmark(self,\n training_summary_path,\n min_accuracy=0.95,\n max_accuracy=0.97):\n\n start_time_sec = time.time()\n self._run_xlnet_classifier()\n wall_time_sec = time.time() - start_time_sec\n\n with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:\n summary = json.loads(reader.read().decode('utf-8'))\n\n super(XLNetClassifyAccuracy, self)._report_benchmark(\n stats=summary,\n wall_time_sec=wall_time_sec,\n min_accuracy=min_accuracy,\n max_accuracy=max_accuracy)", "def _read_train(self):\n outfilename = str(self.pb.wd + \n \"out_\"+str(self.pb.conf_num)+\"_0.npz\")\n outfile = np.load(outfilename)\n self.train_predictions = outfile['train_predictions']\n self.hypotheses = outfile['hypotheses']", "def _extract_summary(self,\n accumulator,\n summary_name,\n restart_determiner=None):\n if not isinstance(summary_name, (list, tuple)):\n summary_name = [summary_name]\n\n # Check if we are loading from Tensors or Scalars\n tensor_keys = accumulator.tensors.Keys()\n load_tensors = bool(set(tensor_keys).intersection(summary_name))\n\n # Load summaries -- try each key in the list until the first one that works.\n summaries = None\n for name in summary_name:\n try:\n if load_tensors:\n summaries = accumulator.Tensors(name)\n else:\n summaries = accumulator.Scalars(name)\n except KeyError:\n continue\n else:\n break\n assert summaries # Assert that summaries were actually loaded.\n\n # Load steps and values.\n steps = [summary.step for summary in summaries]\n values = [\n self._get_summary_value(summary, load_tensors) for summary in summaries\n ]\n\n # Discard the \"tails\" from restarts.\n if self.discard_tails:\n steps, values = self._discard_tails_from_restarts(steps, values,\n restart_determiner)\n\n return values, steps", "def read_and_convert(self):\n if self._example_pointer == self._num_examples:\n return None\n image = self._images[self._example_pointer].tostring()\n label = int(self._labels[self._example_pointer])\n self._example_pointer += 1\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image': ExampleReader._bytes_feature(image),\n 'label': ExampleReader._int64_feature(label)\n }))\n return example", "def _run_and_report_benchmark(self,\n training_summary_path,\n min_accuracy=87.0,\n max_accuracy=89.0):\n\n start_time_sec = time.time()\n self._run_xlnet_squad()\n wall_time_sec = time.time() - start_time_sec\n\n with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:\n summary = json.loads(reader.read().decode('utf-8'))\n\n super(XLNetSquadAccuracy, self)._report_benchmark(\n stats=summary,\n wall_time_sec=wall_time_sec,\n min_accuracy=min_accuracy,\n max_accuracy=max_accuracy)", "def prepare_data(self):\n self.tokenizer = custom_tokenizer_from_pretrained(\n self.tokenizer_name_or_path, self.cache_dir\n )\n try:\n self.train_examples = ExamplesBuilder(\n self.data_dir,\n Split.train,\n delimiter=self.delimiter,\n ).examples\n self.val_examples = ExamplesBuilder(\n self.data_dir,\n Split.dev,\n delimiter=self.delimiter,\n ).examples\n self.test_examples = ExamplesBuilder(\n self.data_dir,\n Split.test,\n delimiter=self.delimiter,\n ).examples\n\n if self.num_samples > 0:\n self.train_examples = self.train_examples[: 
self.num_samples]\n self.val_examples = self.val_examples[: self.num_samples]\n self.test_examples = self.test_examples[: self.num_samples]\n\n # create label vocabulary from dataset\n all_examples = self.train_examples + self.val_examples + self.test_examples\n all_labels = sorted(\n {\n tag.label\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n self.label_list = [BIO.O.value] + sorted(all_labels)\n label_types = sorted(\n {\n tag.tagtype.value\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n with open(self.labels_path, \"w\") as fp:\n for l in label_types:\n fp.write(l)\n fp.write(\"\\n\")\n\n self.label_to_id = {l: i for i, l in enumerate(self.label_list)}\n self.id_to_label = self.label_list\n\n start = time.time()\n self.train_dataset = self.create_dataset(\n self.train_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(train): {read_time}\")\n\n start = time.time()\n self.val_dataset = self.create_dataset(\n self.val_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(val): {read_time}\")\n\n start = time.time()\n self.test_dataset = self.create_dataset(\n self.test_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(test): {read_time}\")\n\n self.dataset_size = len(self.train_dataset)\n\n logger.info(self.val_examples[:3])\n logger.info(self.val_dataset[:3])\n\n except NoLocalFileError as e:\n logger.error(e)\n exit(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs decoder on the latest checkpoint.
def DecodeLatestCheckpoint(self, last_path=None):
  with tf.container(self._container_id), self._GetSession() as sess:
    # This initializes local tables
    sess.run(self.initialize_tables)
    # This initializes local variables.
    sess.run(self._initialize_local_vars)
    path = tf.train.latest_checkpoint(self._train_dir)
    if not path:
      tf.logging.info('No checkpoint available.')
      return
    elif path == last_path:
      tf.logging.info('Latest checkpoint was already decoded.')
      return
    self.DecodeCheckpoint(sess, path)
[ "def DecodeCheckpoint(self, sess, checkpoint_path):\n p = self._model_task.params\n samples_per_summary = p.eval.decoder_samples_per_summary\n if not samples_per_summary:\n samples_per_summary = p.eval.samples_per_summary\n self._LoadCheckpointForEval(sess, checkpoint_path)\n\n global_step = sess.run(py_utils.GetGlobalStep())\n dec_metrics = self._model_task.CreateDecoderMetrics()\n buffered_decode_out = []\n num_examples_metric = dec_metrics['num_samples_in_batch']\n start_time = time.time()\n while num_examples_metric.total_value < samples_per_summary:\n tf.logging.info('Fetching dec_output.')\n fetch_start = time.time()\n run_options = config_pb2.RunOptions(\n report_tensor_allocations_upon_oom=False)\n if self._summary_op is None:\n # No summaries were collected.\n dec_out = sess.run(self._dec_output, options=run_options)\n else:\n dec_out, summary = sess.run([self._dec_output, self._summary_op],\n options=run_options)\n self._summary_writer.add_summary(summary, global_step)\n post_process_start = time.time()\n tf.logging.info(\n 'Done fetching (%f seconds)' % (post_process_start - fetch_start))\n decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)\n if decode_out:\n buffered_decode_out.extend(decode_out)\n tf.logging.info(\n 'Total examples done: %d/%d '\n '(%f seconds decode postprocess)', num_examples_metric.total_value,\n samples_per_summary,\n time.time() - post_process_start)\n\n summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}\n elapsed_secs = time.time() - start_time\n example_rate = num_examples_metric.total_value / elapsed_secs\n summaries['examples/sec'] = metrics.CreateScalarSummary(\n 'examples/sec', example_rate)\n self._WriteSummaries(\n self._summary_writer,\n os.path.basename(self._decoder_dir),\n global_step,\n summaries,\n text_filename=os.path.join(self._decoder_dir,\n 'score-{:08d}.txt'.format(global_step)))\n self._ExportMetrics(\n decode_checkpoint=global_step,\n dec_metrics=dec_metrics,\n example_rate=example_rate)\n if buffered_decode_out:\n # global_step and the checkpoint id from the checkpoint file might be\n # different. 
For consistency of checkpoint filename and decoder_out\n # file, use the checkpoint id as derived from the checkpoint filename.\n checkpoint_id = _GetCheckpointIdForDecodeOut(checkpoint_path, global_step)\n decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)\n self._WriteKeyValuePairs(decode_out_path, buffered_decode_out)\n\n should_stop = global_step >= self.params.train.max_steps\n if self._should_report_metrics:\n trial_should_stop = self._trial.ReportEvalMeasure(\n global_step, dec_metrics, checkpoint_path)\n should_stop = should_stop or trial_should_stop\n return should_stop", "def run_decoding(use_pred, **ctx):\n if use_pred:\n log.info(\"Running decoding analysis on pred data\")\n else:\n log.info(\"Running decoding analysis on raw data\")\n success = decoding.do_decoding_analysis(lv_model=use_pred, **ctx)\n # decoding results get saved in their own file, so don't really need to return anything\n if success == 0:\n return ctx\n else:\n raise ValueError(\"Decoding analysis failed\")", "def decode_and_evaluate(self, checkpoint: int) -> Dict[str, float]:\n translator = sockeye.inference.Translator(self.context, 'linear', None,\n *sockeye.inference.load_models(self.context,\n self.max_input_len,\n self.beam_size,\n [self.model],\n [checkpoint]))\n\n output_name = os.path.join(self.model, C.DECODE_OUT_NAME % checkpoint)\n with smart_open(output_name, 'w') as output:\n handler = sockeye.output_handler.StringOutputHandler(output)\n translations = []\n for sent_id, input_sentence in enumerate(self.input_sentences):\n trans_input = translator.make_input(sent_id, input_sentence)\n trans_output = translator.translate(trans_input)\n handler.handle(trans_input, trans_output)\n translations.append(trans_output.translation)\n logger.info(\"Checkpoint [%d] %d translations saved to '%s'\", checkpoint, len(translations), output_name)\n # TODO(fhieber): eventually add more metrics (METEOR etc.)\n return {\"bleu-val\": sockeye.bleu.corpus_bleu(translations, self.target_sentences)}", "def decode_func():\n dec_outputs, _, dec_lengths = contrib_seq2seq.dynamic_decode(\n decoder=self.decoder(\n embeddings=embeddings,\n inputs=inputs,\n inputs_length=inputs_length,\n hiddens=hiddens,\n hiddens_length=hiddens_length,\n enc_state=enc_state,\n mode=mode,\n hparams=self._hparams,\n decoder_hparams=decoder_hparams,\n reuse=tf.AUTO_REUSE),\n impute_finished=impute_finished,\n maximum_iterations=decoder_iterations)\n return {\n \"rnn_output\": dec_outputs.rnn_output,\n \"sample_id\": dec_outputs.sample_id,\n \"length\": dec_lengths}", "def _decode(lconf, dconf, econf, fold):\n if fp.exists(_counts_file_path(lconf, econf, fold)):\n print(\"skipping %s/%s (already done)\" % (econf.learner.name,\n econf.decoder.name),\n file=sys.stderr)\n return\n\n fold_dir = _fold_dir_path(lconf, fold)\n if not os.path.exists(fold_dir):\n os.makedirs(fold_dir)\n args = FakeDecodeArgs(lconf, econf, fold)\n phrasebook = args_to_phrasebook(args)\n decoder = args_to_decoder(args)\n\n fold_attach, fold_relate =\\\n att.decode.select_fold(dconf.attach, dconf.relate,\n args, phrasebook)\n attach = DataAndModel(fold_attach,\n load_model(args.attachment_model))\n relate = DataAndModel(fold_relate,\n load_model(args.relation_model))\n threshold = args_to_threshold(attach.model, decoder,\n requested=args.threshold)\n config = DecoderConfig(phrasebook=phrasebook,\n threshold=threshold,\n post_labelling=False,\n use_prob=args.use_prob)\n\n att.decode.main_for_harness(args, config, decoder, attach, relate)\n 
args.cleanup()", "def _decode_step(self, states: List[ModelState]) -> Tuple[mx.nd.NDArray, mx.nd.NDArray, List[ModelState]]:\n model_probs, model_attention_scores = [], []\n for m, s in zip(self.models, states):\n probs, attention_scores, s.source_dynamic, s.decoder_hidden, s.decoder_states = m.run_decoder(\n s.source_encoded,\n s.source_dynamic,\n s.source_length,\n s.prev_target_word_id,\n s.decoder_hidden,\n s.decoder_states,\n s.bucket_key)\n model_probs.append(probs)\n model_attention_scores.append(attention_scores)\n probs, attention_scores = self._combine_predictions(model_probs, model_attention_scores)\n return probs, attention_scores, states", "def Decode(self, input_batch):\n p = self.params\n\n predictions = self.ComputePredictions(self.theta, input_batch)\n bboxes_and_logits = self._BBoxesAndLogits(input_batch, predictions)\n predicted_bboxes = bboxes_and_logits.predicted_bboxes\n batch_size, num_bboxes, _ = py_utils.GetShape(predicted_bboxes, 3)\n classification_scores = bboxes_and_logits.classification_scores\n classification_scores = py_utils.HasShape(\n classification_scores, [batch_size, num_bboxes, p.num_classes])\n\n _, per_example_dict = self.ComputeLoss(self.theta, predictions, input_batch)\n if 'score_scaler' in per_example_dict:\n classification_scores *= per_example_dict['score_scaler']\n\n with tf.device('/cpu:0'):\n # Decode the predicted bboxes, performing NMS.\n if p.nms_decoder_type == NMSDecoderType.NMS_DECODER:\n decode_fn = functools.partial(\n detection_decoder.DecodeWithNMS,\n nms_iou_threshold=p.nms_iou_threshold,\n max_boxes_per_class=p.max_nms_boxes,\n use_oriented_per_class_nms=p.use_oriented_per_class_nms)\n elif p.nms_decoder_type == NMSDecoderType.HEATMAP_NMS_DECODER:\n points = py_utils.HasShape(input_batch.anchor_centers,\n [-1, -1, -1, -1, 3])\n bs, nx, ny = py_utils.GetShape(points, 3)\n decode_fn = functools.partial(\n detection_decoder.DecodeWithMaxPoolNMS,\n heatmap_shape=[bs, nx, ny],\n kernel_size=p.heatmap_nms_kernel_size,\n max_boxes_per_class=p.max_nms_boxes,\n use_oriented_per_class_nms=p.use_oriented_per_class_nms)\n elif p.nms_decoder_type == NMSDecoderType.NO_NMS_DECODER:\n decode_fn = self._NoPostProcessDecoder\n per_cls_idxs, per_cls_bboxes, per_cls_bbox_scores, per_cls_valid_mask = (\n decode_fn(\n predicted_bboxes,\n classification_scores,\n score_threshold=p.nms_score_threshold))\n\n # per_cls_valid_mask is [batch, num_classes, num_boxes] Tensor that\n # indicates which boxes were selected by NMS. 
Each example will have a\n # different number of chosen bboxes, so the mask is present to allow us\n # to keep the boxes as a batched dense Tensor.\n #\n # We mask the scores by the per_cls_valid_mask so that none of these boxes\n # will be interpreted as valid.\n per_cls_bbox_scores *= per_cls_valid_mask\n visualization_weights = py_utils.HasShape(\n per_cls_bbox_scores, [batch_size, p.num_classes, p.max_nms_boxes])\n\n # For top down visualization, filter boxes whose scores are not above the\n # visualization threshold.\n visualization_weights = tf.where(\n tf.greater_equal(visualization_weights,\n p.visualization_classification_threshold),\n visualization_weights, tf.zeros_like(visualization_weights))\n\n model_outputs = py_utils.NestedMap()\n model_outputs.per_class_predicted_bboxes = per_cls_bboxes\n model_outputs.per_class_predicted_bbox_scores = per_cls_bbox_scores\n model_outputs.per_class_valid_mask = per_cls_valid_mask\n\n decoder_outputs = py_utils.NestedMap({\n 'per_class_predicted_bboxes': per_cls_bboxes,\n 'per_class_predicted_bbox_scores': per_cls_bbox_scores,\n 'per_class_valid_mask': per_cls_valid_mask,\n 'visualization_weights': visualization_weights,\n })\n\n if p.decode_include_residuals:\n # Including the residuals in the decoder output makes it possible to save\n # the outputs for further analysis. Note that we ensure that the outputs\n # match the per-class NMS output format of [batch, num_classes, ...].\n def _ReshapeGather(tensor):\n \"\"\"Reshapes tensor and then gathers using the nms indices.\"\"\"\n tensor = tf.gather(\n tf.reshape(tensor, [batch_size, num_bboxes, -1]),\n per_cls_idxs,\n batch_dims=1)\n if not p.use_oriented_per_class_nms:\n # Tile so that the data fits the expected per class shape of\n # [batch_size, num_classes, ...]. 
When *not* using oriented NMS, the\n # num_classes dimension will be missing since the indices will not\n # have it.\n tensor = tf.tile(tensor[:, tf.newaxis, :, :],\n [1, p.num_classes, 1, 1])\n return tensor\n\n decoder_outputs.update({\n 'per_class_gt_residuals':\n _ReshapeGather(per_example_dict['target_predictions']),\n 'per_class_gt_labels':\n _ReshapeGather(per_example_dict['assigned_gt_labels']),\n 'per_class_gt_bboxes':\n _ReshapeGather(per_example_dict['assigned_gt_bboxes']),\n 'per_class_residuals':\n _ReshapeGather(per_example_dict['residuals']),\n 'per_class_logits':\n _ReshapeGather(per_example_dict['classification_logits']),\n 'per_class_points':\n _ReshapeGather(per_example_dict['points']),\n })\n\n decoder_outputs.update(\n self.output_decoder.ProcessOutputs(input_batch, model_outputs))\n\n # Produce global step as an output (which is the step\n # of the checkpoint being decoded.)\n decoder_outputs.global_step = py_utils.GetGlobalStep()\n\n return decoder_outputs", "def decode_batch(params, batch, masks, key):\n _, predictions = apply_model(batch, *masks, params, key, teacher_force=False)\n\n questions = decode_onehot(batch['query'])\n infers = decode_onehot(predictions)\n goldens = decode_onehot(batch['answer'])\n for question, inferred, golden in zip(questions, infers, goldens):\n log_decode(question, inferred, golden[1:]) # Remove '=' prefix.", "def _create_decoder(self):\n params = self.params['decoder_params']\n return self.params['decoder'](params=params, mode=self.mode, model=self)", "def infer(self):\r\n counter = 0\r\n output = {}\r\n while True:\r\n batch = self._batcher.next_batch() # 1 example repeated across batch\r\n if batch is None: # finished decoding dataset in single_pass mode\r\n print(\"Decoder has finished reading dataset for single_pass.\")\r\n # log original information\r\n with open(os.path.join(self._decode_dir, \"output.json\"), 'w', encoding='utf-8') as w:\r\n json.dump(output, w)\r\n print(\"Output has been saved in %s.\" % self._decode_dir)\r\n\r\n #start evaluation\r\n evaluate.main(self.ckpt_path, FLAGS.log_root, self._decode_dir, FLAGS.mode, FLAGS.multi_label_eval)\r\n return\r\n\r\n background_span = data.show_background_span(batch.original_backgrounds_token[0], batch.original_b_starts[0], batch.original_b_ends[0])\r\n response_span = data.show_background_span(batch.original_responses_token[0], batch.original_r_starts[0], batch.original_r_ends[0])\r\n # Run greed search to get best Hypothesis\r\n best_hyp = greed_search.run_greed_search(self._sess, self._model, self._vocab, batch)\r\n best_hyp.tokens = [token for token in best_hyp.tokens if token not in [None]]\r\n # Extract the output ids from the hypothesis and convert back to words\r\n output_ids = best_hyp.tokens[1:]\r\n decoded_token, highlights_decoded_token, spans = data.outputids2words(output_ids, self._vocab, batch.bac_oovs[0], batch.original_backgrounds_token[0])\r\n\r\n if output_ids[-1] == 3:\r\n output_ids_semantic = output_ids[:(len(output_ids)-1)]\r\n else:\r\n output_ids_semantic = output_ids\r\n\r\n ids_for_print = [str(i)for i in output_ids_semantic]\r\n ids_for_print = ' '.join(ids_for_print)\r\n\r\n switch_ref_probs = best_hyp.switch_ref_probs\r\n switch_ref_probs = [str(i) for i in switch_ref_probs]\r\n switch_ref_probs = ' '.join(switch_ref_probs)\r\n\r\n switch_gen_probs = best_hyp.switch_gen_probs\r\n switch_gen_probs = [str(i) for i in switch_gen_probs]\r\n switch_gen_probs = ' '.join(switch_gen_probs)\r\n\r\n switch_gen_pred_probs = 
best_hyp.switch_gen_pred_probs\r\n switch_gen_pred_probs = [str(i) for i in switch_gen_pred_probs]\r\n switch_gen_pred_probs = ' '.join(switch_gen_pred_probs)\r\n\r\n switch_gen_copy_probs = best_hyp.switch_gen_copy_probs\r\n switch_gen_copy_probs = [str(i) for i in switch_gen_copy_probs]\r\n switch_gen_copy_probs = ' '.join(switch_gen_copy_probs)\r\n\r\n # Remove the [STOP] token from decoded_words, if necessary\r\n try:\r\n fst_stop_idx = decoded_token.index(data.STOP_DECODING) # index of the (first) [STOP] symbol\r\n fst_stop_idx1 = highlights_decoded_token.index(data.STOP_DECODING)\r\n decoded_token = decoded_token[:fst_stop_idx]\r\n highlights_decoded_token = highlights_decoded_token[:fst_stop_idx1]\r\n\r\n if len(decoded_token) == 0:\r\n decoded_token.append(\".\")\r\n\r\n except ValueError:\r\n decoded_token = decoded_token\r\n highlights_decoded_token = highlights_decoded_token\r\n\r\n spans_output = ' '.join(spans)\r\n decoded_output = ' '.join(decoded_token)\r\n highlights_decoded_output = ' '.join(highlights_decoded_token)\r\n\r\n output[batch.original_example_ids[0]] = {\"background\": background_span, \"context\": batch.original_contexts[0], \"highlights_ref_response\": response_span,\r\n \"highlights_inferred_response\": highlights_decoded_output, \"ref_response\": batch.original_responses[0],\r\n \"inferred_response\": decoded_output, \"ref_span\": batch.original_spans[0],\"inferred_spans\": spans_output, \"output_index\": output_ids_semantic,\r\n \"switch_ref_probs\": switch_ref_probs, \"switch_gen_probs\": switch_gen_probs,\r\n \"switch_gen_pred_probs\": switch_gen_pred_probs,\"switch_gen_copy_probs\": switch_gen_copy_probs}\r\n\r\n self.write_for_observation(batch.original_example_ids[0], background_span, batch.original_contexts[0], response_span, highlights_decoded_output, ids_for_print, switch_ref_probs, switch_gen_probs, switch_gen_pred_probs, switch_gen_copy_probs, counter)\r\n counter += 1 # this is how many examples we've decoded\r", "def build_decoder(self):\n\n dec_input = self.build_decoder_input()\n dec_dense = self.build_decoder_dense(dec_input)\n dec_reshape = self.build_decoder_reshape(dec_dense)\n dec_conv = self.build_decoder_convs(dec_reshape)\n dec_output = self.build_decoder_output(dec_conv)\n\n self.decoder = Model(dec_input, dec_output,\n name='Decoder')", "def unpack_checkpoint(self, checkpoint, model, criterion, optimizer, scheduler) -> None:\n pass", "def decoder(self, decoder):\n\n self._decoder = decoder", "def new_decoded_pad(self, decoder, pad, is_last):\n\t\tself.probe_id = pad.add_buffer_probe(self._buffer_probe)\n\t\tself.probed_pad = pad\n\t\tself.processing = True\n\t\tself.query_duration()", "def single_worker_inference(infer_model,\n ckpt,\n inference_input_file,\n inference_output_file,\n hparams):\n output_infer = inference_output_file\n\n # Read data\n infer_data = load_data(inference_input_file, hparams)\n\n with tf.Session(config=utils.get_config_proto(), graph=infer_model.graph) as sess:\n loaded_infer_model = model_helper.load_model(infer_model.model, ckpt, sess, \"infer\")\n sess.run(infer_model.iterator.initializer,\n feed_dict={\n infer_model.src_placeholder: infer_data,\n infer_model.batch_size_placeholder: hparams.infer_batch_size\n })\n # Decode\n utils.print_out(\"# Start decoding\")\n _decode_and_evaluate(\"infer\",\n loaded_infer_model,\n sess,\n output_infer,\n ref_file=None,\n subword_option=None,\n beam_width=hparams.beam_width,\n tgt_eos=hparams.eos,\n num_translations_per_input=hparams.num_translations_per_input)", 
"def _create_decode_layer(self):\n with tf.name_scope(\"decoder\"):\n\n activation = tf.add(\n tf.matmul(self.encode, tf.transpose(self.W_)),\n self.bv_\n )\n\n if self.dec_act_func:\n self.reconstruction = self.dec_act_func(activation)\n else:\n self.reconstruction = activation", "def _build_decoder(self, encoder_outputs, hparams):\n\n ## Decoder.\n with tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE) as decoder_scope:\n # Optional ops depends on which mode we are in and which loss function we\n # are using.\n logits = tf.no_op()\n decoder_cell_outputs = None\n if self.mode == contrib_learn.ModeKeys.TRAIN:\n beam_width = 1\n else:\n beam_width = hparams.beam_width\n theta, input_kernels, state0 = build_atten_rnn(\n encoder_outputs, self.features[\"source_sequence_length\"],\n hparams.num_units, beam_width, \"multi_rnn_cell\")\n\n ## Train or eval\n if self.mode != contrib_learn.ModeKeys.INFER:\n # decoder_emp_inp: [max_time, batch_size, num_units]\n target_input = self.features[\"target_input\"]\n target_output = self.features[\"target_output\"]\n target_input = tf.transpose(target_input)\n target_output = tf.transpose(target_output)\n if self.length > 0:\n target_input = tf.slice(target_input, [0, 0], [self.length, -1])\n target_output = tf.slice(target_output, [0, 0], [self.length, -1])\n\n decoder_emb_inp = self._emb_lookup(\n self.embedding_decoder, target_input, is_decoder=True)\n\n seq_len = self.features[\"target_sequence_length\"]\n padding = tf.transpose(\n tf.sequence_mask(seq_len, target_input.shape[0],\n decoder_emb_inp.dtype))\n max_seq_len = tf.reduce_max(seq_len)\n o = decoder_emb_inp\n if self.mode == contrib_learn.ModeKeys.TRAIN:\n o = o * dropout(o.shape, o.dtype, 1.0 - hparams.dropout)\n inp = {\"rnn\": tf.einsum(\"tbf,fd->tbd\", o, input_kernels[0])}\n new_states = build_rnn(theta[0], state0[0], inp, attention_cell,\n attention_cell_grad, max_seq_len)\n attention_state = new_states[\"attention\"]\n o = new_states[\"h\"]\n for i in range(1, 4):\n c = tf.concat([o, attention_state], -1)\n if self.mode == contrib_learn.ModeKeys.TRAIN:\n c = c * dropout(c.shape, c.dtype, 1.0 - hparams.dropout)\n inp = {\"rnn\": tf.einsum(\"tbf,fd->tbd\", c, input_kernels[i])}\n out = build_rnn(theta[i], state0[i], inp, lstm_cell, lstm_cell_grad,\n max_seq_len)\n o = out[\"h\"] + o if i > 1 else out[\"h\"]\n\n out = tf.reshape(o * tf.expand_dims(padding, 2), [-1, self.num_units])\n\n logits = tf.matmul(\n tf.cast(out, self.output_layer.dtype), self.output_layer)\n label = tf.one_hot(\n tf.cast(tf.reshape(target_output, [-1]), tf.int32),\n self.tgt_vocab_size,\n 1.0 - self.label_smoothing,\n self.label_smoothing / (self.tgt_vocab_size - 1),\n dtype=logits.dtype)\n loss = softmax_cross_entropy(logits, label)\n return tf.reduce_sum(loss), None, None\n\n ## Inference\n else:\n assert hparams.infer_mode == \"beam_search\"\n start_tokens = tf.fill([self.batch_size], hparams.tgt_sos_id)\n end_token = hparams.tgt_eos_id\n beam_width = hparams.beam_width\n length_penalty_weight = hparams.length_penalty_weight\n coverage_penalty_weight = hparams.coverage_penalty_weight\n\n # maximum_iteration: The maximum decoding steps.\n maximum_iterations = self._get_infer_maximum_iterations(\n hparams, self.features[\"source_sequence_length\"])\n\n def cell_fn(inputs, state):\n \"\"\"Cell function used in decoder.\"\"\"\n inp = {\"rnn\": tf.matmul(inputs, input_kernels[0])}\n atten_state, _ = attention_cell(theta[0], state[0], inp)\n o = atten_state[\"h\"]\n new_states = [atten_state]\n for i in range(1, 
4):\n ns, _ = lstm_cell(\n theta[i], state[i], {\n \"rnn\":\n tf.matmul(\n tf.concat([o, atten_state[\"attention\"]], -1),\n input_kernels[i])\n })\n new_states.append(ns)\n if i > 1:\n o = ns[\"h\"] + o\n else:\n o = ns[\"h\"]\n return new_states, o\n\n my_decoder = beam_search_decoder.BeamSearchDecoder(\n cell=cell_fn,\n embedding=self.embedding_decoder,\n start_tokens=start_tokens,\n end_token=end_token,\n initial_state=state0,\n beam_width=beam_width,\n output_layer=self.output_layer,\n max_tgt=maximum_iterations,\n length_penalty_weight=length_penalty_weight,\n coverage_penalty_weight=coverage_penalty_weight,\n dtype=self.dtype)\n\n # Dynamic decoding\n predicted_ids = decoder.dynamic_decode(\n my_decoder,\n maximum_iterations=maximum_iterations,\n swap_memory=True,\n scope=decoder_scope)\n\n return logits, decoder_cell_outputs, predicted_ids", "def decode(args: Dict[str, str]):\n test_src_dir = os.path.join(args.test_dir, args.input_col.lower())\n test_tgt_dir = os.path.join(args.test_dir, args.output_col.lower())\n\n print(f\"load test source sentences from [{test_src_dir}]\", file=sys.stderr)\n test_data_src = read_corpus(test_src_dir, source='src')\n if test_tgt_dir:\n print(f\"load test target sentences from [{test_tgt_dir}]\", file=sys.stderr)\n test_data_tgt = read_corpus(test_tgt_dir, source='tgt')\n\n model_path = os.path.join(args.model_dir, 'model.bin')\n print(f\"load model from {model_path}\", file=sys.stderr)\n model = NMT.load(model_path)\n\n if args.cuda:\n model = model.to(torch.device(\"cuda:0\"))\n\n hypotheses = beam_search(model, test_data_src,\n beam_size=int(args.beam_size),\n max_decoding_time_step=int(args.max_decoding_time_step))\n\n top_hypotheses = [hyps[0] for hyps in hypotheses]\n bleu_score = compute_corpus_level_bleu_score(test_data_tgt, top_hypotheses)\n print(f'Corpus BLEU: {bleu_score}', file=sys.stderr)\n\n output_path = os.path.join(args.eval_dir, 'decode.txt')\n with open(output_path, 'w') as f:\n f.write(str(bleu_score))", "def test_IDNet_dense_autoencoder( train = False, num_epochs = 10):\n modelName = 'Dense_IDNet' + '_trained.h5'\n if train is True:\n data, ydata = create_idnet_training_dataset()\n # Train autoencoder\n encoder = train_dense_autoencoder( num_epochs, data, ydata)\n # saving the trained model \n print('Saved model: ' + modelName)\n encoder.save(const.TRAINED_MODELS_DIR + '/' + modelName)\n else:\n # load model \n print(const.TRAINED_MODELS_DIR)\n print(modelName)\n modelName = const.TRAINED_MODELS_DIR+ '/' + modelName\n print(modelName)\n encoder = load_model(modelName)\n print('Loaded model: ' + modelName)\n\n if MEASUREMENT_PROTOCOL_TYPE == const.MeasurementProtocol.SAME_DAY:\n print('session_1')\n X_train, y_train = extract_features(encoder, 'session_1', const.AutoencoderModelType.DENSE, 1, 154, 1, 5)\n X_test, y_test = extract_features(encoder, 'session_1', const.AutoencoderModelType.DENSE, 1, 154, 5, 7) \n evaluation(X_train, y_train, X_test, y_test)\n print('session_2')\n X_train, y_train = extract_features(encoder, 'session_2', const.AutoencoderModelType.DENSE, 1, 154, 1, 5)\n X_test, y_test = extract_features(encoder, 'session_2', const.AutoencoderModelType.DENSE, 1, 154, 5, 7) \n evaluation(X_train, y_train, X_test, y_test)\n \n if MEASUREMENT_PROTOCOL_TYPE == const.MeasureuentProtocol.CROSS_DAY: \n X_train, y_train = extract_features(encoder, 'session_1', const.AutoencoderModelType.DENSE, 1, 154, 1, 7)\n X_test, y_test = extract_features(encoder, 'session_2', const.AutoencoderModelType.DENSE, 1, 154, 1, 7) \n 
evaluation(X_train, y_train, X_test, y_test)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns params for job `job_name` on the dataset `dataset_name`.
def GetParamsForDataset(self, job_name, dataset_name):
  # Get the current cluster and update its params from flags.
  cluster = cluster_factory.Current()
  self.UpdateClusterParamsFromFlags(cluster.params, job_name)
  with cluster_factory.Cluster(cluster.params):
    try:
      cfg = self.model_registry.GetParams(self._model_name, dataset_name)
    except AttributeError as e:
      dataset_name_retry = dataset_name.title()
      tf.logging.warning(
          'Exception configuring dataset %s, retrying as %s: %s',
          dataset_name, dataset_name_retry, e)
      cfg = self.model_registry.GetParams(self._model_name,
                                          dataset_name_retry)
      tf.logging.warning(
          'Succeeded after retrying as %s.' % dataset_name_retry)
  cfg.cluster = cluster.params
  return cfg
[ "def job_attributes(self) -> Dict[str, str]:\n params: Dict[str, str] = {\n \"esi_job_name\": self.name,\n \"esi_job_id_\": self.id_,\n \"esi_job_op_id\": self.op_id,\n \"esi_job_max_attempts\": str(self.max_attempts),\n \"esi_job_uid\": str(self.uid),\n \"esi_job_iso_date_time\": datetime.now().isoformat().replace(\":\", \"-\"),\n }\n return params", "def get_params(self, job: Job):\n if job.params and \"secrets\" in job.params:\n logger.warning(\"Secrets were present in job params\", extra={\"job\": job.id})\n del job.params[\"secrets\"]\n return job.params", "def parse_job_name():\n print('\\nparsing compute & algorithm choices from job-name...\\n') \n model_type = default_model_type\n compute_type = default_compute_type\n cv_folds = default_cv_folds\n rapids_version = default_rapids_version\n\n try:\n if 'SM_TRAINING_ENV' in os.environ:\n env_params = json.loads( os.environ['SM_TRAINING_ENV'] )\n job_name = env_params['job_name']\n\n # compute \n compute_selection = job_name.split('-')[1].lower()\n if 'mgpu' in compute_selection:\n compute_type = 'multi-GPU'\n elif 'mcpu' in compute_selection:\n compute_type = 'multi-CPU'\n elif 'scpu' in compute_selection:\n compute_type = 'single-CPU'\n elif 'sgpu' in compute_selection:\n compute_type = 'single-GPU'\n # parse model type\n model_selection = job_name.split('-')[2].lower()\n if 'rf' in model_selection:\n model_type = 'RandomForest'\n elif 'xgb' in model_selection:\n model_type = 'XGBoost'\n \n # parse CV folds\n cv_folds = int(job_name.split('-')[3].split('cv')[0])\n \n except Exception as error:\n print( error )\n\n if 'GPU' in compute_type:\n rapids_version = int( str( cudf.__version__ ).split('.')[1] ) \n\n assert ( model_type in ['RandomForest', 'XGBoost'] )\n assert ( compute_type in ['single-GPU', 'multi-GPU', 'single-CPU', 'multi-CPU'] )\n assert ( cv_folds >= 1 )\n \n print(f' Compute: {compute_type}\\n'\n f' Algorithm: {model_type}\\n'\n f' CV_folds: {cv_folds}\\n' \n f' RAPIDS version: {rapids_version}\\n')\n\n return model_type, compute_type, cv_folds, rapids_version", "def input_data_job_id(conf):\n # type: (dict) -> str\n return conf['job_id']", "def get_task_params(self, datum):\n raise NotImplementedError()\n # return {\n # \"Title\": \"Short title\",\n # \"Description\": \"Longer description,\n # \"FrameHeight\": \"1200\",\n # \"AssignmentDurationInSeconds\": \"300\",\n # \"LifetimeInSeconds\": \"86400\",\n # \"MaxAssignments\": \"3\",\n # \"Reward\": \"0.10\",\n # }", "def get_batch(self, data_asset_name, expectation_suite_name, batch_kwargs=None, **kwargs):\n normalized_data_asset_name = self.normalize_data_asset_name(data_asset_name)\n\n datasource = self.get_datasource(normalized_data_asset_name.datasource)\n if not datasource:\n raise ge_exceptions.DataContextError(\n \"Can't find datasource {} in the config - please check your {}\".format(\n normalized_data_asset_name,\n self.GE_YML\n )\n )\n\n if batch_kwargs is None:\n batch_kwargs = self.build_batch_kwargs(data_asset_name, **kwargs)\n\n data_asset = datasource.get_batch(normalized_data_asset_name,\n expectation_suite_name,\n batch_kwargs,\n **kwargs)\n return data_asset", "def jobInfo_get(self, **kargs):\n\n timeout = 0.1\n attemptsTotal = 500\n job = self.jobCount\n field = 'pid'\n\n for key, val in kargs.items():\n if key == 'job': job = val\n if key == 'field': field = val\n if key == 'timeout': timeout = val\n if key == 'attemptsTotal': attemptsTotal = val\n\n attempts = 0\n b_success = False\n ret = None\n\n while not b_success and job < self.jobTotal:\n 
try:\n ret = self.d_job[str(job)][field]\n b_success = True\n except:\n time.sleep(timeout)\n attempts += 1\n if attempts > attemptsTotal:\n b_success = False\n break\n\n return{'success': b_success,\n 'field': ret}", "def get_job(self, name):\n return self.jobs.get(name)", "def find_job_by_name(self, job_name, gcp_project, region=None):\n if not region:\n regions = variables.DATAFLOW_REGIONS\n else:\n regions = (region,)\n\n base_request = self.client.projects().locations().jobs()\n\n all_matching_jobs = []\n\n # TODO: no batch requesting from Google's side, but should add\n # threading to send multiple requests concurrently. @lynn\n for region in regions:\n # Note: the parameter `view=\"JOB_VIEW_ALL\"` does not return\n # the same information in this `.list()` call as it\n # does in the `.get()` call in `get_job_detail` below.\n request = base_request.list(\n projectId=gcp_project, location=region, filter=\"ACTIVE\"\n )\n\n try:\n response = request.execute()\n\n # general catch all since the handling would be the same no matter\n # of the exception\n except Exception as e:\n self.logger.warning(\n \"Error listing active jobs in project '%s' in region '%s':\"\n \" %s\" % (gcp_project, region, e)\n )\n continue\n\n job_results = response.get(\"jobs\", [])\n if job_results:\n for result in job_results:\n if result[\"name\"] == job_name:\n all_matching_jobs.append(result)\n\n # Note: job names are unique within regions, but not across\n # regions :grimace:\n if len(all_matching_jobs) > 1:\n self.logger.info(\n \"More than one parent job found for job name '%s' under \"\n \"project '%s'. Selecting one at random.\"\n )\n return random.choice(all_matching_jobs)\n if all_matching_jobs:\n return all_matching_jobs[0]", "def handle_parms_job(rop_node, job_parm_dict):\n job_parm_dict[\"Name\"] = rop_node.parm(\"hf_job_name\").evalAsString()\n job_parm_dict[\"Comment\"] = rop_node.parm(\"hf_comment\").evalAsString()\n job_parm_dict[\"Department\"] = \"3D\"", "def get_job_config(self, job_name):\n str_job_xml = self.server.get_job_config(job_name)\n return str_job_xml", "def test_retrieve_job_sets_by_name(self):\n name = str(time.time()).replace('.', '')\n\n job_set = self._jm.run(\n [self._qc]*2, backend=self.fake_api_backend, name=name, max_experiments_per_job=1)\n rjob_set = self._jm.job_sets(name=name)[0]\n self.assertEqual(job_set, rjob_set)", "def test_parameters__param_fetch_ok(self):\n job_state = get_test_job(JOB_CREATED)\n job_params = job_state.get(\"job_input\", {}).get(\"params\")\n self.assertIsNotNone(job_params)\n\n # delete the job params from the input\n del job_state[\"job_input\"][\"params\"]\n job = Job(job_state)\n self.assertEqual(job.params, JOB_ATTR_DEFAULTS[\"params\"])\n\n with assert_obj_method_called(MockClients, \"check_job\", call_status=True):\n params = job.parameters()\n self.assertEqual(params, job_params)", "def parse_params(args):\n job = args[0]\n params = {}\n for arg in args[1:]:\n if arg[:2] == \"--\":\n name, value = arg[2:].split(\"=\")\n\n if settings.JENKINS_UPPERCASE_PARAMS:\n name = name.upper()\n\n params[name] = value\n\n return job, params", "def test_parameters(self):\n job_state = get_test_job(JOB_COMPLETED)\n job_params = job_state.get(\"job_input\", {}).get(\"params\")\n self.assertIsNotNone(job_params)\n job = Job(job_state)\n self.assertIsNotNone(job.params)\n\n with assert_obj_method_called(MockClients, \"check_job\", call_status=False):\n params = job.parameters()\n self.assertIsNotNone(params)\n self.assertEqual(params, job_params)", "def 
inspect_job(self) -> 'outputs.GooglePrivacyDlpV2InspectJobConfigResponse':\n return pulumi.get(self, \"inspect_job\")", "def get_data_pid(self):\n\n desc = self.describe_job\n\n input_data = None\n if \"input_data\" in desc:\n input_data = desc[\"input_data\"]\n\n return input_data", "def get_batch_job(self) -> SlurmBatchJob:\n ...", "def getJobName(self):\n xpath = self.root_tag + \"/updateParameters\" + self.version_filter + \"/jobName\"\n self.debug(\"getDeveloperEmail(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = self.getData(xpath)\n value = \"\"\n for node in node_set:\n # value = str( node.jobName )\n value = node.getValue()\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If given a `FLAGS.cluster_spec`, update flags for running distributed.
def MaybeConfigRunDistributed(self):
  if not FLAGS.cluster_spec:
    return
  job_specs = FLAGS.cluster_spec.split('@')
  cluster_spec_dict = {}
  for job_spec in job_specs:
    # ps_host=worker1:1231,worker2:1234
    job_machines = job_spec.split('=')
    if len(job_machines) != 2:
      raise ValueError('Invalid job specification: %s', job_spec)
    cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
  if FLAGS.job == 'trainer_client':
    FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
  for job in cluster_spec_dict.keys():
    if job.startswith('decoder_'):
      assert len(job_specs) == 1, 'Decoder jobs must run on their own'
      assert ',' not in job_specs[0], 'Only single machine supported'
      FLAGS.decoder_job = '/job:%s' % job
      FLAGS.decoder_replicas = 1
    if job.startswith('evaler_'):
      assert len(job_specs) == 1, 'Evaler jobs must run on their own'
      assert ',' not in job_specs[0], 'Only single machine supported'
      FLAGS.evaler_job = '/job:%s' % job
      FLAGS.evaler_replicas = 1
  if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
                                            'worker'):
    FLAGS.worker_job = '/job:worker'
    FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
    FLAGS.ps_job = '/job:worker'
    FLAGS.ps_replicas = FLAGS.worker_replicas
  if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
    FLAGS.worker_job = '/job:trainer'
    FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
    FLAGS.ps_job = '/job:ps'
    FLAGS.ps_replicas = len(cluster_spec_dict['ps'])
[ "def UpdateClusterParamsFromFlags(self, cluster, job_name):\n cluster.mode = FLAGS.mode\n cluster.job = job_name\n cluster.task = FLAGS.task\n\n cluster.controller.name = FLAGS.controller_job\n cluster.controller.gpus_per_replica = FLAGS.controller_gpus\n\n cluster.worker.name = FLAGS.worker_job\n cluster.worker.replicas = FLAGS.worker_replicas\n cluster.worker.gpus_per_replica = FLAGS.worker_gpus\n cluster.worker.tpus_per_replica = FLAGS.worker_tpus\n cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts\n cluster.worker.devices_per_split = FLAGS.worker_split_size\n\n cluster.ps.name = FLAGS.ps_job\n cluster.ps.replicas = FLAGS.ps_replicas\n cluster.ps.gpus_per_replica = FLAGS.ps_gpus\n\n cluster.input.name = FLAGS.input_job\n cluster.input.replicas = FLAGS.input_replicas\n cluster.input.targets = FLAGS.input_targets\n\n cluster.evaler.name = FLAGS.evaler_job\n cluster.evaler.replicas = FLAGS.evaler_replicas\n cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus\n\n cluster.decoder.name = FLAGS.decoder_job\n cluster.decoder.replicas = FLAGS.decoder_replicas\n cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus", "def modify_cluster(ClusterId=None, StepConcurrencyLevel=None):\n pass", "async def do_start_cluster(self, cluster):\n raise NotImplementedError", "def cluster_updated(configuration, cluster_state):", "def sub_cluster(\n cluster,\n rmin=None,\n rmax=None,\n mmin=None,\n mmax=None,\n vmin=None,\n vmax=None,\n emin=None,\n emax=None,\n kwmin=0,\n kwmax=15,\n npop=None,\n indx=[None],\n projected=False,\n sortstars=True,\n reset_centre=False,\n reset_nbody=False,\n reset_nbody_mass=False,\n reset_nbody_radii=False,\n reset_rvirial=False,\n reset_projected=False,\n **kwargs\n):\n cluster.save_cluster()\n units0,origin0, rorder0, rorder_origin0 = cluster.units0,cluster.origin0, cluster.rorder0, cluster.rorder_origin0\n\n\n if projected:\n r = cluster.rpro\n v = cluster.vpro\n else:\n r = cluster.r\n v = cluster.v\n\n \"\"\"\n if rmin == None:\n rmin = np.amin(r)\n if rmax == None:\n rmax = np.amax(r)\n if vmin == None:\n vmin = np.amin(v)\n if vmax == None:\n vmax = np.amax(v)\n if mmin == None:\n mmin = np.amin(cluster.m)\n if mmax == None:\n mmax = np.amax(cluster.m)\n\n if emin == None and emax != None:\n eindx = cluster.etot <= emax\n elif emin != None and emax == None:\n eindx = cluster.etot >= emin\n elif emin != None and emax != None:\n eindx = (cluster.etot <= emax) * (cluster.etot >= emin)\n else:\n eindx = cluster.id > -1\n\n if None in indx:\n indx = cluster.id > -1\n\n indx *= (\n (r >= rmin)\n * (r <= rmax)\n * (cluster.m >= mmin)\n * (cluster.m <= mmax)\n * (v >= vmin)\n * (v <= vmax)\n * eindx\n )\n\n if len(cluster.kw) > 0:\n indx*=((cluster.kw >= kwmin) * (cluster.kw <= kwmax))\n \"\"\"\n\n indx=cluster.subset(rmin=rmin,rmax=rmax,vmin=vmin,vmax=vmax,mmin=mmin,mmax=mmax,emin=emin,emax=emax,kwmin=kwmin,kwmax=kwmax,npop=npop,indx=indx,projected=projected)\n\n\n if np.sum(indx) > 0:\n\n\n subcluster = StarCluster(\n cluster.tphys,\n units=cluster.units,\n origin=cluster.origin,\n ctype=cluster.ctype,\n ro=cluster._ro,\n vo=cluster._vo,\n zo=cluster._zo,\n solarmotion=cluster._solarmotion,\n )\n\n subcluster.add_stars(\n cluster.x[indx],\n cluster.y[indx],\n cluster.z[indx],\n cluster.vx[indx],\n cluster.vy[indx],\n cluster.vz[indx],\n cluster.m[indx],\n cluster.id[indx],\n cluster.m0[indx],\n cluster.npop[indx],\n sortstars=sortstars,\n )\n\n if len(cluster.ra)==len(cluster.x):\n subcluster.ra, subcluster.dec, subcluster.dist = (\n cluster.ra[indx],\n 
cluster.dec[indx],\n cluster.dist[indx],\n )\n subcluster.pmra, subcluster.pmdec, subcluster.vlos = (\n cluster.pmra[indx],\n cluster.pmdec[indx],\n cluster.vlos[indx],\n )\n\n subcluster.add_nbody6(cluster.nc,cluster.rc,cluster.rbar,\n cluster.rtide,cluster.xc,cluster.yc,cluster.zc,\n cluster.zmbar,cluster.vbar,cluster.tbar,cluster.rscale,\n cluster.ns,cluster.nb,cluster.n_p)\n\n subcluster.projected = cluster.projected\n subcluster.centre_method = cluster.centre_method\n\n if len(cluster.logl) > 0:\n if len(cluster.ep) !=0 and len(cluster.ospin) != 0:\n subcluster.add_sse(\n cluster.kw[indx],\n cluster.logl[indx],\n cluster.logr[indx],\n cluster.ep[indx],\n cluster.ospin[indx],\n )\n else:\n subcluster.add_sse(\n cluster.kw[indx],\n cluster.logl[indx],\n cluster.logr[indx],\n )\n elif len(cluster.kw) > 0:\n subcluster.kw = cluster.kw[indx]\n\n\n if len(cluster.id2) > 0:\n bindx1 = np.in1d(cluster.id1, cluster.id[indx])\n bindx2 = np.in1d(cluster.id2, cluster.id[indx])\n bindx=np.logical_or(bindx1,bindx2)\n\n\n if len(cluster.ep1) !=0 and len(cluster.ospin1) != 0:\n\n subcluster.add_bse(\n cluster.id1[bindx],\n cluster.id2[bindx],\n cluster.kw1[bindx],\n cluster.kw2[bindx],\n cluster.kcm[bindx],\n cluster.ecc[bindx],\n cluster.pb[bindx],\n cluster.semi[bindx],\n cluster.m1[bindx],\n cluster.m2[bindx],\n cluster.logl1[bindx],\n cluster.logl2[bindx],\n cluster.logr1[bindx],\n cluster.logr2[bindx],\n cluster.ep1[bindx],\n cluster.ep2[bindx],\n cluster.ospin1[bindx],\n cluster.ospin2[bindx],\n )\n else:\n subcluster.add_bse(\n cluster.id1[bindx],\n cluster.id2[bindx],\n cluster.kw1[bindx],\n cluster.kw2[bindx],\n cluster.kcm[bindx],\n cluster.ecc[bindx],\n cluster.pb[bindx],\n cluster.semi[bindx],\n cluster.m1[bindx],\n cluster.m2[bindx],\n cluster.logl1[bindx],\n cluster.logl2[bindx],\n cluster.logr1[bindx],\n cluster.logr2[bindx],\n )\n\n if len(cluster.etot) > 0:\n subcluster.add_energies(\n cluster.kin[indx], cluster.pot[indx],\n )\n\n if cluster.give == 'mxvpqael':\n subcluster.give=cluster.give\n subcluster.gyrpot=cluster.gyrpot[indx]\n subcluster.gyrq=cluster.gyrq[indx]\n subcluster.gyracc=cluster.gyracc[indx]\n subcluster.eps=cluster.eps[indx]\n subcluster.gyrlev=cluster.gyrlev[indx]\n elif cluster.give =='mxve':\n subcluster.give=cluster.give\n subcluster.eps=cluster.eps[indx]\n\n\n if reset_centre:\n subcluster.add_orbit(\n cluster.xgc,\n cluster.ygc,\n cluster.zgc,\n cluster.vxgc,\n cluster.vygc,\n cluster.vzgc,\n )\n\n if cluster.origin=='centre' or cluster.origin=='cluster':\n subcluster.find_centre(0.0, 0.0, 0.0, reset_centre=reset_centre)\n else:\n subcluster.find_centre(reset_centre=reset_centre)\n\n else:\n subcluster.add_orbit(\n cluster.xgc,\n cluster.ygc,\n cluster.zgc,\n cluster.vxgc,\n cluster.vygc,\n cluster.vzgc,\n )\n subcluster.xc, subcluster.yc, subcluster.zc = (\n cluster.xc,\n cluster.yc,\n cluster.zc,\n )\n subcluster.vxc, subcluster.vyc, subcluster.vzc = (\n cluster.vxc,\n cluster.vyc,\n cluster.vzc,\n )\n\n subcluster.ra_gc, subcluster.dec_gc, subcluster.dist_gc = cluster.ra_gc, cluster.dec_gc, cluster.dist_gc\n subcluster.pmra_gc, subcluster.pmdec_gc, subcluster.vlos_gc = (\n cluster.pmra_gc,\n cluster.pmdec_gc,\n cluster.vlos_gc,\n )\n\n if reset_nbody:\n subcluster.to_pckms()\n subcluster.analyze()\n subcluster.reset_nbody_scale(mass=True,radius=True,rvirial=reset_rvirial,projected=reset_projected,**kwargs)\n elif reset_nbody_mass or reset_nbody_radii:\n subcluster.to_pckms()\n subcluster.analyze()\n 
subcluster.reset_nbody_scale(mass=reset_nbody_mass,radius=reset_nbody_radii,rvirial=reset_rvirial,projected=reset_projected,**kwargs)\n\n else:\n subcluster = StarCluster(cluster.tphys)\n\n if subcluster.ntot > 0:\n if subcluster.units!=units0: subcluster.to_units(units0)\n if subcluster.origin!=origin0: subcluster.to_origin(origin0)\n subcluster.analyze(sortstars=sortstars)\n\n cluster.return_cluster(units0,origin0, rorder0, rorder_origin0)\n\n\n return subcluster", "def setup_cluster_or_multicore(self):\n if self.cluster_mode == 1:\n cluster_name = self.options['cluster_type']\n try:\n self.cluster = cluster.from_name[cluster_name](**self.options)\n except KeyError:\n # Check if a plugin define this type of cluster\n # check for PLUGIN format\n cluster_class = misc.from_plugin_import(self.plugin_path, \n 'new_cluster', cluster_name,\n info = 'cluster handling will be done with PLUGIN: %{plug}s' )\n if cluster_class:\n self.cluster = cluster_class(**self.options)\n \n if self.cluster_mode == 2:\n try:\n import multiprocessing\n if not self.nb_core:\n try:\n self.nb_core = int(self.options['nb_core'])\n except TypeError:\n self.nb_core = multiprocessing.cpu_count()\n logger.info('Using %d cores' % self.nb_core)\n except ImportError:\n self.nb_core = 1\n logger.warning('Impossible to detect the number of cores => Using One.\\n'+\n 'Use set nb_core X in order to set this number and be able to'+\n 'run in multicore.')\n\n self.cluster = cluster.MultiCore(**self.options)", "def cmd_node_update_cluster(self, args):\n node_id = args[0]\n cluster_id = args[1]\n data = {'cluster_id': cluster_id}\n self._update_obj(node_id, 'node', data)", "def init_multicluster_ocsci_conf(args, nclusters):\n parser = argparse.ArgumentParser(add_help=False)\n # Dynamically adding the argument --cluster$i to enforce\n # user's to pass --cluster$i param followed by normal cluster conf\n # options so that separation of per cluster conf will be easier\n for i in range(nclusters):\n parser.add_argument(\n f\"--cluster{i+1}\",\n required=True,\n action=\"store_true\",\n help=(\n \"Index argument for per cluster args, \"\n \"this marks the start of the cluster{i} args\"\n \"any args between --cluster{i} and --cluster{i+1} will be\",\n \"considered as arguments for cluster{i}\",\n ),\n )\n\n # Parsing just to enforce `nclusters` number of --cluster{i} arguments are passed\n _, _ = parser.parse_known_args(args[2:])\n multicluster_conf, common_argv = tokenize_per_cluster_args(args[2:], nclusters)\n\n # We need to seperate common arguments and cluster specific arguments\n framework.config.multicluster = True\n framework.config.nclusters = nclusters\n framework.config.init_cluster_configs()\n framework.config.reset_ctx()\n for index in range(nclusters):\n framework.config.switch_ctx(index)\n process_ocsci_conf(common_argv + multicluster_conf[index][1:])\n for arg in range(len(multicluster_conf[index][1:])):\n if multicluster_conf[index][arg + 1].startswith(\"--\"):\n multicluster_conf[index][\n arg + 1\n ] = f\"{multicluster_conf[index][arg+1]}{index + 1}\"\n framework.config.multicluster_args.append(multicluster_conf[index][1:])\n check_config_requirements()\n framework.config.multicluster_common_args.append(common_argv)\n # Set context to default_cluster_context_index\n framework.config.switch_default_cluster_ctx()\n # Set same run_id across all clusters\n # there is a race condition in which multiple run id's could be generated\n universal_run_id = framework.config.RUN[\"run_id\"]\n for cluster in 
framework.config.clusters:\n cluster.RUN[\"run_id\"] = universal_run_id", "def cluster_spec(self):\n tf_config = _load_tf_config()\n if 'cluster' not in tf_config:\n return ClusterSpec({})\n return ClusterSpec(tf_config['cluster'])", "def _UpdateBenchmarkSpecWithFlags(benchmark_spec):\n benchmark_spec.max_sentences = FLAGS.robertammlm_max_sentences\n benchmark_spec.nproc_per_node = FLAGS.robertammlm_nproc_per_node\n benchmark_spec.log_interval = FLAGS.robertammlm_log_interval\n benchmark_spec.profiler = FLAGS.robertammlm_profiler\n benchmark_spec.max_epoch = FLAGS.robertammlm_max_epoch\n vms = benchmark_spec.vms\n vm = vms[0]\n num_vms = len(vms)\n benchmark_spec.num_vms = num_vms\n benchmark_spec.global_batch_size = FLAGS.robertammlm_global_batch_size\n num_accelerators = nvidia_driver.QueryNumberOfGpus(vm) * num_vms\n benchmark_spec.num_accelerators = num_accelerators\n if FLAGS.robertammlm_update_freq:\n benchmark_spec.update_freq = FLAGS.robertammlm_update_freq\n else:\n benchmark_spec.update_freq = (benchmark_spec.global_batch_size // (\n benchmark_spec.max_sentences * num_accelerators))\n if FLAGS.robertammlm_num_copies:\n benchmark_spec.num_copies = FLAGS.robertammlm_num_copies\n else:\n benchmark_spec.num_copies = max(1, num_accelerators // 32)", "def configure_cluster(control_node, agent_nodes):\n return sequence([\n run_remotely(\n username='root',\n address=control_node,\n commands=task_enable_flocker_control(),\n ),\n sequence([\n sequence([\n Effect(Func(lambda node=node: configure_ssh(node, 22))),\n run_remotely(\n username='root',\n address=node,\n commands=task_enable_flocker_agent(\n node_name=node,\n control_node=control_node,\n ),\n ),\n ]) for node in agent_nodes\n ])\n ])", "def _update_cluster_name_property(self, name):\n self.configuration_manager.apply_system_override({'cluster_name':\n name})", "def process_cluster(self, cluster):\n raise NotImplementedError", "def update_cluster(self, cluster_id, values):", "def monkey_patch_base_cluster_manager():\n def get_test_cluster_manager(params, config_proto):\n del config_proto\n return cnn_util.BaseClusterManager(params)\n platforms_util.get_cluster_manager = get_test_cluster_manager", "def cluster(**kwargs):\n def cluster_use_metadata_adder(func):\n def extended_test(self, *args, **kwargs):\n self.test_context.before()\n test_result = func(self, *args, **kwargs)\n return self.test_context.after(test_result)\n\n extended_test.__dict__.update(**func.__dict__)\n extended_test.__name__ = func.__name__\n\n Mark.mark(extended_test, ParametrizableClusterMetadata(**kwargs))\n return extended_test\n\n return cluster_use_metadata_adder", "def cli_cosmosdb_managed_cassandra_cluster_update(client,\r\n resource_group_name,\r\n cluster_name,\r\n tags=None,\r\n identity_type=None,\r\n client_certificates=None,\r\n external_gossip_certificates=None,\r\n external_seed_nodes=None,\r\n cassandra_version=None,\r\n authentication_method=None,\r\n hours_between_backups=None,\r\n repair_enabled=None):\r\n\r\n cluster_resource = client.get(resource_group_name, cluster_name)\r\n\r\n if client_certificates is None:\r\n client_certificates = cluster_resource.properties.client_certificates\r\n\r\n if external_gossip_certificates is not None:\r\n external_gossip_certificates = cluster_resource.properties.external_gossip_certificates\r\n\r\n if external_seed_nodes is None:\r\n external_seed_nodes = cluster_resource.properties.external_seed_nodes\r\n\r\n if cassandra_version is None:\r\n cassandra_version = 
cluster_resource.properties.cassandra_version\r\n\r\n if authentication_method is None:\r\n authentication_method = cluster_resource.properties.authentication_method\r\n\r\n if hours_between_backups is None:\r\n hours_between_backups = cluster_resource.properties.hours_between_backups\r\n\r\n if repair_enabled is None:\r\n repair_enabled = cluster_resource.properties.repair_enabled\r\n\r\n if tags is None:\r\n tags = cluster_resource.tags\r\n\r\n identity = cluster_resource.identity\r\n\r\n if identity_type is not None:\r\n identity = ManagedCassandraManagedServiceIdentity(type=identity_type)\r\n\r\n cluster_properties = ClusterResourceProperties(\r\n provisioning_state=cluster_resource.properties.provisioning_state,\r\n restore_from_backup_id=cluster_resource.properties.restore_from_backup_id,\r\n delegated_management_subnet_id=cluster_resource.properties.delegated_management_subnet_id,\r\n cassandra_version=cassandra_version,\r\n cluster_name_override=cluster_resource.properties.cluster_name_override,\r\n authentication_method=authentication_method,\r\n initial_cassandra_admin_password=cluster_resource.properties.initial_cassandra_admin_password,\r\n hours_between_backups=hours_between_backups,\r\n repair_enabled=repair_enabled,\r\n client_certificates=client_certificates,\r\n external_gossip_certificates=external_gossip_certificates,\r\n gossip_certificates=cluster_resource.properties.gossip_certificates,\r\n external_seed_nodes=cluster_resource.properties.external_seed_nodes,\r\n seed_nodes=cluster_resource.properties.seed_nodes\r\n )\r\n\r\n cluster_resource_create_update_parameters = ClusterResource(\r\n location=cluster_resource.location,\r\n tags=tags,\r\n identity=identity,\r\n properties=cluster_properties)\r\n\r\n return client.begin_create_update(resource_group_name, cluster_name, cluster_resource_create_update_parameters)", "def create_cluster(module, switch, name, node1, node2, mod, CHANGED_FLAG, task, msg):\n cli = pn_cli(module)\n clicopy = cli\n\n if mod == 'l3-vrrp' or mod == 'l2-vrrp':\n spine_list = module.params['pn_spine_list']\n leaf_list = module.params['pn_leaf_list']\n\n cli += ' switch %s system-settings-show ' % node1\n cli += ' format auto-trunk '\n status = run_command(module, cli, task, msg).split()[1]\n if status != 'on':\n if (node1 in leaf_list and node2 in leaf_list) or \\\n (node1 in spine_list and node2 in spine_list):\n\n ports = get_ports(module, node1, node2, task, msg)\n trunk_name = node1 + '-' + node2 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node1, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n ports = get_ports(module, node2, node1, task, msg)\n trunk_name = node2 + '-' + node1 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node2, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n cli = clicopy\n\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = list(set(run_command(module, cli, task, msg).split()))\n if name not in cluster_list:\n cli = clicopy\n cli += ' switch %s cluster-create name %s ' % (switch, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n if 'Success' in run_command(module, cli, task, msg):\n CHANGED_FLAG.append(True)\n return ' %s: Created %s \\n' % (switch, name), CHANGED_FLAG\n return '', CHANGED_FLAG", "def runCoClustering(self):\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update `cluster` with a training cluster configuration from flags.
def UpdateClusterParamsFromFlags(self, cluster, job_name):
    cluster.mode = FLAGS.mode
    cluster.job = job_name
    cluster.task = FLAGS.task

    cluster.controller.name = FLAGS.controller_job
    cluster.controller.gpus_per_replica = FLAGS.controller_gpus

    cluster.worker.name = FLAGS.worker_job
    cluster.worker.replicas = FLAGS.worker_replicas
    cluster.worker.gpus_per_replica = FLAGS.worker_gpus
    cluster.worker.tpus_per_replica = FLAGS.worker_tpus
    cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
    cluster.worker.devices_per_split = FLAGS.worker_split_size

    cluster.ps.name = FLAGS.ps_job
    cluster.ps.replicas = FLAGS.ps_replicas
    cluster.ps.gpus_per_replica = FLAGS.ps_gpus

    cluster.input.name = FLAGS.input_job
    cluster.input.replicas = FLAGS.input_replicas
    cluster.input.targets = FLAGS.input_targets

    cluster.evaler.name = FLAGS.evaler_job
    cluster.evaler.replicas = FLAGS.evaler_replicas
    cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus

    cluster.decoder.name = FLAGS.decoder_job
    cluster.decoder.replicas = FLAGS.decoder_replicas
    cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus
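A minimal sketch of the same flag-to-params copying pattern, with types.SimpleNamespace standing in for both FLAGS and the cluster params object; the real objects come from the surrounding training framework and are assumed here.

# Minimal sketch of the copy-flags-into-cluster-params pattern above.
# SimpleNamespace is a stand-in for the real FLAGS and cluster params objects.
from types import SimpleNamespace

FLAGS = SimpleNamespace(mode='async', task=0, worker_job='/job:trainer',
                        worker_replicas=4, worker_gpus=1)

cluster = SimpleNamespace(
    mode=None, job=None, task=None,
    worker=SimpleNamespace(name=None, replicas=0, gpus_per_replica=0))

def update_cluster_params_from_flags(cluster, job_name):
    cluster.mode = FLAGS.mode
    cluster.job = job_name
    cluster.task = FLAGS.task
    cluster.worker.name = FLAGS.worker_job
    cluster.worker.replicas = FLAGS.worker_replicas
    cluster.worker.gpus_per_replica = FLAGS.worker_gpus

update_cluster_params_from_flags(cluster, 'trainer')
print(cluster.worker.name, cluster.worker.replicas)  # /job:trainer 4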
[ "def cmd_node_update_cluster(self, args):\n node_id = args[0]\n cluster_id = args[1]\n data = {'cluster_id': cluster_id}\n self._update_obj(node_id, 'node', data)", "def cluster_updated(configuration, cluster_state):", "def update_cluster(self, cluster_id, values):", "def modify_cluster(ClusterId=None, StepConcurrencyLevel=None):\n pass", "def update_cluster(account_id: str, topic_iteration_id: str, cluster: int, x: float, y: float,\n conn: Connection) -> None:\n stmt = update(topic).where(\n and_(topic.c.account == account_id, topic.c.topic_iteration == topic_iteration_id)).values(\n cluster=cluster, x=x, y=y)\n conn.execute(stmt)", "def cli_cosmosdb_managed_cassandra_cluster_update(client,\r\n resource_group_name,\r\n cluster_name,\r\n tags=None,\r\n identity_type=None,\r\n client_certificates=None,\r\n external_gossip_certificates=None,\r\n external_seed_nodes=None,\r\n cassandra_version=None,\r\n authentication_method=None,\r\n hours_between_backups=None,\r\n repair_enabled=None):\r\n\r\n cluster_resource = client.get(resource_group_name, cluster_name)\r\n\r\n if client_certificates is None:\r\n client_certificates = cluster_resource.properties.client_certificates\r\n\r\n if external_gossip_certificates is not None:\r\n external_gossip_certificates = cluster_resource.properties.external_gossip_certificates\r\n\r\n if external_seed_nodes is None:\r\n external_seed_nodes = cluster_resource.properties.external_seed_nodes\r\n\r\n if cassandra_version is None:\r\n cassandra_version = cluster_resource.properties.cassandra_version\r\n\r\n if authentication_method is None:\r\n authentication_method = cluster_resource.properties.authentication_method\r\n\r\n if hours_between_backups is None:\r\n hours_between_backups = cluster_resource.properties.hours_between_backups\r\n\r\n if repair_enabled is None:\r\n repair_enabled = cluster_resource.properties.repair_enabled\r\n\r\n if tags is None:\r\n tags = cluster_resource.tags\r\n\r\n identity = cluster_resource.identity\r\n\r\n if identity_type is not None:\r\n identity = ManagedCassandraManagedServiceIdentity(type=identity_type)\r\n\r\n cluster_properties = ClusterResourceProperties(\r\n provisioning_state=cluster_resource.properties.provisioning_state,\r\n restore_from_backup_id=cluster_resource.properties.restore_from_backup_id,\r\n delegated_management_subnet_id=cluster_resource.properties.delegated_management_subnet_id,\r\n cassandra_version=cassandra_version,\r\n cluster_name_override=cluster_resource.properties.cluster_name_override,\r\n authentication_method=authentication_method,\r\n initial_cassandra_admin_password=cluster_resource.properties.initial_cassandra_admin_password,\r\n hours_between_backups=hours_between_backups,\r\n repair_enabled=repair_enabled,\r\n client_certificates=client_certificates,\r\n external_gossip_certificates=external_gossip_certificates,\r\n gossip_certificates=cluster_resource.properties.gossip_certificates,\r\n external_seed_nodes=cluster_resource.properties.external_seed_nodes,\r\n seed_nodes=cluster_resource.properties.seed_nodes\r\n )\r\n\r\n cluster_resource_create_update_parameters = ClusterResource(\r\n location=cluster_resource.location,\r\n tags=tags,\r\n identity=identity,\r\n properties=cluster_properties)\r\n\r\n return client.begin_create_update(resource_group_name, cluster_name, cluster_resource_create_update_parameters)", "def modify_cluster(self, cluster: str, cluster_config: Union[ClusterConfig, dict]) -> dict:\n uri = Settings.api_resources[\"Clusters\"][\"Modify a 
Cluster\"].format(GROUP_ID=self.atlas.group,\n CLUSTER_NAME=cluster)\n try:\n self.get_single_cluster_as_obj(cluster=cluster)\n except ErrAtlasNotFound:\n logger.error('Could not find existing cluster {}'.format(cluster))\n raise ValueError('Could not find existing cluster {}'.format(cluster))\n\n if type(cluster_config) == ClusterConfig:\n logger.warning(\"We received a full cluster_config, converting to dict\")\n try:\n new_config = cluster_config.as_modify_dict()\n except Exception as e:\n logger.error('Error while trying to parse the new configuration')\n raise e\n else:\n logger.warning(\"We received a simple dict for cluster config, sending without converting.\")\n new_config = cluster_config\n value_returned = self.atlas.network.patch(uri=Settings.BASE_URL + uri, payload=new_config)\n return value_returned", "def modify_cluster(self, cluster: str, cluster_config: Union[ClusterConfig, dict]) -> dict:\n uri = Settings.api_resources[\"Clusters\"][\"Modify a Cluster\"].format(GROUP_ID=self.atlas.group,\n CLUSTER_NAME=cluster)\n try:\n self.get_single_cluster_as_obj(cluster=cluster)\n except ErrAtlasNotFound as e:\n logger.error('Could not find existing cluster {}'.format(cluster))\n raise ValueError('Could not find existing cluster {}'.format(cluster))\n\n if type(cluster_config) == ClusterConfig:\n logger.warning(\"We recevied a full cluster_config, converting to dict\")\n try:\n new_config = cluster_config.as_modify_dict()\n except Exception as e:\n logger.error('Error while trying to parse the new configuration')\n raise e\n else:\n logger.warning(\"We received a simple dict for cluster config, sending without converting.\")\n new_config = cluster_config\n value_returned = self.atlas.network.patch(uri=Settings.BASE_URL + uri, payload=new_config)\n return value_returned", "def set_cluster(self, data):\n cluster = Cluster(data['name'])\n for host in data['hosts']:\n cluster.add_host(**host)\n self._cluster = cluster", "def db_cluster_update(token, status, cluster_id, master_IP='', state='', password='', error=''):\n try:\n user = UserInfo.objects.get(okeanos_token=token)\n cluster = ClusterInfo.objects.get(id=cluster_id)\n except ObjectDoesNotExist:\n msg = 'Cluster with given name does not exist in pending state'\n raise ObjectDoesNotExist(msg)\n if password:\n user.master_vm_password = u'The root password of \\\"{0}\\\"({1}) master VM is {2}'.format(cluster.cluster_name,cluster.id,password)\n if error:\n user.error_message = u'Cluster \\\"{0}\\\"({1}) creation failed due to error: {2}'.format(cluster.cluster_name,cluster.id, error)\n\n if status == \"Active\":\n cluster.cluster_status = const_cluster_status_active\n user.master_vm_password = ''\n user.error_message = ''\n\n elif status == \"Pending\":\n cluster.cluster_status = const_cluster_status_pending\n \n elif status == \"Failed\":\n cluster.cluster_status = const_cluster_status_failed\n\n elif status == \"Destroyed\":\n cluster.cluster_status = const_cluster_status_destroyed\n cluster.master_IP = ''\n cluster.state= 'Deleted'\n cluster.hadoop_status = const_hadoop_status_stopped\n\n if state:\n cluster.state = state\n if master_IP:\n cluster.master_IP = master_IP\n user.save()\n cluster.save()", "def cli_cosmosdb_managed_cassandra_cluster_update(client,\n resource_group_name,\n cluster_name,\n tags=None,\n identity_type=None,\n client_certificates=None,\n external_gossip_certificates=None,\n external_seed_nodes=None,\n cassandra_version=None,\n authentication_method=None,\n hours_between_backups=None,\n 
repair_enabled=None):\n\n cluster_resource = client.get(resource_group_name, cluster_name)\n\n if client_certificates is None:\n client_certificates = cluster_resource.properties.client_certificates\n\n if external_gossip_certificates is None:\n external_gossip_certificates = cluster_resource.properties.external_gossip_certificates\n\n if external_seed_nodes is None:\n external_seed_nodes = cluster_resource.properties.external_seed_nodes\n\n if cassandra_version is None:\n cassandra_version = cluster_resource.properties.cassandra_version\n\n if authentication_method is None:\n authentication_method = cluster_resource.properties.authentication_method\n\n if hours_between_backups is None:\n hours_between_backups = cluster_resource.properties.hours_between_backups\n\n if repair_enabled is None:\n repair_enabled = cluster_resource.properties.repair_enabled\n\n if tags is None:\n tags = cluster_resource.tags\n\n identity = cluster_resource.identity\n\n if identity_type is not None:\n identity = ManagedCassandraManagedServiceIdentity(type=identity_type)\n\n cluster_properties = ClusterResourceProperties(\n provisioning_state=cluster_resource.properties.provisioning_state,\n restore_from_backup_id=cluster_resource.properties.restore_from_backup_id,\n delegated_management_subnet_id=cluster_resource.properties.delegated_management_subnet_id,\n cassandra_version=cassandra_version,\n cluster_name_override=cluster_resource.properties.cluster_name_override,\n authentication_method=authentication_method,\n initial_cassandra_admin_password=cluster_resource.properties.initial_cassandra_admin_password,\n hours_between_backups=hours_between_backups,\n repair_enabled=repair_enabled,\n client_certificates=client_certificates,\n external_gossip_certificates=external_gossip_certificates,\n gossip_certificates=cluster_resource.properties.gossip_certificates,\n external_seed_nodes=external_seed_nodes,\n seed_nodes=cluster_resource.properties.seed_nodes\n )\n\n cluster_resource_create_update_parameters = ClusterResource(\n location=cluster_resource.location,\n tags=tags,\n identity=identity,\n properties=cluster_properties)\n\n return client.begin_create_update(resource_group_name, cluster_name, cluster_resource_create_update_parameters)", "async def do_start_cluster(self, cluster):\n raise NotImplementedError", "def update_cluster(self, model_data, **kwargs):\n\n url = self._make_url(\"/v1/cluster/{0}/\".format(model_data[\"id\"]))\n return self._session.put(url, json=model_data, **kwargs)", "def process_cluster(self, cluster):\n raise NotImplementedError", "def resume_cluster():\n log.info(\"Loading info from the IaaS\")\n global nodes, seeds, stash\n if not isfile(save_file):\n log.info(\"No existing created cluster\")\n return\n saved_cluster = loads(open(save_file, 'r').read())\n saved_nodes = list(set(saved_cluster['nodes']))\n saved_seeds = list(set(saved_cluster['seeds']))\n saved_stash = list(set(saved_cluster['stash']))\n nodes[:] = []\n seeds[:] = []\n\n in_nodes = Node.get_all_nodes(check_active=True)\n #check that all saved nodes actually exist\n for n in saved_nodes:\n if n not in [i.name for i in in_nodes]:\n log.error(\"node %s does actually exist in the cloud, re-create the cluster\" % n)\n remove(save_file)\n exit(-1)\n for n in in_nodes:\n if n.name not in saved_nodes+saved_seeds:\n if n.name in saved_stash:\n stash.append(n)\n if \"orchestrator\" in n.name:\n global orchestrator\n orchestrator = n\n continue\n else:\n if n.type == \"seed\":\n seeds.append(n)\n elif n.type == \"node\": 
nodes.append(n)\n #sort nodes by name\n nodes.sort(key=lambda x: x.name)\n stash.sort(key=lambda x: x.name)", "def set_start_cluster(self, start_cluster: int) -> None:\r\n self.start_cluster = start_cluster", "def update_clusters(self):\n num_ratings = Rating.objects.count()\n \n if self.eligible_to_update(num_ratings):\n ratings_matrix, num_users, all_user_names = \\\n self.construct_ratings_matrix()\n\n k_clusters = int(num_users / 10) + 2 # \"Magical numbers that \n # work the best\"\n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=k_clusters)\n clusters = kmeans.fit(ratings_matrix.tocsr()) # Read sklearn\n # docs to read why tocsr() used. THE MAIN KMEANS CLUSTERING\n\n # Updating the clusters\n Cluster.objects.all().delete()\n new_clusters = {i: Cluster(name=i) for i in range(k_clusters)}\n for cluster in new_clusters.values():\n cluster.save()\n for i, cluster_label in enumerate(clusters.labels_):\n # Add the new users to clusters\n new_clusters[cluster_label].users.add(\n User.objects.get(username=all_user_names[i])\n )", "def _load_cluster_info(cluster, user):\n if 'server' in cluster:\n configuration.host = cluster['server']\n if configuration.host.startswith(\"https\"):\n configuration.ssl_ca_cert = _file_from_file_or_data(\n cluster, 'certificate-authority')\n configuration.cert_file = _file_from_file_or_data(\n user, 'client-certificate')\n configuration.key_file = _file_from_file_or_data(\n user, 'client-key')", "def select_cluster(self, clusters):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs `runners` in parallel threads. Returns when all of them finish.
def StartRunners(self, runners):
    threads = []
    tf.logging.info('Starting runners')
    for runner in runners:
        t = threading.Thread(target=runner.Start)
        t.daemon = True
        t.start()
        threads.append(t)
        tf.logging.info('Total num runner.enqueue_ops: %d', len(runner.enqueue_ops))
        for enqueue_op in runner.enqueue_ops:

            def StartEnqueue(runner, op):
                tf.logging.info('Starting enqueue op %s', op.name)
                return lambda: runner.StartEnqueueOp(op)

            tq = threading.Thread(target=StartEnqueue(runner, enqueue_op))
            tq.start()
            threads.append(tq)
    tf.logging.info('Waiting for runners to finish...')
    for t in threads:
        while True:
            t.join(1)
            if not t.isAlive():
                break
    tf.logging.info('All runners done.')
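A generic sketch of the same start-all-then-join pattern, with a made-up FakeRunner and plain prints standing in for the framework's runner objects and tf.logging.

# Generic sketch of the fan-out/join pattern in StartRunners above.
# FakeRunner is an illustrative stand-in, not the framework's runner class.
import threading
import time

class FakeRunner:
    def Start(self):
        time.sleep(0.1)  # pretend to do work

def start_runners(runners):
    threads = []
    for runner in runners:
        t = threading.Thread(target=runner.Start)
        t.daemon = True
        t.start()
        threads.append(t)
    for t in threads:
        while t.is_alive():
            t.join(1)  # poll-join so the main thread stays responsive
    print('All runners done.')

start_runners([FakeRunner() for _ in range(3)])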
[ "def do_parallel(runs, func, use_threads):\n if use_threads:\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=MAX_TH_DEG)\n else:\n executor = concurrent.futures.ProcessPoolExecutor(max_workers=MAX_PR_DEG)\n\n done = []\n\n first = runs[0]\n is_iterable = isinstance(first, list) or isinstance(first, tuple)\n\n with executor:\n jobs = {}\n runs_left = len(runs)\n runs_iter = iter(runs)\n\n while runs_left:\n for run in runs_iter:\n if is_iterable:\n future = executor.submit(func, *run)\n else:\n future = executor.submit(func, run)\n\n jobs[future] = run\n if len(jobs) > MAX_JOB_NUMBER:\n break\n\n for future in concurrent.futures.as_completed(jobs):\n runs_left -= 1\n result = future.result()\n run = jobs[future]\n del jobs[future]\n done.append(result)\n break\n\n return done", "def _TearDownRunners(runners, timeout=None):\n threads = reraiser_thread.ReraiserThreadGroup(\n [reraiser_thread.ReraiserThread(r.TearDown, name=r.device_serial[-4:])\n for r in runners])\n threads.StartAll()\n threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))", "def run_multiple(self, trials) -> Dict[int, Dict[str, Any]]:\n results = {}\n with concurrent.futures.ThreadPoolExecutor() as executor:\n trial_runs = {executor.submit(self.run, trial=trial): trial.index for trial in trials}\n for future in concurrent.futures.as_completed(trial_runs):\n trial_index = trial_runs[future]\n try:\n results[trial_index] = future.result()\n except Exception as e:\n logger.exception(f\"Error completing run because of {e}!\")\n raise\n concurrent.futures.wait(trial_runs)\n\n return results", "def run():\n # Avoid circular dependencies\n from vjezd import crit_exit, exit\n from vjezd import device as this_device\n from vjezd.threads.print import PrintThread\n from vjezd.threads.scan import ScanThread\n\n if 'print' in this_device.modes:\n threads.append(PrintThread())\n if 'scan' in this_device.modes:\n threads.append(ScanThread())\n\n for t in threads:\n logger.debug('Starting thread {}'.format(t.name))\n t.start()\n\n while not exiting:\n # Check if all threads are still active\n for t in threads:\n logger.debug('Monitoring threads')\n if not t.is_alive():\n logger.critical('Thread {} is not alive. Exiting'.format(\n t.name))\n crit_exit(10, force_thread=True)\n time.sleep(1)\n\n logger.info('Waiting for all threads to join')\n for t in threads:\n t.join()\n\n # Exit depending on exiting state\n if exiting == CRIT_EXITING:\n crit_exit(10)\n else:\n exit()", "def threading_test_runner(num_threads, test_work_items):\n\n # Initialize our global state.\n initialize_global_vars_threading(num_threads, test_work_items)\n\n # Create jobs.\n job_queue = queue.Queue()\n for test_work_item in test_work_items:\n job_queue.put(test_work_item)\n\n result_queue = queue.Queue()\n\n # Create queues for started child pids. Terminating\n # the threading threads does not terminate the\n # child processes they spawn.\n inferior_pid_events = queue.Queue()\n\n # Create workers. 
We don't use multiprocessing.pool.ThreadedPool\n # due to challenges with handling ^C keyboard interrupts.\n workers = []\n for _ in range(num_threads):\n worker = threading.Thread(\n target=process_dir_worker_threading,\n args=(job_queue,\n result_queue,\n inferior_pid_events))\n worker.start()\n workers.append(worker)\n\n # Main loop: wait for all workers to finish and wait for\n # the socket handlers to wrap up.\n ctrl_c_loop(\n # Main operation of loop\n lambda: pump_workers_and_asyncore_map(\n workers, RUNNER_PROCESS_ASYNC_MAP),\n\n # Return True when we're done with the main loop.\n lambda: workers_and_async_done(workers, RUNNER_PROCESS_ASYNC_MAP),\n\n # Indicate what we do when we receive one or more Ctrl-Cs.\n lambda ctrl_c_count: handle_ctrl_c(\n ctrl_c_count, job_queue, workers, inferior_pid_events,\n kill_all_worker_threads))\n\n # Reap the test results.\n test_results = []\n while not result_queue.empty():\n test_results.append(result_queue.get(block=False))\n return test_results", "def RunGeneratedTestsMultiThread(self, test_func, settings, args,\n name_func):\n n_workers = self.number_of_threads\n\n if n_workers < 0:\n logging.error('invalid setting for number of threads: < 0.')\n n_workers = 1\n\n # Include filter is not empty; Run in sequential.\n if self.test_filter.include_filter:\n n_workers = 1\n\n # Number of thread is set to 0 (automatic)\n if not n_workers:\n n_workers = self._shell_env.GetDeviceNumberOfPresentCpu()\n logging.info('Number of CPU available on device: %i', n_workers)\n\n # Skip multithread version if only 1 worker available\n if n_workers == 1:\n return self.runGeneratedTests(\n test_func=test_func,\n settings=settings,\n args=args,\n name_func=name_func)\n\n settings_multithread = []\n settings_singlethread = []\n for test_case in settings:\n if (test_case.is_staging or test_case.testsuite in\n ltp_configs.TEST_SUITES_REQUIRE_SINGLE_THREAD_MODE):\n settings_singlethread.append(test_case)\n else:\n settings_multithread.append(test_case)\n\n failed_tests = self.runGeneratedTests(\n test_func=test_func,\n settings=settings_singlethread,\n args=args,\n name_func=name_func)\n\n # Shuffle the tests to reduce resource competition probability\n random.seed(RANDOM_SEED)\n random.shuffle(settings_multithread)\n\n # Create a queue for thread workers to pull tasks\n q = queue.Queue()\n map(q.put, settings_multithread)\n\n # Create individual shell sessions for thread workers\n for i in xrange(n_workers):\n self._dut.shell.InvokeTerminal(\"shell_thread_{}\".format(i))\n\n failed_multithread_tests = set()\n with futures.ThreadPoolExecutor(max_workers=n_workers) as executor:\n fs = [\n executor.submit(self.RunLtpWorker, q, args, name_func, i)\n for i in xrange(n_workers)\n ]\n\n failed_test_sets = map(futures.Future.result, fs)\n for failed_test_set in failed_test_sets:\n for test_case in failed_test_set:\n failed_multithread_tests.add(test_case)\n\n for test_case in failed_multithread_tests:\n logging.info(\n \"Test case %s failed during multi-thread run, rerunning...\",\n test_case)\n\n # In the end, rerun all failed tests to confirm their failure\n # in sequential.\n failed_tests.extend(\n self.runGeneratedTests(\n test_func=test_func,\n settings=failed_multithread_tests,\n args=args,\n name_func=name_func))\n\n return failed_tests", "def run_parallel(self, job_list, auto_resubmit=False, tries=5, delay=60,\n raise_on_error=False):\n pass # TODO", "def run_spiders(spiders_to_run, settings, kwargs):\n print(\"starting crawl task with arguments %s\" % str(kwargs))\n 
runner = CrawlerRunner(settings)\n for spider in spiders_to_run:\n runner.crawl(spider, stop_after_crawl=False, **kwargs)\n # what to do once crawling is over\n d = runner.join()\n d.addBoth(lambda _: check_for_task())", "def main(self) -> list:\r\n\r\n for thread in range(self.threads):\r\n t = threading.Thread(target=self.threader)\r\n t.daemon = True\r\n t.start()\r\n\r\n for curr in self.hosts:\r\n self.q.put(curr)\r\n\r\n self.q.join()\r\n\r\n return self.res", "def _CreateRunners(runner_factory, devices, timeout=None):\n logging.warning('Creating %s test %s.', len(devices),\n 'runners' if len(devices) != 1 else 'runner')\n runners = []\n counter = _ThreadSafeCounter()\n threads = reraiser_thread.ReraiserThreadGroup(\n [reraiser_thread.ReraiserThread(_SetUp,\n [runner_factory, d, runners, counter],\n name=str(d)[-4:])\n for d in devices])\n threads.StartAll()\n threads.JoinAll(watchdog_timer.WatchdogTimer(timeout))\n return runners", "def cleanup(self):\n r = []\n for runner in self.runners:\n if runner.queue.empty(): r.append(runner)\n if not r: return\n for runner in r: runner.stop()\n for runner in r:\n try: self.runners.remove(runner)\n except ValueError: pass\n logging.debug(\"%s - cleaned %s\" % (self.name, [item.name for item in r]))\n logging.debug(\"%s - now running: %s\" % (self.name, self.size()))", "def generate_multi_task_threads(proxies, checker, goods, func=check_list_of_proxies):\n t_list = []\n for i, proxi_list_for_thread in enumerate(proxies):\n t = threading.Thread(target=func, name=f\"Multitask thread # {i}\", args=(proxi_list_for_thread, checker, goods))\n t_list.append(t)\n return t_list", "def terminate_processes(runner):\n try:\n yield\n finally:\n runner.terminate()\n runner.join()", "def join(self):\n for thread in self.threads:\n while 1:\n thread.join(1)\n if not thread.isAlive():\n break", "def run_on_workers(self, command, wait=True):\n tasks = [self.thread_pool.submit(self.run_on_node, worker, command) \\\n for worker in self.public_ips[1:]]\n if wait:\n while not all([i.done() for i in tasks]):\n continue\n return [i.result() for i in tasks]\n return tasks", "def start_all_peers(self):\n for t in self.peers.keys():\n for p in self.peers[t]:\n p.start_all_runners()", "def _wait_workers(self):\n self.client = get_client(self.master_address)\n logging.debug(\"client scheduler info: {}\".format(self.client.scheduler_info()))\n if int(self.world_size) <= 1:\n self.worker_portion = 1\n worker_count_min = int(self.world_size * self.worker_portion)\n\n for _ in range(100):\n time.sleep(1)\n n_workers = len(self.client.scheduler_info()[\"workers\"])\n logging.info(\"Accessed Workers: {}\".format(n_workers))\n if n_workers >= worker_count_min:\n workers = self.client.scheduler_info()[\"workers\"]\n workers_list = []\n workers_port = {}\n for k, _ in workers.items():\n workers_list.append(k)\n (ip, port) = k.replace(\"//\", \"\").split(\":\")[1:]\n if ip in workers_port:\n workers_port[ip].append(port)\n else:\n workers_port[ip] = [port]\n os.environ[\"vega_workers_list\"] = json.dumps(workers_port)\n logging.info(\"worker list: {}\".format(workers_list))\n slave_ips = list(set([item[6:].split(\":\")[0] for item in workers_list]))\n slave_ips.remove(General.cluster.master_ip)\n General.cluster.salves = slave_ips\n return 1\n return 0", "def multiple_threads_handler(threads_count=3):\n threads = []\n results = []\n for i in range(threads_count):\n thread = Thread(target = _send_get_request, args = (\"http://google.com\", results))\n thread.start()\n 
threads.append(thread)\n for thread in threads:\n thread.join()\n assert len(results) == threads_count", "def _start_all(self):\n for thread in self.threads:\n thread.start()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates the inference graphs for a given model.
def WriteInferenceGraph(self):
    inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
    tf.gfile.MakeDirs(inference_graph_dir)
    tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)
    cfg = self.model_registry.GetParams(self._model_name, 'Test')
    if (issubclass(cfg.cls, base_model.MultiTaskModel) and
            not FLAGS.model_task_name):
        tf.logging.info('Cannot write inference graphs for multi-task model '
                        'when model_task_name is not specified.')
        return
    try:
        filename_prefix = 'inference'
        if FLAGS.model_task_name:
            filename_prefix = '%s_inference' % FLAGS.model_task_name
        filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
        # Standard inference graph.
        self.inference_graph_exporter.InferenceGraphExporter.Export(
            model_cfg=cfg,
            model_task_name=FLAGS.model_task_name,
            export_path=filename_prefix + '.pbtxt')
    except NotImplementedError as e:
        tf.logging.error('Cannot write inference graph: %s', e)

    # TPU inference graph. Not all models support it so fail silently.
    try:
        self.inference_graph_exporter.InferenceGraphExporter.Export(
            model_cfg=cfg,
            model_task_name=FLAGS.model_task_name,
            device_options=self.inference_graph_exporter.InferenceDeviceOptions(
                device='tpu',
                retain_device_placement=False,
                var_options='ON_DEVICE',
                gen_init_op=True,
                dtype_override=None),
            export_path=filename_prefix + '_tpu.pbtxt')
    except Exception as e:  # pylint: disable=broad-except
        tf.logging.info('Error exporting TPU inference graph: %s' % e)
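A sketch of the export-with-fallback structure used above, with a stub export callable in place of the framework's InferenceGraphExporter; the paths and the stub are assumptions for illustration only.

# Sketch of the directory/prefix handling and best-effort TPU export above.
# export_fn is a stub standing in for the framework exporter.
import os

def export_inference_graphs(logdir, model_task_name=None, export_fn=None):
    inference_graph_dir = os.path.join(logdir, 'inference_graphs')
    os.makedirs(inference_graph_dir, exist_ok=True)
    prefix = '%s_inference' % model_task_name if model_task_name else 'inference'
    prefix = os.path.join(inference_graph_dir, prefix)
    try:
        export_fn(prefix + '.pbtxt')          # standard inference graph
    except NotImplementedError as e:
        print('Cannot write inference graph: %s' % e)
    try:
        export_fn(prefix + '_tpu.pbtxt')      # TPU graph, fail silently
    except Exception as e:
        print('Error exporting TPU inference graph: %s' % e)

export_inference_graphs('/tmp/logdir',
                        export_fn=lambda path: open(path, 'w').close())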
[ "def infer_model():\n # Setup training/testing environment\n setup_env()\n # Construct the model\n model = setup_model()\n # Load model weights\n cp.load_checkpoint(cfg.TEST.WEIGHTS, model)\n logger.info(\"Loaded model weights from: {}\".format(cfg.TEST.WEIGHTS))\n # Create data loaders and meters\n test_loader = data_loader.construct_test_loader()\n test_meter = meters.TestMeter(len(test_loader))\n filename = cfg.OUT_DIR + 'predict.txt'\n loss_fun = builders.build_loss_fun(\"cross_entropy\", \"none\").cuda()\n # Evaluate the model\n infer_epoch(test_loader, model, test_meter, 0, loss_fun, filename, writer=None)", "def inference(path, model_inf):\n inference_dataset = ImageDetectionDataset()\n inference_dataset.load_inference_classes()\n class_names = inference_dataset.get_class_names()\n\n define_path(path, model_inf, class_names)", "def export_model(self):\n mode = utils.INFER\n graph = tf.Graph()\n with graph.as_default():\n infer_model = self.build_export_model()\n infer_model.sess = tf.Session(config=self.session_conf)\n infer_model.saver = tf.train.Saver()\n\n model_path = self.get_model_path(mode)\n infer_model.saver.restore(infer_model.sess, save_path=model_path)\n\n to_saved_model(self.config, infer_model.sess, infer_model.export_inputs,\n infer_model.output_dict)", "def write_graph(self, model: k.Model):\n if model and self.is_write_graph:\n with self.writer.as_default(), summary_ops_v2.always_record_summaries():\n if not model.run_eagerly:\n summary_ops_v2.graph(get_graph(), step=0)\n\n summary_writable = (\n model._is_graph_network or # pylint: disable=protected-access\n model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access\n if summary_writable:\n summary_ops_v2.keras_model('keras', model, step=0)", "def prepare_for_inference(model: TModel) -> TPModel:", "def save_inference_model(model_path,\n epoch_id,\n feed_vars,\n fetch_vars,\n exe,\n prefix='rec_inference'):\n model_path = os.path.join(model_path, str(epoch_id))\n _mkdir_if_not_exist(model_path)\n model_prefix = os.path.join(model_path, prefix)\n paddle.static.save_inference_model(\n path_prefix=model_prefix,\n feed_vars=feed_vars,\n fetch_vars=fetch_vars,\n executor=exe)", "def inference(self, inputs, sess, mode):\n fetches = {}\n if mode == 'depth':\n fetches['depth'] = self.est_depth\n inputs_ph = self.inputs_depth\n if mode == 'egomotion':\n fetches['egomotion'] = self.est_egomotion\n inputs_ph = self.inputs_egomotion\n results = sess.run(fetches, feed_dict={inputs_ph: inputs})\n return results", "def process_graphs(args):\n os.makedirs(args.output_folder, exist_ok=True)\n\n for graph_type in args.graph_type:\n for graph_idx in range(args.num_graphs):\n seed = args.seed+graph_idx\n graph = create_graph(num_vars=args.num_vars,\n num_categs=args.num_categs,\n edge_prob=args.edge_prob,\n graph_type=graph_type,\n num_latents=args.num_latents,\n deterministic=args.deterministic,\n seed=seed)\n name = 'graph_%s_%i_%i' % (graph_type, args.num_vars, seed)\n if args.num_latents > 0:\n name += '_l%i' % (args.num_latents)\n export_graph(filename=os.path.join(args.output_folder, name),\n graph=graph,\n num_obs=args.num_obs,\n num_int=args.num_int)", "def inference(tasks, name, convnet_model, convnet_weight_path, input_patch_size,\n output_patch_size, output_patch_overlap, output_crop_margin, patch_num,\n num_output_channels, dtype, framework, batch_size, bump, mask_output_chunk,\n mask_myelin_threshold, input_chunk_name, output_chunk_name):\n with Inferencer(\n convnet_model,\n convnet_weight_path,\n 
input_patch_size=input_patch_size,\n output_patch_size=output_patch_size,\n num_output_channels=num_output_channels,\n output_patch_overlap=output_patch_overlap,\n output_crop_margin=output_crop_margin,\n patch_num=patch_num,\n framework=framework,\n dtype=dtype,\n batch_size=batch_size,\n bump=bump,\n mask_output_chunk=mask_output_chunk,\n mask_myelin_threshold=mask_myelin_threshold,\n dry_run=state['dry_run'],\n verbose=state['verbose']) as inferencer:\n \n state['operators'][name] = inferencer \n\n for task in tasks:\n handle_task_skip(task, name)\n if not task['skip']:\n if 'log' not in task:\n task['log'] = {'timer': {}}\n start = time()\n\n task[output_chunk_name] = state['operators'][name](\n task[input_chunk_name])\n\n task['log']['timer'][name] = time() - start\n task['log']['compute_device'] = state[\n 'operators'][name].compute_device\n yield task", "def run_inference(retrain_path, model_types=[], all_lambdas=[], feature_group=False, sequential=False):\n for config in os.listdir(retrain_path):\n config_dir = os.path.join(retrain_path, config)\n if not os.path.isdir(config_dir):\n continue\n if 'bottleneck' in config:\n model_type = 'bottleneck'\n elif 'end2end' in config:\n model_type = 'end2end'\n elif 'use_attr' in config and 'onlyAttr' not in config:\n model_type = 'multitask'\n elif 'onlyAttr' not in config:\n model_type = 'simple_finetune'\n else:\n model_type = 'onlyAttr'\n if model_types and model_type not in model_types:\n continue\n all_val_acc = find_best_perf(os.path.join(config_dir, 'log.txt'))\n epoch = all_val_acc.index(max(all_val_acc))\n #epoch = round(epoch, -1) - 20\n if epoch < 0:\n print(config_dir, ' has not started training')\n print(epoch, '\\t', config)\n model_path = os.path.join(config_dir, '%d_model.pth' % epoch)\n if 'attr_loss_weight' in model_path:\n lambda_val = float(re.findall(r\"attr_loss_weight_\\d*\\.\\d+\", config_dir)[0].split('_')[-1])\n else:\n lambda_val = 1\n if any([t in model_types for t in ['multitask', 'end2end']]) and (all_lambdas and lambda_val not in all_lambdas):\n continue\n if 'NEW_SIGMOID_MODEL' in retrain_path or 'NEW_MODEL' in retrain_path:\n command = 'python inference_sigmoid.py -model_dir %s -eval_data test' % model_path\n else:\n command = 'python inference.py -model_dir %s -eval_data test' % model_path\n if feature_group:\n command += ' -feature_group_results' \n if 'use_attr' in model_path:\n command += ' -use_attr -n_attributes 112 -data_dir class_attr_data_10'\n if 'onlyAttr' in model_path:\n continue\n if 'bottleneck' in model_path:\n def find_onlyAttr_dir(retrain_path, model_path):\n if 'few_shots' in retrain_path:\n n_shots = re.findall(r\"\\d+_shot\", model_path)[0]\n if sequential:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr_Ahat' in c and n_shots in c][0]\n else:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr' in c and 'onlyAttr_Ahat' not in c and n_shots in c][0] \n else: \n if sequential:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr_Ahat' in c][0]\n else:\n dir_name = [c for c in os.listdir(retrain_path) if 'onlyAttr' in c and 'onlyAttr_Ahat' not in c][0]\n return os.path.join(retrain_path, dir_name)\n\n onlyAttr_dir = find_onlyAttr_dir(retrain_path, model_path)\n val_acc = find_best_perf(os.path.join(onlyAttr_dir, 'log.txt'))\n model2_path = os.path.join(onlyAttr_dir, '%d_model.pth' % (val_acc.index(max(val_acc))))\n config_dir = os.path.join(retrain_path, config)\n command += (' -model_dir2 %s -bottleneck' % model2_path)\n if 'onlyAttr_Ahat' not in 
model2_path:\n command += ' -use_sigmoid'\n if 'adversarial' in retrain_path:\n command += ' -image_dir CUB_adversarial/CUB_fixed/test/'\n subprocess.run([command])\n #TODO: write test inference results to a separate folder", "def _create_graph(self, model: dict) -> Graph:\n\n\t\tgraph = Graph()\n\n\t\tkeys = list(model.keys())\n\n\t\tfor idx, pos in enumerate(keys):\n\t\t\tnode = Node(str(pos), name = str(pos), mlayout = pos[0], nlayout = pos[1])\n\t\t\tgraph.add_node(node)\n\n\t\tfor idx1, pos1 in enumerate(keys):\n\t\t\tnode1 = graph.get_node_by_nid(str(pos1))\n\t\t\tfor idx2, tup in enumerate(model[pos1]):\n\t\t\t\tpos2, _, cost = tup\n\t\t\t\tnode2 = graph.get_node_by_nid(str(pos2))\n\t\t\t\tedge = Edge(node1, node2, directed = False, weight = cost, pheromone_level = 0.0)\n\t\t\t\tgraph.add_edge(edge)\n\n\t\treturn graph", "def launch_inference(self):\n\n self.logger.info('Beginning to submit inference tasks')\n # Make a folder for the models\n model_folder = self.output_dir.joinpath('models')\n model_folder.mkdir(exist_ok=True)\n \n # Submit the chunks to the workflow engine\n for mid in range(len(self.mpnns)):\n # Get a model that is ready for inference\n model = self.ready_models.get()\n \n # Convert it to a pickle-able message\n model_msg = MPNNMessage(model)\n \n # Proxy it once, to be used by all inference tasks\n model_msg_proxy = ps.store.get_store(self.ps_names['infer']).proxy(model_msg, key=f'model-{mid}-{self.inference_batch}')\n \n # Run inference with all segements available\n for cid, (chunk, chunk_msg) in enumerate(zip(self.inference_chunks, self.inference_proxies)):\n self.queues.send_inputs([model_msg_proxy], chunk_msg,\n topic='infer', method='evaluate_mpnn',\n keep_inputs=False,\n task_info={'chunk_id': cid, 'chunk_size': len(chunk), 'model_id': mid})\n self.logger.info('Finished submitting molecules for inference')", "def Inference(self):\n subgraphs = {}\n with tf.name_scope('inference'):\n subgraphs['default'] = self._InferenceSubgraph_Default()\n subgraphs['rnn_step'] = self._InferenceSubgraph_RNNStep()\n return subgraphs", "def inference(imu_data, model_path=\"model_1\"):\n fs_imu = 100\n labels=np.zeros(len(imu_data))\n clean_x,clean_y=clean_datset([imu_data], [labels], fs_imu)\n dataset_feats=featurize_samples(clean_x, fs_imu)\n dataset_feats=np.array(dataset_feats[0]).reshape(1,-1)\n clean_y = np.ravel(clean_y)\n reg_model = load_model(model_path)\n samples_pred = reg_model.predict(dataset_feats)", "def inference(self, dataset, model, config=None, **kwargs):\n # Prepare parameters\n config = config or {}\n config = Config({**self.config['common'], **self.config['inference'], **config, **kwargs})\n orientation = config.pop('orientation')\n self.log(f'Starting {orientation} inference')\n\n # Log: pipeline_config to a file\n self.log_to_file(pformat(config.config, depth=2), '末 inference_config.txt')\n\n # Start resource tracking\n if self.monitor:\n monitor = Monitor(['uss', 'gpu', 'gpu_memory'], frequency=0.5, gpu_list=self.gpu_list)\n monitor.__enter__()\n\n horizons = []\n largest = []\n\n start_time = perf_counter()\n for letter in orientation:\n horizons_ = self._inference(dataset=dataset, model=model,\n orientation=letter, config=config)\n self.log(f'Done {letter}-inference')\n horizons.extend(horizons_)\n largest.append(horizons_[0])\n elapsed = perf_counter() - start_time\n\n # Compare two largest horizons from each orientation\n if len(orientation) == 2:\n with open(self.make_savepath('inference_ix', 'results.txt'), 'w', encoding='utf-8') as 
result_txt:\n hm = HorizonMetrics(largest)\n hm.evaluate('compare', hist=False,\n plot=True, show=self.plot,\n printer=lambda msg: print(msg, file=result_txt),\n savepath=self.make_savepath('inference_ix', 'l1.png'))\n\n # Merge all the predictions\n horizons = Horizon.merge_list(horizons, minsize=1000, mean_threshold=0.5, adjacency=1)\n self.log(f'Inference done in {elapsed:4.1f}')\n\n # Log: resource graphs\n if self.monitor:\n monitor.__exit__(None, None, None)\n monitor.visualize(savepath=self.make_savepath('末 inference_resource.png'), show=self.plot)\n\n # Log: lengths of predictions\n if horizons:\n horizons.sort(key=len, reverse=True)\n self.log(f'Num of predicted horizons: {len(horizons)}')\n self.log(f'Total number of points in all of the horizons {sum(len(item) for item in horizons)}')\n self.log(f'Len max: {len(horizons[0])}')\n else:\n self.log('Zero horizons were predicted; possible problems..?')\n\n self.inference_log = {\n 'elapsed': elapsed,\n }\n return horizons", "def write_model_graph(sess, dest_path='/tmp', output_model_graph_name='model_graph.pbtxt'):\n\n tf.train.write_graph(sess.graph, dest_path, output_model_graph_name)", "def make_dvip_graph(model_config, reparam, parameterisation_type='exp'):\n\n tf.reset_default_graph()\n\n results = collections.OrderedDict()\n\n _, insightful_parametrisation, _ = ed_transforms.make_learnable_parametrisation(\n learnable_parameters=reparam, parameterisation_type=parameterisation_type)\n\n def model_vip(*params):\n with ed.interception(insightful_parametrisation):\n return model_config.model(*params)\n\n if model_config.bijectors_fn is not None:\n model_vip = ed_transforms.transform_with_bijectors(\n model_vip, model_config.bijectors_fn)\n\n log_joint_vip = ed.make_log_joint_fn(model_vip) # log_joint_fn\n\n with ed.tape() as model_tape:\n _ = model_vip(*model_config.model_args)\n\n target_vip_kwargs = {}\n for param in model_tape.keys():\n if param in model_config.observed_data.keys():\n target_vip_kwargs[param] = model_config.observed_data[param]\n\n def target_vip(*param_args): # latent_log_joint_fn\n i = 0\n for param in model_tape.keys():\n if param not in model_config.observed_data.keys():\n target_vip_kwargs[param] = param_args[i]\n i = i + 1\n return log_joint_vip(*model_config.model_args, **target_vip_kwargs)\n\n elbo, variational_parameters = util.get_mean_field_elbo(\n model_vip,\n target_vip,\n num_mc_samples=FLAGS.num_mc_samples,\n model_args=model_config.model_args,\n model_obs_kwargs=model_config.observed_data,\n vi_kwargs={'parameterisation': reparam})\n\n return target_vip, model_vip, elbo, variational_parameters, None", "def load_inference_model(dirname, executor, load_file_name=None):\n if not os.path.isdir(dirname):\n raise ValueError(\"There is no directory named '%s'\", dirname)\n\n model_file_name = dirname + \"/__model__\"\n with open(model_file_name, \"rb\") as f:\n program_desc_str = f.read()\n\n program = Program.parse_from_string(program_desc_str)\n load_persistables(executor, dirname, program, load_file_name)\n\n feed_target_names = get_feed_targets_names(program)\n fetch_target_names = get_fetch_targets_names(program)\n fetch_targets = [\n program.global_block().var(name) for name in fetch_target_names\n ]\n\n return [program, feed_target_names, fetch_targets]", "def generate_tf_model(graph):\n\n # generate tensorflow model and export to out_file\n\n # with __dict__ we can see the content of the class\n logging.debug(graph.__dict__)\n\n # model_spec contains some info about the model\n for key, value 
in graph.model_spec.items():\n logging.debug(key)\n logging.debug(value)\n\n network_name = graph.model_spec['name']\n\n filename = get_database( 'benchmark', 'graphs' ,'tf2', network_name+'.pb')\n logging.debug(\"Stored to: %s\" % filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list starting with the given context's parent followed by each of its parents till we reach the object.
def getParentsFromContextToObject(context, obj):
    if sameProxiedObjects(context, obj):
        return []
    parents = []
    w = context
    while 1:
        w = w.__parent__
        if sameProxiedObjects(w, obj):
            parents.append(w)
            break
        if w is None:
            break
        parents.append(w)
    return parents
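A small self-contained sketch of the same parent-chain walk, using identity comparison in place of sameProxiedObjects and SimpleNamespace objects as stand-ins for real content objects; both substitutions are assumptions.

# Sketch of the __parent__ chain walk above. Identity comparison replaces
# sameProxiedObjects; the three objects are illustrative stand-ins.
from types import SimpleNamespace

root = SimpleNamespace(__parent__=None, name='root')
folder = SimpleNamespace(__parent__=root, name='folder')
item = SimpleNamespace(__parent__=folder, name='item')

def parents_from_context_to_object(context, obj):
    if context is obj:
        return []
    parents, w = [], context
    while True:
        w = w.__parent__
        if w is obj:
            parents.append(w)
            break
        if w is None:
            break
        parents.append(w)
    return parents

print([p.name for p in parents_from_context_to_object(item, root)])
# ['folder', 'root']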
[ "def walk_parents(self):\n active = self.parent_datasets[:]\n while active:\n d = active.pop()\n yield d\n active += d.parent_datasets", "def get_parents(self):\n return []", "def iter_parents(content: IResource) -> typing.Iterator[IResource]:\n content = getattr(content, '__parent__', None)\n while content is not None:\n yield content\n content = getattr(content, '__parent__', None)", "def iter_parents(node):\n parent = node.parent\n\n while parent:\n yield parent\n\n parent = parent.parent", "def get_parent_paths(self, depth=None, hints=None):\n #pylint:disable=too-many-nested-blocks\n if depth is not None and depth == 0:\n return [[self]]\n results = []\n parents = PageElement.objects.filter(\n pk__in=RelationShip.objects.filter(\n dest_element=self).values('orig_element_id'))\n if not parents:\n return [[self]]\n if hints:\n for parent in parents:\n if parent.slug == hints[-1]:\n # we found a way to cut the search space early.\n parents = [parent]\n hints = hints[:-1]\n break\n for parent in parents:\n grandparents = parent.get_parent_paths(\n depth=(depth - 1) if depth is not None else None,\n hints=hints)\n if grandparents:\n for grandparent in grandparents:\n term_index = 0\n if hints:\n for node in grandparent:\n if node.slug == hints[term_index]:\n term_index += 1\n if term_index >= len(hints):\n break\n if not hints or term_index >= len(hints):\n # we have not hints or we consumed all of them.\n results += [grandparent + [self]]\n return results", "def parents(self, term):\n for parent_term in term.is_a:\n yield self[parent_term]\n for grand_parent in self.parents(self[parent_term]):\n yield grand_parent", "def parents(self, host):\n return list(self.iter_parents(host))", "def parent_names(self) -> List[str]:\n return [t.name for t in self.parents]", "def get_parents_recursive( self, item ):\n\n\t\tparents = [ ]\n\n\t\tparent = self.get_parent( item )\n\t\tif parent:\n\t\t\tparents.append( parent )\n\t\t\tparents.extend( self.get_parents_recursive( parent ) )\n\n\t\treturn parents", "def get_parents_list(self, block):\n if isinstance(block, str):\n block = self.blocks[block]\n parents = []\n current = block\n while True:\n if current == self.root_block:\n break\n parents.append(current.parent_block)\n current = current.parent_block\n parents.reverse()\n return parents", "def ancestors(self):\n stack = deque([self])\n parent = self.parent\n while parent:\n stack.appendleft(parent)\n parent = parent.parent\n return list(stack)", "def children(self):\r\n c = self.child\r\n while c:\r\n yield c\r\n c = c.nxt", "def all_parents(self, obj):\n # Check the memoization cache first.\n if obj in self.parent_cache:\n return self.parent_cache[obj]\n\n if not isinstance(obj, Expr):\n raise Error('%s must be an Expr.' 
% (obj,))\n var = expr('?x')\n query = expr('ISA')(obj, var)\n solutions = self.ask_all(query)\n parents = map(lambda b: b[var], solutions)\n self.parent_cache[obj] = parents\n return parents", "def children(self):\n query_filter = dict(project=self.project, position__gt=self.position)\n \n try:\n next_position = Task.objects.filter(indent=self.indent,\n **query_filter)[0].position\n except IndexError:\n next_position = None\n \n if next_position is not None:\n query_filter['position__lt'] = next_position\n \n query_filter['indent'] = (self.indent or 0) + 1\n \n return Task.objects.filter(**query_filter).all()", "def get_parents(self, id_):\n return # osid.id.IdList", "def parents(self, cached):\n\n data = []\n for motif in cached['motifs']:\n for parent in motif['parents']:\n data.append({\n 'ml_release_id': cached['release'],\n 'motif_id': motif['motif_id'],\n 'parent_ml_release_id': cached['parent'],\n 'parent_motif_id': parent['name']['full'],\n })\n return data", "def all_proper_children(self, obj):\n return self.all_children(obj)[1:]", "def get_children_of_folderish(context):\n brains = api.content.find(\n context=context,\n depth=1,\n sort_on='getObjPositionInParent'\n )\n results = [b.getObject() for b in brains]\n return results", "def get_parents(self, tag):\n families = self.get_parent_tags()\n try:\n parents = families[tag]\n for parent in parents:\n parents.extend(self.get_parents(parent))\n return parents\n except Exception:\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to perform on-device parallel ROT13 encrypt/decrypt by explicitly allocating device memory for host variables using gpuarray. Returns the decrypted string and the kernel execution time.
def devCipher(self, sentence): # create Event start = cuda.Event() end = cuda.Event() # Get kernel function func = self.mod.get_function("rot13") # Device memory allocation for input and output array(s) mem_size=len(sentence)*4 decrypted=np.empty_like(sentence) #changed here #sentence=np.array(list(sentence)) sentence=np.array(sentence) start.record() a=time.time() #d_sentence = cuda.mem_alloc(mem_size) #d_decrypted = cuda.mem_alloc(mem_size) #cuda.memcpy_htod(d_sentence, sentence) d_sentence = gpuarray.to_gpu(sentence) d_decrypted = gpuarray.to_gpu(decrypted) # Record execution time and execute operation. func(d_sentence, d_decrypted, block=(mem_size,1,1)) # Wait for the event to complete # Fetch result from device to host #cuda.memcpy_dtoh(decrypted, d_decrypted) decrypted = d_decrypted.get() #b=time.time() end.record() end.synchronize() b1=time.time() time_ = start.time_till(end)#milli seconds #print("time()before syn:",1000*(b-a),"time()after syn:",1000*(b1-a),"cuda event:",time) #print((b1-a)*1000,time_) # Convert output array back to string decrypted = str(decrypted) return decrypted, time_
[ "def random_cipher():\n return np.random.permutation(26)", "def _aes_encrypt_permutation(p):\n key_length = 24 # 192 Bit key size\n IV_LENGTH = 16\n BLOCK_SIZE = 16\n key = Random.get_random_bytes(key_length)\n iv = Random.get_random_bytes(IV_LENGTH)\n aes_obj = AES.new(key, AES.MODE_CBC, iv)\n\n message = p.tostring()\n len_padding = 0\n\n while len(message) % BLOCK_SIZE != 0:\n message += random.choice(string.ascii_letters).encode()\n len_padding += 1\n\n enc_params = [key, iv, len_padding]\n cipher_text = aes_obj.encrypt(message)\n\n return cipher_text, enc_params", "def advapi32_RtlEncryptMemory(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"Memory\", \"MemorySize\", \"OptionFlags\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def main():\n\n @dppy.kernel\n def atomic_add(a):\n dppy.atomic.add(a, 0, 1)\n\n global_size = 100\n a = np.array([0])\n\n try:\n d = dpctl.select_gpu_device()\n with dpctl.device_context(d):\n print(\"Offloading to ...\")\n d.print_device_info()\n atomic_add[global_size, dppy.DEFAULT_LOCAL_SIZE](a)\n # Expected 100, because global_size = 100\n print(a)\n except ValueError:\n print(\"No SYCL GPU found.\")", "def tvm_callback_cuda_compile(code):\n ptx = nvcc.compile_cuda(code, target=\"ptx\", arch='sm_52') # use old arch for this to work on old GPUs\n return ptx", "def accel():\n x,y,z = unpack('>hhh',i2c.readfrom_mem(0x68, 0x3B, 6)) \n\n x = x / 16384\n y = y / 16384\n z = z / 16384\n\n return x,y,z", "def cMultiplyVec():\n code_text=\"\"\"\n KERNEL void cMultiplyVec( GLOBAL_MEM float2 *a,\n GLOBAL_MEM float2 *b,\n GLOBAL_MEM float2 *dest)\n { const unsigned int i = get_global_id(0);\n dest[i].x = a[i].x*b[i].x-a[i].y*b[i].y;\n dest[i].y = a[i].x*b[i].y+a[i].y*b[i].x;\n //barrier(CLK_GLOBAL_MEM_FENCE); \n };\n \"\"\" \n return code_text", "def prepare_communication() -> str:\n decrypted_key = None\n\n print(\"[A ]: Node A started!\")\n print(\"[A ]: Operation Mode: \", sys.argv[1], '\\n')\n\n print(\"[A ]: Sending operation mode to B. Answer received:\")\n print(\"[B ]:\", send_message_request(\"B\", sys.argv[1]), '\\n')\n\n print(\"[A ]: Requesting key from KM. 
Answer received:\")\n encrypted_key = send_message_request(\"KM\", sys.argv[1])\n print(\"[KM]: Encrypted: \", encrypted_key, '\\n')\n decrypted_key = decrypt_message(encrypted_key, key_3, operation_mode=\"ECB\")\n print(\"[KM]: Decrypted: \", decrypted_key, '\\n')\n\n return decrypted_key", "def recover_key(diffs, attack_direction, plaintext, ciphertext):\n # Create a matrix of all possible keys.\n keys = np.zeros((256, 16), np.uint8)\n for first_byte_val in range(256):\n key = np.asarray([diffs[i] ^ first_byte_val for i in range(16)], np.uint8)\n if attack_direction == AttackDirection.OUTPUT:\n key = np.asarray(cwa.aes_funcs.key_schedule_rounds(key, 10, 0), np.uint8)\n keys[first_byte_val] = key\n # Encrypt the plaintext using all candidates in parallel.\n ciphertexts = scared.aes.base.encrypt(plaintext, keys)\n # Recover the key.\n key = keys[(ciphertexts == ciphertext).all(axis=1).nonzero()]\n if key.size > 0:\n return key\n return None", "def cTensorCopy():\n code_text = \"\"\"\n KERNEL void cTensorCopy(\n const unsigned int batch, \n const unsigned int dim,\n GLOBAL_MEM const unsigned int *Nd_elements,\n GLOBAL_MEM const unsigned int *Kd_elements,\n GLOBAL_MEM const float *invNd,\n GLOBAL_MEM const float2 *indata,\n GLOBAL_MEM float2 *outdata,\n const int direction)\n {\n \n const unsigned int gid=get_global_id(0); \n \n unsigned int curr_res = gid;\n unsigned int new_idx = 0;\n unsigned int group;\n \n for (unsigned int dimid =0; dimid < dim; dimid ++){\n group = (float)curr_res*invNd[dimid];\n new_idx += group * Kd_elements[dimid];\n curr_res = curr_res - group * Nd_elements[dimid];\n };\n \n if (direction == 1) {\n for (unsigned int bat=0; bat < batch; bat ++ )\n {\n outdata[new_idx*batch+bat]= indata[gid*batch+bat];\n }; \n };\n \n if (direction == -1) {\n for (unsigned int bat=0; bat < batch; bat ++ )\n {\n outdata[gid*batch+bat]= indata[new_idx*batch+bat];\n }; \n };\n \n \n };\n \"\"\"\n return code_text", "def encrypt_using_materialized_view(self):\n node = self.context.node\n key = f\"{'1' * 36}\"\n iv = f\"{'2' * 16}\"\n aad = \"some random aad\"\n\n for mode, key_len, iv_len, aad_len in modes:\n with Example(f\"\"\"mode={mode.strip(\"'\")} iv={iv_len} aad={aad_len}\"\"\") as example:\n example_key = f\"'{key[:key_len]}'\"\n example_mode = mode\n example_iv = None if not iv_len else f\"'{iv[:iv_len]}'\"\n example_aad = None if not aad_len else f\"'{aad}'\"\n example_transform = f\"encrypt(mode, secret, key{', iv' if example_iv else ''}{', aad' if example_aad else ''})\"\n\n with table(\"user_data\"):\n with mv_transform(\"user_data\", example_transform):\n with When(\"I insert encrypted data\"):\n node.query(f\"\"\"\n INSERT INTO user_data_input\n (date, name, secret, mode, key)\n VALUES\n ('2020-01-01', 'user0', 'user0_secret', {example_mode}, {example_key}{(\", \" + example_iv) if example_iv else \"\"}{(\", \" + example_aad) if example_aad else \"\"}),\n ('2020-01-02', 'user1', 'user1_secret', {example_mode}, {example_key}{(\", \" + example_iv) if example_iv else \"\"}{(\", \" + example_aad) if example_aad else \"\"}),\n ('2020-01-03', 'user2', 'user2_secret', {example_mode}, {example_key}{(\", \" + example_iv) if example_iv else \"\"}{(\", \" + example_aad) if example_aad else \"\"})\n \"\"\")\n\n with And(\"I read inserted data back\"):\n node.query(\"SELECT date, name, hex(secret) FROM user_data ORDER BY date\")\n\n with Then(\"output must match the snapshot\"):\n with values() as that:\n assert that(snapshot(r.output.strip(), \"insert\", 
name=f\"encrypt_mv_example_{varname(basename(self.name))}\")), error()", "def visitCPU(self, engine):\n encoded_texts = self.encode()\n res = []\n for encoded_elem in encoded_texts:\n plaintext = encoded_elem.encoding\n if plaintext >= (self.pub_key.n - self.pub_key.max_int) and plaintext < self.pub_key.n:\n # Very large plaintext, take a sneaky shortcut using inverses\n neg_plaintext = self.pub_key.n - plaintext # = abs(plaintext - nsquare)\n neg_ciphertext = (self.pub_key.n * neg_plaintext + 1) % self.pub_key.nsquare\n ciphertext = gmpy_math.invert(neg_ciphertext, self.pub_key.nsquare)\n else:\n ciphertext = (self.pub_key.n * plaintext + 1) % self.pub_key.nsquare\n\n if self.obfuscate:\n # r = random.SystemRandom().randrange(1, self.pub_key.n)\n r = random.randrange(1, self.pub_key.n)\n obfuscator = gmpy_math.powmod(r, self.pub_key.n, self.pub_key.nsquare)\n ciphertext = ( ciphertext * obfuscator ) % self.pub_key.nsquare\n \n encrypted_text = PaillierEncryptedNumber(self.pub_key, ciphertext, encoded_elem.exponent)\n res.append(encrypted_text)\n\n return res", "def test_from_cuda_array_interface(self):\n\n dtypes = [\n numpy.complex64,\n numpy.complex128,\n numpy.float64,\n numpy.float32,\n numpy.int64,\n numpy.int32,\n numpy.int16,\n numpy.int8,\n numpy.uint8,\n ]\n for dtype in dtypes:\n numpy_arys = [\n numpy.arange(6).reshape(2, 3).astype(dtype),\n numpy.arange(6).reshape(2, 3).astype(dtype)[1:], # View offset should be ignored\n numpy.arange(6).reshape(2, 3).astype(dtype)[:, None], # change the strides but still contiguous\n ]\n # Zero-copy when using `torch.as_tensor()`\n for numpy_ary in numpy_arys:\n numba_ary = numba.cuda.to_device(numpy_ary)\n torch_ary = torch.as_tensor(numba_ary, device=\"cuda\")\n self.assertEqual(numba_ary.__cuda_array_interface__, torch_ary.__cuda_array_interface__)\n self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype))\n\n # Check that `torch_ary` and `numba_ary` points to the same device memory\n torch_ary += 42\n self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype))\n\n # Implicit-copy because `torch_ary` is a CPU array\n for numpy_ary in numpy_arys:\n numba_ary = numba.cuda.to_device(numpy_ary)\n torch_ary = torch.as_tensor(numba_ary, device=\"cpu\")\n self.assertEqual(torch_ary.data.numpy(), numpy.asarray(numba_ary, dtype=dtype))\n\n # Check that `torch_ary` and `numba_ary` points to different memory\n torch_ary += 42\n self.assertEqual(torch_ary.data.numpy(), numpy.asarray(numba_ary, dtype=dtype) + 42)\n\n # Explicit-copy when using `torch.tensor()`\n for numpy_ary in numpy_arys:\n numba_ary = numba.cuda.to_device(numpy_ary)\n torch_ary = torch.tensor(numba_ary, device=\"cuda\")\n self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype))\n\n # Check that `torch_ary` and `numba_ary` points to different memory\n torch_ary += 42\n self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype) + 42)", "def test_create_cipher_text():\n nb = 4\n states = [[x+1 for x in range(-1, 15)]] * 2\n states_ref = bytearray([0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15] +\n [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15])\n states = aes.create_cipher_text(states, nb)\n assert(states == states_ref)", "def decrypt_using_materialized_view(self):\n node = self.context.node\n key = f\"{'1' * 36}\"\n iv = f\"{'2' * 16}\"\n aad = \"some random aad\"\n\n with Given(\"I load encrypt snapshots\"):\n snapshot_module = SourceFileLoader(\"snapshot\", os.path.join(current_dir(), 
\"snapshots\", \"insert.py.insert.snapshot\")).load_module()\n\n for mode, key_len, iv_len, aad_len in modes:\n with Example(f\"\"\"mode={mode.strip(\"'\")} iv={iv_len} aad={aad_len}\"\"\") as example:\n example_key = f\"'{key[:key_len]}'\"\n example_mode = mode\n example_iv = None if not iv_len else f\"'{iv[:iv_len]}'\"\n example_aad = None if not aad_len else f\"'{aad}'\"\n example_transform = f\"decrypt(mode, secret, key{', iv' if example_iv else ''}{', aad' if example_aad else ''})\"\n\n with Given(\"I have ciphertexts\"):\n example_name = basename(example.name)\n ciphertexts = getattr(snapshot_module, varname(f\"encrypt_mv_example_{example_name}\"))\n example_ciphertexts = [\"'{}'\".format(l.split(\"\\t\")[-1].strup(\"'\")) for l in ciphertexts.split(\"\\n\")]\n\n with table(\"user_data\"):\n with mv_transform(\"user_data\", example_transform):\n with When(\"I insert encrypted data\"):\n node.query(f\"\"\"\n INSERT INTO user_data_input\n (date, name, secret, mode, key)\n VALUES\n ('2020-01-01', 'user0', 'unhex({example_ciphertexts[0]})', {example_mode}, {example_key}{(\", \" + example_iv) if example_iv else \"\"}{(\", \" + example_aad) if example_aad else \"\"}),\n ('2020-01-02', 'user1', 'unhex({example_ciphertexts[1]})', {example_mode}, {example_key}{(\", \" + example_iv) if example_iv else \"\"}{(\", \" + example_aad) if example_aad else \"\"}),\n ('2020-01-03', 'user2', 'unhex({example_ciphertexts[2]})', {example_mode}, {example_key}{(\", \" + example_iv) if example_iv else \"\"}{(\", \" + example_aad) if example_aad else \"\"})\n \"\"\")\n\n with And(\"I read inserted data back\"):\n r = node.query(\"SELECT date, name, secret FROM user_data ORDER BY date\")\n\n with Then(\"output must match the expected\"):\n expected = r\"\"\"'2020-01-01\\tuser0\\tuser0_secret\\n2020-01-02\\tuser1\\tuser1_secret\\n2020-01-03\\tuser2\\tuser2_secret'\"\"\"\n assert r.output == expected, error()", "def init_pycuda():\n drv.init()\n context = drv.Device(0).make_context()\n devprops = { str(k): v for (k, v) in context.get_device().get_attributes().items() }\n cc = str(devprops['COMPUTE_CAPABILITY_MAJOR']) + str(devprops['COMPUTE_CAPABILITY_MINOR'])\n return context, cc", "def cCopy():\n code_text = \"\"\"\n KERNEL void cCopy( \n GLOBAL_MEM const float2 *CX,\n GLOBAL_MEM float2 *CY)\n {\n // Copy x to y: y = x;\n //CX: input array (float2)\n // CY output array (float2)\n unsigned long gid=get_global_id(0); \n CY[gid]=CX[gid];\n };\n \"\"\" \n return code_text", "def generate_keys():\n aeskey = get_random_bytes(16)\n rsakey = RSA.generate(2048)\n\n \"\"\" Old, bad(?) implementation. We're not using this anymore. To be fair, we never really got an explanation of why it was bad to directly write into bootloader.c, then make, then remove the key, but it was phased out to promote using the proper way instead nonetheless. \n # Change into directory containing bootloader source.\n bldir = FILE_DIR / '..' 
/ 'bootloader' / 'src'\n os.chdir(bldir)\n with open('bootloader.c', 'r') as file:\n bootloader = file.read()\n if bootloader[0:12] == 'char AES_KEY': # Check if a key is already present from a previous build\n bootloader = bootloader[bootloader.index('\\n')+1:] # Remove old key\n byteout = ''\n for i in range(16): \n byteout += ', 0x' + aeskey[i:i+1].hex() # Write the bytes in hex form for C implementation (0xXX, etc.)\n byteout = byteout[2:]\n file.close()\n with open('bootloader.c', 'w') as file:\n file.write('char AES_KEY[16] = {'+byteout+'};\\n') # Write key into bootloader\n file.close()\n with open('bootloader.c', 'a') as file:\n file.write(bootloader) # Append rest of the bootloader code back on\n file.close()\n \"\"\"\n \n # Change into directory containing tools\n os.chdir(FILE_DIR)\n with open('secret_build_output.txt', 'wb') as file: \n file.write(aeskey) # Write AES key into secret file as binary bytes (to be used by fw_protect)\n file.write(rsakey.export_key(format='DER')) # Write RSA key \n print(len(rsakey.export_key(format='DER')))\n# file.write(rsakey.publickey().export_key())\n \n st = make_bootloader(aeskey, rsakey.n.to_bytes(256, 'big'), rsakey.e.to_bytes(3, 'big'))\n# st = make_bootloader(aeskey, struct.pack(\">I\", rsakey.n), struct.pack(\">I\", rsakey.e))\n if st != 0: # Throw error if build failed\n raise SystemExit(f'Build Failed - Make returned code {st}')", "def create_cipher(keyword):\n\n # creating an alphabet as an array of letters\n alphabet=[]\n for i in range(26): alphabet.append(chr(65+i))\n alphabet_full=alphabet[:] #making a copy that will not be altered\n\n# simultaneously creating an encoder and decoder\n encode_map={};\n decode_map={};\n \n # the keyword is the first portion of the map. For keyword='cat', \n # the first three letters of the encode map are a->c, b->a, c->t\n # c,a,t are then removed from the alphabet for the remaining substitutions.\n # an inverse map is created simultaneously with the values reversed.\n for i_orig, new_letter in enumerate(keyword): #using the keyword as the first set of letters\n orig_letter = alphabet_full[i_orig]\n encode_map[ orig_letter ] = new_letter\n decode_map[ new_letter ] = orig_letter\n alphabet.remove(new_letter) #removing this letter from the mapping\n \n # the next section uses the remaining letters of the alphabet and subsittutes\n # sequentially.\n for i_new,new_letter in enumerate(alphabet): \n i_orig += 1\n orig_letter = alphabet_full[i_orig]\n encode_map[orig_letter] = new_letter\n decode_map[new_letter] = orig_letter\n return encode_map,decode_map" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set or update the parent of this node. Set parent to `None` to remove this node's parent.
def setParent(self, parent): # Don't allow a node to set its parent as one of its children! if (parent in self.unorderedChildren): logging.error("Node.setParent: cannot set a node's child to be its own parent! node = {}; parent = {}" .format(self.name, parent.name)) return # 1st, remove this child from its current parent if (self.parent is not None): self.parent.__removeChild(self) # 2nd, set the new parent (setting to None is OK) self.parent = parent if (self.parent is not None): self.parent.__addChild(self)
[ "def set_parent(self, parent: 'Node'):\n if parent == self.parent:\n return\n self.parent = parent\n if parent is not None:\n self.parent.add_child(self)", "def set_parent(self, parent_node):\n self.parent = parent_node", "def setParent(self, parent):\n assert isinstance(parent, RedBlackTree) or parent == None\n self.parentTree = parent", "def set_parent(self, parent):\n self._parent = parent", "def set_parent(self, parent):\n old_parent = self._parent\n if parent is old_parent:\n return\n if parent is self:\n raise ValueError('cannot use `self` as Object parent')\n if parent is not None and not isinstance(parent, Object):\n raise TypeError('parent must be an Object or None')\n self._parent = parent\n self.parent_changed(old_parent, parent)\n if old_parent is not None:\n old_parent._children.remove(self)\n old_parent.child_removed(self)\n if parent is not None:\n parent._children.append(self)\n parent.child_added(self)", "def parent(self, parent):\n warnings.warn(\n \"Setting a parent is potentially dangerous. Consider using \"\n \"Topology.add_subtopology instead\"\n )\n if parent is None:\n raise NotImplementedError(\n \"Setting parents to None is not yet supported\"\n )\n self._parent = _validate_parent(parent)", "def set_parent ( self, parent ):\n self.parent_ref = get_object_ref ( parent )", "def _setParent(self, parent):\n if parent is None:\n self._parent = None\n else:\n self._parent = weakref.ref(parent)", "def set_parent(self, node_id: int):\r\n self.parent = node_id", "def set_parent_id(self, parent_id):\n pass", "def setParent(self,p,uparent=None,eparent=None):\n if self.parent != None:\n self.parent.children.remove(self)\n self.parent = p\n self.uparent = uparent\n self.eparent = eparent\n p.children.append(self)", "def set_parent(self, parent):\n # If the attribute already has a parent (we are moving the attribute) then fail with a runtime exception.\n if self._parent:\n raise CloudioModificationException('The parent of an Attribute can never be changed ' +\n '(Attributes can not be moved)!')\n # assert isinstance(parent, CloudioAttributeContainer), 'Wrong type for parent attribute!'\n self._parent = parent", "def set_parent(self, parent):\n self.parent = parent\n self.level = parent.level + 1 if parent else 0", "def addParent(self, node):\n self.parent = node", "def update_parent(self, new_parent) -> None:\n prev_parent = self.parent\n if prev_parent is not None and prev_parent.children is not None:\n prev_parent.set_children(\n [child for child in prev_parent.children if child is not self]\n )\n self.parent = new_parent\n ls = self.left_sibling\n rs = self.right_sibling\n if ls:\n ls.right_sibling = rs\n if rs:\n rs.left_sibling = ls\n self.left_sibling = None\n self.right_sibling = None\n self.update_depth(new_parent.depth + 1)", "def add_node_with_parent(self,node,parent) :\n node.parent = parent\n if not parent is None:\n parent.add_child(node)", "def set_parent(self, parent):\n # Note: The added/removed events must be executed on the next\n # cycle of the event loop. It's possible that this method is\n # being called from the `construct` class method and the child\n # of the widget will not yet exist. 
This means that child event\n # handlers that rely on the child widget existing will fail.\n curr = self._parent\n if curr is parent or parent is self:\n return\n\n self._parent = parent\n if curr is not None:\n if self in curr._children:\n curr._children.remove(self)\n if curr._initialized:\n if self._initialized:\n curr.child_removed(self)\n else:\n DeferredCall(curr.child_removed, self)\n\n if parent is not None:\n parent._children.append(self)\n if parent._initialized:\n if self._initialized:\n curr.child_added(self)\n else:\n DeferredCall(parent.child_added, self)", "def set_parent(self, value):\n #Ensure that this sprite is hidden\n if self.visible:\n self.show(False)\n\n #Change the parent\n self._parent = value", "def parent(self, node):\n return node._parent" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the names of the children this node has
def printChildren(self): print("Printing {}'s children:".format(self.name)) if (len(self.orderedChildren) != 0): for child in self.orderedChildren: print(child.name) else: # no children print("NONE")
[ "def printChildren(self):\n for node in self.allNodes:\n node.printChildren()", "def child_names(self) -> List[str]:\n return [t.name for t in self.children]", "def _pprint_children(self):\n return self.children", "def get_children_names(self):\n children_names = self._state.children_names\n return children_names", "def _pprint_children(self):\n return [self.child]", "def printTree(self):\n pass", "def get_child_nodes(self):\n return self.child_nodes", "def Children(self) -> Dwf3dNavigationTreeNodeCollection:", "def getChildren(self):\n return self.children", "def display(self):\n print(self.nodes)", "def entries(self):\n if self._is_leaf:\n return [self._name]\n rv = []\n for child in self._children.itervalues():\n for entry in child.entries():\n if not self.is_root:\n entry = self._name + '/' + entry\n rv.append(entry)\n return rv", "def listChildrenNames(self, flags=0):\n ret = libvirtmod.virDomainSnapshotListChildrenNames(self._o, flags)\n if ret is None: raise libvirtError ('virDomainSnapshotListChildrenNames() failed')\n return ret", "def print_leafs(self, node):\n\t\tif node :\n\t\t\tself.print_leafs(node.lchild)\n\t\t\tif node.lchild is None and node.rchild is None:\n\t\t\t\tprint node.data\n\t\t\tself.print_leafs(node.rchild)", "def displayTags(self):\n for node in self.nodes:\n print(\"{}, tag = {}\".format(node.name, node.tag))", "def childs(self):\n nodes = [node for pri, node in self._childs]\n nodes.reverse()\n return nodes", "def getChildren(self) -> \"SoChildList *\":\n return _coin.SoVRMLText_getChildren(self)", "def print_tree(self, tabwidth=0):\n\n # if teststr == \"silent\":\n print(tabwidth * \" \", self.ele, '*' if self.mark else '', sep=\"\")\n\n \"\"\" Debugging purposes\n elif teststr == \"loud\":\n print(tabwidth*\" \", end = \" \")\n show((self.ele, id(self)))\n #input()#\n \"\"\"\n for childtree in self.children_generator():\n childtree.print_tree(tabwidth + 1)", "def children(self):\n return Query(self.nodes, False)", "def children(self):\n ret = self._get_attr(\"children\")\n return [IMedium(a) for a in ret]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the names of all of the children of each node in this tree
def printChildren(self): for node in self.allNodes: node.printChildren()
[ "def printChildren(self):\n\n print(\"Printing {}'s children:\".format(self.name))\n if (len(self.orderedChildren) != 0):\n for child in self.orderedChildren:\n print(child.name)\n else:\n # no children\n print(\"NONE\")", "def child_names(self) -> List[str]:\n return [t.name for t in self.children]", "def _pprint_children(self):\n return self.children", "def get_children_names(self):\n children_names = self._state.children_names\n return children_names", "def _pprint_children(self):\n return [self.child]", "def printTree(self):\n pass", "def print_leafs(self, node):\n\t\tif node :\n\t\t\tself.print_leafs(node.lchild)\n\t\t\tif node.lchild is None and node.rchild is None:\n\t\t\t\tprint node.data\n\t\t\tself.print_leafs(node.rchild)", "def print_tree(self, tabwidth=0):\n\n # if teststr == \"silent\":\n print(tabwidth * \" \", self.ele, '*' if self.mark else '', sep=\"\")\n\n \"\"\" Debugging purposes\n elif teststr == \"loud\":\n print(tabwidth*\" \", end = \" \")\n show((self.ele, id(self)))\n #input()#\n \"\"\"\n for childtree in self.children_generator():\n childtree.print_tree(tabwidth + 1)", "def entries(self):\n if self._is_leaf:\n return [self._name]\n rv = []\n for child in self._children.itervalues():\n for entry in child.entries():\n if not self.is_root:\n entry = self._name + '/' + entry\n rv.append(entry)\n return rv", "def print_tree(self, maxresults=100, maxdepth=None):\n self.ignore_caller()\n for depth, refid, rep in self.walk(maxresults, maxdepth):\n print (\"%9d\" % refid), (\" \" * depth * 2), rep", "def print_node_summary_table(input_tree):\n for node in input_tree.postorder():\n if node.Parent:\n parent_name = node.Parent.Name\n else:\n parent_name = None\n print \"\\t\".join(map(str,[node.Name,len(node.Children),node.Length,parent_name]))", "def traversal(self):\n if self.is_leaf():\n print(self.payload)\n else:\n for child in self.pointers:\n child.traversal()", "def _rename_children_of(self, parent):\n #rename all branches\n# self.qr.put(('PRINT', 'renaming children of >{}<'.format(parent)))\n e_parent = self.trout.find(\".//\" + parent)\n if e_parent is None:\n return\n# self.qr.put(('PRINT', 'renaming children of {}'.format(e_parent.tag)))\n parent_attribs = e_parent.attrib\n# children = list(e_parent)\n children = e_parent.getchildren()\n# self.qr.put(('PRINT', '>{}< has {} children'.format(e_parent.tag, len(children))))\n# self.qr.put(('PRINT', '{}'.format(list(children))))\n ancestor_name = parent_attribs['Name']\n my_isalpha = True\n if ancestor_name:\n if ancestor_name[-1] == '@':\n my_name = '@'\n else:\n my_name = 1\n my_isalpha = ancestor_name[-1].isdecimal()\n else:\n my_name = 1\n if self.initial_digit:\n my_isalpha = self.initial_digit[-1].isdecimal()\n else:\n my_name = 1\n my_isalpha = False\n my_num = 1\n\n nos_chars = len(to_alpha(len(children))) if my_name == 1 else 0\n nos_digits = (len(str(len(children)))-1) if my_name == 1 else 0\n\n the_format = '{0:0' + '{}'.format(nos_digits) + 'd}'\n alpha_format = '{0:A>' + '{}'.format(nos_chars) + 's}'\n \n for child in children:\n# self.qr.put(('PRINT', 'for {} of {}'.format(child.tag, parent)))\n self.qr.put(('PROGSTEP', 1))\n #bullet proofed in to_aplpha() so not exceed limit of single digit\n my_str = alpha_format.format(to_alpha(my_name - 1)) \\\n if my_isalpha else the_format.format(my_name)\n vout = list()\n if child.attrib['Type'] == 'collection':\n title = self._my_unidecode(child.attrib['TIT2'])\n #strip out any unapproved punctuation - done in my_unidecode\n child.attrib['Name'] = 
ancestor_name + my_str\n child.text = \"{0}{1}{2}-{3}\".format(self.prefix, \\\n ancestor_name, my_str, title)\n# self.qr.put(('PRINT', '{}/{} is collection'.format(child.tag, child.text)))\n vout = [['Name', child.attrib['Name']], ['TIT2', title]]\n self.to_be_renamed[child.tag] = [vout, child.text]\n my_name += 1\n# self.qr.put(('PRINT', 'rename children of {}'.format(child.tag)))\n# return\n self._rename_children_of(child.tag)\n else: #is file so use\n size = os.path.getsize(child.attrib['Location']) \\\n if child.attrib['Location'] != '-' \\\n else 0\n if size == 0:\n #fetch location, trim off path and '.mp3' extension,\n #transliterate unicode(utf-8) to 7-bit ascii or Latin-1?\n title = self._my_unidecode(os.path.basename(\\\n child.attrib['Location'][:-4]))\n #transliterate unicode(utf-8) to 7-bit ascii or Latin-1?\n #replace spaces and punctuation - done in my_unidecode\n child.attrib['Name'] = ancestor_name + my_str\n child.text = \"{0}{1}{2}-{3}\".format(self.prefix, \\\n ancestor_name, my_str, title)\n# self.qr.put(('PRINT', 'zero length file {}'.format(child.text)))\n vout = [['Name', child.attrib['Name']], ['TIT2', title]]\n else: #idiot/not idiot always downgrade TIT2 to form title\n tit2 = self._downgrade_data('TIT2', child)\n title = self._my_unidecode(tit2)\n child.attrib['Name'] = \"{0}-{1:02d}\".format(\\\n ancestor_name, my_num)\n child.text=\"{0}{1}-{2:02d}-{3}\".format(self.prefix, \\\n ancestor_name, my_num, title)\n# self.qr.put(('PRINT', 'mp3 file {}'.format(child.text)))\n if self.mode: #advanced\n vout = [['Name', child.attrib['Name']],\\\n ['TIT2', child.attrib['TIT2']]]\n else: #simple\n vout = [['Name', child.attrib['Name']],['TIT2', tit2]]\n self.to_be_renamed[child.tag] = [vout, child.text]\n my_num += 1\n self.qr.put(('PROGSTEP', 1))", "def Children(self) -> Dwf3dNavigationTreeNodeCollection:", "def NameTree(self, varNames):\n if self.GetTerminal():\n return\n else:\n for child in self.GetChildren():\n child.NameTree(varNames)\n self.SetName(varNames[self.GetLabel()])", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def print_root_node(self):\n print(self.root_node.name)", "def display(self):\n print(self.nodes)", "def print_tree(self):\r\n f = open(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)), 'Tree.txt'), 'w')\r\n node = self.root\r\n self.print_node(f, node, \"\")\r\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Graphically print the tree
def printTree(self): pass
[ "def print_tree(self, tabwidth=0):\n\n # if teststr == \"silent\":\n print(tabwidth * \" \", self.ele, '*' if self.mark else '', sep=\"\")\n\n \"\"\" Debugging purposes\n elif teststr == \"loud\":\n print(tabwidth*\" \", end = \" \")\n show((self.ele, id(self)))\n #input()#\n \"\"\"\n for childtree in self.children_generator():\n childtree.print_tree(tabwidth + 1)", "def print_tree(self, maxresults=100, maxdepth=None):\n self.ignore_caller()\n for depth, refid, rep in self.walk(maxresults, maxdepth):\n print (\"%9d\" % refid), (\" \" * depth * 2), rep", "def draw(self):\n print self.treeString()", "def print_tree(self):\r\n f = open(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)), 'Tree.txt'), 'w')\r\n node = self.root\r\n self.print_node(f, node, \"\")\r\n f.close()", "def print_tree(self, level=None):\n levels = range(self.tree_levels) if level is None else [level]\n for k in levels:\n for j in range(2 ** k - 1, 2 ** (k + 1) - 1):\n print(self.tree[j], end=' ')\n print()", "def print(self):\n print('(', end='')\n self.printBST()\n print(')', end=' ')", "def print_tree(tree, str):\n if type(tree) == dict:\n print(\"%s%s\" % (str, list(tree.keys())[0]))\n for item in list(tree.values())[0].keys():\n print(\"%s\\t%s%s\" % (str, item, \"-\\\\\"))\n print_tree(list(tree.values())[0][item], str + \"\\t\\t\")\n print(\"\")\n else: #printing leaves\n print(\"%s->%s\" % (str, tree))", "def print_tree(t, indent=0):\n print(' ' * indent + str(entry(t)))\n for subtree in subtrees(t):\n print_tree(subtree, indent + 1)", "def plot(self):\n pprint(self.tree)", "def _print_structure(self):\n if self._isthisapropertree() == False:\n print(\"ERROR: this is not a proper tree. +++++++++++++++++++++++\")\n outstr = str(self._payload) + '(' + str(self._height()) + ')['\n if self._left:\n outstr = outstr + str(self._left._payload) + ' '\n else:\n outstr = outstr + '* '\n if self._right:\n outstr = outstr + str(self._right._payload) + ']'\n else:\n outstr = outstr + '*]'\n if self._parent:\n outstr = outstr + ' -- ' + str(self._parent._payload)\n else:\n outstr = outstr + ' -- *'\n print(outstr)\n if self._left:\n self._left._print_structure()\n if self._right:\n self._right._print_structure()", "def print_level_order(tree):\n queue = Queue()\n next_queue = Queue()\n final = ''\n\n queue.enqueue(tree.root)\n\n while queue or next_queue:\n if not queue:\n queue, next_queue = next_queue, queue\n final += '\\n'\n\n current = queue.dequeue()\n final += f'{current.val} '\n for child in current.children:\n next_queue.enqueue(child)\n\n return final", "def PrintTree(self, *args):\n return _itkKdTreePython.itkKdTreeLSVF2_PrintTree(self, *args)", "def printTree(size: int):\n print(\"*\".center(((size * 2) + 1)))\n midSpace = 1\n for sect in reversed(range(size)):\n print(\"/\".rjust(sect + 1), \"\\\\\".rjust(midSpace))\n midSpace += 2\n print(\"-\".center(((size * 2) + 1), \"-\"))\n print(\"#\".center(((size * 2) + 1)))", "def PrintTree(self, *args):\n return _itkKdTreePython.itkKdTreeLSVF3_PrintTree(self, *args)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def print_tree(path, sep=' '):\r\n print_tree_helper(path, sep, 0)", "def dump_level(self):\n\n if self.is_empty():\n return\n\n queue = Queue()\n queue.put(self._root)\n\n while not queue.empty():\n print()\n count = queue.qsize()\n\n for i in range(0, count):\n 
queue_element = queue.get()\n if queue_element == \"tab\":\n print(end=\"\\t\")\n else:\n # print size\n print(\"size:\", queue_element.size, end=\" - \")\n\n elements = queue_element.elements\n for j in range(queue_element.size):\n print(elements[j], end=\" \")\n\n for child in queue_element.children:\n if child is not None:\n queue.put(child)\n queue.put(\"tab\")", "def render_tree():\n graph = TREE.graphviz(node_attr={'shape': 'record', 'height': '.1'})\n graph.body\n graph.render(GRAPHDIR, format='png')\n #graph.view()", "def Print(self, indentionLevel, visited): \n\t\ts=''\n\t\tfor i in range(indentionLevel):\n\t\t\ts+=\"-\"\n#\t\tif self.id in visited:\n#\t\t\tprint \"^-%s(*%s*, %s, %s [%s])\" %(s, self.id, self.name, self.desc, self.type)\n#\t\t\treturn\n\t\tvisited.add(self.id)\n\t\tprint \"%s(*%s*, %s, %s [%s])\" %(s, self.id, self.name, self.desc, self.type)\n\t\tprint \"Genes: \", self.genes\n\t\t\n\t\tfor child in self.children:\n\t\t\tchild.Print(indentionLevel+1, visited)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to emit a timing message to the log(s)
def timing(message): # get the appropriate logger logger = AdmitLogging.findLogger() if logger is None: return logger.log(AdmitLogging.TIMING, message)
[ "def time_block(self, message):\n tic = time.time()\n yield\n dt = time.time() - tic\n log = app_log.info if dt > 1 else app_log.debug\n log(\"%s in %.2f ms\", message, 1e3 * dt)", "def use_time_writer(time_writer, msg):\n if not time_writer:\n print(msg)\n else:\n time_writer(msg)", "def log(t0, text):\n print(time.time()-t0, text)", "def print_timed_message(self, text, pipe=sys.stdout):\n msg = \"\"\n if self.name:\n msg = \"[%s]\" % self.name\n if self.use_date:\n now = datetime.now()\n msg = \"%s[%s] %s\" % (msg, now.strftime(\"%Y-%m-%d %H:%M:%S\"), text)\n else:\n time_now = time.localtime()\n time_diff = time.mktime(time_now) - time.mktime(self.start_time)\n (days, res) = divmod(time_diff, 86400)\n (hours, res) = divmod(res, 3600)\n (mins, secs) = divmod(res, 60)\n msg = (\"%s[%dd %02dh %02dm %02ds] %s\"\n % (msg, days, hours, mins, secs, text))\n print(msg, file=pipe)", "def log_elapsed_time(self, prefix=\"Elapsed time: \"):\n self.log(\"{}{}\".format(prefix, self.get_elapsed_time()))", "def record_run_time(self, time_lens):\r\n if self.log_file:\r\n time_file = re.sub(\"\\.log$\", \".time\", self.log_file)\r\n else:\r\n time_file = \"Flow.time\"\r\n time_ob = open(time_file, \"a\")\r\n print >> time_ob, \"%-15s , %s\" % (time_lens, self.cmd)\r\n time_ob.close()", "def IOLogGenerated(self, delay, stream_name, data):\n logger.info(\"IOLogGenerated(%r, %r, %r)\", delay, stream_name, data)", "def trace(message):\r\n if tracing == True:\r\n now = datetime.datetime.now()\r\n date = now.strftime(\"%Y %m %d - %H:%M:%S\")\r\n\r\n trace_file.write('%r %s\\n'%(date, message))\r\n print date, 'sptlqry.py:', message", "def timing(name, duration=None, sample_rate=1, tags=None):", "def log(text):\n if LOG:\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(f\"[{current_time}] {text}\")", "def log_time_trace(self):\n if not self.is_time_trace_enabled:\n return\n time_trace = ['Execution time measured for this run:\\n']\n self.__time_trace_data.reverse()\n for trace in self.__time_trace_data:\n time_trace.append(' ' * trace[2])\n time_trace.append(format(trace[0], '<30'))\n time_trace.append(f'{trace[1]:>5} ms\\n')\n self.debug(''.join(time_trace))\n self.reset_time_trace()", "def timestamp():\n debug(0,'Time elapsed since start: ', time_string(elapsed_time()) )", "def add_logging_level_timings():\n\n # Add logging level below logging.DEBUG to log computational timings\n logging.TIMINGS = 8 # Define level constant\n logging.addLevelName(logging.TIMINGS, \"TIMINGS\") # add to level namepsace\n\n # add logging.timmings('msg') function\n def timings(self, message, *args, **kws): # define function\n if self.isEnabledFor(logging.TIMINGS):\n # Yes, logger takes its '*args' as 'args'.\n self._log(logging.TIMINGS, message, args, **kws)\n\n # add function to logging.Logger class for further calling\n logging.Logger.timings = timings", "def startTime(self, run) :\n\t\tself.sTimes[run] = self.getLogTime()", "def trace(self, msg, *args, **kwargs):\n self.write(msg, level='TRACE', *args, **kwargs)", "def trackTime(self,event):\n self.timings[event] = time.time()", "def output(msg):\n print(f\"{datetime.now()}\\t{msg}\")", "def timer_logger(orig_func):\n\n @wraps(orig_func)\n def wrapper(*args, **kwargs):\n\n start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n t1 = time()\n\n log_file_path = '../QUBEKit_log.txt'\n\n with open(log_file_path, 'a+') as log_file:\n log_file.write(f'{orig_func.__qualname__} began at {start_time}.\\n\\n')\n log_file.write(f'Docstring for 
{orig_func.__qualname__}:\\n {orig_func.__doc__}\\n\\n')\n\n time_taken = time() - t1\n\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n\n secs, remain = str(float(secs)).split('.')\n\n time_taken = f'{int(hours):02d}h:{int(mins):02d}m:{int(secs):02d}s.{remain[:5]}'\n end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n log_file.write(f'{orig_func.__qualname__} finished in {time_taken} at {end_time}.\\n\\n')\n # Add some separation space between function / method logs.\n log_file.write(f'{\"-\" * 50}\\n\\n')\n\n return orig_func(*args, **kwargs)\n return wrapper", "def appendmessages(self, name, msg):\r\n \r\n time = strftime(\"%H:%M\")\r\n return(time+ ' ' + name + ': ' + msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to emit a regression message to the log(s). It is suggested to start the message with a magic word followed by a colon, so top-level scripts can reliably parse them. It is typically not needed to describe verbosely what these numbers are; just the numbers are fine, as the associated label defines them.
def regression(message): # get the appropriate logger logger = AdmitLogging.findLogger() if logger is None: return logger.log(AdmitLogging.REGRESSION, message)
[ "def print_logregress(ds, logys, yname=\"y\"):\n m, b, r, p, se = linregress(ds, logys)\n print(\"\\nlog({_coconut_format_0}) = {_coconut_format_1} d + {_coconut_format_2}\\t(r**2 = {_coconut_format_3})\".format(_coconut_format_0=(yname), _coconut_format_1=(m), _coconut_format_2=(b), _coconut_format_3=(r**2)))\n print(\"{_coconut_format_0} = {_coconut_format_1} * 2**({_coconut_format_2} d))\".format(_coconut_format_0=(yname), _coconut_format_1=(exp(b)), _coconut_format_2=(m / log(2))))\n poly = PolynomialFeatures(degree=2, include_bias=False)\n X = ((poly.fit_transform)((list)((map)(lambda x: [x,], ds))))\n clf = linear_model.LinearRegression()\n clf.fit(X, logys)\n# a d**2 + b d + c\n b, a = clf.coef_\n c = clf.intercept_\n print(\"log({_coconut_format_0}) = {_coconut_format_1} d**2 + {_coconut_format_2} d + {_coconut_format_3}\".format(_coconut_format_0=(yname), _coconut_format_1=(a), _coconut_format_2=(b), _coconut_format_3=(c)))\n# (d - 1)(a d - c)\n# a d**2 - a d - c d + c\n# a d**2 - (a + c) d + c\n print(\"{_coconut_format_0} = exp((d - 1)({_coconut_format_1} d - {_coconut_format_2}) + {_coconut_format_3} d)\".format(_coconut_format_0=(yname), _coconut_format_1=(a), _coconut_format_2=(c), _coconut_format_3=(b + a + c)))\n print(\"{_coconut_format_0} = 2**((d - 1)({_coconut_format_1} d - {_coconut_format_2}) + {_coconut_format_3} d)\".format(_coconut_format_0=(yname), _coconut_format_1=(a / log(2)), _coconut_format_2=(c / log(2)), _coconut_format_3=((a + b + c) / log(2))))\n print(\"{_coconut_format_0} = exp({_coconut_format_1} ((d - 1)(d - {_coconut_format_2}) + {_coconut_format_3} d))\".format(_coconut_format_0=(yname), _coconut_format_1=(a), _coconut_format_2=(c / a), _coconut_format_3=(1 + (b + c) / a)))\n print(\"{_coconut_format_0} = 2**({_coconut_format_1} ((d - 1)(d - {_coconut_format_2}) + {_coconut_format_3} d))\".format(_coconut_format_0=(yname), _coconut_format_1=(a / log(2)), _coconut_format_2=(c / a), _coconut_format_3=(1 + (b + c) / a)))", "def generate_msg(filename, msg, key, value):\n\n log.warning('Dataset ' + filename + ' has (keyword = value) of (' + key + ' = ' + str(value) + ').')\n if msg == Messages.NOPROC.value:\n log.warning('Dataset cannot be aligned.')\n else:\n log.warning('Dataset can be aligned, but the result may be compromised.')", "def labelled_str(label, data):\n return \"**{}:** {}\".format(str(label), str(data))", "def hazard(msg, label=True):\n\n if not should_print_insecure_log_msgs:\n return\n\n final_msg = None\n\n if label:\n final_msg = '***** hazardous log: ' + str(msg)\n else:\n final_msg = str(msg)\n\n print term_red + final_msg + term_reset", "def log_line(self, reduced_stat):\n #loss_per_frame = reduced_stat['loss'] / reduced_stat['total_frames']/4i5\n #loss_per_frame = reduced_stat['loss']\n loss_per_frame = reduced_stat['loss']/reduced_stat['total_frames']/65 \n return f'Lossperframe: {loss_per_frame:.3f}'", "def test_label_many_coefficients(self):\n H = (\n 0.1 * qml.PauliX(0)\n + 0.1 * qml.PauliY(1)\n + 0.3 * qml.PauliZ(0) @ qml.PauliX(1)\n + 0.4 * qml.PauliX(3)\n )\n assert H.label() == \"𝓗\"\n assert H.label(decimals=2) == \"𝓗\"", "def sendTelemetry(msg):\n\t#tele_sock.sendto(msg, (nl.get_address_as_tuple('gs_in')[1], nl.get_address_as_tuple('gs_in')[2])) # [0]: address, [1]: port\n\ttele_sock.send(msg) # [0]: address, [1]: port", "def _sanitizer_logging(self, level, message):\n self._modman.log_message(level, 'scripting: {}'.format(message))", "def PlotLogger(self) -> _n_1_t_1:", "def important(self, msg, *args, **kwargs):\n 
self.print(40, msg, *args, **kwargs)", "def ExtraMessage(self, msg):\n self._WriteToRecord(EXTRA, msg)", "def make_error_msg(msg, sequence_name, img_idx, det_idx):\n return \"{0}, image index {1}, detection index {2} : {3}\".format(sequence_name, img_idx, det_idx, msg)", "def send_metric(s):\n pass", "def msg():\n\n return \"\"\"ldamark [-h] --topics TOPICS [--iterations ITERATIONS] [--log LOG]\n --m {vsm,mallet} --f {init,train} corpus\n \"\"\"", "def logger_warning(self,text):\n logging.warning(self.log_my_name()+' '+text)", "def report(self) -> str:\n msg = \"RMS diff={0.diff_rms_perc} (rms_tol_perc={0.rms_tol_perc}), number of pixels changed={0.num_diffs_perc} \"\n \"(num_tol_perc={0.num_tol_perc}), max pix diff={0.max_pix_diff} (max_pix_diff_tol={0.max_pix_diff_tol})\"\n \n # for fields that are floating point values, add a precision so don't get too much noise:\n def repl(match):\n attr_name = match.group(1)\n if isinstance(getattr(self, attr_name), float):\n return '{0.' + attr_name + ':.2f}'\n else:\n return match.group(0)\n \n msg = re.sub(r'\\{0\\.(\\w+)\\}', repl, msg)\n \n return msg.format(self)", "def ShortExplanation(self):\n return 'failed: %s' % (self.message,)", "def emit(self) -> str:\n assert len(self.source_lines) == 1\n source_line: Line = self.source_lines[0]\n\n # At time = 0, x1 gets written using x0\n # At time = 1, x0 gets written using x1\n # At time = 2, x1 gets written using x0 etc.\n return \"{} {} {};\".format(\n self.output_line.emit((self.time_step + 1) % 2),\n self.operation,\n source_line.emit((self.time_step) % 2),\n )", "def raise_runtime_warning(self, message):\n print(\"WARNING: In instruction number \" + str(self.exec_pos) + \",\")\n print(message)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to get the appropriate logger. This is done by inspecting the stack, looking for either Admit.py or AT.py, both of which have the name of their loggers.
def findLogger(): aclass = None for i in stack(): # look for either AT.py or Admit.py in the stack if "Admit.py" in i[1] or "AT.py" in i[1]: # when found, get the class instance for k in getargvalues(i[0]).locals.keys(): if 'self' == k: aclass = getargvalues(i[0]).locals[k] break # if there is none found, or the found name is not registered if aclass is None or not hasattr(aclass,"_loggername") or aclass._loggername not in AdmitLogging.loggers: # if there is only 1 registered logger then go with that one if len(AdmitLogging.loggers) == 1: return logging.getLogger(next(iter(AdmitLogging.loggers))) return None return logging.getLogger(aclass._loggername)
[ "def logger(self) -> logging.Logger:\n if self._logger is None:\n name = '.'.join([\n self._callerType.__module__,\n self._callerType.__name__\n ])\n logger = logging.getLogger(name)\n logger = self.configureLogger(logger)\n self._logger = logger\n return self._logger", "def get_logger(self, name) -> Union[\"Logger\", None]:\n return self._loggers.get(name)", "def get_logger(name):\n return getLogger(name)", "def get_logger(source=\"\", tenantToken=\"\"):\n return LogManager._LogManager__implementation.get_logger(source, tenantToken)", "def get_logger(self, fields):\n config = self.get_config(fields)\n if config is not None and \"path\" in config:\n if config[\"path\"] not in self._loggers:\n self.create_logger(config)\n return self._loggers[config[\"path\"]][\"logger\"]\n return None", "def easy():\n import inspect\n try:\n frame = inspect.stack()[1] # caller\n module = inspect.getmodule(frame[0])\n return Logger(module.__name__)\n except IndexError:\n return Logger('UNKNOWN')", "def get_logger(name=None):\n name = \".\".join([x for x in (\"git-upstream\", name) if x])\n\n logger = logging.getLogger(name)\n return logger", "def logger(context: Optional[inspect.FrameInfo] = None) -> logging.Logger:\n if context is None:\n context = inspect.getframeinfo(inspect.currentframe().f_back) # type: ignore\n\n if context is None:\n # This may not be the best way to handle it, but if we get here\n # it'll avoid causing errors, at least.\n return logging.getLogger('??.??')\n\n return logging.getLogger('{}.{}'.format(_get_module(context), context.function))", "def get_logger():\n logger_name = 'development' if settings.DEBUG else 'production'\n return logging.getLogger(logger_name)", "def get_logger(name: str) -> logging.Logger:\n return logging.getLogger(name)", "def get_logger(cls, module_name):\n if module_name is not None:\n logobj = logging.getLogger(module_name)\n # Make sure we set the correct logging level if we have not created the logger before\n logobj.setLevel(log_helper.global_log_level)\n return logobj\n else:\n return logging.getLogger()", "def get_logger(**kwargs):\n # Configure logging modules\n configure()\n # Return structlog\n return structlog.get_logger(**kwargs)", "def _get_logger_for_func(self, func: Callable[Spec, RetVal]) -> Logger:\n if self.__logger is not None:\n return self.__logger\n\n func_module = inspect.getmodule(func)\n for logger_name in VALID_LOGGER_NAMES:\n logger_candidate = getattr(func_module, logger_name, None)\n if isinstance(logger_candidate, Logger):\n return logger_candidate\n return LOGGER", "def derive_logger(name):\n\n return logging.getLogger('{0}.{1}'.format(LOGGER_NAME, name))", "def get_log2():\n return logging.getLogger(__name__)", "def get_logger_from_class(obj): \n #return logging.getLogger(obj.__module__ + \".\" + obj.__class__.__name__)\n return logging.getLogger(obj.__class__.__name__)", "def get_logger(name):\n global DEBUG_MODE\n global MOREF_TYPE\n global LOG_LEVEL\n global LOG_SIZE\n global LOG_DIR\n global MAX_KEEP\n global PATH_SEPARATOR\n global LOGGERS\n\n if platform.system() == 'Windows':\n PATH_SEPARATOR = '\\\\'\n else:\n PATH_SEPARATOR = '/'\n\n if DEBUG_MODE:\n LOG_LEVEL = logging.DEBUG\n else:\n LOG_LEVEL = logging.INFO\n\n if MOREF_TYPE == 'VM':\n file_prefix = 'vm_'\n elif MOREF_TYPE == 'HOST':\n file_prefix = 'esxi_'\n else:\n file_prefix = ''\n\n if loggers.get(name):\n return loggers.get(name)\n else:\n formatter = logging.Formatter(\"%(asctime)s\\t%(name)s\\t%(levelname)s\\t%(message)s\")\n\n logsize = int(LOG_SIZE) * 
1048576\n\n logger = logging.getLogger(name)\n logger.setLevel(LOG_LEVEL)\n\n dfh = logging.StreamHandler(stream=sys.stdout)\n dfh.setLevel(logging.DEBUG)\n dfh.setFormatter(formatter)\n\n lfh = logging.handlers.RotatingFileHandler(LOG_DIR + PATH_SEPARATOR + file_prefix + 'get_metrics.log',\n mode='a',\n maxBytes=int(logsize),\n backupCount=int(MAX_KEEP),\n encoding='utf8',\n delay=False)\n lfh.setLevel(logging.INFO)\n lfh.setFormatter(formatter)\n\n efh = logging.handlers.RotatingFileHandler(LOG_DIR + PATH_SEPARATOR + file_prefix + 'get_metrics_error.log',\n mode='a',\n maxBytes=int(logsize),\n backupCount=int(MAX_KEEP),\n encoding='utf8',\n delay=False)\n efh.setLevel(logging.ERROR)\n efh.setFormatter(formatter)\n\n logger.addHandler(lfh)\n logger.addHandler(efh)\n\n loggers.update({name: logger})\n\n return logger", "def logger(self):\n return logging.getLogger('BuilderProvider')", "def logger() -> Logger:\n return _plugin_logger" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to emit a subheader message to the log(s). Subheader messages are surrounded by empty lines for emphasis.
def subheading(message): # get the appropriate logger logger = AdmitLogging.findLogger() if logger is None: return logger.info("") logger.info(" " + message) logger.info("")
[ "def draw_header(self, stream, header):\n stream.writeln(header)\n stream.writeln('~' * len(header))\n stream.writeln()", "def print_header(message: str, level: int = 2) -> None:\n prefix = \"#\" * level\n display(Markdown(f\"{prefix} {message}\"))", "def generate_header():\n trace_id = uuid.uuid4().hex\n span_id = uuid.uuid4().hex[:16]\n trace_option = 1\n\n header = '{}/{};o={}'.format(trace_id, int(span_id, 16), trace_option)\n\n return trace_id, span_id, header", "def header():\n return \"# Some kind of legal stuff\" + LF + LF", "def add_header(self, *args, **kwargs):\r\n self.header = True\r\n self.add_row(ypos=0, *args, **kwargs)", "def header(self):\n self.head_formatter = logging.Formatter('%(message)s')\n self.head_logger = setup_logger(\"Header\",\n cfg.log.root,\n self.head_formatter,\n level=logging.INFO)\n self.head_logger.info(\"\\n\\n\" + \"*\" * 30)\n current_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.head_logger.info(current_time)\n if cfg.model.restart:\n self.head_logger.info(\"Using past training on: {}\".format(cfg.model.savepath)) \n self.head_logger.info(\"Using GPU: {}\".format(cfg.CUDA_VISIBLE_DEVICES))\n self.head_logger.info(\"-\" * 30)", "def append_header(self):\r\n # NOTE before everything\r\n # .TH title_upper section date source manual\r\n if self.header_written:\r\n return\r\n self.head.append(self.header())\r\n self.head.append(MACRO_DEF)\r\n self.header_written = 1", "def write_header(self):\n self.handle.write(\"##maf version=1 scoring=none\\n\")\n self.handle.write(\"# generated by Biopython\\n\\n\")", "def setTraceHeader(self, theader): \n self.traceHeader = theader", "def _tableSubHeaderTag( self ):", "def add_header2(self, content):\n self.add_component(Header(content, 2))", "def handle_header_appendix(self, _header, data):\n self._logger.debug(\"%s | Handling header appendix message\", self._name)\n\n appendix = str(data)\n self._add_dataset(\"global_appendix\", appendix)", "def print_header():\n header = \"| {:<18} | {:<18} | {:<21} | {:<21} |\".format(\"ROLL_NUMBER\",\n \"NAME\",\n \"DATE-OF-BIRTH\",\n \"REGISTRATION_DATE\")\n print(header, '\\n', \"_\"*(len(header)), \"\\n\")", "def format_log_line(header, text):\n if header is None:\n return \"{0}: {1}\".format(current_timestamp(), text)\n else:\n return \"{0}: {1}: {2}\".format(current_timestamp(), header, text)", "def adapt_header(header):\n return header.tostring(sep='\\n')", "def _print_version_section_header(self, version_section, name, lead0x=True,\n indent=1):\n if hasattr(version_section, 'num_versions'):\n num_entries = version_section.num_versions()\n else:\n num_entries = version_section.num_symbols()\n\n self._emitline(\"\\n%s section '%s' contains %s entries:\" %\n (name, bytes2str(version_section.name), num_entries))\n self._emitline('%sAddr: %s Offset: %s Link: %i (%s)' % (\n ' ' * indent,\n self._format_hex(\n version_section['sh_addr'], fieldsize=16, lead0x=lead0x),\n self._format_hex(\n version_section['sh_offset'], fieldsize=6, lead0x=True),\n version_section['sh_link'],\n bytes2str(\n self.elffile.get_section(version_section['sh_link']).name)\n )\n )", "def emit(self, record):\n try:\n if self.shouldRollover(record):\n self.doRollover()\n if self.header_msg is not None:\n for msg in self.header_msg:\n header_record = logging.LogRecord(\"\", 20, \"\", 0, msg, (), None, None)\n logging.FileHandler.emit(self, header_record)\n logging.FileHandler.emit(self, record)\n except (KeyboardInterrupt, SystemExit) as err:\n raise err\n except Exception as err:\n 
self.handleError(record)", "def _write_only_header(config, header):\n names, lengths = _parse_header(header)\n with pysam.Samfile(\"-\", \"wbu\", text = \"\".join(header),\n referencenames = names,\n referencelengths = lengths,\n add_sq_text = False) as handle:\n return 0", "def AttachHeader(self, msg, length=10):\n msg_list = msg.splitlines()\n if len(msg_list) > length:\n # The pre_body part has a limit of 10 lines, if the msg is larger than 10\n # lines, truncate it and attach to the extra part at the bottom.\n self._WriteToRecord(EXTRA, '...full message of header part\\n')\n self._WriteToRecord(EXTRA, msg)\n self._WriteToRecord(HEAD, '\\n'.join(msg_list[:(length - 1)]))\n self._WriteToRecord(HEAD, TRUNCATE_MESSAGE)\n else:\n self._WriteToRecord(HEAD, msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate weights for a low pass Lanczos filter.
def low_pass_weights(window, cutoff): order = ((window - 1) // 2 ) + 1 nwts = 2 * order + 1 w = np.zeros([nwts]) n = nwts // 2 w[n] = 2 * cutoff k = np.arange(1., n) sigma = np.sin(np.pi * k / n) * n / (np.pi * k) firstfactor = np.sin(2. * np.pi * cutoff * k) / (np.pi * k) w[n-1:0:-1] = firstfactor * sigma w[n+1:-1] = firstfactor * sigma # Needed to add a normalization factor to make the weights add up to 1. norm = np.sum(w) w = w/norm return w[1:-1]
[ "def weights(self) :\n\t\treturn sign(self.L) #1/(self.L + 0.00001) ", "def wwl(X, node_features=None, num_iterations=3, sinkhorn=False, gamma=None):\n D_W = pairwise_wasserstein_distance(X, node_features = node_features, \n num_iterations=num_iterations, sinkhorn=sinkhorn)\n wwl = laplacian_kernel(D_W, gamma=gamma)\n return wwl", "def calculateWeights(self, invCovMatrix, avgFeatureVals):\n numFeatures = len(self.featureSet)\n self.weight0 = 0.0\n self.weights = zeros( numFeatures )\n for w_idx in range(numFeatures):\n for f_i in range(numFeatures):\n self.weights[w_idx] += invCovMatrix[w_idx, f_i] * avgFeatureVals[f_i]\n self.weight0 += self.weights[w_idx] * avgFeatureVals[w_idx]\n self.weight0 *= -0.5", "def initialize_weights(self):\n for p in self.parameters():\n data = p.data\n if data.dim() == 1:\n # bias\n data.zero_()\n elif data.dim() == 2:\n # linear weight\n n = data.size(1)\n stdv = 1.0 / math.sqrt(n)\n data.normal_(0, stdv)\n elif data.dim() in (3, 4):\n # conv weight\n n = data.size(1)\n for k in data.size()[2:]:\n n *= k\n stdv = 1.0 / math.sqrt(n)\n data.normal_(0, stdv)\n else:\n raise NotImplementedError \n self.decoder.embed.weight.data.normal_(0, 1)\n for i in range(len(self.decoder.decoder)):\n bias = self.decoder.decoder[i].bias_ih\n n = bias.size(0)\n start, end = n // 4, n // 2\n bias.data[start:end].fill_(1.0)", "def calc_weight(self):\r\n coeffs = [8.79055, 4.2928] # the coeffs of the linear eauation (found according UR5 and motoman)\r\n weights = [0] # the wieght of each link\r\n acc_length = 0 # accumelated length\r\n acc_weight = 0 # accumelated weight\r\n for link in self.links[1:]:\r\n acc_length = acc_length + float(link)\r\n weights.append(round(acc_length * coeffs[0] + coeffs[1] - acc_weight, 2))\r\n acc_weight = acc_weight + weights[-1]\r\n while len(weights) < 7:\r\n weights.append(1)\r\n return [str(weight) for weight in weights]", "def calc_kl_divergence(self):\n\t\treturn -1. 
* np.sum(self.Z) + np.sum(self.posterior_weights * self.LLs)", "def init_weights(self):\r\n self.weights = [0 for i in range(len(self.inputs[0][0]))]", "def get_minimum_negative_weight(self, incoming_projection):", "def learn_initial_weights(self, X):\n output = torch.tensor(X, dtype=torch.float32)\n for i in range(len(self.weights)):\n torch.nn.init.xavier_normal_(self.weights[i].weight, torch.nn.init.calculate_gain('tanh'))\n self.weights[i].bias.data.fill_(0)\n output2 = self.weights[i].forward(output)\n mean = output2.mean(axis=0)\n self.weights[i].bias.data = -mean\n output = self.weights[i].forward(output)\n output = self.acts[i](output)\n # print(output.mean(axis=0), output.mean(axis=0).shape)\n torch.nn.init.xavier_normal_(self.outlayer.weight, torch.nn.init.calculate_gain('tanh'))\n self.outlayer.bias.data.fill_(0)\n # self.outlayer.bias.data[1].fill_(np.log(np.exp(1) - 1))\n # Noise can be tuned here...\n self.outlayer.bias.data[1] = -5", "def lows_power(self, filtered=True):\n return (\n self.get_freq_power(0, filtered) + self.get_freq_power(1, filtered)\n ) * 0.5", "def weighted_least_squares(self, spec, weights):\n ww = weights.T # nwave x ntrain\n wx = ww[:, :, None] * self.X # nwave x ntrain x nfeature\n\n b = np.dot(self.X.T, weights * spec).T # nwave x nfeature\n # This is the time suck\n a = np.matmul(self.X.T, wx) # nwave x nfeature x nfeature\n #a = np.dot(self.X.T, wx).transpose(1,0,2)\n return np.linalg.solve(a, b).T", "def waist(z):\n return w0*np.sqrt(1+(abs(z-L_w)/z_R)**2)", "def compute_sample_weight(class_weight, y, *, indices=...):\n ...", "def calculateWeights(stations, df):\n\n #Variables\n\n #List all sensors present in full dataset\n sensors = df[\"Sensor\"].unique()\n\n weights = {}\n\n #################################################################################\n\n #Loop over all the sensors\n for sensor in sensors:\n\n #Make an array with the latitude and longitude of the sensor\n x = np.array(df[df[\"Sensor\"] == sensor].reset_index()[\"SensorLatitude\"][0],\n df[df[\"Sensor\"] == sensor].reset_index()[\"SensorLongitude\"][0]).reshape(1, -1)\n\n station_weights = {}\n #Loop over all stations\n for station in stations:\n\n #Make an array with the latitude and longitude of the station\n y = np.array(df[station + \" Lat\"][0],\n df[station + \" Lon\"][0]).reshape(1, -1)\n\n #Add station weight\n station_weights[station + \" weight\"] = rbf_kernel(x, y)\n\n weights[sensor] = station_weights\n\n return weights", "def get_recurrent_weights(self):\n return npify(self.rnn_layer.weight_hh_l0)", "def init_weights(self):\r\n default_init_weights(self, 1)", "def information_gain(L, kernel):\n return 0.5 * (L ** 2).diag().log().sum().item() - len(L) / 2 * np.log(\n kernel.alpha)", "def get_mean_positive_weight(self, incoming_projection):", "def get_robust_channel_weights(self):\n self.comments.append('[W]')\n live_channels = self.channels.get_live_channels()\n valid_frames = self.frames.is_unflagged(\n self.flagspace.flags.CHANNEL_WEIGHTING_FLAGS) & self.frames.valid\n\n var_sum, var_weight = int_nf.robust_channel_weights(\n frame_data=self.frames.data,\n relative_weights=self.frames.relative_weight,\n sample_flags=self.frames.sample_flag,\n valid_frames=valid_frames,\n channel_indices=live_channels.indices)\n\n self.set_weights_from_var_stats(\n live_channels, var_sum, var_weight)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
gets the model file name
def _get_model_filename(self) -> str: model_filename = f'{self.model_dir}/{self.description}.{self._get_model_file_extension()}' return model_filename
[ "def _get_model_name_from_file(path: str):\n return os.path.basename(path).split(\".\")[0]", "def _get_model_name(self):\n sysinfo = SystemInfo()\n model_name = sysinfo.get_model_name()\n return model_name", "def get_model_filename(model_dir: str) -> str:\n return os.path.join(model_dir, \"model.joblib\")", "def _get_model_file_extension(self):\n pass", "def get_model_file_name(orig: str, dest: str):\n return f\"pta_{orig}{dest}.pickle\"", "def get_model_name(self):\n return self.model_name", "def get_model_name(args):\n hiddensizes = '_' + str(args.D_h_features)\n model_name = const.MODEL_DIRECTORY + str(args.id) + '_model' + hiddensizes + '.pth'\n analysis_name = const.ANALYSIS_DIRECTORY + str(args.id) + '_model_analysis' + hiddensizes + '.npy'\n return model_name, analysis_name", "def model_name(self) -> str:\n return self.device_info.model_name", "def form_model_name(self):\n model_d = self.parameters['fitmodel']\n model_name = ''\n if model_d['pulse']:\n model_name += 'pulse'\n model_name += '_' + model_d['model']\n if model_d['constrained']:\n model_name += '_constrained'\n if model_d['conv']:\n model_name += '_conv'\n else:\n model_name += '_iter'\n\n return model_name", "def _model_path(self) -> str:\n return Container().data_path() + '/' + self.MODEL_NAME", "def get_file_name(self, output_dir, model_name):\n file_name = \"%sGroup%s_Seg%s_%s.dat\" % (model_name, self.group, self.segment, self.data_name)\n return os.path.join(output_dir, file_name)", "def __str__(self) -> str:\n return self.__class__.__name__.split(\"Model\")[0]", "def model_id(self) -> str:\n return self.model_dir.lower()", "def name(self):\n self.filename = self.model.name+\"_\"\n for k,p in self.params.items():\n self.filename += k+\"_\"+str(p).replace(\".\", \",\")+\"_\"\n self.filename += str(self.nb_dataset)", "def modelpath4file(filename):\n local_path = URLs.LOCAL_PATH / 'models' / filename\n if local_path.exists():\n return local_path\n else:\n return Config.model_path() / filename", "def resolve_model_name(filename):\n first_ = filename.find(\"_\")\n second_ = filename.find(\"_\", first_ + 1)\n model_name = filename[:second_]\n return get_classifier_print_name(model_name)", "def get_filename(self) -> str:\n return self._filename", "def trained_model_filepath(self) -> str:\n return f'/usr/src/app/audit/science/{self.location}/models/{self.model_id}'", "def get_filename(self):\n return self.source.get_filename()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
run fit on model
def _fit_model(self): pass
[ "def test_fit(self):\n self._fit()", "def fit_model(self, data, batch_size, epochs, **kwargs):\n return None", "def fit(self, X):", "def fit_model(self):\n yy, xx = np.indices(self.data.shape)\n xdata_tuple = (xx, yy)\n # return model\n return self.model(xdata_tuple, *self._popt)", "def fit(self) -> None:\n\n self.model = OneClassSVM(nu=self.outlier_fraction, kernel=self.kernel, gamma=self.gamma)\n self.model.fit(self.scale_data)", "def fit_single_model(self, X,y):\n raise NotImplementedError()", "def fit(self, dataset):\n dataset.set_batch_size(dataset.get_n('train') - 1)\n train_data = next(dataset.mixed_batch_iter())\n\n dataset.set_batch_size(dataset.get_n('val') - 1)\n val_data = next(dataset.mixed_batch_iter(data='val'))\n print 'INFO: training on ', dataset.get_n('train') - 1, ' examples'\n self.train_on_batch(*train_data) \n mae = self.mae(*val_data)\n print 'INFO: mae: ', mae", "def fit_if_needed(self):\n if not self._fitted:\n self.fit()", "def fit_model(self):\n return self.gen_model(self._data, *self._popt)", "def _fit_model(self):\n # Determine location parameter from data\n floc = self._determine_loc()\n\n # Fit Weibull to data\n c, loc, scale = self.model.fit(self.ratio, self.c_guess, floc=floc)\n\n # Make Weibull-fitted cdf ratio\n self.fitted_ratio = self.model.pdf(self.bins, c, loc, scale)\n \n self.fitted_pars = {'c': c, 'loc': loc, 'scale': scale}\n self.pars = self.fitted_pars", "def _fit(self, X, y, w):\n pass", "def fit(self, model, *args, **kwargs):\n\n rslt = {}\n for i in self.fcmdict:\n rslt[i] = model.fit(self.fcmdict[i], *args, **kwargs)\n return rslt", "def _train(self):\n self._model.learn(total_timesteps=self._num_timesteps)", "def train_model(self):\n # fit the model\n self.fit_lstm(self.train_scaled, 1, self.nb_epochs, 4)", "def trainModel(self, Model) -> None:\n ...", "def model_fit(sn, model, params, glm = 7, rois = {'cortex':'tesselsWB162', 'cerebellum':'grey_nan'}, \n trainMode = 'crossed', trainExper = 1, inclInstr = 1, scale = True, \n overwrite = True, avg = 1):\n \n # Setting directories\n name = 'mb4_%s_%s'% (rois['cortex'], model)\n outDir = os.path.join(baseDir, 'sc%d'% trainExper, connDir, 'glm%d'%glm, name)\n \n if not os.path.exists(outDir):\n os.makedirs(outDir)\n \n \n # use prep_data.get_wcon to get the data\n Data = {} # dictionary that will have the roi names as its keys\n for ri in list(rois.keys()):\n [Data[ri], Tf] = prep_data.get_wcon(experNum = [1, 2], glm = 7, roi = rois[ri], avg = avg)\n \n X = Data['cortex']\n Y = Data['cerebellum']\n \n \n # Find the data that we want to use for fitting the connectivity\n SI1 = np.argwhere(np.array(((Tf['StudyNum'] == trainExper)*1)*((Tf['sess'] == 1)*1) == 1))\n SI2 = np.argwhere(np.array(((Tf['StudyNum'] == trainExper)*1)*((Tf['sess'] == 2)*1) == 1))\n \n # Arrange data based on the training mode\n trainXindx = np.concatenate((SI1, SI2))\n if trainMode == 'crossed':\n trainYindx = np.concatenate((SI2, SI1))\n elif trainMode == 'uncrossed':\n trainYindx = np.concatenate((SI1, SI2))\n \n trainXindx = trainXindx.flatten()\n trainYindx = trainYindx.flatten()\n \n \n # Estimate the model and store the information attached to it\n RR = {} # dictionary with all the info for the model for all the subjects\n for s in sn:\n print('........ 
Doing Modelling for s%02d'% s)\n outname = os.path.join(outDir, '%s_s%02d.dat'%(name, s))\n \n # Get data\n xx = X['s%02d'%s][trainXindx, :]\n yy = Y['s%02d'%s][trainYindx, :]\n \n # add the new model to the previous one or over-write it?\n if (os.path.exists(outname) and overwrite == False):\n tmpR = pickle.load(open(outname, \"rb\"))\n else:\n print('!!!!!!!! overwriting the old model file !!!!!!!!')\n # creating a default empty dictionary as reference. Each time a model \n # is fitted an element is appended to the values of this dictionary\n tmpR = {'sn':[], 'M':[], 'params':[], 'model':[], \n 'inclInstr': [], 'trainMode':[], 'xname':[], \n 'R2':[], 'R2vox':[], 'R':[], 'Rvox':[]} \n \n # Run all the models with different parameters\n ## For now, I am just working with a 1-D numpy array\n if not params.size: # if params is empty\n print('parameter array is empty')\n \n else: # if params is not empty\n for ip in params: # looping over all the parameters\n print('...... Doing model fitting for %s param: %s' % (model, ip))\n # fit the model\n M, R2, R, R2vox, Rvox = connect_fit(xx, yy, model = model, scale = True, args = ip)\n # get R2, R\n# (R2, R, R2_vox, R_vox) = R2calc(xx, yy, M)\n \n tmpR['sn'].append(s)\n tmpR['M'].append(M)\n tmpR['params'].append(ip)\n tmpR['model'].append(model)\n tmpR['inclInstr'].append(inclInstr)\n tmpR['trainMode'].append(trainMode)\n tmpR['xname'].append(rois['cortex'])\n \n tmpR['R2'].append(R2)\n tmpR['R'].append(R)\n tmpR['R2vox'].append(np.array(R2vox))\n tmpR['Rvox'].append(np.array(Rvox))\n Rr = tmpR\n \n RR['s%02d'%s] = Rr\n \n # save R\n pickle.dump(Rr, open(outname, \"wb\")) # \"wb\": Writing Binary file\n\n return RR", "def fit_image(self):\n self.params = self.all_params['Fit 0']\n self.fit_results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n #report_fit(self.fit_results)\n sel.fparams = self.fit_results.params", "def on_batch_begin(self, **fit_kwargs):", "def lm_fit(self):\r\n self.LinearModel = LinearRegression().fit(self.x, self.y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }