Dataset columns:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: sequence (length 19 to 20)
metadata: dict
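A minimal sketch of how a record with this schema could be loaded and inspected, assuming the dataset is published on the Hugging Face Hub; the repository identifier below is a placeholder, not the real dataset name.

from datasets import load_dataset

ds = load_dataset("your-org/code-search-triplets", split="train")  # hypothetical repo id
row = ds[0]
print(row["query"])           # natural-language docstring
print(row["document"])        # the code snippet paired with the query
print(len(row["negatives"]))  # 19-20 non-matching code snippets
print(row["metadata"])        # training objective description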
Downloads file from website.
def download_file(url):
    downloaded_file = requests.get(url)
    return downloaded_file
[ "def download(url, file_name):\n with open(file_name, \"wb\") as file:\n response = get(url)\n file.write(response.content)", "def download_file(self, url, filename):\n with open(filename, 'wb') as f:\n f.write(self.get_read(url))", "def download(url, filename):\n response = urllib.request.urlopen(url)\n data = response.read()\n with open(BASEDIR+\"/\"+filename,'wb') as file:\n file.write(data)\n file.close()", "def download_url():", "def download(self):\n try:\n loc_filename, headers = urllib.request.urlretrieve(self.geckodriver_download_url, filename=self.current_dir + '/' + self.filename)\n self.__extract()\n self.__add_to_path()\n except Exception as e:\n print(e)", "def download_file(self, file_name, url):\n\n destination_dir = os.path.join(self.root, 'RawData')\n download_file(destination_dir, file_name, url)", "def _download_and_save(cls, url, filepath):\n r = requests.get(url)\n r.raise_for_status()\n with filepath.open(mode='wb') as f:\n f.write(r.content)", "def download_file(url, save_dir='output'):\n \n save_file = Path(save_dir, Path(url).name)\n with requests.get(url, stream=True) as r:\n with open(save_file, mode='wb') as f:\n for chunk in r.iter_content(chunk_size=1048576):\n f.write(chunk)\n \n return save_file", "def download_file(self,url):\r\n output_filepath = self.datafiles_folder + '/' + url[url.rfind(\"/\")+1:]\r\n # Download the file from the url and save it to the data files folder.\r\n try:\r\n with urllib.request.urlopen(url) as response, open(output_filepath, 'wb') as output_file:\r\n shutil.copyfileobj(response, output_file)\r\n except:\r\n logging.error(\"Unable to download file from {}. Exiting program.\".format(url))\r\n exit(1)\r\n logging.info(\"Downloaded file to {}\".format(output_filepath))\r\n return output_filepath", "def _download_from_url(self, url):\n ext = get_file_extension(url)\n if \"?\" in url:\n ext = get_file_extension(os.path.splitext(url.split(\"?\")[0]))\n filepath = \"/tmp/%s.%s\" % (uuid.uuid4().hex, ext)\n request.urlretrieve(url, filepath)\n return filepath", "def download_raw_file( self, url ):\n raw_file_url = self.get_raw_file_url( url )\n f = urlopen( raw_file_url )\n output_filename = self.get_output_file( url )\n output_dirname = os.path.dirname( output_filename )\n print( \"save to %s\" % output_filename )\n if not os.path.exists( output_dirname ):\n os.makedirs( output_dirname )\n with open( output_filename, \"wb\" ) as fp:\n shutil.copyfileobj( f, fp )", "def download(self, urlpath, outfilepath):\n makedirs_file(outfilepath)\n print(urlpath)\n r = requests.get(self.full_url(urlpath), stream=True, **self._reqparams)\n if r.status_code != 200:\n print('Download not allowed or file not exist')\n return False\n with open(outfilepath, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n print('Download OK')\n return True", "def download(url, path=None):\n print \"Downloading %s...\" % url\n try:\n url_file = urllib2.urlopen(url)\n except urllib2.HTTPError:\n raise IOError(\"Couldn't open URL %s.\" % repr(url))\n\n # Use the provided path, or default to the basename\n filename = path if path else posixpath.basename(url)\n try:\n local_file = open(filename, 'wb')\n local_file.write(url_file.read())\n local_file.close()\n except IOError:\n raise IOError(\"Couldn't write filename %s.\" % repr(filename))\n\n return filename", "def download_file(self, url: str):\n if url.startswith('http'):\n try:\n download_file_response = requests.get(url)\n except (requests.exceptions.RequestException, ConnectionError, TimeoutError) as error:\n 
self.sheet_updates.error_type = 'CONNECTION ERROR'\n self.sheet_updates.error_message = 'Could not connect to ' + url\n self.logger.exception(error)\n raise DownloadError('Could not download from ' + url + ' because of a connection error.')\n\n if not download_file_response.ok:\n self.sheet_updates.error_type = 'DOWNLOAD ERROR (' + str(download_file_response.status_code) + ')'\n self.sheet_updates.error_message = download_file_response.text\n raise DownloadError('Was unable to download the file from ' + url)\n content = download_file_response.content\n buffer = BytesIO(download_file_response.content)\n\n elif url.startswith('ftp'):\n import urllib.parse\n import ftplib\n parts = urllib.parse.urlparse(url)\n file_name = parts.path.split('/')[-1]\n path = parts.path.replace(file_name, '')\n ftp = ftplib.FTP(parts.netloc)\n ftp.login()\n ftp.cwd(path)\n ftp.retrbinary('RETR ' + file_name, open(self.temp_path + file_name, 'wb').write)\n ftp.quit()\n with open(self.temp_path + file_name, 'rb') as file:\n content = file.read()\n buffer = BytesIO(content)\n else:\n self.sheet_updates.error_type = 'DOWNLOAD ERROR'\n self.sheet_updates.error_message = 'Invalid protocol: only HTTP[S] & FTP are supported!'\n raise DownloadError('Invalid protocol: only HTTP[S] & FTP are supported!')\n\n # save downloaded file locally to ensure that it is unzipped\n # and does not need to be downloaded again for getting an URI\n file_name = self.temp_path + 'temporary.' + self.file_end.lower()\n if url.endswith('.zip'):\n z = zipfile.ZipFile(buffer)\n text = z.read(z.infolist()[0]).decode('utf-8')\n elif url.endswith('.gz'):\n text = gzip.decompress(content).decode('utf-8')\n else:\n text = content.decode('utf-8')\n\n with open(file_name, 'w', encoding='utf-8') as file:\n file.write(text)\n\n self.local_file_name = file_name", "def get_file_from_network(self, url, filename):\n encoded_url = urllib.parse.quote(url, safe='/:')\n req = Request(encoded_url)\n response = urlopen(req)\n with open(filename, 'wb') as return_file:\n return_file.write(response.read())", "def download_file(url):\n\n local_filename = url.split(\"/\")[-1]\n with requests.get(url, stream=True) as r:\n with open(f\"audios/{local_filename}\", \"wb\") as f:\n shutil.copyfileobj(r.raw, f)\n\n return local_filename", "def download_file(self, url):\n resp = requests.get(url, stream=True)\n file_size = int(resp.headers['Content-Length'])\n with open(self.FILE_PATH, 'wb') as f:\n with tqdm(total=file_size / self.CHUNK_SIZE, desc='log file downloading', ) as pbar:\n for chunk in resp.iter_content(chunk_size=self.CHUNK_SIZE):\n if chunk:\n f.write(chunk)\n pbar.update(1)", "def download_page(learnUser, url, target_dest, learn_name):\n target_dest = f\"{target_dest}/Single Files/\"\n learnUser.downloadFile(url, target_dest, learn_name)", "def download_file(self, name):\n subprocess.call([self.curlpath, '-s', self.ftppath+name,\n '--netrc', '--output', self.savepath+name])\n if path.exists(self.savepath+name):\n print(\"Sucessfully downloaded {}.\".format(name))\n else:\n raise ObtainError(\"File {} cannot be processed.\".format(name) +\n \" Either it was not found on the FTP server, \" +\n \"or it could not be saved.\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rename source file to destination filename; replace destination file.
def rename_and_overwrite_file(source_filename, destination_filename):
    os.replace(source_filename, destination_filename)
[ "def rename_file(source, dest):\r\n os.rename(source, dest)\r\n __remove_pyc_pyo(source)", "def Rename(src, dst):\n os.rename(src, dst)", "def rename_file(self, file_id, name):\n pass", "def rename(src, dst):\n if sys.platform == \"win32\":\n return win32_rename(src, dst)\n else:\n return os.rename(src, dst)", "def UpdateFile(oldFilePath, newFilePath):\n perm = os.stat(oldFilePath).st_mode\n os.rename(newFilePath, oldFilePath)\n os.chmod(oldFilePath, perm)", "def rename_file (\n source_path,\n target_path,\n allow_undo=True,\n no_confirm=False,\n rename_on_collision=True,\n silent=False,\n hWnd=None\n):\n return _file_operation (\n shellcon.FO_RENAME,\n source_path,\n target_path,\n allow_undo,\n no_confirm,\n rename_on_collision,\n silent,\n hWnd\n )", "def rename(oldPath, newPath, **kwargs):\n import os\n return os.rename(oldPath, newPath, **kwargs)", "def rename(self, path, dst, opt=None):\n\n url = self._paths_url(path, 'rename')\n self._post(url, opt, {'dst': dst})", "def rename_file(self, old, new):\n del self.file_dict[os.path.basename(old)]\n self.file_dict[os.path.basename(new)] = new\n # reconstruct to include new file\n self.mp3_basenames = tuple(sorted(self.file_dict.keys()))\n\n del self.meta_cache[os.path.basename(old)]\n self.parse_info_for_status(os.path.basename(new)) # replace in meta_cache", "def move_file(self, path: str, filename: str, new_path: str, new_filename: str = None):\n self.copy_file(path, filename, new_path, new_filename)\n self.delete_file(path, filename)", "def do_rename(self, args):\n print(self.enc_ftp.rename(args.filename, args.new_filename))", "def _rename_filename(self, filename):\n directory = os.path.dirname(self._filepath) # keep the same path\n extension = os.path.splitext(self._filepath)[1] # keep the extension\n\n # Concatenate the new path for the file, rename the file and update the\n # _filepath variable.\n new_path = os.path.join(directory, filename + extension)\n os.rename(self._filepath, new_path)\n self._filepath = new_path", "def rename(file, newFileName):\n\ttry:\n\t\tos.rename(translatePath(file), translatePath(newFileName))\n#\t\tshutil.move(file, newFileName)\n\t\treturn True\n\texcept:\n\t\treturn False", "def rename(filepath, new_file_name=None, prefix=None, suffix=None, new_extension=None):\n old_file_path, old_extension = os.path.splitext(filepath)\n old_file_name = os.path.basename(old_file_path)\n file_dir = os.path.dirname(old_file_path)\n\n if not new_file_name:\n new_file_name = old_file_name\n\n if not new_extension:\n new_extension = old_extension\n\n if prefix:\n new_file_name = prefix + new_file_name\n\n if suffix:\n new_file_name += suffix\n\n new_file_path = os.path.join(file_dir, new_file_name + new_extension)\n os.rename(filepath, new_file_path)\n return new_file_path", "def move(source, destination):\n\tshutil.move(_uri_to_path(source), _uri_to_path(destination)) #Use shutil because it overwrites old files on Windows too.", "def _RenameFile(filename, rename_template, directory_path):\n if rename_template is None:\n return filename\n source_base, extension = os.path.splitext(filename)\n if extension:\n extension = extension[1:]\n source_dir = os.path.basename(os.path.abspath(directory_path))\n now_time = datetime.datetime.utcnow()\n replace_map = {\n 'source_dir': source_dir,\n 'source_base': source_base,\n 'source_ext': extension,\n 'upload_time': unicode(now_time).replace(' ', '_'),\n }\n output_file = string.Template(rename_template).substitute(replace_map)\n return output_file", "def 
auto_rename(source_file_path, save_path, md_path):\n if os.path.isfile(md_path):\n info_dict = ValidMarkdown(md_path).check_markdown_file()\n else:\n raise FileNotFoundError(f\"{md_path} not exists\")\n\n if os.path.isfile(source_file_path):\n # Create new file name\n file_format = os.path.splitext(source_file_path)[-1]\n name_list = [info_dict['backbone-name'], info_dict['train-backend'], info_dict['mindspore-version']]\n if info_dict.get('train-dataset', None):\n name_list.append(info_dict['train-dataset'])\n now_time = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n name_list.append(now_time)\n new_file_name = '_'.join([str(x) for x in name_list]) + file_format\n new_file_name = new_file_name.replace('/', '-')\n\n # Create new file path\n des_file_path = os.path.join(save_path, info_dict['module-type'], info_dict['backbone-name'], new_file_name)\n\n # If file path is not exist, create des file path\n if not os.path.exists(os.path.dirname(des_file_path)):\n os.makedirs(os.path.dirname(des_file_path))\n\n # Copy file\n if source_file_path and des_file_path:\n try:\n shutil.copyfile(source_file_path, des_file_path)\n except IOError as e:\n raise Exception(e)\n except:\n raise Exception('Unexcepted error: ', sys.exc_info())\n\n print('Rename and copy file done!')\n return des_file_path", "def rename( self, source : str, target : str, *, ext : str = None ):\n src_full = self.fullKeyName( source, ext=ext )\n tar_full = self.fullKeyName( target, ext=ext )\n os.rename(src_full, tar_full)", "def rename_file(file1, file2, overwrite=False):\n if os.path.isfile(file1):\n if os.path.isfile(file2):\n while not overwrite:\n answer = raw_input(\"%s already exists. Do you want to overwrite? Yes [No] Abort: \" \\\n % file2).lower()\n if answer == 'yes':\n overwrite = True\n elif answer == 'abort':\n return None\n else:\n answer = raw_input(\"Enter a new filename: \")\n if answer != '': file2 = os.path.normcase(answer)\n if not os.path.isfile(file2):\n overwrite = True\n if file2 != file1: os.remove(file2)\n try:\n os.rename(file1, file2)\n except Exception as details:\n warn( \"Failed to rename %s to %s: %s\" % (file1, file2, details) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Groups data by selected criteria (cf. groupby method).
def group_data_by_selection(dataframe, grouping_selection_list):
    grouped_data = dataframe.groupby(grouping_selection_list)
    return grouped_data
[ "def grouping_attributes(self, grouping_info, data):\n print(\"************* start grouping some attributes **************\")\n for grouping in grouping_info:\n attributes = grouping['attributes']\n new_attr = grouping['grouped_name']\n\n # group attribute values into tuples\n data[new_attr] = data[attributes].apply(tuple, axis=1)\n\n # map tuples to new values in new columns\n encoding = {v: i for i, v in enumerate(grouping['combinations'])}\n # this row is verbous, data[new_attr] = data[attributes].apply(tuple, axis=1)\n # here we map them to codes again like we map intervals to interval indexes\n data[new_attr] = data[new_attr].map(encoding)\n \n self.encode_mapping[new_attr] = encoding\n # look at this, here decode_mapping is a dict which maps index to real tuple\n self.decode_mapping[new_attr] = grouping['combinations']\n\n # todo: do we still need filter?\n # EMP top 20 filter\n # if \"filter\" in grouping:\n # data = data[~data[new_attr].isin(self.filter_values[new_attr])]\n\n # drop those already included in new_attr\n data = data.drop(attributes, axis=1)\n print(\"new attr:\", new_attr, \"<-\", attributes)\n print(\"new uniques after encoding:\", sorted(data[new_attr].unique()))\n # display after grouping\n print(\"columns after grouping:\", data.columns)\n print(\"grouping attributes done in DataLoader\")\n \n return data", "def get_olap_groups(self, norm_data):\n olap_data = norm_data[norm_data['transactiontype']\n >= self.OLAP_QUERY_LOWER_ID]\n return [groupdata\n for _, groupdata in olap_data.groupby('transactiontype')]", "def add_groupby(self):\n if self.query_model.groupBy_columns is not None and len(self.query_model.groupBy_columns) > 0:\n groupby_clause = \" GROUP BY \"\n for col_name in self.query_model.groupBy_columns:\n groupby_clause += \"?\" + col_name + \" \"\n groupby_clause += \"\\n\"\n self.query_string += groupby_clause", "def group(data, groups, statistic='mean'):\n matched, failed = match_variables(data, groups)\n for x in failed: print('Warning: Can not find variable', x)\n grp = data.groupby(matched)\n table = eval(\"grp.\" + statistic + \"()\")\n table = table.reset_index()\n return table", "def grouper(df):\n print(\"performing groupby and sum\")\n\n df.loc[df['outcome_id'] != 'death2', 'outcome_id'] = 'case'\n\n groups = ['location_id', 'year_start', 'year_end', 'age_group_unit',\n 'age_group_id', 'sex_id', 'source', 'nid',\n 'facility_id', 'representative_id', 'diagnosis_id',\n 'metric_id', 'outcome_id', 'nonfatal_cause_name']\n df = df.groupby(groups).agg({'val': 'sum'}).reset_index()\n\n return df", "def _iter_groups(self, data):\n groups = data.groupby(self.segmentation_col)\n\n for name in self.models:\n yield name, groups.get_group(name)", "def groupby(self, key=None):\n groups = {}\n\n if isinstance(key, str):\n keystr = key\n key = lambda x: x[keystr]\n\n if key is None:\n raise Exception(\"must specify keyfunc\")\n\n for row in self:\n key2 = key(row)\n\n # add new table if necessary\n if key2 not in groups:\n groups[key2] = self.new()\n\n groups[key2].append(row)\n\n return groups", "def _add_group_by_statement(self):\n query = \"group by \" + \"\".join([\"{0},\".format(x) for x in range(1, len(self.index_col) + 1)])\n return query[:-1]", "def _group_elk_results(self, request, results):\n flat_results = (hit for r in results for hit in r)\n result_groups = defaultdict(list)\n\n for hit in flat_results:\n group = _subtype_mapping[hit.subtype]\n display = hit._display\n if hit.subtype in _add_subtype_display:\n display += f' ({hit.subtype})'\n 
result_groups[group].append({\n '_display': display,\n 'uri': self._get_uri(request, hit)\n })\n\n return result_groups", "def grouped_slice(self, offset, limit):\n counts = self.grouped_counts()\n \n if offset == 0:\n qs_offset = 0\n else:\n qs_offset = sum(int(row['total']) for row in counts[:offset])\n \n qs_limit = sum(int(row['total']) for row in counts[offset:offset+limit])\n\n print(qs_offset)\n print(qs_limit)\n\n sub_qs = self.select_related('uniprot').select_related('transcript').select_related('transcript__gene').order_by('unique_grouping_id')[qs_offset:qs_offset+qs_limit]\n \n grouped_results = {}\n for result in sub_qs:\n try:\n grouped_results[result.unique_grouping_id].append(result)\n except (KeyError, AttributeError):\n grouped_results[result.unique_grouping_id] = [ result ]\n\n return grouped_results", "def build_reduced_grouped_dataframe(self):\n if self.reduced_grouped_dataframe is None:\n reduced_dataframe = self.get_reduced_dataframe()\n self.reduced_grouped_dataframe = reduced_dataframe.groupby(CASE_CONCEPT_NAME)", "def group_data_by_match_interval(sensor_datas, threshold=5):\n temp_groups = []\n\n # Find possibles groups\n for first_data in sensor_datas:\n\n group = [first_data]\n for second_data in sensor_datas:\n\n if first_data != second_data:\n if (first_data.start_timestamp - threshold <= second_data.end_timestamp) or (\n first_data.end_timestamp + threshold >= second_data.start_timestamp):\n group.append(second_data)\n\n temp_groups.append(group)\n\n groups = []\n\n for first_group in temp_groups:\n\n same_group = True\n for second_group in temp_groups:\n\n if first_group != second_group:\n if len(first_group) != len(second_group):\n same_group = False\n break\n\n for i in range(0, len(first_group)):\n if first_group[i] != second_group[i]:\n same_group = False\n break\n\n if not same_group:\n break\n\n if not same_group:\n groups.append(first_group)\n\n return groups", "def get_groupby(self, data_slice, attr_idx):\n ## @TODO: multiple passes through data...\n for label in self.attr_mapper[attr_idx][\"labels\"]:\n yield (label, (row for row in data_slice if row[attr_idx] == label))", "def groups(self):\n return dict(self._results.groups())", "def group(self, keyfunc=None): # XYZZY\n groups = {}\n if keyfunc is None:\n keyfunc = lambda x: x.value\n protogroups = groupby(sorted(self, key=keyfunc), key=keyfunc)\n for k, v in protogroups:\n newbag = xypath.Bag.from_list(v)\n newbag.table = self.table\n groups[k] = newbag\n return groups", "def group_by(self, *rows_queries, **kwargs):\n if len(rows_queries) < 1:\n raise PilosaError(\"Number of rows queries should be greater than or equal to 1\")\n q = [u\",\".join(q.serialize().query for q in rows_queries)]\n limit = kwargs.get(\"limit\")\n if limit is not None:\n q.append(\"limit=%s\" % limit)\n filter = kwargs.get(\"filter\")\n if filter is not None:\n q.append(\"filter=%s\" % filter.serialize().query)\n return PQLQuery(u\"GroupBy(%s)\" % u\",\".join(q), self)", "def group_by(data, keys):\n key_combo = keys[0]\n getter = _multi_itemgetter(key_combo)\n data = sorted(data, key=getter)\n\n if len(keys) > 1:\n grouped_data = []\n for values, subgroups in itertools.groupby(data, getter):\n # create a dict containing key value pairs and _subgroup\n result = dict(zip(key_combo, values))\n result['_subgroup'] = group_by(\n remove_keys_from_all(subgroups, key_combo),\n keys[1:]\n )\n grouped_data.append(result)\n data = grouped_data\n\n return data", "def _group_by(df, by=None, level=None, as_index=True, \n sort=True, 
group_keys=True, observed=False, \n dropna=True):\n if isinstance(df, pd.core.groupby.DataFrameGroupBy):\n df = _ungroup(df)\n return df.groupby(\n by=by, \n level=level, \n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n observed=observed,\n dropna=dropna)", "def group_dataset(self, group):\n ds = Dataset()\n ds.update(dict(\n [(tag,data_element) for tag,data_element in self.items() if tag.group==group]\n ))\n return ds" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print all asyncio tasks continuously in debug mode.
def print_tasks():
    while True:
        yield from asyncio.sleep(10)
        for task in asyncio.Task.all_tasks():
            if task.done():
                exception = task.exception()
                if exception is None:
                    logger.info("Task DONE: %s = %s", task, task.result())
                else:
                    logger.error("Task FAILED: %s = %s", task, exception)
            else:
                logger.debug("Tasks RUNNING: %s", task)
[ "async def debug_task(self, ctx, memory_id: hex_value):\n task = object_at(memory_id)\n if task is None or not isinstance(task, asyncio.Task):\n return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')\n\n if ctx.invoked_with == 'cancel_task':\n task.cancel()\n return await ctx.send(f'Cancelled task object {task!r}.')\n\n paginator = commands.Paginator(prefix='```py')\n fp = io.StringIO()\n frames = len(task.get_stack())\n paginator.add_line(f'# Total Frames: {frames}')\n task.print_stack(file=fp)\n\n for line in fp.getvalue().splitlines():\n paginator.add_line(line)\n\n for page in paginator.pages:\n await ctx.send(page)", "def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))", "async def debug_commands(self, ctx):", "def tasks(**_):\n for task in filter(bool, get_all_tasks()):\n print(task)", "def print_debug(self):\n print('\\n'.join(self.debug_buffer))", "def show_performed_experiments(self):\n for experiment in self.completed_tasks:\n print(experiment)", "def debug_task(self):\n print('Request: {0!r}'.format(self.request))", "def print_queue(self):\n print self.queue", "def timed_print():\n while True:\n print(\"hello\")\n yield TimedTask(3)", "def get_results(self):\n for t in self.task:\n print t.get()", "def debug(self):\n for i in range(self._size):\n print(\"------------------------\")\n print(\"Slot {}\".format(i))\n for x in range(len(self._slots[i])):\n print(self._slots[i][x])\n \n return", "def _print_tasks(env, tasks, mark_active=False):\n\n if env.task.active and mark_active:\n active_task = env.task.name\n else:\n active_task = None\n\n for task, options, blocks in tasks:\n # print heading\n invalid = False\n\n if task == active_task:\n method = 'success'\n else:\n if options is None and blocks is None:\n method = 'error'\n invalid = True\n\n else:\n method = 'write'\n\n opts = list(options or [])\n blks = list(blocks or [])\n\n write = getattr(env.io, method)\n write('~' * 80)\n write(' ' + task)\n write('~' * 80)\n env.io.write('')\n\n # non-block options\n if opts:\n for opt, values in opts:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n # block options\n if blks:\n had_options = False\n\n for block, options in blks:\n if options:\n had_options = True\n env.io.write(' {{ {0} }}'.format(block))\n\n for opt, values in options:\n env.io.write(' {0}: {1}'.format(opt,\n ', '.join(str(v) for v in values)))\n env.io.write('')\n\n if not had_options:\n blks = None\n\n if not opts and not blks:\n if invalid:\n env.io.write(' Invalid task.')\n else:\n env.io.write(' Empty task.')\n env.io.write('')", "def print_sink():\n while True:\n info = (yield)\n print info", "def debug():\n for file in glob(\"./*.log\"):\n i=0\n try:\n print('showing',file)\n for entry in parse_log(file):\n # entry._fields: Tuple of strings listing the field names\n # entry._asdict(): Return a new OrderedDict which maps field names to their corresponding values\n print(entry)\n i += 1\n if i == 2:\n break\n except Exception as e:\n print('ERROR content of file was not displayed:',e)", "def _suppress_unobserved_task_logging():\n logging.getLogger(\"asyncio\").setLevel(logging.CRITICAL)", "def asyncio_run(awaitable: Awaitable, *, debug: bool = False) -> Any:\n return asyncio.run(awaitable, debug=debug)", "def pprint(self):\r\n self.refresh()\r\n print(self.pool)", "def print_event_handlers(self):\n self.__scheduler.print_event_handlers()", "def debug(*args, 
offset=None, indent=4, extra=None):\n def debug_decorator(func):\n\n if asyncio.iscoroutinefunction(func):\n @wrapt.decorator\n async def async_debug_wrapper(func, instance, args, kwargs):\n loop = asyncio.get_event_loop()\n offset_space = derive_offset_space(offset, indent)\n printer = PrettyPrinter(indent=indent, width=WIDTH - len(offset_space))\n\n print_enter_info(func, extra, instance, args, kwargs,\n printer, offset_space, loop, is_async=True)\n\n true_start_time = loop.time()\n result = await func(*args, **kwargs)\n end_time = loop.time()\n elapsed_time = end_time - true_start_time\n\n print_exit_info(func, extra, instance, args, kwargs,\n result, end_time, elapsed_time,\n printer, offset_space, loop, is_async=True)\n return result\n\n return async_debug_wrapper(func)\n\n @wrapt.decorator\n def sync_debug_wrapper(func, instance, args, kwargs):\n loop = asyncio.get_event_loop()\n offset_space = derive_offset_space(offset, indent)\n printer = PrettyPrinter(indent=indent, width=WIDTH - len(offset_space))\n\n print_enter_info(func, extra, instance, args, kwargs,\n printer, offset_space, loop)\n\n true_start_time = loop.time()\n result = func(*args, **kwargs)\n end_time = loop.time()\n elapsed_time = end_time - true_start_time\n\n print_exit_info(func, extra, instance, args, kwargs,\n result, end_time, elapsed_time,\n printer, offset_space, loop)\n return result\n\n return sync_debug_wrapper(func)\n\n return factory_direct(debug_decorator, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record pairs generated by Unify's binning model. Pairs are displayed on the "Pairs" page in the Unify UI.
def pairs(self):
    alias = self.api_path + "/recordPairs"
    return Dataset(self.client, None, alias)
[ "def fixed_pairs(\n self,\n ) -> Tuple[\n List[Tuple[str, str, Union[int, float]]],\n List[Tuple[str, str, Union[int, float]]],\n List[Tuple[str, str, Union[int, float]]],\n ]:\n assert (\n self.train_pairs is not None and self.test_pairs is not None\n ), \"You need to pass in train and test pairs to use this function\"\n self.train_pairs.loc[:, \"label\"] = self.train_pairs[\"label\"].map(\n {\"NO\": 0, \"YES\": 1, \"0\": 0, 0: 0, \"1\": 1, 1: 1}\n )\n if self.val_pairs is not None:\n self.val_pairs.loc[:, \"label\"] = self.val_pairs[\"label\"].map(\n {\"NO\": 0, \"YES\": 1, \"0\": 0, 0: 0, \"1\": 1, 1: 1}\n )\n train_pairs = list(self.train_pairs.to_records(index=False))\n val_pairs = list(self.val_pairs.to_records(index=False))\n else:\n np.random.seed(self.random_seed)\n # split train into train/val\n train_prob = self.train_ratio / (self.train_ratio + self.val_ratio)\n msk = np.random.rand(len(self.train_pairs)) < train_prob\n train_pairs = list(self.train_pairs[msk].to_records(index=False))\n val_pairs = list(self.train_pairs[~msk].to_records(index=False))\n self.test_pairs.loc[:, \"label\"] = self.test_pairs[\"label\"].map({\"NO\": 0, \"YES\": 1, \"0\": 0, 0: 0, \"1\": 1, 1: 1})\n test_pairs = list(self.test_pairs.to_records(index=False))\n\n return train_pairs, val_pairs, test_pairs", "def create_pairs(binned_dict):\n pairs_list = []\n for key in binned_dict.keys(): #looping through all binned MS2 scans\n pairs = []\n for i in range(0, len(binned_dict[key])):\n for j in range(i+1, len(binned_dict[key])): \n for scan, scan2 in zip(binned_dict[key][i].keys(), binned_dict[key][j].keys()):\n if np.count_nonzero(scan) != 0:\n if np.count_nonzero(scan2):\n pairs.append([binned_dict[key][i][scan], binned_dict[key][j][scan2]])\n \n pairs_list.append(pairs)\n print('successfully created pairs for all matched scans')\n return pairs_list", "def PlotPairDistribution(self, nAverages=10, nStepsBetweenAverages=200,\n\t\t\t\tnBins=30, rMax=3., r=None, showPlot=True):", "def generatePairs(self):\n self.pairs = []\n \n if len(self.reactants) == 1 or len(self.products) == 1:\n # Pair each reactant with each product\n for reactant in self.reactants:\n for product in self.products:\n self.pairs.append((reactant, product))\n \n else:\n \n reactants = self.reactants[:]\n products = self.products[:]\n \n reactantCarbons = [sum([1 for atom in reactant.molecule[0].atoms if atom.isCarbon()]) for reactant in reactants]\n productCarbons = [sum([1 for atom in product.molecule[0].atoms if atom.isCarbon()]) for product in products ]\n reactantOxygens = [sum([1 for atom in reactant.molecule[0].atoms if atom.isOxygen()]) for reactant in reactants]\n productOxygens = [sum([1 for atom in product.molecule[0].atoms if atom.isOxygen()]) for product in products ]\n \n # Sort the reactants and products by carbon number, then by oxygen number\n reactants = [(carbon, oxygen, reactant) for carbon, oxygen, reactant in zip(reactantCarbons,reactantOxygens,reactants)]\n reactants.sort()\n products = [(carbon, oxygen, product) for carbon, oxygen, product in zip(productCarbons,productOxygens,products)]\n products.sort()\n \n while len(reactants) > 1 and len(products) > 1:\n self.pairs.append((reactants[-1][2], products[-1][2]))\n reactants.pop()\n products.pop()\n for reactant in reactants:\n for product in products:\n self.pairs.append((reactant[2], product[2]))", "def enumerate_pairs(fwd_unique, rev_unique):\n pairs_oligos = list(product(fwd_unique.index, rev_unique.index))\n dview.push({\"fwd_unique\":fwd_unique,\n 
\"rev_unique\":rev_unique,\n \"unite_pairs\":unite_pairs})\n if not len(pairs_oligos):\n print \"No primers were found!\"\n return\n task = lview.map(unite_pairs, pairs_oligos, chunksize = 1000)\n wait_on(task)\n pairs = {pair:set for pair, set in task.result}\n return pairs", "def createPairs(X_source, X_target, labels, nbr_pairs=2000, proportion_positive=0.5):\n\n pairs = []\n sim = []\n i_pair_negative = 0\n i_pair_positive = 0\n nbr_pairs_positive = np.floor(nbr_pairs * proportion_positive)\n nbr_pairs_negative = nbr_pairs - nbr_pairs_positive\n while i_pair_positive < nbr_pairs_positive or i_pair_negative < nbr_pairs_negative:\n index_source = randint(0,len(X_source)-1)\n index_target = randint(0,len(X_target)-1)\n cur_sim = int(labels[index_source] == labels[index_target])\n if cur_sim and i_pair_positive < nbr_pairs_positive:\n sim.append(1)\n pairs.append([X_source[index_source], X_target[index_target]])\n i_pair_positive += 1\n elif not cur_sim and i_pair_negative < nbr_pairs_negative:\n sim.append(0)\n pairs.append([X_source[index_source], X_target[index_target]])\n i_pair_negative += 1\n\n print(\"bincount(sim) : \", np.bincount(sim))\n pairs = np.array(pairs)\n sim = np.array(sim)\n\n return pairs, sim", "def book_feed(self, pair):", "def add_pairing(self, pairing):\n\n\n self.pairings.append(pairing)", "def __str__(self):\n pairs = \"\"\n\n for elements in self.getListOfPairs():\n for e in elements:\n pairs += str(e) + \" \"\n pairs += \"\\n\"\n \n return pairs", "def print_pairs():\n print(\"\")\n print(\"CHANNEL - SERIES CONFIGURATIONS:\")\n print(tab(st.open_series(),headers=\"keys\", tablefmt=\"psql\"))", "def manage_pairs(pairs, pair_no_pair=False, all_qids=None):\n print(\"number of pairs: %d\" % len(pairs))\n if pair_no_pair:\n qid_in_pair_dict = {}\n for p in pairs:\n qid_in_pair_dict[p[0]] = 1\n qid_in_pair_dict[p[1]] = 1\n\n count = 0\n for qid in all_qids:\n if qid_in_pair_dict.get(qid, 0) == 0: # not in\n pairs.append([qid, qid])\n count += 1\n print(\"number of added self-pair: %d\" % count)\n\n return pairs", "def print_pairing_info(melon_types):\n\n for melon in melon_types:\n print \"{} pairs with\".format(melon.name.title())\n for pairing in melon.pairings:\n print \"- {}\".format(pairing)", "def pair_table(self):\n if not self._pair_table:\n self._pair_table = make_pair_table(self._structure)\n for locs in self._pair_table:\n yield list(locs) # Deepcopy", "def pair(self):\n device_public_key = self.get_value(\"DevicePublicKey\", no_session=True)\n if not device_public_key:\n raise MuxError(\"Unable to retrieve DevicePublicKey\")\n buid = self._usbmux.read_system_BUID()\n wifi_address = self.get_value(\"WiFiAddress\", no_session=True)\n\n try:\n from ._ssl import make_certs_and_key\n except ImportError:\n #print(\"DevicePair require pyOpenSSL and pyans1, install by the following command\")\n #print(\"\\tpip3 install pyOpenSSL pyasn1\", flush=True)\n raise RuntimeError(\"DevicePair required lib, fix with: pip3 install pyOpenSSL pyasn1\")\n\n cert_pem, priv_key_pem, dev_cert_pem = make_certs_and_key(device_public_key)\n pair_record = {\n 'DevicePublicKey': device_public_key,\n 'DeviceCertificate': dev_cert_pem,\n 'HostCertificate': cert_pem,\n 'HostID': str(uuid.uuid4()).upper(),\n 'RootCertificate': cert_pem,\n 'SystemBUID': buid,\n }\n\n with self.create_inner_connection() as s:\n ret = s.send_recv_packet({\n \"Request\": \"Pair\",\n \"PairRecord\": pair_record,\n \"Label\": PROGRAM_NAME,\n \"ProtocolVersion\": \"2\",\n \"PairingOptions\": {\n 
\"ExtendedPairingErrors\": True,\n }\n })\n assert ret, \"Pair request got empty response\"\n if \"Error\" in ret:\n # error could be \"PasswordProtected\" or \"PairingDialogResponsePending\"\n raise MuxError(\"pair:\", ret['Error'])\n\n assert 'EscrowBag' in ret, ret\n pair_record['HostPrivateKey'] = priv_key_pem\n pair_record['EscrowBag'] = ret['EscrowBag']\n pair_record['WiFiMACAddress'] = wifi_address\n \n self.usbmux.send_recv({\n \"MessageType\": \"SavePairRecord\",\n \"PairRecordID\": self.udid,\n \"PairRecordData\": bplist.dumps(pair_record),\n \"DeviceID\": self.devid,\n })\n return pair_record", "def add_pairing(self, pairing):\n self.pairings.append(pairing) # pairings from above is a list. \n\n # Fill in the rest", "def swissPairings():\n pairing_index = 0\n pairs = []\n DB = psycopg2.connect(\"dbname=tournament\")\n c = DB.cursor()\n \n c.execute(\"SELECT playerID from match_record\")\n number_of_pairings = len(c.fetchall())/2\n\n while pairing_index < number_of_pairings:\n c.execute(\"SELECT playerID, name from match_record ORDER BY wins DESC LIMIT 2 OFFSET %s\", (pairing_index * 2,))\n current_list = c.fetchall()\n new_tuple = current_list[0] + current_list[1]\n pairs.append(new_tuple)\n pairing_index = pairing_index + 1\n \n return pairs", "def translate_binance_pairs(cursor: 'DBCursor') -> list[tuple[str, str, str, str]]:\n table_exists = cursor.execute(\n 'SELECT COUNT(*) FROM sqlite_master WHERE type=\"table\" AND name=\"binance_pairs\"',\n ).fetchone()[0]\n if table_exists == 0: # handle binance_pairs not having been created\n cursor.execute( # fix https://github.com/rotki/rotki/issues/5073\n \"\"\"CREATE TABLE IF NOT EXISTS binance_pairs (\n pair TEXT NOT NULL,\n base_asset TEXT NOT NULL,\n quote_asset TEXT NOT NULL,\n location TEXT NOT NULL,\n FOREIGN KEY(base_asset) REFERENCES assets(identifier) ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY(quote_asset) REFERENCES assets(identifier) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY(pair, location)\n );\"\"\", # noqa: E501\n )\n return []\n\n cursor.execute('SELECT pair, base_asset, quote_asset, location from binance_pairs;')\n binance_pairs = []\n for entry in cursor:\n new_base = _maybe_upgrade_identifier(entry[1])\n new_quote = _maybe_upgrade_identifier(entry[2])\n binance_pairs.append((entry[0], new_base, new_quote, entry[3]))\n\n return binance_pairs", "def pairs_to_SNP_Pairs(pairs, populations):\n results = {}\n for snp1, info in pairs.items():\n results[snp1] = []\n if info['matches']:\n for snp2, data in info['matches'].items():\n results[snp1].append(\n SNP_Pair(\n plink = {\n snp1: {\n 'chrom': info['chrom'],\n 'loc' : info['loc'],\n 'matches': {\n snp2: data\n }\n }\n },\n populations = populations\n )\n )\n return results", "def get_pairs(self):\n # Get all of the pairs that Binance trades in\n self.pairs = [Pair(pair['symbol'], pair['baseAsset'], pair['quoteAsset']) for pair in self.info['symbols']]\n self.pairs.append(Pair('BTCEUR', 'EUR', 'BTC'))\n self.pairs.append(Pair('ETHEUR', 'EUR', 'ETH'))\n\n # Get the list of unique currencies that Binance trades in\n self.currencies = [pair.trade_currency for pair in self.pairs] + [pair.base_currency for pair in self.pairs]\n self.currencies = list(set(self.currencies))\n\n # Initialize traded pairs with a guess\n pairs = []\n for coin in TRADE_CURRENCIES:\n pairs.append(coin.alias + 'BNB')\n pairs.append(coin.alias + 'ETH')\n pairs.append(coin.alias + 'BTC')\n\n self.traded_pairs = [self.pairs[self.pairs.index(p)] for p in pairs if p in self.pairs]", 
"def print_pairing_info(melon_types=make_melon_types()):\n\n for melon in melon_types:\n print(f'{melon.name} pairs with')\n\n for pair in melon.pairings:\n print(f'- {pair}')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Machine learning model for pair matching in this Mastering project. Learns from verified labels and predicts categorization labels for unlabeled pairs.
def pair_matching_model(self):
    alias = self.api_path + "/recordPairsWithPredictions/model"
    return MachineLearningModel(self.client, None, alias)
[ "def predict_NN(self):\r\n data = self.data_train1\r\n labels = self.labels_train\r\n data_test = self.data_test1\r\n labels_test = self.labels_test\r\n \r\n model = MLPClassifier()\r\n model.fit(data, labels.iloc[:,0])\r\n prediction = model.predict(data_test) \r\n model_score = model.score(data_test, labels_test)\r\n \r\n self.NN_prediction = prediction\r\n self.NN_score = model_score", "def linking_as_pairwise_classification(gold_links, pred_links):\n gold_ans = []\n pred_ans = []\n dataset_f1_no_link = []\n dataset_f1_link = []\n\n for i in range(len(gold_links)): # per essay\n gold_rep = TreeBuilder(gold_links[i])\n pred_rep = TreeBuilder(pred_links[i])\n # It is not clear whether it is better to compute it better at paragraph-level or dataset-level\n gold_ans.extend(flatten_list(gold_rep.get_adj_matrix())) \n pred_ans.extend(flatten_list(pred_rep.get_adj_matrix()))\n\n # result per essay\n x = flatten_list(gold_rep.get_adj_matrix())\n y = flatten_list(pred_rep.get_adj_matrix())\n report = classification_report(y_true=x, y_pred=y, output_dict=True)\n\n if '0' in report.keys():\n f1_no_link = report['0']['f1-score']\n else: # all linked\n f1_no_link = 1.0\n f1_link_exist = report['1']['f1-score']\n dataset_f1_no_link.append(f1_no_link)\n dataset_f1_link.append(f1_link_exist)\n\n # dataset level\n print(\"=== Linking as Pairwise Classification (like in Stab and Gurevych) ===\")\n print(classification_report(y_true = gold_ans, y_pred=pred_ans, digits=3))\n report = classification_report(y_true=gold_ans, y_pred=pred_ans, output_dict=True)\n f1_no_link = report['0']['f1-score']\n f1_link_exist = report['1']['f1-score']\n return f1_no_link, f1_link_exist, (f1_no_link+f1_link_exist)/2.0 # Kuribayashi confirmed this\n\n # a = np.average(dataset_f1_no_link)\n # b = np.average(dataset_f1_link)\n # return a, b, (a+b)/2.0", "def predict(self,unlabeled):\r\n y_pred = unlabeled['label']\r\n if(self.main_transformer!=None):\r\n X,y = self.main_transformer.transform(unlabeled)\r\n y_pred = self.model_main.predict(X)\r\n pred_probs = self.model_main.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.alpha):\r\n y_pred[i] = 'Unsorted'\r\n unsorted = unlabeled.loc[y_pred == 'Unsorted']\r\n if(self.small_transformer!=None and len(unsorted)!=0):\r\n X,y = self.small_transformer.transform(unsorted)\r\n y = self.model_small.predict(X)\r\n pred_probs = self.model_small.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.beta):\r\n y[i] = 'Unsorted'\r\n y_pred[y_pred=='Unsorted'] = y\r\n return y_pred", "def catPrediction():\n #import data to predict\n df_pred_cat = pd.read_csv(\"predictCat_df.csv\")\n df_pred_cat = df_pred_cat.drop(['Unnamed: 0.1', 'chosen_match_final'], axis = 1)\n\n #import models\n G_pred_rf_model = pickle.load(open('GBM_Pred_G.txt', 'rb'))\n\n #predict coverage categories\n A_pred = df_pred_cat['A_final']\n B_pred = df_pred_cat['B_final']\n C_pred = df_pred_cat['C_final']\n D_pred = df_pred_cat['D_final']\n E_pred = df_pred_cat['E_final']\n F_pred = df_pred_cat['F_final']\n G_pred = G_pred_rf_model.predict(df_pred_cat)\n\n #input category predictions into dataframe\n df_pred_cat['A_chosen'] = pd.Series(A_pred, index=df_pred_cat.index)\n df_pred_cat['B_chosen'] = pd.Series(B_pred, index=df_pred_cat.index)\n df_pred_cat['C_chosen'] = pd.Series(C_pred, index=df_pred_cat.index)\n df_pred_cat['D_chosen'] = pd.Series(D_pred, index=df_pred_cat.index)\n df_pred_cat['E_chosen'] = pd.Series(E_pred, index=df_pred_cat.index)\n 
df_pred_cat['F_chosen'] = pd.Series(F_pred, index=df_pred_cat.index)\n df_pred_cat['G_chosen'] = pd.Series(G_pred, index=df_pred_cat.index)\n\n #save dataframe with predictions\n df_pred_cat.to_csv(\"df_predicted_cat.csv\")\n join()", "def Approach_PDRC(self): \r\n \r\n print(\"Starting number of classifiers = \" + str(len(self.pop.popSet))) \r\n print(\"Original Training Accuracy = \" +str(self.originalTrainAcc))\r\n print(\"Original Testing Accuracy = \" +str(self.originalTestAcc))\r\n \r\n \r\n retainedClassifiers = []\r\n self.matchSet = [] \r\n self.correctSet = []\r\n \r\n cons.env.startEvaluationMode()\r\n cons.env.resetDataRef(True) \r\n for j in range(cons.env.formatData.numTrainInstances):\r\n state_phenotype = cons.env.getTrainInstance()\r\n state = state_phenotype[0]\r\n phenotype = state_phenotype[1]\r\n \r\n #Create MatchSet\r\n for i in range(len(self.pop.popSet)):\r\n cl = self.pop.popSet[i] \r\n if cl.match(state): \r\n self.matchSet.append(i)\r\n \r\n #Create CorrectSet\r\n if cons.env.formatData.discretePhenotype:\r\n for i in range(len(self.matchSet)):\r\n ref = self.matchSet[i]\r\n if self.pop.popSet[ref].phenotype == phenotype:\r\n self.correctSet.append(ref)\r\n else:\r\n for i in range(len(self.matchSet)):\r\n ref = self.matchSet[i]\r\n if float(phenotype) <= float(self.pop.popSet[ref].phenotype[1]) and float(phenotype) >= float(self.pop.popSet[ref].phenotype[0]):\r\n self.correctSet.append(ref)\r\n #Find the rule with highest accuracy, generality and numerosity product\r\n highestValue = 0\r\n highestRef = 0\r\n for i in range(len(self.correctSet)):\r\n ref = self.correctSet[i]\r\n product = self.pop.popSet[ref].accuracy * (cons.env.formatData.numAttributes - len(self.pop.popSet[ref].condition)) / float(cons.env.formatData.numAttributes) * self.pop.popSet[ref].numerosity\r\n if product > highestValue:\r\n highestValue = product\r\n highestRef = ref\r\n \r\n #If the rule is not already in the final ruleset, move it to the final ruleset\r\n if highestValue == 0 or self.pop.popSet[highestRef] in retainedClassifiers:\r\n pass\r\n else:\r\n retainedClassifiers.append(self.pop.popSet[highestRef])\r\n\r\n #Move to the next instance \r\n cons.env.newInstance(True)\r\n self.matchSet = [] \r\n self.correctSet = []\r\n cons.env.stopEvaluationMode()\r\n \r\n self.pop.popSet = retainedClassifiers\r\n print(\"STAGE 1 Ended: Classifiers Remaining = \" +str(len(self.pop.popSet)))", "def Approach_CRA2(self): \r\n \r\n print(\"Starting number of classifiers = \" + str(len(self.pop.popSet))) \r\n print(\"Original Training Accuracy = \" +str(self.originalTrainAcc))\r\n print(\"Original Testing Accuracy = \" +str(self.originalTestAcc))\r\n \r\n retainedClassifiers = []\r\n self.matchSet = [] \r\n self.correctSet = []\r\n \r\n cons.env.startEvaluationMode()\r\n cons.env.resetDataRef(True) \r\n for j in range(cons.env.formatData.numTrainInstances):\r\n state_phenotype = cons.env.getTrainInstance()\r\n state = state_phenotype[0]\r\n phenotype = state_phenotype[1]\r\n \r\n #Create MatchSet\r\n for i in range(len(self.pop.popSet)):\r\n cl = self.pop.popSet[i] \r\n if cl.match(state): \r\n self.matchSet.append(i)\r\n \r\n #Create CorrectSet\r\n if cons.env.formatData.discretePhenotype:\r\n for i in range(len(self.matchSet)):\r\n ref = self.matchSet[i]\r\n if self.pop.popSet[ref].phenotype == phenotype:\r\n self.correctSet.append(ref)\r\n else:\r\n for i in range(len(self.matchSet)):\r\n ref = self.matchSet[i]\r\n if float(phenotype) <= float(self.pop.popSet[ref].phenotype[1]) and 
float(phenotype) >= float(self.pop.popSet[ref].phenotype[0]):\r\n self.correctSet.append(ref)\r\n \r\n #Find the rule with highest accuracy, generality product\r\n highestValue = 0\r\n highestRef = 0\r\n for i in range(len(self.correctSet)):\r\n ref = self.correctSet[i]\r\n product = self.pop.popSet[ref].accuracy * (cons.env.formatData.numAttributes - len(self.pop.popSet[ref].condition)) / float(cons.env.formatData.numAttributes)\r\n if product > highestValue:\r\n highestValue = product\r\n highestRef = ref\r\n \r\n #If the rule is not already in the final ruleset, move it to the final ruleset\r\n if highestValue == 0 or self.pop.popSet[highestRef] in retainedClassifiers:\r\n pass\r\n else:\r\n retainedClassifiers.append(self.pop.popSet[highestRef])\r\n\r\n #Move to the next instance \r\n cons.env.newInstance(True)\r\n self.matchSet = [] \r\n self.correctSet = []\r\n cons.env.stopEvaluationMode()\r\n \r\n self.pop.popSet = retainedClassifiers\r\n print(\"STAGE 1 Ended: Classifiers Remaining = \" +str(len(self.pop.popSet)))", "def recognize(models: dict, test_set: SinglesData):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n sequences = test_set.get_all_sequences()\n XLenghts = test_set.get_all_Xlengths()\n\n for s in sequences:\n X, length = XLenghts[s]\n p = {}\n guess = \"\"\n for word, model in models.items():\n try:\n p[word] = model.score(X, length)\n except:\n p[word] = float('-inf')\n probabilities.append(p)\n values = list(p.values())\n keys = list(p.keys())\n guesses.append(keys[values.index(max(values))])\n\n return probabilities, guesses\n\n \"\"\"\n valid_models = {word: model for word,model in models.items() if model is not None}\n probabilities = [word_probabilities(valid_models, *test_set.get_item_Xlengths(i))\n for i,_ in enumerate(test_set.wordlist)]\n guesses = [best_guess(word_probs) for word_probs in probabilities]\n return probabilities, guesses\n\ndef word_probabilities(models, X, lengths):\n word_probs = {}\n\n for word,model in models.items():\n try:\n word_probs[word] = model.score(X, lengths)\n except ValueError: # The hmmlearn library may not be able to train or score all models.\n word_probs[word] = float('-inf')\n\n return word_probs\n\ndef best_guess(word_probs):\n return max(word_probs.keys(), key=lambda word: word_probs[word])\n \"\"\"", "def cnn_predict():\n\n x_test, y_test, file_name_test_list = load_test_set()\n\n model = cnn()\n\n weight_path = Path(config[\"weight_file\"])\n if weight_path.exists() is False:\n log.error(\"Not found weight file %s. Aborting.\" % (weight_path))\n sys.exit(1)\n\n model.load_weights(weight_path)\n\n y_predicted = model.predict(x_test)\n correct_count = 0\n total_count = x_test.shape[0]\n for i in range(total_count):\n # Ground truth\n # Convert the file name to a string that contains only the ground trugh classes\n name = file_name_test_list[i]\n underscore_pos = name.find(\"_\")\n if underscore_pos < 0:\n log.warning(\"Invalid image file name. 
Missing classification marker for file %s\" % (name))\n continue\n\n classes = name[0:underscore_pos]\n actual = \"\"\n if DogClassMarker.AIMEE in classes:\n actual = actual + DogClassMarker.AIMEE\n if DogClassMarker.MADDIE in classes:\n actual = actual + DogClassMarker.MADDIE\n if DogClassMarker.OLIVIA in classes:\n actual = actual + DogClassMarker.OLIVIA\n if DogClassMarker.PINK in classes:\n actual = actual + DogClassMarker.PINK\n if len(actual) == 0:\n actual = \"_\"\n\n # Prediction\n # Convert the predicted classes contained in the vector to a string.\n # Before conversion, round down or round up values to 0 or 1 except for the mid-range number.\n # A mid-range number is counted as a \"mismatch\".\n v = y_predicted[i]\n\n low_threshold_flag = v < 0.3\n v[low_threshold_flag] = 0\n\n high_threshold_flag = v > 0.7\n v[high_threshold_flag] = 1\n\n predicted = \"\"\n if v[DogClassIndex.AIMEE] == 1:\n predicted = predicted + DogClassMarker.AIMEE\n if v[DogClassIndex.MADDIE] == 1:\n predicted = predicted + DogClassMarker.MADDIE\n if v[DogClassIndex.OLIVIA] == 1:\n predicted = predicted + DogClassMarker.OLIVIA\n if v[DogClassIndex.PINK] == 1:\n predicted = predicted + DogClassMarker.PINK\n if len(predicted) == 0:\n predicted = \"_\"\n\n # Compare the ground-truth classification string and the predicted classification string\n # Count only the complete match as the match. Do not count the partial match.\n if actual == predicted:\n correct_count = correct_count + 1\n\n print(\"Total count: %d\" % (total_count))\n print(\"Correct count (complete match only): %d\" % (correct_count))\n print(\"Accuracy: %f percent\" % (correct_count * 100 / total_count))", "def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n \"*** YOUR CODE HERE ***\"\n labelSize = len(trainingLabels)\n # count labels to find the count of Y --- make count objects from util.py\n labelCounter = util.Counter()\n conditionalCounter = util.Counter()\n\n for i in range(labelSize):\n label = trainingLabels[i]\n labelCounter[label] = labelCounter[label]+1\n\n # count the number of times a feature is true and specific label is used\n # values must be recorded for conditional probability calculations\n # the key for the counter should be a feature and its associated label so that we can represent the AND condition between them\n for feature in self.features:\n if trainingData[i][feature] == 1: # colored pixel\n conditionalCounter[(feature, label)] = conditionalCounter[(feature, label)]+1\n\n finalLabelProbabilities = labelCounter.copy()\n for label in self.legalLabels:\n for feature in self.features:\n finalLabelProbabilities[(feature, label)] = finalLabelProbabilities[(feature,label)] / labelSize\n self.labelProbabilities = finalLabelProbabilities\n\n probabilities = []\n accuracy = []\n validationSize = len(validationLabels)\n\n for k in kgrid:\n # divide conditionalCounter for each feature by the number of times each label appeared using labelCounter\n # |\n # --> = P (F | Y)\n \n tempCondCounter = util.Counter()\n for feature in self.features:\n for label in self.legalLabels:\n tempCondCounter[(feature, label)] = (conditionalCounter[(feature, label)]+k) / (labelCounter[label] + 2*k)\n\n self.conditionalProbabilities = tempCondCounter\n probabilities.append(tempCondCounter)\n\n # check if guess is correct\n guesses = self.classify(validationData)\n numCorrect = 0\n for label in range(validationSize):\n validationLabel = validationLabels[label]\n if validationLabel == guesses[label]:\n 
numCorrect = numCorrect + 1\n \n accuracy.append(numCorrect)\n \n index = accuracy.index(max(accuracy))\n self.conditionalProbabilities = probabilities[index]", "def verifypair(a, b, model, debug=0):\n def log(s):\n if debug:\n print >>sys.stderr, s\n ret = stor(fields=getmodelfields(model), modelid=model, modelname=MODELS[model])\n # read feature dicts\n fd1 = getfdict(a[0], a[1])\n log('fdict1 : %s' % (fd1))\n fd2 = getfdict(b[0], b[1])\n log('fdict2 : %s' % (fd2))\n # convert to feature vectors\n f1 = getfvec(fd1, model)\n log('fvec1 : %s' % (f1))\n ret.update(dataset1=a[0], name1=a[1], feats1=list(f1))\n f2 = getfvec(fd2, model)\n log('fvec2 : %s' % (f1))\n ret.update(dataset2=b[0], name2=b[1], feats2=list(f2))\n # read verification classifier\n svm_model, simmeths = readmodel(model)\n # get similarity fvec\n fvec = getSimFeaturesFromFvals(f1, f2, simmeths)\n log('simfvec : %s' % (fvec))\n # compute results\n label, score = bulkclassify(svm_model, [fvec])[0]\n ret.score = score * label\n log('score : %s' % (ret.score,))\n # set same or diff and correct or not\n ret.same = ret.correct = 0\n if not ret.dataset1.startswith('job') and not ret.dataset2.startswith('job'):\n name = lambda n: n.lower().rsplit('_', 1)[0]\n same = (name(ret.name1) == name(ret.name2))\n ret.same = 1 if same else -1\n ret.correct = 1 if ret.same*ret.score > 0 else -1\n else:\n ret.same = ret.correct = None\n log('For %s and %s using model %s, got score %s, same %s, correct %s' % (a, b, model, ret.score, ret.same, ret.correct))\n return ret", "def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)", "def predict(self, src) -> Tuple[label, confidence]:\n ...", "def label_unlabeled(file_in, file_out_prefix='data/up/'):\n\n table = []\n headers = ['Algorithm', 'UP']\n df = pd.read_csv(file_in, index_col=0, na_filter=False)\n\n for name in ['LCS', 'COS', 'LEV', 'LSH', 'WMD']:\n alg = get_algorithm_by_name(name, True)\n model = ml.logistic_regression_train('data/train.csv', alg)\n X, _ = ml.extract_features(file_in, alg)\n\n predictions = model.predict(X)\n scores = model.predict_proba(X)\n scores = scores[predictions == 1]\n scores = scores[:, 1]\n df_up = df[predictions == 1]\n df_up.drop(columns=['label', 'name1', 'name2'], inplace=True)\n df_up.insert(4, 'score', scores)\n df_up.sort_values(by='score', ascending=False, inplace=True)\n df_up.to_csv(file_out_prefix + 'scores/' + name + '.csv')\n table.append((name, len(df_up.index)))\n\n name = 'SiamX'\n alg = get_algorithm_by_name(name, True)\n scores = alg.run_similarity(df)\n predictions = alg.predict(scores)\n scores = scores[predictions == 1]\n df_up = df[predictions == 1]\n df_up.drop(columns=['label', 'name1', 'name2'], inplace=True)\n df_up.insert(4, 'score', scores)\n df_up.sort_values(by='score', ascending=False, inplace=True)\n df_up.to_csv(file_out_prefix + 'scores/' + name + '.csv')\n table.append((name, len(df_up.index)))\n\n print(tabulate(table, headers, tablefmt='grid'))", "def Prediction():\t\n\tif model == 'HAC':\n\t\tclf = _HAC_model()\n\t\tclf.set_params(**best_params)\n\t\tlabels = clf.fit_predict(np.array(df.astype(int)))\n\t\tdf_ = pd.concat([df,pd.DataFrame(labels,columns=['Cluster'])], axis=1)\n\t\treturn df_\n\n\tclf = _kmeans_model()\n\tclf.set_params(**best_params)\n\tlabels = clf.fit_predict(np.array(df.astype(int)))\n\tdf_ = pd.concat([df,pd.DataFrame(labels,columns=['Cluster'])], axis=1)\n\treturn df_", "def train_model(self, sentences_pairs, is_similar, 
model_save_directory='./'):\n train_data_x1, train_data_x2, train_labels, leaks_train, \\\n val_data_x1, val_data_x2, val_labels, leaks_val = self.create_train_dev_set(\n sentences_pairs, is_similar, self.max_sequence_length,\n self.validation_split_ratio\n )\n\n embedding_layer = Embedding(119547, 768, weights=[self.embedding_matrix], trainable=False)\n\n lstm_layer = Bidirectional(\n LSTM(self.number_lstm_units, dropout=self.rate_drop_lstm, recurrent_dropout=self.rate_drop_lstm)\n )\n\n sequence_1_input = Input(shape=(self.max_sequence_length,), dtype='int32')\n embedded_sequences_1 = embedding_layer(sequence_1_input)\n x1 = lstm_layer(embedded_sequences_1)\n\n sequence_2_input = Input(shape=(self.max_sequence_length,), dtype='int32')\n embedded_sequences_2 = embedding_layer(sequence_2_input)\n x2 = lstm_layer(embedded_sequences_2)\n\n leaks_input = Input(shape=(leaks_train.shape[1],))\n leaks_dense = Dense(int(self.number_dense_units/2), activation=self.activation_function)(leaks_input)\n\n merged = concatenate([x1, x2, leaks_dense])\n merged = BatchNormalization()(merged)\n merged = Dropout(self.rate_drop_dense)(merged)\n merged = Dense(self.number_dense_units, activation=self.activation_function)(merged)\n merged = BatchNormalization()(merged)\n merged = Dropout(self.rate_drop_dense)(merged)\n preds = Dense(1, activation='sigmoid')(merged)\n\n model = Model(inputs=[sequence_1_input, sequence_2_input, leaks_input], outputs=preds)\n model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['acc'])\n\n early_stopping = EarlyStopping(monitor='val_loss', patience=3)\n\n STAMP = 'lstm_%d_%d_%.2f_%.2f' % (self.number_lstm_units, self.number_dense_units, self.rate_drop_lstm, self.rate_drop_dense)\n\n checkpoint_dir = model_save_directory + 'checkpoints/' + str(int(time.time())) + '/'\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n bst_model_path = checkpoint_dir + STAMP + '.h5'\n\n model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=False)\n\n tensorboard = TensorBoard(log_dir=checkpoint_dir + \"logs/{}\".format(time.time()))\n\n model.fit([train_data_x1, train_data_x2, leaks_train], train_labels,\n validation_data=([val_data_x1, val_data_x2, leaks_val], val_labels),\n epochs=50, batch_size=64, shuffle=True,\n callbacks=[early_stopping, model_checkpoint, tensorboard])\n\n return bst_model_path", "def train_model_track1(train_pool, validation_pool, validation, test_private, features, data_path):\n\n cat = CatBoostClassifier(iterations=3000,\n loss_function='Logloss',\n l2_leaf_reg=2,\n random_seed=100,\n scale_pos_weight=11.92984045,\n eval_metric='AUC',\n use_best_model=True,\n early_stopping_rounds=100,\n max_depth=7,\n max_bin=100\n )\n\n cat.fit(train_pool, eval_set=validation_pool)\n valid_pred_prob = cat.predict_proba(validation.loc[:, features].values)[:, 1]\n valid_score_90 = scoring.rejection90(validation.label.values, valid_pred_prob,\n sample_weight=validation.weight.values)\n # 0.771923225\n print(f\"Score at rejection 90 {valid_score_90}\")\n predictions = cat.predict_proba(test_private.loc[:, features].values)[:, 1]\n prediction_file = os.path.join(data_path, \"test_private.csv\")\n print(f\"Track 1 prediction on private test data is present at {prediction_file}\")\n pd.DataFrame(data={\"prediction\": predictions}, index=test_private.index).to_csv(prediction_file,\n index_label=utils.ID_COLUMN)\n model_file = os.path.join(data_path, 'track_1_best_mode.cbm')\n print(f\"Track 1 best model is saved at 
{model_file}\")\n cat.save_model(model_file, format='cbm')", "def train_model(evidence, labels):\n model = KNeighborsClassifier(n_neighbors = 1)\n X_training = [row for row in evidence]\n y_training = [row for row in labels]\n result = model.fit(X_training, y_training)\n \n return result", "def agreement(labels_pred, labels_true):\n true_communities_labels = set( labels_true )\n predicted_communities_labels = set( labels_pred )\n if( true_communities_labels == predicted_communities_labels ):\n return max( accuracy_score(labels_true, labels_pred) , 1 - accuracy_score(labels_true, labels_pred) )\n elif len( predicted_communities_labels ) == 1:\n return max( accuracy_score(labels_true, labels_pred) , 1 - accuracy_score(labels_true, labels_pred) )\n else:\n N = len( labels_pred )\n predicted_communities_labels = list( predicted_communities_labels )\n community_size = [ ]\n for label in predicted_communities_labels:\n community_size.append( len( [ i for i in range( N ) if labels_pred[ i ] == label ] ) )\n \n largest_community_labels = [ predicted_communities_labels[ np.argsort(community_size)[-k-1] ] for k in range( len(true_communities_labels) ) ]\n \n \n if (-250 not in true_communities_labels):\n new_labels_pred = np.ones( N ) * (-250)\n true_communities_labels = list( true_communities_labels )\n good_nodes = []\n for i in range(N):\n if labels_pred[i] in largest_community_labels:\n new_labels_pred[ i ] = true_communities_labels[ largest_community_labels.index( labels_pred[i] ) ]\n good_nodes.append( i )\n count = 0\n for i in good_nodes:\n if new_labels_pred[i] == labels_true[i]:\n count += 1\n return max( 0.5, 1/N * max(count, len(good_nodes)-count) )\n \n return 0", "def predict_label(texts, labels, text_new):\r\n # YOUR CODE HERE\r\n\r\n # texts = ['RT @GOPLeader', 'RT @GOPLeader', 'Colorless green ideas sleep furiously.']\r\n # labels = ['rep', 'rep', 'dem']\r\n\r\n train_twitter = texts\r\n test_twitter = text_new\r\n\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n from sklearn.feature_extraction.text import TfidfTransformer\r\n from sklearn.naive_bayes import MultinomialNB\r\n\r\n count_vect = CountVectorizer()\r\n twitter_train_counts = count_vect.fit_transform(train_twitter)\r\n\r\n tf_transformer = TfidfTransformer(use_idf=False).fit(twitter_train_counts)\r\n twitter_train_tf = tf_transformer.transform(twitter_train_counts)\r\n\r\n tfidf_transformer = TfidfTransformer()\r\n twitter_train_tfidf = tfidf_transformer.fit_transform(twitter_train_counts)\r\n\r\n twitter_clf = MultinomialNB().fit(twitter_train_tfidf,labels )\r\n\r\n # transforming the test data\r\n\r\n twitter_test_data = count_vect.transform(test_twitter)\r\n twitter_tfidf = tfidf_transformer.transform(twitter_test_data)\r\n\r\n #prediction\r\n twitter_predicted = twitter_clf.predict(twitter_tfidf)\r\n\r\n for text, class_label in zip(test_twitter, twitter_predicted):\r\n print('%r => %s' % (text, class_label))\r\n\r\n\r\n return list(twitter_predicted)", "def producte_token_labeling_list(self):\n path_to_token_labeling_file = os.path.join(self.path_to_label_file, \"token_label_out.txt\")\n token_labeling_list = self._get_token_labeling_list(path_to_token_labeling_file)\n path_to_token_labeling_test_results_file = os.path.join(self.path_to_predict_label_file,\n \"token_label_prediction_test_results.txt\")\n predict_token_labeling_list = self._get_predict_token_labeling_list(path_to_token_labeling_test_results_file)\n token_labeling_test_list = []\n clean_predict_token_labeling_list = []\n 
seqence_length_dont_match_index = 0\n for y_test, y_predict in zip(token_labeling_list, predict_token_labeling_list):\n y_predict = y_predict[1:-1] # y_predict.remove('[CLS]') #y_predict.remove('[SEP]')\n while '[Padding]' in y_predict:\n print(\"X\" * 100)\n y_predict.remove('[Padding]')\n while '[##WordPiece]' in y_predict:\n y_predict.remove('[##WordPiece]')\n while '[##WordPiece]' in y_test:\n y_test.remove('[##WordPiece]')\n if len(y_predict) > len(y_test):\n print(y_predict)\n print(y_test)\n print(\"~*\" * 100)\n seqence_length_dont_match_index += 1\n y_predict = y_predict[0:len(y_test)]\n elif len(y_predict) < len(y_test):\n print(y_predict)\n print(y_test)\n print(\"~\" * 100)\n y_predict = y_predict + [\"O\"] * (len(y_test) - len(y_predict))\n seqence_length_dont_match_index += 1\n assert len(y_predict) == len(y_test)\n # 如果有较多的预测句子与正确句子长度不匹配(> 句子总数的1%),说明不能用上述简单方法处理预测出来的句子\n #assert seqence_length_dont_match_index < int(len(token_labeling_list) * 0.01)\n token_labeling_test_list.extend(y_test)\n clean_predict_token_labeling_list.extend(y_predict)\n if \"[CLS]\" in clean_predict_token_labeling_list:\n print(\"[CLS] doesn't just appear at the beginning of a sentence.\")\n clean_predict_token_labeling_list = [y_p.replace(\"[CLS]\", \"O\") for y_p in clean_predict_token_labeling_list]\n print(\"[CLS]\" * 10 + \"\\n\")\n if \"[SEP]\" in clean_predict_token_labeling_list:\n print(\"[SEP] doesn't just appear at the end of a sentence.\")\n clean_predict_token_labeling_list = [y_p.replace(\"[SEP]\", \"O\") for y_p in clean_predict_token_labeling_list]\n print(\"[SEP]\" * 10 + \"\\n\")\n print(\"seqence_length_dont_match numbers\", seqence_length_dont_match_index)\n return token_labeling_test_list, clean_predict_token_labeling_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Record Clusters as a dataset. Unify clusters the labeled pairs using the pairs model. These clusters populate the cluster review page and get transient cluster ids, rather than published cluster ids (i.e., "Permanent Ids")
def record_clusters(self):
    alias = self.api_path + "/recordClusters"
    return Dataset(self.client, None, alias)
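The method above does not fetch anything itself; it only builds an aliased `Dataset` handle pointing at the project's `recordClusters` endpoint. Below is a minimal, self-contained sketch of that lazy-handle pattern; the stub `Client`, the `Dataset.records` method, and the `"projects/1"` path are illustrative assumptions, not the real client library's API.

```python
# Minimal sketch of the lazy-handle pattern used by record_clusters above.
# Client, Dataset.records, and the "projects/1" path are stand-ins (assumptions).

class Client:
    """Stub HTTP client that would normally issue REST calls."""
    def get(self, path):
        print(f"GET {path}")      # a real client would return an HTTP response here
        return {"records": []}    # canned payload for the sketch

class Dataset:
    """Thin handle over a dataset endpoint; nothing is fetched until asked for."""
    def __init__(self, client, resource_json, api_path):
        self.client = client
        self.api_path = api_path  # e.g. "projects/1/recordClusters"

    def records(self):
        # Only now does a request go out -- the handle itself is cheap to build.
        return self.client.get(self.api_path + "/records")

class MasteringProject:
    def __init__(self, client, api_path):
        self.client = client
        self.api_path = api_path

    def record_clusters(self):
        alias = self.api_path + "/recordClusters"
        return Dataset(self.client, None, alias)

project = MasteringProject(Client(), "projects/1")
clusters = project.record_clusters()   # no HTTP request yet
print(clusters.records())              # the request happens here
```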
[ "def _cluster(self):\n self.kmeans = KMeans(n_clusters=self.cluster_num).fit(self.vectors)\n self.k_books = pd.DataFrame(list(zip(list(self.kmeans.labels_),\n list(self.reviews.index))),\n columns=['k_label', 'book_id'])", "def record_clusters_with_data(self):\n unified_dataset = self.unified_dataset()\n\n # Replace this workaround with a direct API call once API\n # is fixed. APIs that need to work are: fetching the dataset and\n # being able to call refresh on resulting dataset. Until then, we grab\n # the dataset by constructing its name from the corresponding Unified Dataset's name\n name = unified_dataset.name + \"_dedup_clusters_with_data\"\n return self.client.datasets.by_name(name)\n\n # super.__repr__ is sufficient", "def generate_clusters(self):\n\n self.cluster_labels = None", "def main(n_clusters=N_CLUSTER):\n training_data = prepare_dataset(n_clusters)\n pruned_training_data = training_data.drop(TARGET_ATTR, axis=1)\n model = KMeans(n_clusters=n_clusters, max_iter=MAX_ITER)\n\n logger.info(f\"Training K-means with {n_clusters} clusters.\")\n result = model.fit(pruned_training_data)\n write_model(result, n_clusters)\n\n logger.info(f\"Predicting clusters for samples.\")\n predicted = result.predict(pruned_training_data)\n logger.info(f\"Cross-referencing cluesters and labels.\")\n\n data_frame = pd.DataFrame({\n 'Labels': training_data[TARGET_ATTR],\n 'Predicted': predicted,\n })\n\n labels = data_frame['Labels']\n predicted = data_frame['Predicted']\n confusion_matrix = pd.crosstab(labels, predicted, dropna=False)\n logger.debug(confusion_matrix)\n write_confusion_matrix(confusion_matrix, n_clusters)\n\n logger.info(f\"Similarity: {metrics.rand_score(labels, predicted)}\")\n adjusted = metrics.adjusted_rand_score(labels, predicted)\n logger.info(f\"Adjusted for chance: {adjusted}\")", "def predict(self):\n self.df['cluster_id'] = self.model.predict(self.feat_nums_scaled)", "def cluster(self, model):\n labels = model.fit_predict(self.data)\n # Relabel clusters by rank\n if len(set(labels)) > 100:\n warn(\"Too many clusters: labels are not sorted\")\n return\n labels = [string.printable[lbl] for lbl in labels]\n label_counts = Counter(labels)\n ranks = {lbl: rank for rank, (lbl, _) in\n enumerate(label_counts.most_common())}\n self.labels = np.array([ranks[lbl] for lbl in labels])", "def run_example():\r\n #data_table = load_data_table(DATA_3108_URL)\r\n #data_table = load_data_table(DATA_111_URL)\r\n data_table = TEST_DATA_111\r\n \r\n singleton_list = []\r\n for line in data_table:\r\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\r\n \r\n #cluster_list = sequential_clustering(singleton_list, 15) \r\n \r\n #print \"Displaying\", len(cluster_list), \"sequential clusters\"\r\n\r\n #cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, 9)\r\n #print \"Displaying\", len(cluster_list), \"hierarchical clusters\"\r\n\r\n cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, 9, 5) \r\n print \"Displaying\", len(cluster_list), \"k-means clusters\"\r\n\r\n \r\n # draw the clusters using matplotlib or simplegui\r\n if DESKTOP:\r\n alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\r\n #alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\r\n else:\r\n alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers\r", "def _store(self):\n self.logger.debug('Starting to store the data...')\n mapping = 
{}\n prefix = self.config['spotify']['output']['prefix']\n for label in self.df['label'].unique():\n if label == label: #check for nan, nan's are not equal to itself\n mapping[int(label)] = '{} Playlist {}'.format(prefix, int(label+1))\n else:\n mapping[1337] = '{} Playlist 1337'.format(prefix)\n self.logger.debug('Mapping configuration: {}'.format(mapping))\n cluster_exportable = ((\n self.df[['id','label']]\n ).assign(label=lambda _df: _df['label'].replace(np.nan, 1337))\n ).assign(label=lambda _df: _df['label'].map(mapping))\n cluster_exportable.to_pickle('{}/clusters_{}.pkl'.format(self.config['app']['data']['path'],self.timestamp))", "def run_example():\r\n data_table = load_data_table(DATA_111_URL)\r\n\r\n singleton_list = []\r\n for line in data_table:\r\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\r\n\r\n #cluster_list = sequential_clustering(singleton_list, 15)\r\n #print \"Displaying\", len(cluster_list), \"sequential clusters\"\r\n\r\n #cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, 9)\r\n #print \"Displaying\", len(cluster_list), \"hierarchical clusters\"\r\n\r\n cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, 9, 5)\r\n print \"Displaying\", len(cluster_list), \"k-means clusters\"\r\n\r\n\r\n # draw the clusters using matplotlib or simplegui\r\n if DESKTOP:\r\n alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True)\r\n #alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\r\n else:\r\n alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers\r", "def update_clusters(self):\n num_ratings = Rating.objects.count()\n \n if self.eligible_to_update(num_ratings):\n ratings_matrix, num_users, all_user_names = \\\n self.construct_ratings_matrix()\n\n k_clusters = int(num_users / 10) + 2 # \"Magical numbers that \n # work the best\"\n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=k_clusters)\n clusters = kmeans.fit(ratings_matrix.tocsr()) # Read sklearn\n # docs to read why tocsr() used. 
THE MAIN KMEANS CLUSTERING\n\n # Updating the clusters\n Cluster.objects.all().delete()\n new_clusters = {i: Cluster(name=i) for i in range(k_clusters)}\n for cluster in new_clusters.values():\n cluster.save()\n for i, cluster_label in enumerate(clusters.labels_):\n # Add the new users to clusters\n new_clusters[cluster_label].users.add(\n User.objects.get(username=all_user_names[i])\n )", "def save_cluster(self):\n self.units0,self.origin0, self.rorder0, self.rorder_origin0=save_cluster(self)", "def generate_clusters(self,D):\n\n condensed = squareform(D.dist_frame)\n linkage = hcl.average(condensed)\n self.clusters = hcl.fcluster(linkage,self.factor,criterion=self.criterion)\n\n self.num_clusters = n_clusters = len(np.unique(self.clusters)) - (1 if -1 in clusters else 0)\n self.cluster_labels = pd.DataFrame({'sequences' : D.dist_frame.index, \n 'cluster' : self.clusters})", "def cluster_to_labeled_samples(self, clusters, trim_number=None, shuffle_each_cluster=True, verbose=False):\n new_clusters = []\n # print \"clusters[0] is\", clusters[0]\n # print \"len(clusters[0]) is\", len(clusters[0])\n\n for cluster in clusters:\n total_files_cluster = 0\n files_subdirs = []\n orig_labels = []\n sum_labels = 0\n # print \"Processing cluster:\", cluster\n if verbose:\n print(\"ClustersAdded:\")\n for (num_files_subdir, label, files_subdir) in cluster:\n if verbose:\n print(\" With label:\" + str(label) + \"(%d imgs)\" % num_files_subdir)\n total_files_cluster += num_files_subdir\n files_subdirs.extend(files_subdir)\n orig_labels += [label] * num_files_subdir\n # TODO: handle non-float labels\n sum_labels += label * num_files_subdir\n avg_label = sum_labels / total_files_cluster\n if verbose:\n print(\"\")\n\n if shuffle_each_cluster:\n selected = list(range(total_files_cluster))\n numpy.random.shuffle(selected)\n files_subdirs = [files_subdirs[i] for i in selected]\n orig_labels = [orig_labels[i] for i in selected]\n\n if len(files_subdirs) != len(orig_labels):\n print(\"Wrong cluster files and orig labels lengths\")\n print(len(files_subdirs))\n print(len(orig_labels))\n\n if trim_number is not None:\n files_subdirs = files_subdirs[0:trim_number]\n orig_labels = orig_labels[0:trim_number]\n total_files_cluster = min(total_files_cluster, trim_number)\n new_clusters.append((total_files_cluster, avg_label, files_subdirs, orig_labels))\n\n new_clusters.reverse()\n return new_clusters", "def cluster(queries):\n\n normalized_queries = min_max_normalization(queries)\n return skc.DBSCAN(eps=0.11, min_samples=4).fit_predict(normalized_queries)", "def cluster_seqs_over_time(self, method='counting_diffs', diffs_cutoff=1, overwrite_data_object=False, cluster_representative_seq='most_abundant', freq_attribute_name=None, keep_first_seq_first=False):\n\t\tnew_cluster_id = 1\n\t\tlist_of_indices = range(len(self.data))\n\t\tfor seq1_index in list_of_indices[:-1]:\n\n\t\t\t#the algorithm below is flawed\n\n\t\t\tif 'cluster_id' in self.data[seq1_index]['other']:\n\t\t\t\tcluster_id = self.data[seq1_index]['other']['cluster_id']\n\t\t\telse:\n\t\t\t\tcluster_id = new_cluster_id\n\t\t\t\tnew_cluster_id += 1\n\t\t\t\tself.data[seq1_index]['other']['cluster_id'] = cluster_id\n\t\t\tfor seq2_index in list_of_indices[seq1_index+1:]:\n\t\t\t\tdiffs = 0\n\t\t\t\tfor i, j in itertools.izip(self.data[seq1_index]['seq'], self.data[seq2_index]['seq']):\n\t\t\t\t\tif i != j:\n\t\t\t\t\t\tdiffs += 1\n\t\t\t\t\t\tif diffs > diffs_cutoff:\n\t\t\t\t\t\t\tbreak\n\t\t\t\tif diffs <= 
diffs_cutoff:\n\t\t\t\t\tself.data[seq2_index]['other']['cluster_id'] = cluster_id\n\n\t\ttpoint_freq_dic = {}\n\t\tfor i in self.data:\n\t\t\ttpoint = i['id'].split('_')[0]\n\t\t\ttry:\n\t\t\t\ttpoint_freq_dic[tpoint] += float(i['other']['total_freq'])\n\t\t\texcept KeyError:\n\t\t\t\ttpoint_freq_dic[tpoint] = float(i['other']['total_freq'])\n\t\tfor i in tpoint_freq_dic:\n\t\t\tprint 'time-point:', i, '; freq sum:', tpoint_freq_dic[i]\n\t\treturn\n\n\t\t#Now overwrite the data in memory, if desired.\n\t\tif overwrite_data_object:\n\t\t\tif keep_first_seq_first:\n\t\t\t\tif 'timepoint' in self.data[0]['other']:\n\t\t\t\t\ttpoint = self.data[0]['other']['timepoint']\n\t\t\t\t\tfirst_seq_key = '%s_%s' % (tpoint, self.data[0]['other']['cluster_id'])\n\t\t\t\telse:\n\t\t\t\t\tfirst_seq_key = self.data[0]['other']['cluster_id']\n\t\t\t#gather new data\n\t\t\tnew_data = {}\n\t\t\tself.diversity = None#diversity statistic no longer applies (if calculated)\n\t\t\tfor i in self.data:\n\t\t\t\tif 'timepoint' in i['other']:\n\t\t\t\t\ttpoint = i['id'].split('_')[0]\n\t\t\t\t\tcluster_seq_id = '%s_%s' % (tpoint, i['other']['cluster_id'])\n\t\t\t\telse:\n\t\t\t\t\tcluster_seq_id = i['other']['cluster_id']\n\t\t\t\ttry:\n\t\t\t\t\tnew_data[cluster_seq_id]['other']['indiv_seq_counts'].append(i['count'])\n\t\t\t\t\tnew_data[cluster_seq_id]['other']['indiv_seqs'].append(i['seq'])\n\t\t\t\t\tnew_data[cluster_seq_id]['count'] += i['count']\n\t\t\t\t\tif freq_attribute_name:\n\t\t\t\t\t\tnew_data[cluster_seq_id]['other']['indiv_seq_freqs'].append(float(i['other'][freq_attribute_name]))\n\t\t\t\t\t\tnew_data[cluster_seq_id]['other'][freq_attribute_name] += float(i['other'][freq_attribute_name])\n\t\t\t\texcept KeyError:\n\t\t\t\t\tnew_data[cluster_seq_id] = {'id':cluster_seq_id, 'count':i['count'], 'other':{'cluster_id':i['other']['cluster_id'], 'indiv_seqs':[i['seq']], 'indiv_seq_counts':[i['count']]}}\n\t\t\t\t\tif freq_attribute_name:\n\t\t\t\t\t\tnew_data[cluster_seq_id]['other']['indiv_seq_freqs'] = [float(i['other'][freq_attribute_name])]\n\t\t\t\t\t\tnew_data[cluster_seq_id]['other'][freq_attribute_name] = float(i['other'][freq_attribute_name])\n\t\t\t\t\tif 'timepoint' in i['other']:\n\t\t\t\t\t\tnew_data[cluster_seq_id]['other']['timepoint'] = i['other']['timepoint']\n\t\t\t#now update data\n\t\t\tself.data = []\n\t\t\tself.counts = []\n\t\t\tfor i in new_data:\n\t\t\t\t#deal with special case of the first sequence cluster, if desired\n\t\t\t\tif keep_first_seq_first:\n\t\t\t\t\tif i == first_seq_key:\n\t\t\t\t\t\tself.data.insert(0, new_data[i])\n\t\t\t\t\t\tself.counts.insert(0, new_data[i]['count'])\n\t\t\t\t\t\tindex = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.data.append(new_data[i])\n\t\t\t\t\t\tself.counts.append(new_data[i]['count'])\n\t\t\t\t\t\tindex = -1\n\t\t\t\telse:\n\t\t\t\t\tself.data.append(new_data[i])\n\t\t\t\t\tself.counts.append(new_data[i]['count'])\n\t\t\t\t\tindex = -1\n\t\t\t\t#get the representative seq for the cluster\n\t\t\t\tif cluster_representative_seq == 'most_abundant':\n\t\t\t\t\tif freq_attribute_name:\n\t\t\t\t\t\tmax_seq_index = new_data[i]['other']['indiv_seq_freqs'].index(max(new_data[i]['other']['indiv_seq_freqs']))\n\t\t\t\t\telse:\n\t\t\t\t\t\tmax_seq_index = new_data[i]['other']['indiv_seq_counts'].index(max(new_data[i]['other']['indiv_seq_counts']))\n\t\t\t\t\tself.data[index]['seq'] = new_data[i]['other']['indiv_seqs'][max_seq_index]\n\t\t\t\tself.data[index]['other']['indiv_seq_counts'] = ','.join([str(j) for j in 
self.data[index]['other']['indiv_seq_counts']])\n\t\t\t\tself.data[index]['other']['indiv_seqs'] = ','.join(self.data[index]['other']['indiv_seqs'])\n\t\t\t\tif freq_attribute_name:\n\t\t\t\t\tself.data[index]['other']['indiv_seq_freqs'] = ','.join([str(j) for j in self.data[index]['other']['indiv_seq_freqs']])\n\t\treturn", "def main():\n df = pd.read_csv(\"HW_07_SHOPPING_CART_v137.csv\", header=0)\n df.index = df.ID\n del df['ID']\n global points\n points = {}\n for index, row in df.iterrows():\n # if(index <):\n points[index] = row.tolist()\n global all_clusters, clusters, cluster_number, total_number_of_features\n\n total_number_of_features = len(points[1])\n all_clusters = []\n for index, point in points.items():\n all_clusters.append(Cluster(index))\n all_clusters[index - 1].mean = point\n all_clusters[index - 1].guest_ids.append(index)\n\n cluster_number[len(all_clusters)] = all_clusters\n perform_clustering()\n smallest_cluster()", "def _generate_ids(self):\n clusterid = 0\n\n cluster2class = {}\n class2cluster = {}\n clusters = {}\n\n for (name,members) in self._clusters:\n classes = [self._class2idx[m] for m in members]\n for c in classes:\n class2cluster[c] = clusterid\n # print(c, clusterid)\n cluster2class[clusterid] = classes\n clusters[clusterid] = (name, classes)\n clusterid += 1\n self._clusters_map = clusters\n\n return cluster2class, class2cluster, clusterid-1", "def keywords_clusters(log_dict, nicks, nick_same_list):\n\n '''\n AUTO TFIDF FROM JUST SENTENCES\n '''\n #http://scikit-learn.org/stable/auto_examples/text/document_clustering.html\n #BUILDING CORPUS\n\n keyword_dict_list, user_keyword_freq_dict, user_words_dict_list, nicks_for_stop_words = keywords(log_dict, nicks, nick_same_list)\n\n corpus = []\n\n def build_centroid(km):\n if config.ENABLE_SVD:\n original_space_centroids = svd.inverse_transform(km.cluster_centers_)\n order_centroids = original_space_centroids.argsort()[:, ::-1]\n else:\n order_centroids = km.cluster_centers_.argsort()[:, ::-1]\n return order_centroids\n\n for user_words_dict in user_words_dict_list:\n corpus.append(\" \".join(map(str,user_words_dict['words'])))\n\n print(\"No. of users\", len(corpus))\n\n #TF_IDF\n stop_word_without_apostrophe = []\n for words in common_english_words.words:\n stop_word_without_apostrophe.append(words.replace(\"'\",\"\"))\n\n stop_words_extended = extended_stop_words(nicks_for_stop_words, stop_word_without_apostrophe) \n \n vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=stop_words_extended,\n use_idf=True)\n print(\"Extracting features from the training dataset using TF-IDF\")\n t0 = time()\n tf_idf = vectorizer.fit_transform(corpus)\n print((\"done in %fs\" % (time() - t0)))\n print(\"n_samples: %d, n_features: %d \\n\" % tf_idf.shape)\n\n # LSA\n if config.ENABLE_SVD:\n print(\"============USING SVD==========\")\n print(\"Performing dimensionality reduction using LSA\")\n t0 = time()\n # Vectorizer results are normalized, which makes KMeans behave as\n # spherical k-means for better results. 
Since LSA/SVD results are\n # not normalized, we have to redo the normalization.\n svd = TruncatedSVD(100) #recommened value = 100\n normalizer = Normalizer(copy=False)\n lsa = make_pipeline(svd, normalizer)\n\n tf_idf = lsa.fit_transform(tf_idf)\n\n print((\"done in %fs\" % (time() - t0)))\n\n explained_variance = svd.explained_variance_ratio_.sum()\n print((\"Explained variance of the SVD step: {}%\".format(\n int(explained_variance * 100))))\n\n if not config.ENABLE_ELBOW_METHOD_FOR_K:\n # CLUSTERING\n km = KMeans(n_clusters=config.NUMBER_OF_CLUSTERS, init='k-means++',\n random_state=3465, max_iter=100, n_init=8)\n\n print((\"Clustering sparse data with %s\" % km))\n t0 = time()\n km.fit(tf_idf)\n print((\"done in %0.3fs\" % (time() - t0)))\n print(\"Top terms per cluster:\") \n \n order_centroids = build_centroid(km) \n np.set_printoptions(threshold=np.nan)\n\n terms = vectorizer.get_feature_names()\n for i in range(config.NUMBER_OF_CLUSTERS):\n print((\"Cluster %d:\" % i))\n for ind in order_centroids[i, :config.SHOW_N_WORDS_PER_CLUSTER]:\n print(terms[ind]+\"\\t\"+str(round(km.cluster_centers_[i][ind], 2)))\n print(\"\")\n\n else:\n print(\"============ELBOW METHOD =============\")\n\n sum_squared_errors_list = []\n avg_sum_squared_errors_list = []\n\n for i in range(1, config.CHECK_K_TILL + 1):\n\n print(\"\\n===>> K = \", i)\n\n km = KMeans(n_clusters=i, init='k-means++', max_iter=100, n_init=8)\n\n t0 = time()\n km.fit(tf_idf)\n \n order_centroids = build_centroid(km)\n\n distance_matrix_all_combination = cdist(tf_idf, km.cluster_centers_, 'euclidean')\n # cIdx = np.argmin(distance_matrix_all_combination,axis=1)\n distance_from_nearest_centroid = np.min(distance_matrix_all_combination, axis=1)\n sum_squared_errors = sum(distance_from_nearest_centroid)\n avg_sum_squared_errors = sum_squared_errors/tf_idf.shape[0] \n\n print(\"Sum Squared Error =\", sum_squared_errors)\n print(\"Avg Sum Squared Error =\", avg_sum_squared_errors)\n\n sum_squared_errors_list.append(sum_squared_errors)\n avg_sum_squared_errors_list.append(avg_sum_squared_errors)\n print(\"Top terms per cluster:\")\n terms = vectorizer.get_feature_names()\n for i in range(i):\n print((\"Cluster %d:\" % i))\n for ind in order_centroids[i, :config.SHOW_N_WORDS_PER_CLUSTER]:\n print((' %s' % terms[ind]))\n print()\n\n plt.plot(list(range(1, config.CHECK_K_TILL+1)), sum_squared_errors_list, 'b*-')\n # ax.plot(K[kIdx], avgWithinSS[kIdx], marker='o', markersize=12, \n # markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')\n plt.grid(True)\n plt.xlabel('Number of clusters')\n plt.ylabel('Average sum of squares')\n plt.title('Elbow for KMeans clustering')\n plt.show()\n\n #NOTE RANDOM OUTPUTS BECAUSE OF RANDOM INITIALISATION\n print(\"NOTE RANDOM OUTPUTS BECAUSE OF RANDOM INITIALISATION\")", "def separate_data_by_k(df_train: pd.DataFrame,\n\t\t\t\t\t\tdf_test: pd.DataFrame,\n\t\t\t\t\t\tdf_ts: pd.DataFrame,\n\t\t\t\t\t\tfeatures: pd.DataFrame,\n\t\t\t\t\t\ttop_kmeans_models: List[KMeans]) -> Tuple[kmeans_data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t kmeans_data]:\n\tts_ft_l = features.index.to_list()\n\tclustered_data = dict()\n\tclustered_data_rnd = dict()\n\tfor model in top_kmeans_models:\n\t\tk = model.cluster_centers_.shape[0]\n\t\tdf_k = df_train.groupby('V1')\\\n\t\t\t\t\t\t.apply(get_class_label,\n\t\t\t\t\t\t\t\tmodel=model,\n\t\t\t\t\t\t\t\tts_index_l=ts_ft_l, k=k)\n\t\td = {}\n\t\td_rnd = {}\n\t\tfor class_label in df_k['class'].unique():\n\t\t\t# creating temporary datasets\n\t\t\tdf_train_tmp = 
df_k[df_k['class']==class_label].iloc[:,:-2]\n\t\t\tdf_test_tmp = df_test.loc[df_test['V1'].isin(df_train_tmp['V1'].values)]\n\t\t\tdf_ts_tmp = df_ts.loc[df_ts['V1'].isin(df_train_tmp['V1'].values)]\n\n\t\t\t# create random data set of same size\n\t\t\tclass_size_ratio = df_train_tmp.shape[0]/df_train.shape[0]\n\n\t\t\tdf_train_tmp_rnd = df_train.iloc[:,:-2].sample(frac=class_size_ratio)\n\t\t\tdf_test_tmp_rnd = df_test.loc[df_test['V1'].isin(df_train_tmp_rnd['V1'].values)]\n\t\t\tdf_ts_tmp_rnd = df_ts.loc[df_ts['V1'].isin(df_train_tmp_rnd['V1'].values)]\n\n\t\t\t# asserting needed properties\n\t\t\ttrain_unique_v1 = sorted(df_train_tmp['V1'].unique())\n\t\t\tts_unique_v1 = sorted(df_ts_tmp['V1'].unique())\n\t\t\tassert train_unique_v1 == ts_unique_v1\n\t\t\tassert (df_train_tmp['V1'].unique() == df_test_tmp['V1'].unique()).all()\n\t\t\t\n\t\t\td[class_label] = (df_train_tmp, df_test_tmp, df_ts_tmp)\n\t\t\td_rnd[class_label] = (df_train_tmp_rnd, df_test_tmp_rnd, df_ts_tmp_rnd)\n\n\t\tclustered_data[k] = d\n\t\tclustered_data_rnd[k] = d_rnd\n\n\n\treturn clustered_data, clustered_data_rnd", "def collate_cluster_data(ensembles_data):\n\n clusters = {} # Loop through all ensemble data objects and build up a data tree\n cluster_method = None\n cluster_score_type = None\n truncation_method = None\n percent_truncation = None\n side_chain_treatments = []\n for e in ensembles_data:\n if not cluster_method:\n cluster_method = e['cluster_method']\n cluster_score_type = e['cluster_score_type']\n percent_truncation = e['truncation_percent']\n truncation_method = e['truncation_method']\n # num_clusters = e['num_clusters']\n cnum = e['cluster_num']\n if cnum not in clusters:\n clusters[cnum] = {}\n clusters[cnum]['cluster_centroid'] = e['cluster_centroid']\n clusters[cnum]['cluster_num_models'] = e['cluster_num_models']\n clusters[cnum]['tlevels'] = {}\n tlvl = e['truncation_level']\n if tlvl not in clusters[cnum]['tlevels']:\n clusters[cnum]['tlevels'][tlvl] = {}\n clusters[cnum]['tlevels'][tlvl]['truncation_variance'] = e['truncation_variance']\n clusters[cnum]['tlevels'][tlvl]['num_residues'] = e['num_residues']\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'] = {}\n srt = e['subcluster_radius_threshold']\n if srt not in clusters[cnum]['tlevels'][tlvl]['radius_thresholds']:\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt] = {}\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt]['num_models'] = e['subcluster_num_models']\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt]['sct'] = {}\n sct = e['side_chain_treatment']\n if sct not in side_chain_treatments:\n side_chain_treatments.append(sct)\n if sct not in clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt]['sct']:\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt]['sct'][sct] = {}\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt]['sct'][sct]['name'] = e['name']\n clusters[cnum]['tlevels'][tlvl]['radius_thresholds'][srt]['sct'][sct]['num_atoms'] = e['ensemble_num_atoms']\n\n return {\n 'clusters': clusters,\n 'cluster_method': cluster_method,\n 'cluster_score_type': cluster_score_type,\n 'truncation_method': truncation_method,\n 'percent_truncation': percent_truncation,\n 'side_chain_treatments': side_chain_treatments,\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns pair estimate information for a mastering project
def estimate_pairs(self):
    alias = self.api_path + "/estimatedPairCounts"
    estimate_json = self.client.get(alias).successful().json()
    info = EstimatedPairCounts.from_json(self.client, estimate_json, api_path=alias)
    return info
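Here the pattern is fetch-then-wrap: issue a GET against the `estimatedPairCounts` alias, verify the response, and hand the JSON to a typed wrapper. The sketch below mirrors only that call sequence; the stub client, the canned JSON fields, and the attributes on `EstimatedPairCounts` are assumptions made for illustration, not the real library's data model.

```python
# Self-contained sketch of the fetch-then-wrap pattern in estimate_pairs above.
# The payload fields and wrapper attributes are hypothetical.

class _Response:
    def __init__(self, payload):
        self._payload = payload
    def successful(self):
        # A real response object would raise here on a non-2xx status.
        return self
    def json(self):
        return self._payload

class Client:
    def get(self, path):
        print(f"GET {path}")
        return _Response({"candidatePairCount": 54321, "generatedPairCount": 1234})

class EstimatedPairCounts:
    def __init__(self, data, api_path):
        self.candidate_pairs = data["candidatePairCount"]
        self.generated_pairs = data["generatedPairCount"]
        self.api_path = api_path

    @classmethod
    def from_json(cls, client, data, api_path=None):
        # client is unused in this sketch but kept to mirror the call above
        return cls(data, api_path)

class MasteringProject:
    def __init__(self, client, api_path):
        self.client = client
        self.api_path = api_path

    def estimate_pairs(self):
        alias = self.api_path + "/estimatedPairCounts"
        estimate_json = self.client.get(alias).successful().json()
        return EstimatedPairCounts.from_json(self.client, estimate_json, api_path=alias)

project = MasteringProject(Client(), "projects/1")
estimate = project.estimate_pairs()
print(estimate.candidate_pairs, estimate.generated_pairs)
```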
[ "def _optimNodePairs(self):\n trialKtauSum = np.zeros(len(self.tree.nodes()))\n trialPairings = []\n # Generate all possible node pairings\n for i, rootNodeID in enumerate(self.tree.nodes()):\n trialPairings.append([])\n for nodeID in self.tree.nodes():\n # iterate though all child nodes,\n # root dataset cannot be paired with itself\n if nodeID != rootNodeID:\n trialPair = pc.PairCopula(self.tree.nodes[nodeID][\"data\"].values,\n self.tree.nodes[rootNodeID][\"data\"].values)\n trialKtau, trialP = trialPair.empKTau()\n trialKtauSum[i] += abs(trialKtau)\n trialPairings[i].append((nodeID, rootNodeID, trialKtau))\n print(\"Tree level: %d, configuration: %d, Ktau Metric: %f\" % (self.level, i, trialKtauSum[i]))\n bestPairingIndex = np.argmax(np.abs(trialKtauSum))\n print(\" === Configuration %d selected === \" % (bestPairingIndex))\n self.rootNodeID = trialPairings[bestPairingIndex][0][1]\n return trialPairings[bestPairingIndex]", "def generate_pairs():\n\n tissue2info = dict()\n\n with open('./info/project-info.csv', 'r') as csvfile:\n\n f_csv = csv.reader(csvfile, delimiter=str(','), quotechar=str('|'))\n next(f_csv)\n\n for row in f_csv:\n tissue = row[1]\n dye = row[2]\n original_name = row[6]\n\n if tissue not in tissue2info:\n tissue2info[tissue] = set()\n\n tissue2info[tissue].add((dye, original_name))\n\n pairs = []\n\n for tissue in tissue2info:\n\n available_items = tissue2info[tissue]\n\n tissue_pairs = list(combinations(\n available_items, 2 if len(available_items) > 1 else 1))\n\n for ((dye1, original_name1), (dye2, original_name2)) in tissue_pairs:\n\n dirs_in_path = os.path.join(Paths.PATH_TO_IMAGES, tissue)\n\n dir_scale_img = [x for x in os.listdir(\n dirs_in_path) if x != '.DS_Store'][0]\n\n path_to_img = os.path.join(\n Paths.PATH_TO_IMAGES, tissue, dir_scale_img)\n\n extension = \".jpg\"\n\n if os.path.exists(os.path.join(path_to_img, f\"{original_name1}.png\")):\n extension = \".png\"\n\n pairs.append((tissue, dye1, dye2, path_to_img,\n original_name1, original_name2, extension))\n\n return pairs", "def find_pair_details(kwargs):\n\n try:\n msisdn_list = []\n\n chk_primary = Pairing.query.filter(Pairing.msisdn == '{}'.format(kwargs['primary_msisdn']),\n Pairing.is_primary == True,\n Pairing.end_date == None).first()\n\n # to check if request is made from primary-pair\n if chk_primary:\n\n chk_sec = Pairing.query.filter(Pairing.primary_id == '{}'.format(chk_primary.id),\n Pairing.end_date == None).all()\n if chk_sec:\n for m in chk_sec:\n msisdn_list.append(int(m.msisdn))\n return msisdn_list\n else:\n return custom_text_response(_(\"No Pair is associated with %(pm)s\", pm=kwargs['primary_msisdn']),\n status=STATUS_CODES.get('UNPROCESSABLE_ENTITY'),\n mimetype=MIME_TYPES.get('TEXT'))\n else:\n return custom_text_response(_(\"%(pm)s is not registered as Primary-Pair\", pm=kwargs['primary_msisdn']),\n status=STATUS_CODES.get('UNPROCESSABLE_ENTITY'),\n mimetype=MIME_TYPES.get('TEXT'))\n except Exception as e:\n db.session.rollback() # pragma: no cover\n\n finally:\n db.session.close()", "def fixed_pairs(\n self,\n ) -> Tuple[\n List[Tuple[str, str, Union[int, float]]],\n List[Tuple[str, str, Union[int, float]]],\n List[Tuple[str, str, Union[int, float]]],\n ]:\n assert (\n self.train_pairs is not None and self.test_pairs is not None\n ), \"You need to pass in train and test pairs to use this function\"\n self.train_pairs.loc[:, \"label\"] = self.train_pairs[\"label\"].map(\n {\"NO\": 0, \"YES\": 1, \"0\": 0, 0: 0, \"1\": 1, 1: 1}\n )\n if self.val_pairs is not None:\n 
self.val_pairs.loc[:, \"label\"] = self.val_pairs[\"label\"].map(\n {\"NO\": 0, \"YES\": 1, \"0\": 0, 0: 0, \"1\": 1, 1: 1}\n )\n train_pairs = list(self.train_pairs.to_records(index=False))\n val_pairs = list(self.val_pairs.to_records(index=False))\n else:\n np.random.seed(self.random_seed)\n # split train into train/val\n train_prob = self.train_ratio / (self.train_ratio + self.val_ratio)\n msk = np.random.rand(len(self.train_pairs)) < train_prob\n train_pairs = list(self.train_pairs[msk].to_records(index=False))\n val_pairs = list(self.train_pairs[~msk].to_records(index=False))\n self.test_pairs.loc[:, \"label\"] = self.test_pairs[\"label\"].map({\"NO\": 0, \"YES\": 1, \"0\": 0, 0: 0, \"1\": 1, 1: 1})\n test_pairs = list(self.test_pairs.to_records(index=False))\n\n return train_pairs, val_pairs, test_pairs", "def psd_pairwise_comparison():\n # Location of the data \n base_dir = '../example/' \n \n # Data resolution, in nanometers \n resolution = {'res_xy_nm': 100, 'res_z_nm': 70}\n \n # Threshold value for the probability maps. This value does not usually need to be changed. \n thresh = 0.9\n\n # List the file names \n target_filenames = ['PSD95m_1st.tif', 'PSD95r_2nd.tif']\n reference_filenames = ['synapsin_1st.tif', 'synapsin_1st.tif']\n\n # Create a query for each pair\n query_list = []\n for n in range(0, len(target_filenames)):\n target_name = target_filenames[n] # The AB we're interested in testing (PSD)\n reference_name = reference_filenames[n] # The previously validated AB (synapsin)\n \n # Formulate the query\n query = {'preIF': [reference_name], 'preIF_z': [2], \n 'postIF': [target_name], 'postIF_z': [2],\n 'punctumSize': 2}\n query_list.append(query)\n\n # Run the SACT \n measure_list = aa.calculate_measure_lists(query_list, None, base_dir,\n thresh, resolution, target_filenames)\n \n # Convert measure object to a dataframe \n project_names = ['PSD_M', 'PSD_R']\n df = aa.create_df(measure_list, project_names, target_filenames, reference_filenames)\n print(df)\n\n return df", "def _calc_pair_genes(self, ind, return_inds=False):\n i_0 = bisect(self.i0_inds, ind) - 1\n i_1 = ind - self.i0_inds[i_0] + i_0 + 1\n\n if return_inds:\n return (i_0, i_1)\n return (self.get_genes_solo(i_0), self.get_genes_solo(i_1))", "def get_solvent_info(solvent):\n solvent_entry = database.solvation.libraries['solvent'].entries[solvent]\n solvent_data = solvent_entry.data\n solvent_smiles_list = []\n for spc in solvent_entry.item:\n solvent_smiles_list.append(spc.smiles)\n solvent_index = solvent_entry.index\n solvent_href = reverse('database:solvation-entry',\n kwargs={'section': 'libraries', 'subsection': 'solvent',\n 'index': solvent_index})\n solvent_info = (solvent, solvent_smiles_list, solvent_href, solvent_index)\n\n # check whether we have all solvent parameters to calculate dGsolv and dHsolv\n abraham_parameter_list = [solvent_data.s_g, solvent_data.b_g, solvent_data.e_g, solvent_data.l_g,\n solvent_data.a_g, solvent_data.c_g]\n dGsolv_avail = not any(param is None for param in abraham_parameter_list)\n mintz_parameter_list = [solvent_data.s_h, solvent_data.b_h, solvent_data.e_h, solvent_data.l_h,\n solvent_data.a_h, solvent_data.c_h]\n dHsolv_avail = not any(param is None for param in mintz_parameter_list)\n\n return solvent_data, solvent_info, dGsolv_avail, dHsolv_avail", "def getConfidenceMap(self) -> retval:\n ...", "def get_chains_info(ph, selection_list=None):\n\n chains_info = {}\n # asc = ph.atom_selection_cache()\n model = ph.models()[0]\n # build chains_info from hierarchy\n # 
print \"in get_chains_info\"\n for ch in model.chains():\n # print \"ch_id\", ch.id\n gr = True\n if not chains_info.has_key(ch.id):\n chains_info[ch.id] = Chains_info()\n gr = False\n # This is very time-consuming\n # ph_sel = ph.select(asc.selection(\"chain '%s'\" % ch.id))\n # coc = flex.vec3_double([ph_sel.atoms().extract_xyz().mean()])\n # chains_info[ch.id].center_of_coordinates = coc\n chains_info[ch.id].center_of_coordinates = None\n chains_info[ch.id].chains_atom_number += ch.atoms_size()\n conf = ch.conformers()[0]\n len_conf = len(ch.conformers())\n # Warning devs: the following assert fails when there is no main conf\n # in a residue\n # assert len(ch.residue_groups()) == len(conf.residues())\n for rg, res in zip(ch.residue_groups(), conf.residues()):\n chains_info[ch.id].resid.append(rg.resid())\n chains_info[ch.id].res_names.append(rg.atom_groups()[0].resname)\n # atoms = res.atoms()\n ag0 = rg.atom_groups()[0]\n atoms = ag0.atoms()\n present_anames = [a.name for a in atoms]\n # print \"rg.atom_groups_size()\", rg.atom_groups_size()\n if rg.atom_groups_size() > 1:\n for add_rgs in rg.atom_groups()[1:]:\n for a in add_rgs.atoms():\n # print \" getting atom '%s'\" % a.name, a.name not in present_anames\n if a.name not in present_anames:\n atoms.append(a)\n present_anames.append(a.name)\n chains_info[ch.id].atom_names.append(list(atoms.extract_name()))\n chains_info[ch.id].atom_selection.append(list(atoms.extract_i_seq()))\n chains_info[ch.id].no_altloc.append(not rg.have_conformers() or len_conf==1)\n chains_info[ch.id].gap_residue.append(gr)\n # print \" \", rg.id_str(), rg.have_conformers(), not res.is_pure_main_conf, \"|noaltloc:\", (not rg.have_conformers() or len_conf==1), \"size:\", atoms.size(), \"gr:\", gr\n # for a in atoms:\n # print \" \", a.id_str()\n gr = False\n return chains_info", "def compute_object_pair_sim_score(self, test_object_pair_dfs):\n\n temp_object_pair_dfs = copy.deepcopy(test_object_pair_dfs)\n\n for object_pair in self.object_pair_frequencies:\n object_pair_frequency = self.object_pair_frequencies[object_pair]\n object_pair_gmm = self.object_pair_gmms[object_pair]['gmm']\n\n temp_object_pair_dfs[object_pair][object_pair + '_' + 'sim_scores'] = \\\n (object_pair_frequency *\n object_pair_gmm.score_samples(test_object_pair_dfs[object_pair])\n / self.number_of_training_scenes)\n\n return temp_object_pair_dfs", "def compare_result_fraction_parameters(paths):\n\tresults_cfgrank_9 = obtain_results_from_result(paths[0])\n\tresults_cfgrank_8 = obtain_results_from_result(paths[1])\n\tresults_cfgrank_7 = obtain_results_from_result(paths[2])\n\tresults_cfgrank_6 = obtain_results_from_result(paths[3])\n\n\tindx_1 = 0\n\tindx_3 = 1\n\tindx_5 = 2\n\tindx_10= 3\n\tindx_20= 4\n\n\t# projs1 = ['rs-6d-c3-obj1', 'rs-6d-c3-obj2', 'sol-6d-c2-obj1', 'sol-6d-c2-obj2', 'wc+rs-3d-c4-obj1', 'wc+rs-3d-c4-obj2', 'wc+sol-3d-c4-obj1', 'wc+sol-3d-c4-obj2', 'wc+wc-3d-c4-obj1', 'wc+wc-3d-c4-obj2', 'wc-3d-c4-obj1', 'wc-3d-c4-obj2', 'wc-5d-c5-obj1', 'wc-5d-c5-obj2', 'wc-6d-c1-obj1', 'wc-6d-c1-obj2', 'wc-c1-3d-c1-obj1', 'wc-c1-3d-c1-obj2', 'wc-c3-3d-c1-obj1', 'wc-c3-3d-c1-obj2']\n\t# # boolean projects\n\t# projs2 = ['AJStats', 'Apache', 'BerkeleyC', 'BerkeleyJ', 'clasp', 'Dune', 'Hipacc', 'HSMGP_num', 'LLVM', 'lrzip', 'sac', 'spear', 'SQL', 'WGet', 'x264', 'XZ']\n\t# projs = projs1 + projs2\n\t# proj_id = 0\n\n\tprojs = []\n\tproj_id = 0\n\twith open(paths[0]) as fi:\n\t\tlines = fi.readlines()\n\t\tfor line in lines:\n\t\t\tprojs.append(line.split(\":[[\")[0])\n\n\tprint(\"RDTie of 
each dataset using ReConfig with different filter ratios (90%, 80%, 70%)\\n\")\n\tprint(\"1) We calculate the RDTie of each scenario in cases of | Top-1 | Top-3 | Top-5 | Top-10 |.\")\n\tprint(\"2) The order of filter ratios is | 90% 80% 70% |.\\n\")\n\n\tprint(\"| %-18s | %-8s %-8s %-8s | %-8s %-8s %-8s | %-8s %-8s %-8s | %-8s %-8s %-8s |\"%(\"Datasets\",\"90%\",\"80%\",\"70%\",\"90%\",\"80%\",\"70%\",\"90%\",\"80%\",\"70%\",\"90%\",\"80%\",\"70%\"))\n\tprint(\"----------------------\")\n\n\twhile indx_1 < len(results_cfgrank_9):\n\t\tprint(\"| %-18s |\" % projs[proj_id], end=\" \")\n\t\tprint(\"%-8.3f %-8.3f %-8.3f |\" % (np.mean(results_cfgrank_9[indx_1]), np.mean(results_cfgrank_8[indx_1]), np.mean(results_cfgrank_7[indx_1])), end=\" \")\n\t\tprint(\"%-8.3f %-8.3f %-8.3f |\" % (np.mean(results_cfgrank_9[indx_3]), np.mean(results_cfgrank_8[indx_3]), np.mean(results_cfgrank_7[indx_3])), end=\" \")\n\t\tprint(\"%-8.3f %-8.3f %-8.3f |\" % (np.mean(results_cfgrank_9[indx_5]), np.mean(results_cfgrank_8[indx_5]), np.mean(results_cfgrank_7[indx_5])), end=\" \")\n\t\tprint(\"%-8.3f %-8.3f %-8.3f |\" % (np.mean(results_cfgrank_9[indx_10]), np.mean(results_cfgrank_8[indx_10]), np.mean(results_cfgrank_7[indx_10])))\n\t\t\n\t\tindx_1 += 5\n\t\tindx_3 += 5\n\t\tindx_5 += 5\n\t\tindx_10 += 5\n\t\tindx_20 += 5\n\t\tproj_id += 1\n\n\t# Tex format \n\t# while indx_1 < len(results_cfgrank_9):\n\t# \tprint(\"%s &\" % projs[proj_id], end=\" \")\n\t# \tprint(\" %.3f & %.3f & %.3f & %.3f &\" % (np.mean(results_cfgrank_9[indx_1]), np.mean(results_cfgrank_8[indx_1]), np.mean(results_cfgrank_7[indx_1]), np.mean(results_cfgrank_6[indx_1])), end=\" \")\n\t# \tprint(\" %.3f & %.3f & %.3f & %.3f &\" % (np.mean(results_cfgrank_9[indx_3]), np.mean(results_cfgrank_8[indx_3]), np.mean(results_cfgrank_7[indx_3]), np.mean(results_cfgrank_6[indx_3])), end=\" \")\n\t# \tprint(\" %.3f & %.3f & %.3f & %.3f &\" % (np.mean(results_cfgrank_9[indx_5]), np.mean(results_cfgrank_8[indx_5]), np.mean(results_cfgrank_7[indx_5]), np.mean(results_cfgrank_6[indx_5])), end=\" \")\n\t# \tprint(\" %.3f & %.3f & %.3f & %.3f \\\\\\\\\" % (np.mean(results_cfgrank_9[indx_10]), np.mean(results_cfgrank_8[indx_10]), np.mean(results_cfgrank_7[indx_10]), np.mean(results_cfgrank_6[indx_10])))\n\t# \tprint(\"\\\\hline\")\n\t\t\n\t# \tindx_1 += 5\n\t# \tindx_3 += 5\n\t# \tindx_5 += 5\n\t# \tindx_10 += 5\n\t# \tindx_20 += 5\n\t# \tproj_id += 1", "def test_estimate(self):\n expectedResult = 0.926\n credibility = TestCredibility.credibilityEstimator.estimate(self.warp)\n self.assertCredibilityEstimation(credibility, expectedResult)", "def match_pair(pair, sim_method):\r\n doc1, doc2 = get_texts(pair)\r\n ents1 = extract_ents(nlp(doc1))\r\n ents2 = extract_ents(nlp(doc2))\r\n # cluster the corefer entities for each document\r\n c1 = cluster_doc(ents1)\r\n c2 = cluster_doc(ents2)\r\n similarity = sim_method(c1, c2)\r\n return similarity, [c1, c2]", "def compute_common_data(self):\n self.filter_expression_and_priors()\n print('Creating design and response matrix ... ')\n self.design_response_driver.delTmin = self.delTmin\n self.design_response_driver.delTmax = self.delTmax\n self.design_response_driver.tau = self.tau\n (self.design, self.response) = self.design_response_driver.run(self.expression_matrix, self.meta_data)\n\n # compute half_tau_response\n print('Setting up TFA specific response matrix ... 
')\n self.design_response_driver.tau = self.tau / 2\n (self.design, self.half_tau_response) = self.design_response_driver.run(self.expression_matrix, self.meta_data)", "def _master_info_basic(self):\n out_str = \"\"\n (status, dev_id) = self.__tx_dev.rd(0x400008)\n self.__tx_dev.decode_error_status(status, cmd='rd(0x400008)', print_on_error=True)\n if(status == 0x01):\n dev_id = (dev_id & 0x0f)\n (status, gmd) = self.__tx_dev.get_master_descriptor()\n self.__tx_dev.decode_error_status(status, cmd='get_master_descriptor()', print_on_error=True)\n md = gmd.moduleDescriptor\n if(status == 0x01):\n out_str = \":\".join([\"%.2X\" % i for i in md.macAddress]) + \" \"\n major = md.firmwareVersion >> 5 # (Upper 11-bits)\n minor = md.firmwareVersion & 0x1f # (Lower 5-bits)\n out_str += \"- v%d.%d \" % (major, minor)\n module_id = (md.moduleID & 0xff)\n out_str += \"- %s (0x%.2X) \" % (dec.module_id.get(module_id, \"Unknown moduleID\"), module_id)\n out_str += \"- %s (0x%.2X) \" % (dec.hardware_type.get(md.hardwareType, \"Unknown hardwareType\"), md.hardwareType)\n print(out_str)", "def produce_pairs(self):\n\n # print('\\n Producing Pairs DFs \\n')\n # common_j = self.find_common()\n\n # print ('length of common j', len(common_j))\n\n # #This gives all articles that are in mentions and available in MEDLINE files\n # shared_entities = self.mentions[self.mentions['PMID'].isin(common_j)]\n # shared_entities = shared_entities.sort_values('SerialNo')\n\n # print('shared entities', shared_entities.shape)\n\n # #This gives DF where there are atleast two entries in them\n # ser_no_ents = shared_entities['SerialNo'].tolist()\n # temp_s1_values = self.pairs[self.pairs['SerialNo1'].isin(ser_no_ents)]\n # temp_s2_list = temp_s1_values['SerialNo2'].tolist()\n # both_entries = shared_entities[shared_entities['SerialNo'].isin(temp_s2_list)]\n # both_entries = both_entries.sort_values('PMID')\n\n # print('ser_no_ents', len(ser_no_ents))\n # print('temp_s1_values', temp_s1_values.shape)\n # print('temp_s2_list', len(temp_s2_list))\n # print('both_entries', both_entries.shape)\n\n both_entries = self.mentions.sort_values('PMID')\n\n #Get True Pairs\n true_pairs = self.extract_true_pairs(both_entries)\n\n\n tp_strict_with_sen = self.add_sentences(true_pairs, True, True)\n\n print('Strict shape before drop, ', tp_strict_with_sen.shape)\n tp_strict_with_sen = tp_strict_with_sen[pd.notnull(tp_strict_with_sen['Text'])]\n print('Strict shape after drop, ', tp_strict_with_sen.shape)\n\n\n tp_strict_with_sen[['SerialNo_x',\n 'SerialNo_y']] = tp_strict_with_sen[['SerialNo_x',\n 'SerialNo_y']].astype(int)\n self.strict_pairs_w_sen_df = tp_strict_with_sen[['Entity_name_x',\n 'SerialNo_x', 'Entity_name_y',\n 'SerialNo_y', 'Text']]\n\n name_strict = 'Results/' + self.res_name + '_strict_pairs_w_sen_df.pkl'\n\n pickle.dump(self.strict_pairs_w_sen_df, open(name_strict, 'wb'))\n\n if self.full_sen_set:\n tp_gen_with_sen = self.add_sentences(true_pairs, True, False)\n both_entries_with_sen = self.add_sentences(both_entries, False, False)\n\n print('Gen shape before drop, ', tp_gen_with_sen.shape)\n print('Both ents shape before drop, ', both_entries_with_sen.shape)\n tp_gen_with_sen = tp_gen_with_sen[pd.notnull(tp_gen_with_sen['Text'])]\n both_entries_with_sen = both_entries_with_sen[pd.notnull(both_entries_with_sen['Text'])]\n print('Gen shape after drop, ', tp_gen_with_sen.shape)\n print('Both ents shape after drop, ', both_entries_with_sen.shape)\n\n\n\n tp_gen_with_sen[['SerialNo_x',\n 'SerialNo_y']] = 
tp_gen_with_sen[['SerialNo_x',\n 'SerialNo_y']].astype(int)\n\n both_entries_with_sen[['SerialNo']] = both_entries_with_sen[['SerialNo']].astype(int)\n\n self.gen_pairs_w_sen_df = tp_gen_with_sen[['Entity_name_x',\n 'SerialNo_x', 'Entity_name_y',\n 'SerialNo_y', 'Text']]\n self.both_ents_w_sen_df = both_entries_with_sen[['Entity_name',\n 'SerialNo', 'Text']]\n\n name_gen = 'Results/' + self.res_name + '_gen_pairs_w_sen_df.pkl'\n name_be = 'Results/' + self.res_name + '_both_ents_w_sen_df.pkl'\n\n pickle.dump(self.gen_pairs_w_sen_df, open(name_gen, 'wb'))\n pickle.dump(self.both_ents_w_sen_df, open(name_be, 'wb'))\n\n print('\\n Done with Pairs DFs \\n')", "def mutual_information(self, ret_prob_activity=False):\n return self.mutual_information_monte_carlo(ret_prob_activity)", "def all_pair_comparisons(w):\n pairs = MG_opt[w.species][\"pairs\"]\n in_files = []\n for s1, s2 in pairs:\n in_files.append(f\"projections/{w.species}/comparison/{w.kind}__{s1}-{s2}.json\")\n return in_files", "def getProjectInfo(self,project):\n m = re.match(r'(.*)-(.*)',project)\n if m:\n name = m.group(1)\n version = m.group(2)\n else:\n name = project\n version = \"*\"\n return name,version", "def loadextendedcompoundinfo(self):\n apiurl = 'http://www.chemspider.com/MassSpecAPI.asmx/GetExtendedCompoundInfo?CSID=%s&token=%s' % (self.csid,TOKEN)\n response = urllib2.urlopen(apiurl)\n tree = ET.parse(response)\n mf = tree.find('{http://www.chemspider.com/}MF')\n self._mf = mf.text.encode('utf-8') if mf is not None else None\n smiles = tree.find('{http://www.chemspider.com/}SMILES')\n self._smiles = smiles.text.encode('utf-8') if smiles is not None else None\n inchi = tree.find('{http://www.chemspider.com/}InChI')\n self._inchi = inchi.text.encode('utf-8') if inchi is not None else None\n inchikey = tree.find('{http://www.chemspider.com/}InChIKey')\n self._inchikey = inchikey.text.encode('utf-8') if inchikey is not None else None\n averagemass = tree.find('{http://www.chemspider.com/}AverageMass')\n self._averagemass = float(averagemass.text.encode('utf-8')) if averagemass is not None else None\n molecularweight = tree.find('{http://www.chemspider.com/}MolecularWeight')\n self._molecularweight = float(molecularweight.text.encode('utf-8')) if molecularweight is not None else None\n monoisotopicmass = tree.find('{http://www.chemspider.com/}MonoisotopicMass')\n self._monoisotopicmass = float(monoisotopicmass.text.encode('utf-8')) if monoisotopicmass is not None else None\n nominalmass = tree.find('{http://www.chemspider.com/}NominalMass')\n self._nominalmass = float(nominalmass.text.encode('utf-8')) if nominalmass is not None else None\n alogp = tree.find('{http://www.chemspider.com/}ALogP')\n self._alogp = float(alogp.text.encode('utf-8')) if alogp is not None else None\n xlogp = tree.find('{http://www.chemspider.com/}XLogP')\n self._xlogp = float(xlogp.text.encode('utf-8')) if xlogp is not None else None\n commonname = tree.find('{http://www.chemspider.com/}CommonName')\n self._commonname = commonname.text.encode('utf-8') if commonname is not None else None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Project's unified dataset with associated clusters.
def record_clusters_with_data(self):
    unified_dataset = self.unified_dataset()

    # Replace this workaround with a direct API call once API
    # is fixed. APIs that need to work are: fetching the dataset and
    # being able to call refresh on resulting dataset. Until then, we grab
    # the dataset by constructing its name from the corresponding Unified Dataset's name
    name = unified_dataset.name + "_dedup_clusters_with_data"
    return self.client.datasets.by_name(name)

    # super.__repr__ is sufficient
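As the inline comment notes, this is a naming-convention workaround: the clusters-with-data dataset is located by appending a fixed suffix to the unified dataset's name rather than through a dedicated endpoint. A self-contained sketch of that lookup is below; every class, method, and dataset name in it is a hypothetical stand-in chosen only to make the convention visible.

```python
# Sketch of the name-construction workaround in record_clusters_with_data above.
# All classes and dataset names are assumptions for illustration.

class DatasetCollection:
    def __init__(self, datasets_by_name):
        self._by_name = datasets_by_name
    def by_name(self, name):
        return self._by_name[name]   # a real client would issue a lookup request

class Client:
    def __init__(self, datasets):
        self.datasets = DatasetCollection(datasets)

class UnifiedDataset:
    def __init__(self, name):
        self.name = name

class MasteringProject:
    def __init__(self, client, unified_dataset):
        self.client = client
        self._unified = unified_dataset

    def unified_dataset(self):
        return self._unified

    def record_clusters_with_data(self):
        unified_dataset = self.unified_dataset()
        # Derived dataset is found purely by naming convention.
        name = unified_dataset.name + "_dedup_clusters_with_data"
        return self.client.datasets.by_name(name)

datasets = {"my_project_unified_dataset_dedup_clusters_with_data": "<clusters-with-data handle>"}
project = MasteringProject(Client(datasets), UnifiedDataset("my_project_unified_dataset"))
print(project.record_clusters_with_data())
```

One upside of isolating the convention inside the method: if a direct endpoint appears later, only `record_clusters_with_data` has to change, while callers keep the same interface.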
[ "def published_clusters_with_data(self):\n\n unified_dataset = self.unified_dataset()\n name = unified_dataset.name + \"_dedup_published_clusters_with_data\"\n return self.client.datasets.by_name(name)", "def get_dataset(self, user, name):\n\n return super().get_repo(\"dataset\", user, name)", "def _produce_train_dataset(self):\r\n pass", "def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == CrowdDataset.ucf_qnrf:\n self.dataset_class = UcfQnrfFullImageDataset\n self.train_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfQnrfTransformedDataset(dataset='test', seed=101)\n elif settings.crowd_dataset == CrowdDataset.shanghai_tech:\n self.dataset_class = ShanghaiTechFullImageDataset\n self.train_dataset = ShanghaiTechTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size,\n map_directory_name=settings.map_directory_name,\n image_patch_size=self.settings.image_patch_size,\n label_patch_size=self.settings.label_patch_size)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = ShanghaiTechTransformedDataset(dataset='test', seed=101,\n map_directory_name=settings.map_directory_name,\n image_patch_size=self.settings.image_patch_size,\n label_patch_size=self.settings.label_patch_size)\n elif settings.crowd_dataset == CrowdDataset.ucf_cc_50:\n seed = 0\n self.dataset_class = UcfCc50FullImageDataset\n self.train_dataset = UcfCc50TransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=seed,\n test_start=settings.labeled_dataset_seed * 10,\n inverse_map=settings.inverse_map,\n map_directory_name=settings.map_directory_name)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfCc50TransformedDataset(dataset='test', seed=seed,\n test_start=settings.labeled_dataset_seed * 10,\n inverse_map=settings.inverse_map,\n map_directory_name=settings.map_directory_name)\n else:\n raise ValueError('{} is not an understood crowd dataset.'.format(settings.crowd_dataset))", "def load_dataset(self):", "def clouds(self) -> CloudsData:\n pass", "def load(self):\n import os\n\n import pandas as pd\n\n dtype = {'names': ('cluster_id', 'group'), 'formats': ('i4', 'S10')}\n # One of these (cluster_groups.csv or cluster_group.tsv) is from\n # kilosort and the other from kilosort2\n # and is updated by the user when doing cluster assignment in phy\n # See comments above this class definition for a bit more info\n if fileExists(self.fname_root, \"cluster_groups.csv\"):\n self.cluster_id, self.group = np.loadtxt(\n os.path.join(self.fname_root, \"cluster_groups.csv\"),\n unpack=True,\n skiprows=1,\n dtype=dtype\n )\n if fileExists(self.fname_root, \"cluster_group.tsv\"):\n self.cluster_id, self.group = np.loadtxt(\n os.path.join(self.fname_root, \"cluster_group.tsv\"),\n unpack=True,\n skiprows=1,\n dtype=dtype,\n )\n\n \"\"\"\n Output some information to 
the user if self.cluster_id is still None\n it implies that data has not been sorted / curated\n \"\"\"\n # if self.cluster_id is None:\n # print(f\"Searching {os.path.join(self.fname_root)} and...\")\n # warnings.warn(\"No cluster_groups.tsv or cluster_group.csv file\n # was found.\\\n # Have you manually curated the data (e.g with phy?\")\n\n # HWPD 20200527\n # load cluster_info file and add X co-ordinate to it\n if fileExists(self.fname_root, \"cluster_info.tsv\"):\n self.cluster_info = pd.read_csv(\n os.path.join(self.fname_root, \"cluster_info.tsv\"), sep=\"\\t\"\n )\n if fileExists(\n self.fname_root, \"channel_positions.npy\") and fileExists(\n self.fname_root, \"channel_map.npy\"\n ):\n chXZ = np.load(\n os.path.join(self.fname_root, \"channel_positions.npy\"))\n chMap = np.load(\n os.path.join(self.fname_root, \"channel_map.npy\"))\n chID = np.asarray(\n [np.argmax(chMap == x) for x in\n self.cluster_info.ch.values]\n )\n self.cluster_info[\"chanX\"] = chXZ[chID, 0]\n self.cluster_info[\"chanY\"] = chXZ[chID, 1]\n\n dtype = {\"names\": (\"cluster_id\", \"KSLabel\"), \"formats\": (\"i4\", \"S10\")}\n # 'Raw' labels from a kilosort session\n if fileExists(self.fname_root, \"cluster_KSLabel.tsv\"):\n self.ks_cluster_id, self.ks_group = np.loadtxt(\n os.path.join(self.fname_root, \"cluster_KSLabel.tsv\"),\n unpack=True,\n skiprows=1,\n dtype=dtype,\n )\n if fileExists(self.fname_root, \"spike_clusters.npy\"):\n self.spk_clusters = np.squeeze(\n np.load(os.path.join(self.fname_root, \"spike_clusters.npy\"))\n )\n if fileExists(self.fname_root, \"spike_times.npy\"):\n self.spk_times = np.squeeze(\n np.load(os.path.join(self.fname_root, \"spike_times.npy\"))\n )\n return True\n warnings.warn(\n \"No spike times or clusters were found \\\n (spike_times.npy or spike_clusters.npy).\\\n You should run KiloSort\"\n )\n return False", "def _to_dataset(self):\n from mikeio import Dataset\n\n return Dataset(\n {self.name: self}\n ) # Single-item Dataset (All info is contained in the DataArray, no need for additional info)", "def get_cluster_stats(self, context, project_id):", "def build_synthetic_dataset(self):\n pass", "def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)", "def load_dataset(self) -> None:\n raise NotImplementedError", "def clustering_experiment(dataset_name, params, verbose=0, random_seed=0):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # loading data and breaking it into different dataset splits\n dataset = ClassificationDataset(data_path=CONFIG[\"paths\"][\"data_path\"],\n dataset_name=dataset_name,\n valid_size=0)\n imgs, labels = dataset.get_all_data()\n\n # computing scattering representations\n t0 = time.time()\n if(params.scattering):\n scattering_net, _ = scattering_layer(J=params.J, shape=(params.shape, params.shape),\n max_order=params.max_order, L=params.L)\n scattering_net = scattering_net.cuda() if device.type == 'cuda' else scattering_net\n print_(\"Computing scattering features for dataset...\", verbose=verbose)\n data = convert_images_to_scat(imgs, scattering=scattering_net,\n device=device, equalize=params.equalize,\n batch_size=params.batch_size,\n pad_size=params.shape)\n else:\n data = imgs\n n_labels = len(np.unique(labels))\n\n # reducing dimensionality of scattering features using pca\n if(params.pca == True):\n print_(f\"Reducidng dimensionality using PCA to {params.pca_dims}\", verbose=verbose)\n n_feats = data.shape[0]\n data = data.reshape(n_feats, -1)\n data = pca(data=data, 
target_dimensions=params.pca_dims)\n\n # POC preprocessing: removing the top directions of variance as a preprocessing step\n t1 = time.time()\n if(params.poc_preprocessing):\n print_(f\"POC algorithm removing {params.num_dims} directions\", verbose=verbose)\n poc = POC()\n poc.fit(data=data)\n proj_data = poc.transform(data=data, n_dims=params.num_dims)\n else:\n proj_data = data.reshape(data.shape[0], -1)\n\n # clustering using Ultra-Scalable Spectral Clustering\n t2 = time.time()\n if(params.uspec):\n print_(f\"Clustering using U-SPEC\", verbose=verbose)\n uspec = USPEC(p_interm=params.num_candidates, p_final=params.num_reps,\n n_neighbors=5, num_clusters=params.num_clusters,\n num_iters=100, random_seed=random_seed)\n preds = uspec.cluster(data=proj_data, verbose=verbose)\n else:\n print_(f\"Clustering using K-Means\", verbose=verbose)\n kmeans = KMeans(n_clusters=params.num_clusters, random_state=random_seed)\n kmeans = kmeans.fit(proj_data)\n preds = kmeans.labels_\n t3 = time.time()\n\n cluster_score, cluster_acc, cluster_nmi = compute_clustering_metrics(preds=preds,\n labels=labels)\n print_(f\"Clustering Accuracy: {round(cluster_acc,3)}\", verbose=verbose)\n print_(f\"Clustering ARI Score: {round(cluster_score,3)}\", verbose=verbose)\n print_(f\"Clustering ARI Score: {round(cluster_nmi,3)}\", verbose=verbose)\n\n # loading previous results, if any\n results_path = os.path.join(os.getcwd(), CONFIG[\"paths\"][\"results_path\"])\n _ = create_directory(results_path)\n if(params.fname != None and len(params.fname) > 0 and params.fname[-5:]==\".json\"):\n fname = params.fname\n else:\n poc = \"poc\" if params.poc_preprocessing==True else \"not-poc\"\n fname = f\"{dataset_name}_{poc}_clustering_results.json\"\n results_file = os.path.join(results_path, fname)\n if(os.path.exists(results_file)):\n with open(results_file) as f:\n data = json.load(f)\n n_exps = len(list(data.keys()))\n else:\n data = {}\n n_exps = 0\n # saving experiment parameters and results\n with open(results_file, \"w\") as f:\n cur_exp = {}\n cur_exp[\"params\"] = {}\n cur_exp[\"params\"][\"dataset\"] = dataset_name\n cur_exp[\"params\"][\"random_seed\"] = random_seed\n params = vars(params)\n for p in params:\n cur_exp[\"params\"][p] = params[p]\n cur_exp[\"results\"] = {}\n cur_exp[\"results\"][\"cluster_acc\"] = round(cluster_acc,3)\n cur_exp[\"results\"][\"cluster_ari\"] = round(cluster_score,3)\n cur_exp[\"results\"][\"cluster_nmi\"] = round(cluster_nmi,3)\n cur_exp[\"timing\"] = {}\n cur_exp[\"timing\"][\"scattering\"] = t1 - t0\n cur_exp[\"timing\"][\"preprocessing\"] = t2 - t1\n cur_exp[\"timing\"][\"clustering\"] = t3 - t2\n cur_exp[\"timing\"][\"total\"] = t3 - t0\n print_(cur_exp, verbose=verbose)\n data[f\"experiment_{n_exps}\"] = cur_exp\n json.dump(data, f)\n\n return", "def project_training_datasets_sink():\n return fs_utils._do_get_project_training_datasets_sink()", "def get_dataset(self):\n\n # https://developer.nvidia.com/blog/preparing-state-of-the-art-models-for-classification-and-object-detection-with-tlt/\n train_download = not os.path.exists(os.path.join(self.load_path, \"train\"))\n trainval_2012 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n trainval_2007 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, 
download=train_download)\n test_download = not os.path.exists(os.path.join(self.load_path, \"test\"))\n valset = datasets.VOCDetection(os.path.join(self.load_path, \"test\"), image_set='test',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=test_download)\n train_loader_2007 = torch.utils.data.DataLoader(trainval_2007, batch_size=1, shuffle=False, num_workers=2)\n train_loader_2012 = torch.utils.data.DataLoader(trainval_2012, batch_size=1, shuffle=False, num_workers=2)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n check = 0\n directories = [os.path.join(self.save_path, \"train\"), os.path.join(self.save_path, \"test\")]\n for directory in directories:\n if os.path.exists(directory):\n check += 1\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if check != len(directories):\n indices_data = {}\n # create folders to save data\n for loader_name, loader in [('train', train_loader_2007),\n ('train', train_loader_2012),\n ('test', val_loader)]:\n for (img, annotation) in tqdm(loader):\n\n #print(annotation)\n # there may be multiple labels, they are concatenated to: 'label1_label2_'\n label = ''\n int_label = []\n\n elems = annotation['annotation']['object']\n # if only 1 label - it is a dictionary, but not list of dictionaries\n # for consistency reasons and to be able to use the loop later\n if not isinstance(elems, list):\n elems = [elems]\n\n # get bboxes, compute object size, add all object sizes and divide by img size (h*w)\n obj_sizes = 0\n num_instances = 0\n\n for elem in elems:\n # every name is in a list\n # there may be multiple instances of the same object\n # those are disregarded for label\n\n if not (bool(int(elem['difficult'][0])) and loader_name == 'test'):\n if not str(self.class_to_idx[elem['name'][0]]) in label:\n label += str(self.class_to_idx[elem['name'][0]]) + '_'\n int_label.append(self.class_to_idx[elem['name'][0]])\n\n num_instances += 1\n # percentage of objects in the image: sum obj_size/img_size\n obj_sizes += (int(elem['bndbox']['xmax'][0]) - int(elem['bndbox']['xmin'][0])) * \\\n (int(elem['bndbox']['ymax'][0]) - int(elem['bndbox']['ymin'][0]))\n obj_sizes /= float(int(annotation['annotation']['size']['width'][0]) *\n int(annotation['annotation']['size']['height'][0]))\n\n img_name = label + '_' + annotation['annotation']['filename'][0]\n\n directory = os.path.join(os.path.join(self.save_path, loader_name), label)\n if not os.path.exists(directory):\n os.makedirs(directory)\n save_image(img, os.path.join(directory, img_name))\n\n indices_data[os.path.join(directory, img_name)] = (int_label,\n obj_sizes, num_instances)\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = collections.OrderedDict(sorted(indices_data.items()))\n\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': np.array(list(indices_data.values()), dtype=object)[:, 0],\n 'obj_sizes': np.array(list(indices_data.values()), dtype=object)[:, 1],\n 'num_instances': np.array(list(indices_data.values()), dtype=object)[:, 2]})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n train_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n # like in https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf\n # https://arxiv.org/pdf/1409.1556.pdf\n transforms.Resize(256), # resize smaller size 
to 256\n transforms.RandomCrop(self.args.patch_size), # 224\n transforms.ToTensor()\n ])\n\n test_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n transforms.Resize(256), # resize smaller size to 256\n transforms.CenterCrop((self.args.patch_size, self.args.patch_size)), # 224\n transforms.ToTensor()\n ])\n\n if self.args.compute_dataset_metrics is True:\n # when computing dataset metrics, an original image should be used\n # - without randomness of RandomCrop\n train_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n # if not already set, set batch-size to 1 for computing the metrics\n # due to different image sizes\n self.args.batch_size = 1\n\n # load the image dataset from folder with indices\n trainset = IndxImageFolder(root = os.path.join(self.save_path, \"train\"), transform=train_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n valset = IndxImageFolder(root=os.path.join(self.save_path, \"test\"), transform=test_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n\n return trainset, valset", "def fetch_dataset(self, dataset, train=True):\n homedir = str(pathlib.Path.home())\n if dataset == MNIST:\n return datasets.MNIST(\n homedir+'/data',\n train=train,\n download=train,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n\n if dataset == CIFAR10:\n if train:\n transforms_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n# transforms.Resize((299,299)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n return datasets.CIFAR10(\n homedir+'/data',\n train=True,\n download=True,\n transform=transforms_train)\n else:\n transforms_test = transforms.Compose([\n# transforms.Resize((299,299)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n return datasets.CIFAR10(\n homedir+'/data',\n train=False,\n download=False,\n transform=transforms_test)", "def data_center(self) -> str:\n raise NotImplementedError", "def set_cluster(self, data):\n cluster = Cluster(data['name'])\n for host in data['hosts']:\n cluster.add_host(**host)\n self._cluster = cluster", "def datacenter(self):\n\n # Lookup vsphere vars datacenters\n lookup = self.inventory[\"all\"][\"children\"][\"VMware\"][\"vars\"].get(\n \"datacenters\"\n )\n # Add VMware vars datacenters if it does not exist\n if lookup is None:\n self.inventory[\"all\"][\"children\"][\"VMware\"][\"vars\"][\n \"datacenters\"\n ] = {}\n # Add VMware datacenter info\n self.inventory[\"all\"][\"children\"][\"VMware\"][\"vars\"][\"datacenters\"][\n self.resource_config[\"name\"]\n ] = self.resource_config" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Project's unified dataset with associated clusters.
def published_clusters_with_data(self):

    unified_dataset = self.unified_dataset()
    name = unified_dataset.name + "_dedup_published_clusters_with_data"
    return self.client.datasets.by_name(name)
[ "def record_clusters_with_data(self):\n unified_dataset = self.unified_dataset()\n\n # Replace this workaround with a direct API call once API\n # is fixed. APIs that need to work are: fetching the dataset and\n # being able to call refresh on resulting dataset. Until then, we grab\n # the dataset by constructing its name from the corresponding Unified Dataset's name\n name = unified_dataset.name + \"_dedup_clusters_with_data\"\n return self.client.datasets.by_name(name)\n\n # super.__repr__ is sufficient", "def get_dataset(self, user, name):\n\n return super().get_repo(\"dataset\", user, name)", "def _produce_train_dataset(self):\r\n pass", "def dataset_setup(self):\n settings = self.settings\n if settings.crowd_dataset == CrowdDataset.ucf_qnrf:\n self.dataset_class = UcfQnrfFullImageDataset\n self.train_dataset = UcfQnrfTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfQnrfTransformedDataset(dataset='test', seed=101)\n elif settings.crowd_dataset == CrowdDataset.shanghai_tech:\n self.dataset_class = ShanghaiTechFullImageDataset\n self.train_dataset = ShanghaiTechTransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=settings.labeled_dataset_seed,\n number_of_examples=settings.labeled_dataset_size,\n map_directory_name=settings.map_directory_name,\n image_patch_size=self.settings.image_patch_size,\n label_patch_size=self.settings.label_patch_size)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = ShanghaiTechTransformedDataset(dataset='test', seed=101,\n map_directory_name=settings.map_directory_name,\n image_patch_size=self.settings.image_patch_size,\n label_patch_size=self.settings.label_patch_size)\n elif settings.crowd_dataset == CrowdDataset.ucf_cc_50:\n seed = 0\n self.dataset_class = UcfCc50FullImageDataset\n self.train_dataset = UcfCc50TransformedDataset(middle_transform=data.RandomHorizontalFlip(),\n seed=seed,\n test_start=settings.labeled_dataset_seed * 10,\n inverse_map=settings.inverse_map,\n map_directory_name=settings.map_directory_name)\n self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size,\n pin_memory=self.settings.pin_memory,\n num_workers=settings.number_of_data_workers)\n self.validation_dataset = UcfCc50TransformedDataset(dataset='test', seed=seed,\n test_start=settings.labeled_dataset_seed * 10,\n inverse_map=settings.inverse_map,\n map_directory_name=settings.map_directory_name)\n else:\n raise ValueError('{} is not an understood crowd dataset.'.format(settings.crowd_dataset))", "def load_dataset(self):", "def clouds(self) -> CloudsData:\n pass", "def load(self):\n import os\n\n import pandas as pd\n\n dtype = {'names': ('cluster_id', 'group'), 'formats': ('i4', 'S10')}\n # One of these (cluster_groups.csv or cluster_group.tsv) is from\n # kilosort and the other from kilosort2\n # and is updated by the user when doing cluster assignment in phy\n # See comments above this class definition for a bit more info\n if fileExists(self.fname_root, \"cluster_groups.csv\"):\n self.cluster_id, self.group = np.loadtxt(\n os.path.join(self.fname_root, 
\"cluster_groups.csv\"),\n unpack=True,\n skiprows=1,\n dtype=dtype\n )\n if fileExists(self.fname_root, \"cluster_group.tsv\"):\n self.cluster_id, self.group = np.loadtxt(\n os.path.join(self.fname_root, \"cluster_group.tsv\"),\n unpack=True,\n skiprows=1,\n dtype=dtype,\n )\n\n \"\"\"\n Output some information to the user if self.cluster_id is still None\n it implies that data has not been sorted / curated\n \"\"\"\n # if self.cluster_id is None:\n # print(f\"Searching {os.path.join(self.fname_root)} and...\")\n # warnings.warn(\"No cluster_groups.tsv or cluster_group.csv file\n # was found.\\\n # Have you manually curated the data (e.g with phy?\")\n\n # HWPD 20200527\n # load cluster_info file and add X co-ordinate to it\n if fileExists(self.fname_root, \"cluster_info.tsv\"):\n self.cluster_info = pd.read_csv(\n os.path.join(self.fname_root, \"cluster_info.tsv\"), sep=\"\\t\"\n )\n if fileExists(\n self.fname_root, \"channel_positions.npy\") and fileExists(\n self.fname_root, \"channel_map.npy\"\n ):\n chXZ = np.load(\n os.path.join(self.fname_root, \"channel_positions.npy\"))\n chMap = np.load(\n os.path.join(self.fname_root, \"channel_map.npy\"))\n chID = np.asarray(\n [np.argmax(chMap == x) for x in\n self.cluster_info.ch.values]\n )\n self.cluster_info[\"chanX\"] = chXZ[chID, 0]\n self.cluster_info[\"chanY\"] = chXZ[chID, 1]\n\n dtype = {\"names\": (\"cluster_id\", \"KSLabel\"), \"formats\": (\"i4\", \"S10\")}\n # 'Raw' labels from a kilosort session\n if fileExists(self.fname_root, \"cluster_KSLabel.tsv\"):\n self.ks_cluster_id, self.ks_group = np.loadtxt(\n os.path.join(self.fname_root, \"cluster_KSLabel.tsv\"),\n unpack=True,\n skiprows=1,\n dtype=dtype,\n )\n if fileExists(self.fname_root, \"spike_clusters.npy\"):\n self.spk_clusters = np.squeeze(\n np.load(os.path.join(self.fname_root, \"spike_clusters.npy\"))\n )\n if fileExists(self.fname_root, \"spike_times.npy\"):\n self.spk_times = np.squeeze(\n np.load(os.path.join(self.fname_root, \"spike_times.npy\"))\n )\n return True\n warnings.warn(\n \"No spike times or clusters were found \\\n (spike_times.npy or spike_clusters.npy).\\\n You should run KiloSort\"\n )\n return False", "def _to_dataset(self):\n from mikeio import Dataset\n\n return Dataset(\n {self.name: self}\n ) # Single-item Dataset (All info is contained in the DataArray, no need for additional info)", "def get_cluster_stats(self, context, project_id):", "def build_synthetic_dataset(self):\n pass", "def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)", "def load_dataset(self) -> None:\n raise NotImplementedError", "def clustering_experiment(dataset_name, params, verbose=0, random_seed=0):\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # loading data and breaking it into different dataset splits\n dataset = ClassificationDataset(data_path=CONFIG[\"paths\"][\"data_path\"],\n dataset_name=dataset_name,\n valid_size=0)\n imgs, labels = dataset.get_all_data()\n\n # computing scattering representations\n t0 = time.time()\n if(params.scattering):\n scattering_net, _ = scattering_layer(J=params.J, shape=(params.shape, params.shape),\n max_order=params.max_order, L=params.L)\n scattering_net = scattering_net.cuda() if device.type == 'cuda' else scattering_net\n print_(\"Computing scattering features for dataset...\", verbose=verbose)\n data = convert_images_to_scat(imgs, scattering=scattering_net,\n device=device, equalize=params.equalize,\n batch_size=params.batch_size,\n pad_size=params.shape)\n 
else:\n data = imgs\n n_labels = len(np.unique(labels))\n\n # reducing dimensionality of scattering features using pca\n if(params.pca == True):\n print_(f\"Reducidng dimensionality using PCA to {params.pca_dims}\", verbose=verbose)\n n_feats = data.shape[0]\n data = data.reshape(n_feats, -1)\n data = pca(data=data, target_dimensions=params.pca_dims)\n\n # POC preprocessing: removing the top directions of variance as a preprocessing step\n t1 = time.time()\n if(params.poc_preprocessing):\n print_(f\"POC algorithm removing {params.num_dims} directions\", verbose=verbose)\n poc = POC()\n poc.fit(data=data)\n proj_data = poc.transform(data=data, n_dims=params.num_dims)\n else:\n proj_data = data.reshape(data.shape[0], -1)\n\n # clustering using Ultra-Scalable Spectral Clustering\n t2 = time.time()\n if(params.uspec):\n print_(f\"Clustering using U-SPEC\", verbose=verbose)\n uspec = USPEC(p_interm=params.num_candidates, p_final=params.num_reps,\n n_neighbors=5, num_clusters=params.num_clusters,\n num_iters=100, random_seed=random_seed)\n preds = uspec.cluster(data=proj_data, verbose=verbose)\n else:\n print_(f\"Clustering using K-Means\", verbose=verbose)\n kmeans = KMeans(n_clusters=params.num_clusters, random_state=random_seed)\n kmeans = kmeans.fit(proj_data)\n preds = kmeans.labels_\n t3 = time.time()\n\n cluster_score, cluster_acc, cluster_nmi = compute_clustering_metrics(preds=preds,\n labels=labels)\n print_(f\"Clustering Accuracy: {round(cluster_acc,3)}\", verbose=verbose)\n print_(f\"Clustering ARI Score: {round(cluster_score,3)}\", verbose=verbose)\n print_(f\"Clustering ARI Score: {round(cluster_nmi,3)}\", verbose=verbose)\n\n # loading previous results, if any\n results_path = os.path.join(os.getcwd(), CONFIG[\"paths\"][\"results_path\"])\n _ = create_directory(results_path)\n if(params.fname != None and len(params.fname) > 0 and params.fname[-5:]==\".json\"):\n fname = params.fname\n else:\n poc = \"poc\" if params.poc_preprocessing==True else \"not-poc\"\n fname = f\"{dataset_name}_{poc}_clustering_results.json\"\n results_file = os.path.join(results_path, fname)\n if(os.path.exists(results_file)):\n with open(results_file) as f:\n data = json.load(f)\n n_exps = len(list(data.keys()))\n else:\n data = {}\n n_exps = 0\n # saving experiment parameters and results\n with open(results_file, \"w\") as f:\n cur_exp = {}\n cur_exp[\"params\"] = {}\n cur_exp[\"params\"][\"dataset\"] = dataset_name\n cur_exp[\"params\"][\"random_seed\"] = random_seed\n params = vars(params)\n for p in params:\n cur_exp[\"params\"][p] = params[p]\n cur_exp[\"results\"] = {}\n cur_exp[\"results\"][\"cluster_acc\"] = round(cluster_acc,3)\n cur_exp[\"results\"][\"cluster_ari\"] = round(cluster_score,3)\n cur_exp[\"results\"][\"cluster_nmi\"] = round(cluster_nmi,3)\n cur_exp[\"timing\"] = {}\n cur_exp[\"timing\"][\"scattering\"] = t1 - t0\n cur_exp[\"timing\"][\"preprocessing\"] = t2 - t1\n cur_exp[\"timing\"][\"clustering\"] = t3 - t2\n cur_exp[\"timing\"][\"total\"] = t3 - t0\n print_(cur_exp, verbose=verbose)\n data[f\"experiment_{n_exps}\"] = cur_exp\n json.dump(data, f)\n\n return", "def project_training_datasets_sink():\n return fs_utils._do_get_project_training_datasets_sink()", "def get_dataset(self):\n\n # https://developer.nvidia.com/blog/preparing-state-of-the-art-models-for-classification-and-object-detection-with-tlt/\n train_download = not os.path.exists(os.path.join(self.load_path, \"train\"))\n trainval_2012 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), 
image_set='trainval',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n trainval_2007 = datasets.VOCDetection(os.path.join(self.load_path, \"train\"), image_set='trainval',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=train_download)\n test_download = not os.path.exists(os.path.join(self.load_path, \"test\"))\n valset = datasets.VOCDetection(os.path.join(self.load_path, \"test\"), image_set='test',\n year='2007',\n transform=transforms.Compose([transforms.ToTensor()]),\n target_transform=None, download=test_download)\n train_loader_2007 = torch.utils.data.DataLoader(trainval_2007, batch_size=1, shuffle=False, num_workers=2)\n train_loader_2012 = torch.utils.data.DataLoader(trainval_2012, batch_size=1, shuffle=False, num_workers=2)\n val_loader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n check = 0\n directories = [os.path.join(self.save_path, \"train\"), os.path.join(self.save_path, \"test\")]\n for directory in directories:\n if os.path.exists(directory):\n check += 1\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if check != len(directories):\n indices_data = {}\n # create folders to save data\n for loader_name, loader in [('train', train_loader_2007),\n ('train', train_loader_2012),\n ('test', val_loader)]:\n for (img, annotation) in tqdm(loader):\n\n #print(annotation)\n # there may be multiple labels, they are concatenated to: 'label1_label2_'\n label = ''\n int_label = []\n\n elems = annotation['annotation']['object']\n # if only 1 label - it is a dictionary, but not list of dictionaries\n # for consistency reasons and to be able to use the loop later\n if not isinstance(elems, list):\n elems = [elems]\n\n # get bboxes, compute object size, add all object sizes and divide by img size (h*w)\n obj_sizes = 0\n num_instances = 0\n\n for elem in elems:\n # every name is in a list\n # there may be multiple instances of the same object\n # those are disregarded for label\n\n if not (bool(int(elem['difficult'][0])) and loader_name == 'test'):\n if not str(self.class_to_idx[elem['name'][0]]) in label:\n label += str(self.class_to_idx[elem['name'][0]]) + '_'\n int_label.append(self.class_to_idx[elem['name'][0]])\n\n num_instances += 1\n # percentage of objects in the image: sum obj_size/img_size\n obj_sizes += (int(elem['bndbox']['xmax'][0]) - int(elem['bndbox']['xmin'][0])) * \\\n (int(elem['bndbox']['ymax'][0]) - int(elem['bndbox']['ymin'][0]))\n obj_sizes /= float(int(annotation['annotation']['size']['width'][0]) *\n int(annotation['annotation']['size']['height'][0]))\n\n img_name = label + '_' + annotation['annotation']['filename'][0]\n\n directory = os.path.join(os.path.join(self.save_path, loader_name), label)\n if not os.path.exists(directory):\n os.makedirs(directory)\n save_image(img, os.path.join(directory, img_name))\n\n indices_data[os.path.join(directory, img_name)] = (int_label,\n obj_sizes, num_instances)\n\n # store img_paths which serves as indices and the labels for further analysis\n indices_data = collections.OrderedDict(sorted(indices_data.items()))\n\n dataframe = pd.DataFrame({'img_paths': list(indices_data.keys()),\n 'labels': np.array(list(indices_data.values()), dtype=object)[:, 0],\n 'obj_sizes': np.array(list(indices_data.values()), dtype=object)[:, 1],\n 'num_instances': np.array(list(indices_data.values()), dtype=object)[:, 2]})\n DatasetMetrics.indices_paths(self.name, dataframe)\n\n 
train_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n # like in https://papers.nips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf\n # https://arxiv.org/pdf/1409.1556.pdf\n transforms.Resize(256), # resize smaller size to 256\n transforms.RandomCrop(self.args.patch_size), # 224\n transforms.ToTensor()\n ])\n\n test_transform = transforms.Compose([\n # you can add other transformations in this list\n # resize (256 x remaining larger size) and RandomCrop(224)\n transforms.Resize(256), # resize smaller size to 256\n transforms.CenterCrop((self.args.patch_size, self.args.patch_size)), # 224\n transforms.ToTensor()\n ])\n\n if self.args.compute_dataset_metrics is True:\n # when computing dataset metrics, an original image should be used\n # - without randomness of RandomCrop\n train_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n test_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n # if not already set, set batch-size to 1 for computing the metrics\n # due to different image sizes\n self.args.batch_size = 1\n\n # load the image dataset from folder with indices\n trainset = IndxImageFolder(root = os.path.join(self.save_path, \"train\"), transform=train_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n valset = IndxImageFolder(root=os.path.join(self.save_path, \"test\"), transform=test_transform,\n num_classes=len(self.class_to_idx), multilabel=self.args.multilabel)\n\n return trainset, valset", "def fetch_dataset(self, dataset, train=True):\n homedir = str(pathlib.Path.home())\n if dataset == MNIST:\n return datasets.MNIST(\n homedir+'/data',\n train=train,\n download=train,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307, ), (0.3081, ))\n ]))\n\n if dataset == CIFAR10:\n if train:\n transforms_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n# transforms.Resize((299,299)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n return datasets.CIFAR10(\n homedir+'/data',\n train=True,\n download=True,\n transform=transforms_train)\n else:\n transforms_test = transforms.Compose([\n# transforms.Resize((299,299)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n return datasets.CIFAR10(\n homedir+'/data',\n train=False,\n download=False,\n transform=transforms_test)", "def data_center(self) -> str:\n raise NotImplementedError", "def set_cluster(self, data):\n cluster = Cluster(data['name'])\n for host in data['hosts']:\n cluster.add_host(**host)\n self._cluster = cluster", "def datacenter(self):\n\n # Lookup vsphere vars datacenters\n lookup = self.inventory[\"all\"][\"children\"][\"VMware\"][\"vars\"].get(\n \"datacenters\"\n )\n # Add VMware vars datacenters if it does not exist\n if lookup is None:\n self.inventory[\"all\"][\"children\"][\"VMware\"][\"vars\"][\n \"datacenters\"\n ] = {}\n # Add VMware datacenter info\n self.inventory[\"all\"][\"children\"][\"VMware\"][\"vars\"][\"datacenters\"][\n self.resource_config[\"name\"]\n ] = self.resource_config" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Binning model for this project.
def binning_model(self):
    alias = self.api_path + "/binningModel"
    # Cannot get this resource and so we hard code
    resource_json = {"relativeId": alias}
    return BinningModel.from_json(self.client, resource_json, alias)
[ "def model(self) -> Path:\n return self.path.parent / f\"{self.path.stem}.bin\"", "def build_model(self):\n ...", "def convert_to_binary(self):\n convert_to_binary_params = copy(self.base_params)\n convert_to_binary_params.update(\n {\n \"app\": \"ConvertToBinary: Meshing\",\n \"level\": None,\n \"foreground_masks\": \"\",\n \"run_conversion\": True,\n \"dst_level\": None,\n \"dst_image_type\": \"bin\",\n }\n )\n self.run_halted_queue(convert_to_binary_params, self.frame_chunks)", "def build_model(self):\r\n self.source_images, self.source_labels = self.dataloader.get_model_inputs()\r\n self.target_images, self.target_labels = self.dataloader.get_model_inputs()\r\n\r\n source_model = SimpleModel(self.source_images, self.source_labels, F.output_dim, scope='source_regressor')\r\n target_model = SimpleModel(self.target_images, self.target_labels, F.output_dim, scope='target_regressor')\r\n \r\n self.source_out, _ = source_model.get_model()\r\n self.target_out, _ = target_model.get_model()\r\n\r\n self.get_loss()", "def build(self):\n \n # Ensure layer of base model are not trainable for feature extraction\n for layer in self.baseModel.layers:\n layer.trainable = False\n \n headModel = self.baseModel.output\n headModel = Flatten(name=\"flatten\")(headModel)\n headModel = Dense(self.dense, activation=\"relu\")(headModel)\n headModel = Dropout(self.dropout)(headModel)\n headModel = Dense(self.dense, activation=\"relu\")(headModel)\n headModel = Dropout(self.dropout)(headModel)\n headModel = Dense(self.num_classes, activation=\"softmax\")(headModel)\n self.model = Model(inputs = self.baseModel.input, outputs = headModel)\n return", "def buildBaseModel(self, img_size):\n \n base_model = DenseNet121(weights='imagenet', include_top=False, \n input_shape = (img_size,img_size,3))\n x = base_model.output\n x = layers.GlobalAveragePooling2D()(x)\n predictions = layers.Dense(1, activation='sigmoid', name='last')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n model.load_weights(self.weights)\n return model", "def to_unbinned(self):\n return self", "def getBinormal(*args, **kwargs):\n \n pass", "def build_model():\n model_weights = np.load('models/sound8.npy').item()\n\n filter_parameters = [{'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8', 'num_filters': 1000, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n inputs = Input(shape=(None, 1)) # define inputs\n\n x = inputs\n for layer in filter_parameters:\n if 'conv8' not in layer['name']:\n x = ZeroPadding1D(padding=layer['padding'])(x)\n else:\n x = ZeroPadding1D(padding=layer['padding'])(conv7_layer_output)\n\n conv_layer = Conv1D(layer['num_filters'],\n 
kernel_size=layer['kernel_size'],\n strides=layer['conv_strides'],\n padding='valid', name=layer['name'])\n\n weights = model_weights[layer['name']]['weights'].reshape(conv_layer.get_weights()[0].shape)\n biases = model_weights[layer['name']]['biases']\n conv_layer.set_weights([weights, biases])\n\n x = conv_layer(x)\n\n if 'conv8' not in layer['name']: # except the last layers\n gamma = model_weights[layer['name']]['gamma']\n beta = model_weights[layer['name']]['beta']\n mean = model_weights[layer['name']]['mean']\n var = model_weights[layer['name']]['var']\n\n batch_norm = BatchNormalization()\n batch_norm.set_weights([gamma, beta, mean, var])\n x = batch_norm(x)\n x = Activation('relu')(x)\n if 'pool_size' in layer:\n x = MaxPooling1D(pool_size=layer['pool_size'],\n strides=layer['pool_strides'],\n padding='valid')(x)\n if layer['name'] == 'conv7':\n conv7_layer_output = x\n elif layer['name'] == 'conv8':\n imagenet_output = x\n elif layer['name'] == 'conv8_2':\n places_output = x\n\n model = Model(inputs=inputs,outputs=[imagenet_output, places_output])\n return model", "def create_output(self):\n\n if not self.normalized:\n log.warning(\"Data have not been divided by\"\n \" the sum of the weights.\")\n\n dtype = [('wavelength', self.wavelength_dtype),\n ('flux', self.net_dtype),\n ('error', self.net_dtype),\n ('net', self.net_dtype),\n ('dq', self.dq_dtype),\n ('weight', self.wavelength_dtype),\n ('n_input', np.float)]\n\n data = np.array(list(zip(self.wavelength,\n self.flux,\n self.error,\n self.net,\n self.dq,\n self.weight,\n self.count)), dtype=dtype)\n output_model = datamodels.CombinedSpecModel(spec_table=data)\n\n return output_model", "def basename_binning(self):\n return MultiDimBinning(d.basename_binning for d in self)", "def _remove_bn_from_model(self, model):\n for name, layer in model.named_modules():\n layer_type = get_layer_type(layer)\n\n if _exclude_layer(layer):\n continue\n\n if layer_type == 'BN':\n new_layer = []\n module, last_name = get_module_of_layer(model, name)\n module._modules[str(last_name)] = nn.Sequential(*new_layer)\n\n return model", "def model_to_binexptree(model: RawGPModelType) -> BinaryTree:\n infix_tokens = model_to_infix_tokens(model)\n postfix_tokens = infix_tokens_to_postfix_tokens(infix_tokens)\n tree = postfix_tokens_to_binexp_tree(postfix_tokens)\n return tree", "def get_model():\n return nn.Sequential(\n block(3, 32),\n block(32, 64),\n block(64, 128),\n block(128, 256),\n nn.AdaptiveAvgPool2d(1),\n nn.Flatten(),\n nn.Linear(256, 8))", "def Model1_B():\n M1_B = Model()\n return M1_B", "def load_model(self, number):\n reduced = self.binfile.read_a_model(number)\n # Reduce the model. 
Cannot be done if we use eval on properties.\n if self.reduced:\n reduced = bec_utils.ReducedModel(reduced, self.properties)\n return reduced", "def buildNihModel(self, img_size, label_len):\n \n base_model = DenseNet121(weights='imagenet', include_top=False, \n input_shape = (img_size,img_size,3))\n x = base_model.output\n x = layers.GlobalAveragePooling2D()(x)\n predictions = layers.Dense(label_len, activation='sigmoid', name='last')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n if not self.weights == 'imagenet':\n model.load_weights(self.weights)\n return model", "def set_binning(self, bin_size=(1, 1)):\n if \"__len__\" in dir(bin_size) and len(bin_size) >= 2:\n bin_size = int(round(float(bin_size[0]))), int(round(float(bin_size[1])))\n else:\n b = int(round(float(bin_size)))\n bin_size = (b, b)\n if bin_size != self._binning:\n ratioX = bin_size[1] / self._binning[1]\n ratioY = bin_size[0] / self._binning[0]\n if self.spline is not None:\n self.spline.bin((ratioX, ratioY))\n self._pixel2, self._pixel1 = self.spline.getPixelSize()\n self._splineCache = {}\n else:\n self._pixel1 *= ratioY\n self._pixel2 *= ratioX\n self._binning = bin_size\n self.shape = (self.max_shape[0] // bin_size[0],\n self.max_shape[1] // bin_size[1])", "def build_model(config):\r\n copy_config = copy.deepcopy(config)\r\n arch_type = copy_config.pop('type')\r\n\r\n assert arch_type in support_model, f'{arch_type} is not developed yet!, only {support_model} are support now'\r\n arch_model = eval(arch_type)(copy_config)\r\n # Model(copy_config)\r\n # print(\"****************\")\r\n # print(\"arch_model:\",arch_model)\r\n # print(\"****************\")\r\n return arch_model", "def run(self, model):\n nodes_with_bias = mu.get_nodes_by_type(model, [op['type'] for op in OPERATIONS_WITH_BIAS])\n quantized_weights_layout = {}\n for fq in mu.get_nodes_by_type(model, ['FakeQuantize']):\n node_input = nu.get_node_input(fq, 0)\n if node_input.type == 'Const':\n quantized_weights_layout[fq.fullname] = {'tensor': lambda tensor: tensor}\n\n self._engine.set_model(model)\n _, quantized_weights = self._engine.predict(quantized_weights_layout, range(1))\n\n for op_node in nodes_with_bias:\n if not nu.node_with_quantized_weights(op_node):\n continue\n self._weight_bias_correction_on_node(\n op_node, quantized_weights, self._safety_eps_variance_factor\n )\n\n return model" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests self.root of the ktree.
def test_ktree_root(ktree_four_nodes):
    assert ktree_four_nodes.root.val == 1
[ "def test_ktree_empty_root(ktree_empty):\n assert ktree_empty.root == None", "def test_root_node_on_init(five_bst):\n assert five_bst.root.val == 5", "def test_root_tree_manually(): # ***Incomplete test\n ##########################\n # Arrange.\n t = \"t\"\n tx = \"tx\"\n\n ##########################\n # Act.\n #x = root_tree_manually(t,\n #\t\ttx)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def test_bst_three_root(bst_three):\n assert bst_three.root.data == 10\n assert bst_three.root.left.data == 5\n assert bst_three.root.right.data == 15", "def test_root_node_none():\n bst = BinarySearchTree()\n assert not bst.root", "def test_first_tree(self):\n assert self.tree.edges[0].L == 0", "def is_root(self,p):\n return (self.root() == p)", "def test_get_tree(self):\n pass", "def test_rootAtMidpoint(self):\n nodes, tree = self.TreeNode, self.TreeRoot\n #works when the midpoint falls on an existing edge\n tree1 = deepcopy(tree)\n result = tree1.rootAtMidpoint()\n self.assertEqual(result.distance(result.getNodeMatchingName('e')), 4)\n self.assertEqual(result.getDistances(), tree1.getDistances())\n #works when the midpoint falls between two existing edges\n nodes['f'].Length = 1\n nodes['c'].Length = 4\n result = tree.rootAtMidpoint()\n self.assertEqual(result.distance(result.getNodeMatchingName('e')), 5.0)\n self.assertEqual(result.distance(result.getNodeMatchingName('g')), 5.0)\n self.assertEqual(result.distance(result.getNodeMatchingName('h')), 5.0)\n self.assertEqual(result.distance(result.getNodeMatchingName('d')), 2.0)\n self.assertEqual(result.getDistances(), tree.getDistances())", "def test_root_xml(self):\n self.assertEqual(\n self.target_xml_root,\n self.ccc.xml_root\n )", "def test_tree_inits(tree):\n assert isinstance(tree.root, object)", "def __init__(self, root):\n if not isinstance(root, TreeNode):\n root = TreeNode(root)\n self.root = root", "def empty_ktree():\n return KTree()", "def test_insert_root(self):\n\n new_root = TreeNode(-1, 0)\n self.binary_tree.insert_root(new_root)\n\n self.assertEqual(7, self.binary_tree.size())\n self.assertEqual(4, self.binary_tree.depth())\n self.assertEqual(-1, self.binary_tree.root().data())\n\n self.assertEqual([\n [(-1, 4, 0)],\n [(0, 3, 1)], \n [(1, 2, 2), (2, 2, 2)],\n [(4, 1, 3), (6, 1, 3)],\n [(7, 0, 4)]], self.binary_tree._level_order_traversal())", "def test_bst_initialized(bst_empty):\n assert bst_empty.root is None", "def test_nroot():\n calculator = Calculator()\n calculator.current_value = 4\n current_value = calculator.nroot(2)\n assert current_value == 2", "def test_rootAtMidpoint2(self):\n #also checks whether it works if the midpoint is adjacent to a tip\n nodes, tree = self.TreeNode, self.TreeRoot\n nodes['h'].Length = 20\n result = tree.rootAtMidpoint()\n self.assertEqual(result.distance(result.getNodeMatchingName('h')), 14)\n self.assertEqual(result.getDistances(), tree.getDistances())", "def is_root(self):\n return self.node_type() == 0", "def put(self, k):\n\tnew_node = node.Node(k)\n\n\tif(self.root==None):\n\n\t\tself.root=new_node\n\telse:\n\t\tcurrent_node = this.root\n\t\twhile(true):\n\n\t\t\tif (k<=current_node.get_key() and currentNode.get_left()==None):\n\t\t\t\t\tnew_node.set_parent(current_node)\n\t\t\t\t\tcurrentNode.set_left(new_node)\n\t\t\t\t\tcurrentNode.increment_subtree()\n\n\t\t\t\t\tbreak\n\n\t\t\telif(k>current_node.get_key() and 
current_node.get_right()==None):\n\t\t\t\t\tcurrent_node.increment_subtree()\n\t\t\t\t\tcurrent_node.set_right(new_node)\n\t\t\t\t\tnew_node.set_parent(current_node)\n\n\t\t\t\t\tbreak\n\n\t\t\telif(k<=current_node.get_key()):\n\t\t\t\t\tcurrent_node.increment_subtree()\n\t\t\t\t\tcurrent_node=current_node.get_left()\n\n\t\t\t\t\tcontinue\n\n\t\t\telif (k > current_node.get_key()):\n\n\t\t\t\t\tcurrent_node.increment_subtree()\n\t\t\t\t\tcurrent_node=current_node.get_right()\n\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprint(\"something went wrong\")\n\t\t\t\tpass", "def test_channel_create_chef_tree_is_set(chef_tree_root):\n assert chef_tree_root is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests for self.root on a passed empty ktree.
def test_ktree_empty_root(ktree_empty):
    assert ktree_empty.root == None
[ "def empty_ktree():\n return KTree()", "def test_root_node_none():\n bst = BinarySearchTree()\n assert not bst.root", "def test_empty(test_empty_tree):\n assert find(test_empty_tree) == False", "def test_bst_initialized(bst_empty):\n assert bst_empty.root is None", "def test_tree_is_empty(empty_bst):\n assert empty_bst.size() == 0", "def is_empty(self):\n if self.tree is None:\n return True;\n else:\n return False;", "def is_empty(self):\n return self.is_leaf(self.root)", "def is_root(self):\n return self.node_type() == 0", "def clean_tree(self):\n\n self.root_node = None", "def is_root(self,node) :\n if node.parent is None:\n return True\n else :\n return False", "def test_root_node_on_init(five_bst):\n assert five_bst.root.val == 5", "def test_delete_root_only_node(bst_empty):\n bst_empty.insert(1)\n bst_empty.delete(1)\n assert bst_empty.root is None", "def test_first_tree(self):\n assert self.tree.edges[0].L == 0", "def test_tree_inits(tree):\n assert isinstance(tree.root, object)", "def test_orphan_node(bst_empty):\n bst_empty.insert(1)\n assert bst_empty.root.parent is None", "def test_create_empty_treenode(create_empty_node):\n assert create_empty_node.val is 1\n assert create_empty_node.left is None\n assert create_empty_node.right is None\n assert create_empty_node.parent is None", "def test_empty_tree(self):\n tree = self.make_branch_and_tree('.')\n output = self.run_bzr('diff --stat-dir', retcode=0)[0]\n self.assertEquals(output, ' 0 files changed\\n')\n self.check_output_rules(output)", "def is_root(self,p):\n return (self.root() == p)", "def test_delete_root_node_only_node(bst):\n bst.insert(5)\n bst.delete(5)\n assert bst.root is None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default request keyword arguments to be passed to the requests library.
def _default_request_kwargs(self):
    default_kwargs = {
        'headers': {
            'Content-Type': 'application/json'
        },
        'hooks': {
            'response': self._verify_auth
        }
    }
    return default_kwargs
[ "def _default_request_kwargs(self):\n default_kwargs = {\n 'headers': {\n 'Content-Type': 'application/json'\n },\n 'hooks': {}\n }\n return default_kwargs", "def __init__(self, **requests_kwargs):\n self.requests_kwargs = requests_kwargs", "def get_request_kwargs(self):\n return {\n 'timeout': self.timeout,\n 'headers': self.get_headers(),\n }", "def get_request_kwargs(self):\n return {\n 'timeout': self.timeout,\n 'headers': self.get_headers()\n }", "def __init__(self, **kwargs):\n\n # Apply passed keyword arguments to the Request object.\n super(ObjectDetectionClearModels.Request, self).__init__(**kwargs)", "def __call__(self, **kwargs):\n self.default_params.update(kwargs)\n return self", "def __init__(self, request_args=None):\n super(CouchDBSession, self).__init__()\n self.request_args = request_args or {}", "def __init__(self, *default_headers): #, environ, start_response, url):\n self._default_headers={}\n leng = len(default_headers)\n leng -= leng % 2\n for i in range(0,leng,2):\n self._default_headers[default_headers[i]] = default_headers[i+1]\n #self.reset()", "def set_request_args(self, args: Dict[str, Any]) -> None:\n if self.scan_limit is not None:\n args[\"Limit\"] = self.scan_limit\n elif self.item_limit is not None:\n args[\"Limit\"] = max(self.item_limit, self.min_scan_limit)\n else:\n args.pop(\"Limit\", None)", "def kwargs():\n return {}", "def _default_options(cls):\n pass", "def __init__(self, allow_empty=False, method='get', parser=None, **kwargs):\n\n self.method = method\n self.allow_empty = allow_empty\n self.parser = parser\n super(WebRequestHelper, self).__init__(**kwargs)", "def _get_default_parameters(self):\n default_parameter = {}\n return default_parameter", "def set_defaults( ):\n __param=__default", "def _set_http_req_types(self):\n\n self.http_req_types = ('head', 'get', 'post', 'options')\n\n return", "def test_dict_optional_args(self, request_args):\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=False,\n )\n assert args['data'] == {'d1': 1, 'd2': 2}\n assert 'method' not in args\n assert 'url' not in args\n assert 'full_url' not in args", "def _process_kwargs(self, kwargs):\n kwarg_values = inspect.getargspec(self._fct).defaults\n if kwargs and kwarg_values:\n kwarg_names = inspect.getargspec(self._fct).args[-len(kwarg_values) :]\n self._kwargs_default = {k: v for k, v in zip(kwarg_names, kwarg_values)}", "def add_default_params(self, params):\r\n params['key'] = self.key\r\n params['format'] = self.format\r\n # params['unique_id'] = generate_unique_id()\r\n return params", "def test_request_init(self):\n\n\t\tself.assertEqual(self.request.path, '/index')\n\t\tself.assertEqual(self.request.method, 'GET')\n\t\tself.assertEqual(self.request._get_data, None)\n\t\tself.assertEqual(self.request._post_data, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the data from a ``requests.Response`` object.
def _get_response_data(self, response):
    try:
        _json = response.json()
        data = _json.get('data')
        return data
    except ValueError as ex:
        log.exception(ex)
        return None
[ "def __get_response(self):\n\n response = requests.get(self.URL)\n data = response.text\n parsed_data = BeautifulSoup(data, \"html.parser\")\n return parsed_data", "def extract_content(self, response):\n content = response.content\n return content", "def get_response_body(self, request_id):\n return self._make_request('GET', '/response_body?request_id={}'.format(request_id)) or None", "def data(self):\n return self.response.json()", "def json_of_response(response):\r\n\treturn json.loads(response.data.decode('utf8'))", "def get_response(self, response_pos=0):\n return self.responses[response_pos]", "def _get_response_text(self):\r\n\t\treturn self.response.text if hasattr(self.response, \"text\") else self.response.content", "def dict(self):\r\n return self._resp.dict", "def resp(self):\r\n return self._resp", "def response_to_dict(response):\n return {\n 'status': response.status,\n 'url': response.url,\n 'headers': response.headers.to_unicode_dict(),\n 'body': response.text,\n }", "def _get_content(self, response):\n method = response.request.method\n self.last_response = response\n\n server_error = {\n 'summary': None,\n 'details': None\n }\n\n try:\n content_json = response.json()\n if 'error' in content_json:\n e = content_json['error']\n if 'message' in e:\n server_error['summary'] = e['message']\n if 'detail' in e:\n server_error['details'] = e['detail']\n except ValueError:\n content_json = {}\n\n if method == 'DELETE':\n # Make sure the delete operation returned the expected response\n if response.status_code == 204:\n return {'success': True}\n else:\n raise UnexpectedResponse(\n 204, response.status_code, method,\n server_error['summary'], server_error['details']\n )\n # Make sure the POST operation returned the expected response\n elif method == 'POST' and response.status_code != 201:\n raise UnexpectedResponse(\n 201, response.status_code, method,\n server_error['summary'], server_error['details']\n )\n # It seems that Helsinki and later returns status 200 instead of 404 on empty result sets\n if ('result' in content_json and len(content_json['result']) == 0) or response.status_code == 404:\n if self.raise_on_empty is False:\n content_json['result'] = [{}]\n else:\n raise NoResults('Query yielded no results')\n elif 'error' in content_json:\n raise UnexpectedResponse(\n 200, response.status_code, method,\n server_error['summary'], server_error['details']\n )\n\n return content_json['result']", "def get_content(response: HttpResponse) -> str:\n return response.content.decode()", "def get_response(self, url):\n self.response = requests.get(url)", "def get_response_content(response, decode=True):\n contents = \"\"\n if response.streaming:\n actual_content = BytesIO()\n for content in response.streaming_content:\n actual_content.write(content)\n contents = actual_content.getvalue()\n actual_content.close()\n else:\n contents = response.content\n\n if decode:\n return contents.decode(\"utf-8\")\n return contents", "def build_response(self):\n response = requests.request(\"GET\", self.url, stream=True)\n if response.status_code != 200:\n raise Exception(\"Cannot get stream (HTTP {}): {}\".format(response.status_code, response.text))\n data = str(response.content)\n data = json.loads(data[data.index(\"b'\")+2:data.index(\"\\\\n'\")])\n if type(data) is dict:\n temp = list()\n temp.append(data)\n data = temp\n for record in data:\n yield str({f: record[f] for f in self.request_fields})", "def extract_response_dict(response):\n if response.status_code == 200:\n response_dict = 
json.loads(response.text)\n else:\n logging.warning('Twitter API did not return a 200 HTTP code: %s',\n response.text)\n response_dict = None\n\n return response_dict", "def _tear_down_response(data):\n response_header = data[2:17]\n # Below is actually not used\n response_payload_size = data[18]\n response_payload = data[19:-2]\n response_end = data[-2:]\n return response_header, response_payload, response_end", "def http_response_body(self):\n return self._response_body", "def _parse(self, response):\n return json.loads(response.text)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A callback handler to verify that the given response object did not receive a 401.
def _verify_auth(self, resp, *args, **kwargs):
    if resp.status_code == 401:
        raise errors.AuthFailure(
            'Received response code 401 from {} {}.'
            .format(resp.request.method, resp.request.path_url)
        )
[ "def assertResponseUnauthorized(self, response):\n self.assertResponseCodeEquals(response, status.HTTP_401_UNAUTHORIZED)", "def unauthorized_handler(self, callback):\n self.unauthorized_callback = callback\n return callback", "def unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 401)", "def invalidAuthResponse(responseData: object) -> object:\n responseObj = make_response((jsonify(responseData), 401, [(\"Access-Control-Allow-Origin\", \"*\")]))\n return responseObj", "def invalid_token(callback):\n return make_response(render_template(\n \"components/401.html\"))", "def unauthorized(error):\n return render_template('401.html'), 401", "def auth_failure(self):\n if self._unauthorized_handler:\n return self._unauthorized_handler()\n else:\n raise Unauthorized(\"User identity is required\")", "def test_status_code_code_for_empty_authorization_value(self):\n\n resp = HttpResponse()\n http_response = resp. get_http_reponse()\n expected_status_code = 401\n received_status_code = http_response.status_code\n self.assertEqual(expected_status_code, received_status_code)", "def test_03_api_get_events_unauthorized(self):\n response = self.app.get('/api/events')\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data['error'], 'Unauthorized access')", "def test_handler_forbidden(lambda_module, apigateway_event, context):\n\n apigateway_event = copy.deepcopy(apigateway_event)\n del apigateway_event[\"requestContext\"][\"identity\"]\n\n # Send request\n response = lambda_module.handler(apigateway_event, context)\n\n assert response[\"statusCode\"] == 401\n assert \"body\" in response\n body = json.loads(response[\"body\"])\n assert \"message\" in body\n assert isinstance(body[\"message\"], str)", "def test_unauthorized_view_fails(self):\n response = self.api_client.get('/account/', format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def unauthorized_callback():\n return redirect(url_for('auth.login'))", "def test_get_an_interest_by_unauthenticated_user_fails(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_body[\"SubCode\"], \"InvalidToken\")", "def test_forbid(self):\n ret = auth._forbid(self.request)\n self.assertEqual(ret.status_code, 401)", "def _is_successful_response(self, response):\n return response.status_code < 300", "def test_protected_resource_access_denied():\n with client.session_transaction() as local_session:\n local_session.clear()\n rv = client.get('/api/auth/me',\n content_type='application/json')\n assert rv.status_code == 401\n response = json.loads(rv.data)\n assert not response['authenticated']\n assert response['message'] == ('Invalid or nonexistent token. 
'\n 'Please get a new token.')", "def test_custom_failure(self):\n UNAUTHORIZED = object()\n def error_handler(request, resource, errors):\n return UNAUTHORIZED\n class Resource(object):\n @guard.guard(make_checker(False), error_handler=error_handler)\n def __call__(self, request):\n pass\n request = http.Request.blank('/')\n assert Resource()(request) is UNAUTHORIZED", "def not_authorized(error):\n\n return make_response(jsonify({'Error': 'Not Authorized'}), 405)", "async def raise_for_status_event_handler(response: httpx.Response):\n if response.is_error and response.status_code != 429:\n try:\n response.raise_for_status()\n except httpx.HTTPStatusError as ex:\n await handle_httpstatuserror(ex, log_handler=_LOGGER)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default request keyword arguments to be passed to the requests library.
def _default_request_kwargs(self):
    default_kwargs = {
        'headers': {
            'Content-Type': 'application/json'
        },
        'hooks': {}
    }
    return default_kwargs
[ "def _default_request_kwargs(self):\n default_kwargs = {\n 'headers': {\n 'Content-Type': 'application/json'\n },\n 'hooks': {\n 'response': self._verify_auth\n }\n }\n return default_kwargs", "def __init__(self, **requests_kwargs):\n self.requests_kwargs = requests_kwargs", "def get_request_kwargs(self):\n return {\n 'timeout': self.timeout,\n 'headers': self.get_headers(),\n }", "def get_request_kwargs(self):\n return {\n 'timeout': self.timeout,\n 'headers': self.get_headers()\n }", "def __init__(self, **kwargs):\n\n # Apply passed keyword arguments to the Request object.\n super(ObjectDetectionClearModels.Request, self).__init__(**kwargs)", "def __call__(self, **kwargs):\n self.default_params.update(kwargs)\n return self", "def __init__(self, request_args=None):\n super(CouchDBSession, self).__init__()\n self.request_args = request_args or {}", "def __init__(self, *default_headers): #, environ, start_response, url):\n self._default_headers={}\n leng = len(default_headers)\n leng -= leng % 2\n for i in range(0,leng,2):\n self._default_headers[default_headers[i]] = default_headers[i+1]\n #self.reset()", "def set_request_args(self, args: Dict[str, Any]) -> None:\n if self.scan_limit is not None:\n args[\"Limit\"] = self.scan_limit\n elif self.item_limit is not None:\n args[\"Limit\"] = max(self.item_limit, self.min_scan_limit)\n else:\n args.pop(\"Limit\", None)", "def kwargs():\n return {}", "def _default_options(cls):\n pass", "def __init__(self, allow_empty=False, method='get', parser=None, **kwargs):\n\n self.method = method\n self.allow_empty = allow_empty\n self.parser = parser\n super(WebRequestHelper, self).__init__(**kwargs)", "def _get_default_parameters(self):\n default_parameter = {}\n return default_parameter", "def set_defaults( ):\n __param=__default", "def _set_http_req_types(self):\n\n self.http_req_types = ('head', 'get', 'post', 'options')\n\n return", "def test_dict_optional_args(self, request_args):\n args = request_args.dict_optional_args(\n autocast_arguments_to_string=False,\n )\n assert args['data'] == {'d1': 1, 'd2': 2}\n assert 'method' not in args\n assert 'url' not in args\n assert 'full_url' not in args", "def _process_kwargs(self, kwargs):\n kwarg_values = inspect.getargspec(self._fct).defaults\n if kwargs and kwarg_values:\n kwarg_names = inspect.getargspec(self._fct).args[-len(kwarg_values) :]\n self._kwargs_default = {k: v for k, v in zip(kwarg_names, kwarg_values)}", "def add_default_params(self, params):\r\n params['key'] = self.key\r\n params['format'] = self.format\r\n # params['unique_id'] = generate_unique_id()\r\n return params", "def test_request_init(self):\n\n\t\tself.assertEqual(self.request.path, '/index')\n\t\tself.assertEqual(self.request.method, 'GET')\n\t\tself.assertEqual(self.request._get_data, None)\n\t\tself.assertEqual(self.request._post_data, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adjust the Amazon Web Services and/or Rackspace ACL Sync feature for this instance.
def acl_sync(self, aws_sync=None, rackspace_sync=None):
    url = self._url + 'acl_sync'
    data = {"aws_acl_sync_enabled": False, "rackspace_acl_sync_enabled": False}

    # Let's get current status of acl sync for this instance to set proper defaults.
    response = requests.get(url, **self._instances._default_request_kwargs)
    if response.status_code == 200:
        resp_json = response.json()
        current_status = resp_json.get('data', {})
        current_aws_sync_status = current_status.get("aws_acl_sync_enabled", False)
        current_rax_sync_status = current_status.get("rackspace_acl_sync_enabled", False)
        data.update({
            "aws_acl_sync_enabled": current_aws_sync_status,
            "rackspace_acl_sync_enabled": current_rax_sync_status
        })

        if aws_sync is not None:
            data.update({"aws_acl_sync_enabled": aws_sync})
        if rackspace_sync is not None:
            data.update({"rackspace_acl_sync_enabled": rackspace_sync})

        response = requests.put(url, json=data, **self._instances._default_request_kwargs)
        return response.json()
    else:
        raise errors.ObjectRocketException(
            "Couldn't get current status of instance, failing. Error: {}".format(response.text)
        )
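A hypothetical call site for the method above (the instance object and the shape of the return value are assumptions, not taken from this dataset):

# Enable AWS ACL sync while leaving the Rackspace setting at its current value.
result = instance.acl_sync(aws_sync=True)

# Disable both sync features explicitly.
result = instance.acl_sync(aws_sync=False, rackspace_sync=False)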
[ "def update_autoload(self):\n if self.autoload_enabled and not self.get_current_configuration()[\"host_connectivity_reporting_enabled\"]:\n try:\n rc, host_connectivity_reporting = self.request(\"storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true\" % self.ssid,\n method=\"POST\", data={\"enableHostConnectivityReporting\": self.autoload_enabled})\n except Exception as error:\n self.module.fail_json(msg=\"Failed to enable host connectivity reporting which is needed for automatic load balancing state.\"\n \" Array [%s]. Error [%s].\" % (self.ssid, to_native(error)))\n\n try:\n rc, autoload = self.request(\"storage-systems/%s/symbol/setAutoLoadBalancing?verboseErrorResponse=true\" % self.ssid,\n method=\"POST\", data={\"enableAutoLoadBalancing\": self.autoload_enabled})\n except Exception as error:\n self.module.fail_json(msg=\"Failed to set automatic load balancing state. Array [%s]. Error [%s].\" % (self.ssid, to_native(error)))", "def register_swap(cls):\n data = cls.config_caesar('on')\n address = Address.root()\n\n logger.info('PUT to %s with %s', address, str(data))\n auth_header = AuthCaesar().auth()\n print(auth_header)\n r = requests.put(address, headers=auth_header, json=data)\n logger.info('done')\n\n return r", "def change_sriov_adaptermode(self, ip, managedsystem_uuid, adapter_object, x_api_session):\n super().__init__(ip, self.root, self.content_type, x_api_session)\n directory = os.path.dirname(__file__)\n adapter_id = adapter_object.AdapterID.value()\n if adapter_object.AdapterMode == \"Dedicated\":\n xml = open(directory+\"/data/dedicated_to_shared_adaptermode.xml\").read()\n else:\n xml = open(directory+\"/data/shared_to_dedicated_adaptermode.xml\").read()\n xml = xml%(adapter_id)\n http_object = HTTPClient.HTTPClient(\"uom\", ip,\n self.root, self.content_type,\n x_api_session)\n http_object.HTTPPut(xml, append=\"/\"+managedsystem_uuid+\"/do/ModifySRIOVAdapterMode\")\n if http_object.response_b:\n self.get_job_status(http_object)", "async def do_update(self, data):\n verrors = ValidationErrors()\n old = await self.config()\n new = old.copy()\n new.update(data)\n new['domainname'] = new['domainname'].upper()\n try:\n await self.update_netbios_data(old, new)\n except Exception as e:\n raise ValidationError('activedirectory_update.netbiosname', str(e))\n\n await self.common_validate(new, old, verrors)\n\n if verrors:\n raise verrors\n\n if new['enable'] and not old['enable']:\n try:\n await self.middleware.run_in_thread(self.validate_credentials, new)\n except Exception as e:\n raise ValidationError(\n \"activedirectory_update.bindpw\",\n f\"Failed to validate bind credentials: {e}\"\n )\n\n try:\n await self.middleware.run_in_thread(self.validate_domain, new)\n except ntplib.NTPException:\n self.logger.warning(\"NTP request to Domain Controller failed.\",\n exc_info=True)\n except Exception as e:\n raise ValidationError(\n \"activedirectory_update\",\n f\"Failed to validate domain configuration: {e}\"\n )\n\n new = await self.ad_compress(new)\n await self.middleware.call(\n 'datastore.update',\n 'directoryservice.activedirectory',\n old['id'],\n new,\n {'prefix': 'ad_'}\n )\n\n start = False\n stop = False\n\n if not old['enable']:\n if new['enable']:\n start = True\n else:\n if not new['enable']:\n stop = True\n\n job = None\n if stop:\n await self.stop()\n if start:\n job = (await self.middleware.call('activedirectory.start')).id\n\n if not stop and not start and new['enable']:\n await self.middleware.call('service.restart', 'cifs')\n ret = 
await self.config()\n ret.update({'job_id': job})\n return ret", "def sync_supported(self):", "def update_system_mode_config(self, context):\n personalities = [constants.CONTROLLER]\n\n # Update manifest files if system mode is updated for simplex to\n # duplex migration\n system = self.dbapi.isystem_get_one()\n if system.capabilities.get('simplex_to_duplex_migration') or \\\n system.capabilities.get('simplex_to_duplex-direct_migration'):\n config_uuid = self._config_update_hosts(context, personalities)\n\n config_dict = {\n \"personalities\": personalities,\n \"classes\": ['platform::kubernetes::duplex_migration::runtime'],\n }\n self._config_apply_runtime_manifest(context, config_uuid, config_dict)\n\n self._config_update_hosts(context, personalities, reboot=True)", "def _set_attr_reg(self):\n tmos_v = self._meta_data['bigip']._meta_data['tmos_version']\n attributes = self._meta_data['attribute_registry']\n v12kind = 'tm:asm:policies:blocking-settings:blocking-' \\\n 'settingcollectionstate'\n v11kind = 'tm:asm:policies:blocking-settings'\n if LooseVersion(tmos_v) < LooseVersion('12.0.0'):\n attributes[v11kind] = Blocking_Settings\n else:\n attributes[v12kind] = Blocking_Settings", "def aws_update_autoscaler():\r\n ami_id = aws_create_ami_from()\r\n cur_date = time.strftime('%Y%m%d', time.gmtime())\r\n lcName = 'ns11-%s' % cur_date\r\n lc = LaunchConfiguration(name=lcName, \r\n image_id=ami_id, instance_type=env.aws.get('instance_type'),\r\n key_name=env.aws.get('key_pair'), \r\n security_groups=env.aws.get('security_groups'))\r\n env.asConn.create_launch_configuration(lc)\r\n print \"Created launchConfiguration %s\" % lcName\r\n \r\n ag = AutoScalingGroup(\r\n connection=env.asConn,\r\n launch_config=lc, \r\n group_name=env.aws.get('as_group'), load_balancers=env.aws.get('balancers'),\r\n availability_zones=env.aws.get('availability_zones'))\r\n # min_size=env.aws.get('min_size'), max_size=env.aws.get('max_size'))\r\n ag.update()\r\n # env.asConn.create_auto_scaling_group(ag) \r\n print \"Added launchConfiguration %s to group %s (updated AutoScaleGroup)\" % (lcName, env.aws.get('as_group'))", "def enable_sr_iov(self, instance, ssh_client):\n conn = self.conn or self.vpc_conn\n log.info('Enabling SR-IOV on {}'.format(instance.id))\n if ssh_client:\n util_path = os.path.dirname(os.path.realpath(__file__))\n ssh_client.put_file(os.path.join(util_path, 'tests', 'enable_sr_iov.sh'),\n '/tmp/enable_sr_iov.sh')\n ssh_client.run('chmod +x /tmp/enable_sr_iov.sh')\n ssh_client.run(\"sed -i 's/\\r//' /tmp/enable_sr_iov.sh\")\n ssh_client.run('/tmp/enable_sr_iov.sh {}'.format(self.instancetype))\n conn.stop_instances(instance_ids=[instance.id])\n self.wait_for_state(instance, 'state', 'stopped')\n if self.instancetype in [constants.AWS_P28XLARGE, constants.AWS_M416XLARGE]:\n log.info('Enabling ENA for instance: {}'.format(self.instancetype))\n import boto3\n client = boto3.client('ec2', region_name=self.region, aws_access_key_id=self.keyid,\n aws_secret_access_key=self.secret)\n client.modify_instance_attribute(InstanceId=instance.id, Attribute='enaSupport',\n Value='true')\n log.info('ENA support: {}'.format(client.__dict__))\n try:\n log.info(conn.get_instance_attribute(instance.id, 'enaSupport'))\n except Exception as e:\n log.info(e)\n pass\n # conn.modify_instance_attribute(instance.id, 'enaSupport', True)\n # ena_status = conn.get_instance_attribute(instance.id, 'enaSupport')\n # log.info('ENA status for {} instance: {}'.format(constants.AWS_P28XLARGE,\n # ena_status))\n elif 
self.instancetype == constants.AWS_D24XLARGE:\n conn.modify_instance_attribute(instance.id, 'sriovNetSupport', 'simple')\n sriov_status = conn.get_instance_attribute(instance.id, 'sriovNetSupport')\n log.info(\"SR-IOV status is: {}\".format(sriov_status))\n else:\n log.error('Instance type {} unhandled for SRIOV'.format(self.instancetype))\n return None\n conn.start_instances(instance_ids=[instance.id])\n self.wait_for_state(instance, 'state', 'running')\n\n return self.wait_for_ping(instance)", "def _cmd_resync(self):\n self.ctx.awaiting_bridge = True", "def do_change_algorithm(self, args):\n lb = self.findlb(args.loadbalancer)\n lb.algorithm = algorithm_shorthand(args.algorithm)\n lb.update()", "def set_sync_mode(self, sync_mode):\n self._api.set_sync_mode(sync_mode)\n self.update()", "def change_operation(self, context):\n info = self.operations_settings[self.operation]\n params = info['params']\n for i in range(3):\n if i in params:\n self.inputs[i].enabled = True\n self.inputs[i].name = params[i]\n else:\n self.inputs[i].enabled = False\n if BLENDER_VERSION >= \"3.1\" and context:\n self.socket_value_update(context)", "def change_autoload_enabled_required(self):\n if self.autoload_enabled is None:\n return False\n\n change_required = False\n if self.autoload_enabled and not self.get_current_configuration()[\"autoload_capable\"]:\n self.module.fail_json(msg=\"Automatic load balancing is not available. Array [%s].\" % self.ssid)\n\n if self.autoload_enabled:\n if not self.get_current_configuration()[\"autoload_enabled\"] or not self.get_current_configuration()[\"host_connectivity_reporting_enabled\"]:\n change_required = True\n elif self.get_current_configuration()[\"autoload_enabled\"]:\n change_required = True\n\n return change_required", "def updateForAPlus():\n oldSensitivePermissionList = [\"READ_USER_DICTIONARY\", \"READ_SMS\", \"WRITE_SOCIAL_STREAM\",\n \"RECEIVE_MMS\", \"SUBSCRIBED_FEEDS_WRITE\", \"WRITE_HISTORY_BOOKMARKS\", \"BIND_VPN_SERVICE\",\n \"CLEAR_APP_CACHE\", \"USE_CREDENTIALS\", \"KILL_BACKGROUND_PROCESSES\", \"PROCESS_OUTGOING_CALLS\",\n \"CHANGE_NETWORK_STATE\", \"READ_PROFILE\", \"WRITE_EXTERNAL_STORAGE\", \"UNINSTALL_SHORTCUT\",\n \"ADD_VOICEMAIL\", \"BIND_NFC_SERVICE\", \"BLUETOOTH_ADMIN\", \"CHANGE_WIFI_MULTICAST_STATE\",\n \"WRITE_CALL_LOG\", \"WRITE_CALENDAR\", \"CHANGE_WIMAX_STATE\", \"NFC\", \"WRITE_CONTACTS\",\n \"READ_CELL_BROADCASTS\", \"READ_PRECISE_PHONE_STATE\", \"READ_SOCIAL_STREAM\", \"USE_SIP\",\n \"READ_HISTORY_BOOKMARKS\", \"INSTALL_SHORTCUT\", \"RECEIVE_WAP_PUSH\", \"READ_CALENDAR\",\n \"WRITE_PROFILE\", \"BIND_DEVICE_ADMIN\", \"BLUETOOTH_STACK\", \"BRICK\", \"WRITE_SMS\", \"INTERNET\",\n \"CHANGE_WIFI_STATE\", \"AUTHENTICATE_ACCOUNTS\", \"BLUETOOTH\", \"ACCESS_MOCK_LOCATION\",\n \"READ_CONTACTS\", \"READ_CALL_LOG\", \"RECEIVE_SMS\", \"MANAGE_ACCOUNTS\", \"SYSTEM_ALERT_WINDOW\",\n \"GET_TASKS\", \"DISABLE_KEYGUARD\", \"RECORD_AUDIO\", \"GET_ACCOUNTS\", \"ACCESS_COARSE_LOCATION\",\n \"READ_PHONE_STATE\", \"ACCESS_FINE_LOCATION\", \"CALL_PHONE\", \"CAMERA\", \"SEND_SMS\"]\n db.packagePair.update(\n {\"manifestPermissions\": {'$nin':oldSensitivePermissionList}},\n {'$set': {'level': \"A+\"}},\n multi=True)\n\n\n newSensitivePermissionList = [\"REVOKE_RUNTIME_PERMISSIONS\", \"OBSERVE_GRANT_REVOKE_PERMISSIONS\",\n \"INTERACT_ACROSS_USERS_FULL\", \"CONFIGURE_WIFI_DISPLAY\", \"\", \"PEERS_MAC_ADDRESS\",\n \"PROCESS_PHONE_ACCOUNT_REGISTRATION\", \"SERIAL_PORT\", \"MASTER_CLEAR\", \"READ_SYNC_SETTINGS\",\n \"UPDATE_APP_OPS_STATS\", \"FACTORY_TEST\", 
\"SET_ALWAYS_FINISH\", \"INSTALL_AS_USER\",\n \"ACCESS_SURFACE_FLINGER\", \"READ_SEARCH_INDEXABLES\", \"SET_ACTIVITY_WATCHER\",\n \"SET_SCREEN_COMPATIBILITY\", \"GET_PACKAGE_SIZE\", \"REGISTER_SIM_SUBSCRIPTION\",\n \"PERFORM_CDMA_PROVISIONING\", \"RESET_FINGERPRINT_LOCKOUT\", \"CONTROL_LOCATION_UPDATES\",\n \"SEND_RESPOND_VIA_MESSAGE\", \"SET_KEYBOARD_LAYOUT\", \"CARRIER_FILTER_SMS\", \"WRITE_GSERVICES\", \"BIND_VOICE_INTERACTION\", \"READ_VOICEMAIL\", \"COPY_PROTECTED_DATA\", \"AUTHENTICATE_ACCOUNTS\",\n \"STOP_APP_SWITCHES\", \"RESTART_PACKAGES\", \"ACCESS_ALL_PRINT_JOBS\", \"CALL_PRIVILEGED\",\n \"CAPTURE_SECURE_VIDEO_OUTPUT\", \"DELETE_PACKAGES\", \"ASEC_ACCESS\", \"RECORD_AUDIO\",\n \"WRITE_PROFILE\", \"READ_SOCIAL_STREAM\", \"ASEC_CREATE\", \"SEND_CALL_LOG_CHANGE\",\n \"GRANT_RUNTIME_PERMISSIONS\", \"UNINSTALL_SHORTCUT\", \"INTERACT_ACROSS_USERS\",\n \"ACCESS_CHECKIN_PROPERTIES\", \"SET_ORIENTATION\", \"STATUS_BAR_SERVICE\", \"PACKAGE_USAGE_STATS\",\n \"GLOBAL_SEARCH\", \"CHANGE_WIFI_STATE\", \"READ_PRECISE_PHONE_STATE\", \"FORCE_STOP_PACKAGES\",\n \"KILL_BACKGROUND_PROCESSES\", \"SET_TIME_ZONE\", \"BLUETOOTH_ADMIN\", \"BLUETOOTH_PRIVILEGED\",\n \"INSTALL_GRANT_RUNTIME_PERMISSIONS\", \"INJECT_EVENTS\", \"WRITE_SYNC_SETTINGS\", \"MANAGE_DOCUMENTS\",\n \"MANAGE_ACCOUNTS\", \"SEND_DOWNLOAD_COMPLETED_INTENTS\", \"INTERNAL_SYSTEM_WINDOW\",\n \"BIND_APPWIDGET\", \"BLUETOOTH_MAP\", \"ASEC_MOUNT_UNMOUNT\", \"SET_PREFERRED_APPLICATIONS\", \"NFC\",\n \"MODIFY_AUDIO_ROUTING\", \"OVERRIDE_WIFI_CONFIG\", \"PACKAGE_VERIFICATION_AGENT\",\n \"READ_INSTALL_SESSIONS\", \"READ_HISTORY_BOOKMARKS\", \"BRICK\", \"LOCATION_HARDWARE\",\n \"BROADCAST_PACKAGE_REMOVED\", \"CRYPT_KEEPER\", \"SET_PROCESS_LIMIT\",\n \"MODIFY_APPWIDGET_BIND_PERMISSIONS\", \"NOTIFY_PENDING_SYSTEM_UPDATE\",\n \"READ_NETWORK_USAGE_HISTORY\", \"READ_PHONE_STATE\", \"WRITE_SETTINGS\", \"WRITE_SECURE_SETTINGS\",\n \"WRITE_SOCIAL_STREAM\", \"USE_CREDENTIALS\", \"UPDATE_DEVICE_STATS\", \"SEND_SMS\",\n \"WRITE_USER_DICTIONARY\", \"ACCESS_COARSE_LOCATION\", \"CHANGE_WIFI_MULTICAST_STATE\", \"READ_SMS\",\n \"READ_EXTERNAL_STORAGE\", \"SET_INPUT_CALIBRATION\", \"ASEC_RENAME\", \"MANAGE_VOICE_KEYPHRASES\",\n \"ACCESS_MTP\", \"SET_POINTER_SPEED\", \"WRITE_APN_SETTINGS\", \"ASEC_DESTROY\", \"WRITE_CONTACTS\",\n \"MANAGE_NETWORK_POLICY\", \"MANAGE_MEDIA_PROJECTION\", \"CAPTURE_AUDIO_HOTWORD\",\n \"MODIFY_PHONE_STATE\", \"MANAGE_PROFILE_AND_DEVICE_OWNERS\", \"RECEIVE_MMS\", \"GET_TASKS\",\n \"KILL_UID\", \"READ_SYNC_STATS\", \"CAPTURE_AUDIO_OUTPUT\", \"NET_ADMIN\", \"DISPATCH_NFC_MESSAGE\",\n \"SET_TIME\", \"WRITE_EXTERNAL_STORAGE\", \"MANAGE_DEVICE_ADMINS\", \"REQUEST_SUPERUSER\",\n \"MANAGE_USERS\", \"BROADCAST_NETWORK_PRIVILEGED\", \"CHANGE_WIMAX_STATE\",\n \"MOUNT_UNMOUNT_FILESYSTEMS\", \"SUBSCRIBED_FEEDS_WRITE\", \"MOVE_PACKAGE\", \"UPDATE_CONFIG\", \"PACKAGE_PRIVACY_VERIFICATION_AGENT\", \"MANAGE_FINGERPRINT\", \"ACCESS_FINE_LOCATION\",\n \"WRITE_MEDIA_STORAGE\", \"GET_TOP_ACTIVITY_INFO\", \"SUBSCRIBED_FEEDS_READ\",\n \"RETRIEVE_WINDOW_TOKEN\", \"BROADCAST_WAP_PUSH\", \"UPDATE_LOCK\",\n \"BROADCAST_PHONE_ACCOUNT_REGISTRATION\", \"STATUS_BAR\", \"READ_LOGS\", \"BLUETOOTH\",\n \"MANAGE_CA_CERTIFICATES\", \"ACCESS_NOTIFICATIONS\", \"INSTALL_SHORTCUT\", \"BROADCAST_CALLLOG_INFO\",\n \"WRITE_SMS\", \"WRITE_DREAM_STATE\", \"MODIFY_PARENTAL_CONTROLS\", \"WRITE_VOICEMAIL\",\n \"CAPTURE_VIDEO_OUTPUT\", \"MANAGE_APP_TOKENS\", \"BLUETOOTH_STACK\", \"WRITE_HISTORY_BOOKMARKS\",\n \"NFC_HANDOVER_STATUS\", \"NET_TUNNELING\", \"CHANGE_NETWORK_STATE\", 
\"RECEIVE_LAUNCH_BROADCASTS\",\n \"READ_WIFI_CREDENTIAL\", \"ACCOUNT_MANAGER\", \"ACCESS_PROVIDER\", \"PROVIDE_TRUST_AGENT\",\n \"READ_PRIVILEGED_PHONE_STATE\", \"REGISTER_CONNECTION_MANAGER\", \"RETRIEVE_WINDOW_CONTENT\",\n \"READ_PROFILE\", \"READ_INPUT_STATE\", \"RECEIVE_SMS\", \"MOUNT_FORMAT_FILESYSTEMS\", \"MANAGE_USB\", \"INTENT_FILTER_VERIFICATION_AGENT\", \"INSTALL_PACKAGES\", \"SET_DEBUG_APP\",\n \"INSTALL_LOCATION_PROVIDER\", \"SET_ANIMATION_SCALE\", \"REGISTER_CALL_PROVIDER\", \"READ_CONTACTS\",\n \"CONTROL_KEYGUARD\", \"BACKUP\", \"READ_USER_DICTIONARY\", \"BROADCAST_SMS\",\n \"RECEIVE_FIRST_LOAD_BROADCAST\"]\n db.packagePair.update(\n {\"manifestPermissions\": {'$nin':newSensitivePermissionList}},\n {'$set': {'new_perms_aplus': True}},\n multi=True)", "def update(self, policy):", "def update_forced_accessmode(log, cmd, transfertype, jobparams, trf_name): ## DEPRECATE ME (anisyonk)\n\n if \"accessmode\" in cmd and transfertype != 'direct':\n accessmode_usect = None\n accessmode_directin = None\n _accessmode_dic = {\"--accessmode=copy\": [\"copy-to-scratch mode\", \"\"],\n \"--accessmode=direct\": [\"direct access mode\", \" --directIn\"]}\n\n # update run_command according to jobPars\n for _mode in list(_accessmode_dic.keys()): # Python 2/3\n if _mode in jobparams:\n # any accessmode set in jobPars should overrule schedconfig\n log.info(\"enforcing %s\" % _accessmode_dic[_mode][0])\n if _mode == \"--accessmode=copy\":\n # make sure direct access is turned off\n accessmode_usect = True\n accessmode_directin = False\n elif _mode == \"--accessmode=direct\":\n # make sure copy-to-scratch gets turned off\n accessmode_usect = False\n accessmode_directin = True\n else:\n accessmode_usect = False\n accessmode_directin = False\n\n # update run_command (do not send the accessmode switch to runAthena)\n cmd += _accessmode_dic[_mode][1]\n if _mode in cmd:\n cmd = cmd.replace(_mode, \"\")\n\n # force usage of copy tool for stage-in or direct access\n if accessmode_usect:\n log.info('forced copy tool usage selected')\n # remove again the \"--directIn\"\n if \"directIn\" in cmd:\n cmd = cmd.replace(' --directIn', ' ')\n elif accessmode_directin:\n log.info('forced direct access usage selected')\n if \"directIn\" not in cmd:\n cmd += ' --directIn'\n else:\n log.warning('neither forced copy tool usage nor direct access was selected')\n\n if \"directIn\" in cmd and \"usePFCTurl\" not in cmd:\n cmd += ' --usePFCTurl'\n\n # need to add proxy if not there already\n if \"--directIn\" in cmd and \"export X509_USER_PROXY\" not in cmd:\n if 'X509_USER_PROXY' in os.environ:\n cmd = cmd.replace(\"./%s\" % trf_name, \"export X509_USER_PROXY=%s;./%s\" %\n (os.environ.get('X509_USER_PROXY'), trf_name))\n\n # if both direct access and the accessmode loop added a directIn switch, remove the first one from the string\n if cmd.count(\"directIn\") > 1:\n cmd = cmd.replace(' --directIn', ' ', 1)\n\n return cmd", "def _set_availablity_zones(self):\n\n if 'availability_zone' in self.config:\n vim_availability_zones = self.config.get('availability_zone')\n if isinstance(vim_availability_zones, str):\n self.availability_zone = [vim_availability_zones]\n elif isinstance(vim_availability_zones, list):\n self.availability_zone = vim_availability_zones\n else:\n self.availability_zone = self._get_openstack_availablity_zones()", "async def request_hvac_update(self) -> None:\n\n state = mapper.map_hvac_sync_state(await self._call_api(urls.hvac))\n\n if state and not state.is_pending:\n await 
self._call_api(urls.hvac_update, 'put')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The service this instance provides.
def service(self):
    return self._service
[ "def service(self):\n return self[0]", "def service_constructor(self):\n raise NotImplementedError", "def getHttpService(self):\n # type: () -> IHttpService", "def service(self) -> \"IngressServiceBackend\":\n return typing.cast(\n \"IngressServiceBackend\",\n self._properties.get(\"service\"),\n )", "def service(self):\n return service_builder.build_service(service=services.Service.CLOUDSCHEDULER,\n key=self.credentials.credentials,\n api_key=os.environ['API_KEY'])", "def _service_task(self):\r\n pass", "def additional_service(self):\n return _AdditionalService(self, 'AdditionalService')", "def register_concrete_service(self, service, scope):\n ...", "def get_service(self, id):\n service = self.services.get(id)\n if callable(service):\n self.services[id] = service()\n return self.services[id]\n return service", "def get_service():\n\n service = build(\"customsearch\", \"v1\",\n developerKey=api_key)\n return service", "def service_id(self):\n pass", "def global_service(name):\n return _global_services.get(name)", "def generic_access_service(self) -> GenericAccessService:\r\n return self._generic_access_service", "def _get_service(self):\n\n service = self._selector.get_service(0) # Don't wait\n if service is None:\n raise err.OctpServiceAllFault('Not one service is available!')\n\n return service", "def service(self, service_type: str) -> \"UpnpService\":\n return self.services[service_type]", "def service(self) -> Optional['outputs.InfinispanSpecService']:\n return pulumi.get(self, \"service\")", "def service_resource(self):\n\n # Obtain a method reference if we don't already have one. Otherwise, \n # reuse the one we've already obtained and cached in a static class \n # variable. This avoids significant real time delay.\n # TEMP COMMENTED OUT\n #if not Instance.method_ref:\n #Instance.method_ref = self.gce_project.service.instances()\n #return Instance.method_ref\n return self.gce_project.service.instances()", "def set_service(self, service):\n SHIM_LOGGER.info('Starting service')\n self.request_queue = service.request_queue\n\n self.service = service\n self._request('init')", "def catalog_item_service(self) -> CatalogItemService:\n assert_value(self.token)\n return CatalogItemService(self.token, prod=self.prod)", "def type(self):\n return self._service_type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The settings on this instance's service.
def settings(self):
    return self._settings
[ "def settings(self) -> Settings:\n return self.injector.settings", "def get_settings():\n return settings", "def settings(self) -> SettingsManager:\n return self._settings", "def system_settings(self):\n return self._system_settings", "def settings(self) -> pulumi.Input['ExchangeSettingsArgs']:\n return pulumi.get(self, \"settings\")", "def getSettings(self, sender=None):\n log.debug1(\"config.zone.%d.getSettings()\", self.id)\n return self.config.get_zone_config(self.obj)", "def get_plugin_settings(self):\n pass", "def get_settings(self):\n settings = []\n for k, inst in self.items():\n try:\n settings.append(inst.get_settings())\n except:\n print(\"Warning! Could not get settings for instrument: %s\" % k)\n return settings", "def apply_settings(self):\n singleton = Singleton()\n settings = self.settings_dialog.getSettings()\n interval = settings['update_interval'] * 1000 #convert to milliseconds\n self.worker.set_interval(interval)\n self.worker.apply_settings(settings)\n singleton.logging = settings['logging']\n singleton.notif = settings['notifications']", "def settings(self) -> pulumi.Output['outputs.ExchangeSettings']:\n return pulumi.get(self, \"settings\")", "def get_settings(self) -> webenginecore.WebEngineSettings:\n settings = self.settings()\n return webenginecore.WebEngineSettings(settings)", "def get_settings(self):\n return self._selenium_web_driver().get_settings()", "def GetSettings(T)(self):\n pass", "def settings(self):\n\n opts = Expando({})\n try:\n opts = getattr(self.site.config, self.plugin_name)\n except AttributeError:\n pass\n return opts", "def get_settings(self):\r\n\r\n settings = {'serial_device': self.__serial_device,\r\n 'baud_rate': self.__baud_rate,\r\n 'data_bits': self.__data_bits,\r\n 'stop_bits': self.__stop_bits,\r\n 'parity': self.__parity,\r\n 'flow_control': self.__flow_control}\r\n\r\n return settings", "def OperationSettings(self):\n return self.__OperationSettings", "def get_view_settings(self):\n settings = {}\n for name in REMOTE_SETTINGS:\n settings[name] = self.get_conf(name)\n\n return settings", "def get_aws_settings(self: object) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/cloud-connect-aws/GetAWSSettings\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetAWSSettings\"\n )", "def applied_settings(self):\n # type: () -> AppliedStreamSettings\n return self._applied_settings", "def settings(self, key=None):\r\n keys = [key] if key is not None else self.keys()\r\n settings = {}\r\n for ikey in keys:\r\n settings[ikey] = {}\r\n ins = qt.instruments.get(ikey)\r\n for pname in ins.get_parameter_names():\r\n settings[ikey][pname] = ins.get(pname, query=False)\r\n return settings[key] if key is not None else settings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The document used to construct this Instance object.
def _instance_document(self):
    return self.__instance_document
[ "def create_document_(self, init_dict = None):\n if init_dict is None:\n initV = {}\n else:\n initV = init_dict\n\n return self.document_class(self, initV)", "def newDocument():\n return Document(HopperLowLevel.newDocument())", "def __init__(self, document):\n self.document = document\n self.position = 0", "def main_document(self) -> SpdxDocument:\n self._generate_documents()\n return cast(SpdxDocument, self._main_document)", "def _get_doc(cls):\n return cls.__doc__", "def getCurrentDocument():\n return Document(HopperLowLevel.currentDocument())", "def document_detail(self):\n return self._document_detail", "def create_document(self, init_dict = None):\n if init_dict is not None:\n return self.createDocument_(init_dict)\n else:\n if self._validation[\"on_load\"]:\n self._validation[\"on_load\"] = False\n return self.create_document_(self.default_document)\n self._validation[\"on_load\"] = True\n else:\n return self.create_document_(self.default_document)", "def _document_structure(self):\n logger.debug(\"Documenting dataset structure {}\".format(self))\n key = self.get_structure_key()\n text = json.dumps(self._structure_parameters, indent=2, sort_keys=True)\n self.put_text(key, text)\n\n key = self.get_dtool_readme_key()\n self.put_text(key, self._dtool_readme_txt)", "def generateDocument(self):\n if not self.documentName:\n self.documentName =\\\n \"PyProm Document {}\".format(str(datetime.datetime.now()))\n\n kml_doc = kml.Document(ns=NS,\n name=self.documentName)\n kml_doc.append_style(SADDLE_STYLE)\n kml_doc.append_style(SUMMIT_STYLE)\n kml_doc.append_style(BASIN_SADDLE_STYLE)\n kml_doc.append_style(RUNOFF_STYLE)\n kml_doc.append_style(SPOTELEVATION_STYLE)\n\n if self.saddles._features:\n self.logger.info(\"Saddles: {}\".format(len(self.saddles._features)))\n kml_doc.append(self.saddles)\n if self.summits._features:\n self.logger.info(\"Summits: {}\".format(len(self.summits._features)))\n kml_doc.append(self.summits)\n if self.spotElevations._features:\n self.logger.info(\"Spot Elevations: {}\".format(len(\n self.spotElevations._features)))\n kml_doc.append(self.spotElevations)\n if self.runoffs._features:\n self.logger.info(\"RunOffs: {}\".format(len(self.runoffs._features)))\n kml_doc.append(self.runoffs)\n if self.linkers._features:\n self.logger.info(\"Linkers: {}\".format(len(self.linkers._features)))\n kml_doc.append(self.linkers)\n if self.summitDomains._features:\n self.logger.info(\"SummitDomains: {}\".format(len(\n self.summitDomains._features)))\n kml_doc.append(self.summitDomains)\n return kml_doc", "def makeNewDocument(self):\n\n document = textlayout.Document(\n width=self._propertyToPoints(\"width\"),\n marginTop=self._propertyToPoints(\"margin_top\"),\n marginBottom=self._propertyToPoints(\"margin_bottom\"),\n )\n\n return document", "def to_document(self):\n try:\n return search.Document(\n doc_id=str(self.key.urlsafe()),\n fields=self._get_document_fields())\n\n except (TypeError, ValueError) as e:\n raise DocumentCreationError(e)", "def target_doc(self):\n if not hasattr(self, \"_target_doc\") or not self._target_doc:\n if self._existing:\n self._target_doc = self._existing.get(\"target_doc\", {})\n return self._target_doc", "def __repr__(self) -> str:\n return \"DocumentNode(definitions=%r, location=%r)\" % (\n self.definitions,\n self.location,\n )", "def _document_set():\n return {\n 'type' : 'class',\n 'name' : 'document_set',\n 'base' : None,\n 'is_abstract' : False,\n 'is_entity' : True,\n 'doc' : 'Encapsulates a set of documents.',\n 'properties' : [\n 
('meta', 'shared.doc_meta_info', '1.1', None),\n ('data', 'data.data_object', '0.N', 'Associated input/output data.'),\n ('data_references', 'shared.doc_reference', '0.N', 'Reference to set of associated data objects.'),\n ('ensembles', 'activity.ensemble', '0.N', 'Associated ensemble runs.'),\n ('ensembles_references', 'shared.doc_reference', '0.N', 'Reference to set of associated ensembles.'),\n ('experiment', 'activity.numerical_experiment', '0.1', 'Associated numerical experiment.'),\n ('experiment_reference', 'shared.doc_reference', '0.1', 'Reference to the associated experiment.'),\n ('grids', 'grids.grid_spec', '0.N', 'Associated grid-spec.'),\n ('grids_references', 'shared.doc_reference', '0.N', 'Reference to the associated grid specs.'),\n ('model', 'software.model_component', '0.1', 'Associated model component.'),\n ('model_reference', 'shared.doc_reference', '0.1', 'Reference to the associated model component.'),\n ('platform', 'shared.platform', '0.1', 'Associated simulation execution platform.'),\n ('platform_reference', 'shared.doc_reference', '0.1', 'Reference to the associated platform.'),\n ('simulation', 'activity.simulation_run', '0.1', 'Associated simulation run.'),\n ('simulation_reference', 'shared.doc_reference', '0.1', 'Reference to the associated simulation.'),\n ],\n 'decodings' : [\n ('data', 'child::cim:dataObject'),\n ('ensembles', 'child::cim:ensemble'),\n ('experiment', 'child::cim:numericalExperiment'),\n ('model', 'child::cim:modelComponent'),\n ('grids', 'child::cim:gridSpec'),\n ('platform', 'child::cim:platform'),\n ('simulation', 'child::cim:simulationRun'),\n ]\n }", "def import_document(self):\n pass", "def __init__(self):\n this = _coin.new_ScXMLDocument()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def doc_entity(self) -> str:\n return '{}.{}'.format(self.doc_section, self.name.lower)", "def registerDocument(self, document):\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The service-specific URL of this instance object.
def _service_url(self):
    return self._client._url + '{}/{}/'.format(self.service, self.name)
[ "def get_service_url():\n return config.SERVICE_URL", "def resource_url(self):\n return \"/services/\" + self.slug", "def service_endpoint(self) -> str:\n pass", "def endpointurl(self):\n return self._endpointurl", "def api_url(self):\n return f\"{self.instance_url}/api/0/\"", "def http_uri(self):\n host, port = self.http_address\n return \"http://%s:%s\" % (host, port)", "def _get_url(self):\n return self.config[\"rest_url\"] if len(self.config[\"rest_url\"]) > 0 \\\n else get_hostname(self.connect.advertise_addr)", "def _getURL(self):\n return \"http://%s.%s\" % (self.key, self.baseurl)", "def url(self) -> str:\n return self._flask_request.url", "def server_url(self):\n pass", "def base_url(self):\n\n return self.http.base_url", "def single_sign_on_service_uri(self) -> str:\n return pulumi.get(self, \"single_sign_on_service_uri\")", "def url_base(self) -> str:\n return self.config['api_url']", "def soap_url(self):\n if self.url:\n return self.url + \"/soap/server_sa/\"\n\n scheme = \"https\" if self.ssl else \"http\"\n return \"{}://{}:{}/soap/server_sa/\".format(\n scheme, self.host, self.port)", "def base_api_url(self) -> str:\n return self._base_api_url", "def get_uri(self):\n return self.get_protocol() + \"://\" + self.get_host() + \":\" + str(self.get_port())", "def request_url(self):\n return self._url", "def configuration_url(self) -> str:\n protocol: str = \"https://\" if self.ssl else \"http://\"\n return f\"{protocol}{self.ip}:{self.port}\"", "def website_endpoint(self) -> str:\n return pulumi.get(self, \"website_endpoint\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register any extensions under the given namespace.
def _register_extensions(self, namespace):
    # Register any extension classes for this class.
    extmanager = ExtensionManager(
        'extensions.classes.{}'.format(namespace),
        propagate_map_exceptions=True
    )

    if extmanager.extensions:
        extmanager.map(util.register_extension_class, base=self)

    # Register any extension methods for this class.
    extmanager = ExtensionManager(
        'extensions.methods.{}'.format(namespace),
        propagate_map_exceptions=True
    )

    if extmanager.extensions:
        extmanager.map(util.register_extension_method, base=self)
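The ExtensionManager used above appears to be stevedore's, which discovers plugins through setuptools entry points. As a hedged illustration only (package, module, and class names here are invented), an extension class for an 'instances' namespace could be published from a plugin's setup.py like this:

from setuptools import setup

setup(
    name='my-objectrocket-plugin',
    packages=['my_plugin'],
    entry_points={
        # Matches ExtensionManager('extensions.classes.instances') above.
        'extensions.classes.instances': [
            'my_ext = my_plugin.ext:MyExtensionClass',
        ],
    },
)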
[ "def register_extensions(app):\n bcrypt.init_app(app)\n ma.init_app(app)\n socketio.init_app(app)", "def register_extensions(self, extensions=[]):\n try:\n for extension, config in self.config['extensions'].items():\n\n # extension module base string\n ext_bstr = 'ext.%s' % (extension)\n\n # start script\n ext_sstr = '%s.start' % ext_bstr\n\n ext_startmodule = import_module(ext_sstr, pass_errors=True)\n if ext_startmodule is not None:\n before = getattr(ext_startmodule, 'before')\n before(config)\n\n # register extension commands if exists\n ext_cmdstr = '%s.%s' % (ext_bstr, 'commands')\n\n ext_cmd_module = import_module(ext_cmdstr, pass_errors=True)\n if ext_cmd_module is not None:\n self.commandadapter.register_extension(ext_cmd_module,\n extension)\n\n except Exception as e:\n print(traceback.format_exc())\n GlimLog.error(e)", "def addNamespace(self, namespace):\n \n pass", "def addNamespace(*args, **kwargs):\n \n pass", "def install_builtins(namespace=None):\n\n if not namespace:\n namespace = __builtin__\n\n if not isinstance(namespace, dict):\n namespace = namespace.__dict__\n\n _globals = globals()\n\n for name in __all__:\n if name in _globals:\n namespace[name] = _globals[name]", "def call_register(root_dir):\n for mod in imported_modules:\n if hasattr(mod, \"register\"):\n mod.register()", "def load_extensions():\n satchmo_settings = getattr(settings, 'SATCHMO_SETTINGS', {})\n extenders = [\n import_path(p) for p in satchmo_settings.get('FORM_EXTENDERS', [])\n ]\n for extender in extenders:\n extender.extend()", "def register_template_extensions(\n cls,\n exts_fn: Callable[[CompileCtx], Dict[str, Any]]\n ) -> None:\n assert not cls._template_extensions_frozen\n CompileCtx._template_extensions_fns.append(exts_fn)", "def register_extension(extension):\n if not extension in markdown_extensions:\n markdown_extensions.append(extension)", "def append_namespaces(z, namespace_packages):\n for name in sorted(namespace_packages):\n z.writestr(name.replace('.', '/') + '/__init__.py', NAMESPACE_INIT)", "def add_encoded_extension(self, extension):\n ...", "def _setup_import_hook(cls, extensions: List[Extension]):\n if len(extensions) == 0:\n return\n\n existing = [h for h in sys.meta_path if isinstance(h, _ImportLoadExtInterceptor)]\n if len(existing) > 0:\n hook = existing[0]\n hook.module_to_extension.update({req: e for e in extensions for req in e.reqs})\n else:\n hook = _ImportLoadExtInterceptor(\n module_to_extension={req: e for e in extensions for req in e.reqs}\n )\n sys.meta_path.insert(0, hook)", "def register_fonts(path=fontscollectionpath,valid_extensions=['.ttf','.otf','.ttc','.pfa','.pfb','.ttc','.dfont']):\n for dirpath, _, filenames in os.walk(path):\n for filename in filenames:\n if os.path.splitext(filename)[1] in valid_extensions:\n FontEngine.instance().register_font(os.path.join(dirpath, filename))", "def init_plugin(ext_registry: ExtensionRegistry):\n ext_registry.add_extension(component=object(), point='test.util.test_plugin', name='ext1')\n ext_registry.add_extension(component=object(), point='test.util.test_plugin', name='ext2')\n ext_registry.add_extension(component=object(), point='test.util.test_plugin', name='ext3')", "def register(name, fn):\n return el.Dotted.register(name, fn)", "def setup_extension_modules(self):\n\n try:\n numpy_include = numpy.get_include()\n\n except AttributeError:\n numpy_include = numpy.get_numpy_include()\n\n # Add the NumPy include directory to the include directories\n # list for each type of compiler\n for cc in 
self.include_directories.keys():\n self.include_directories[cc].append(numpy_include)\n\n # The main openmoc extension (defaults are gcc and single precision)\n self.extensions.append(\n Extension(name = '_openrk',\n sources = copy.deepcopy(self.sources[self.cc]),\n library_dirs = self.library_directories[self.cc],\n libraries = self.shared_libraries[self.cc],\n extra_link_args = self.linker_flags[self.cc],\n include_dirs = self.include_directories[self.cc],\n swig_opts = self.swig_flags + ['-D' + self.cc.upper()]))", "def valid_extensions(self, valid_extensions):\n\n self._valid_extensions = valid_extensions", "def register_libraries():\n\n global _binaries_dir\n\n for binary in os.listdir(_binaries_dir):\n if binary.endswith(('.zip', '.egg', '.whl')):\n sys.path.insert(0, os.path.join(_binaries_dir, binary))", "def _registerClasses(classes) -> None:\n global _registered_classes\n _registered_classes = classes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all ACLs for this instance.
def all(self):
    return self._instance._client.acls.all(self._instance.name)
[ "def get_acls(self):\n return self.access_list_manager.get_objects()", "def acl(self):\n # type: () -> list[AclEntry]\n return self._acl", "def getACLs(self, account):\n return EquipmentACLInfo.getRulesForEquipment(account, self)", "def get_network_acls(self):\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls?version={}&generation={}\".format(\n self.cfg[\"version\"], self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching network ACLs. {}\".format(error))\n raise", "def get_all_acl_names(self):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_CONTROLS, 3)\n\n total_acls = self._get_total_number(self.info['loc_cfg_total_acls_span'], \"Access Controls\")\n max_acls_row = int(self.info['const_cfg_max_acl_rows'])\n traverse_row = 1\n i = 0\n total_entries = []\n\n if total_acls == u'0':\n logging.info(\"There's no ACL rules in the Access Controls table\")\n return []\n\n while i < int(total_acls):\n find_acl_name = self.info['loc_cfg_acl_name_cell']\n find_acl_name = find_acl_name.replace('$_$', str(traverse_row))\n get_acl_name = self.s.get_text(find_acl_name)\n total_entries.append(get_acl_name)\n\n if traverse_row == max_acls_row:\n traverse_row = 0\n self.s.click_and_wait(self.info['loc_cfg_acl_next_image'])\n traverse_row += 1\n i += 1\n time.sleep(1)\n\n return total_entries", "def _get_acls(self, datapath):\n auth_acl = datapath.acls.get(self._auth_acl_name)\n noauth_acl = datapath.acls.get(self._noauth_acl_name)\n return (auth_acl, noauth_acl)", "def get(self, acl):\n return self._instance._client.acls.get(self._instance.name, acl)", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return super(NetworkACLList, cls).list(api_client.listNetworkACLLists(**cmd)['networkacllist'])", "def getacl(self, mailbox):\n typ, dat = self._simple_command('GETACL', mailbox)\n return self._untagged_response(typ, dat, 'ACL')", "def resources() -> [acl.AclResource]:\n return []", "def add_acls(self, acl_list):\n return self.access_list_manager.add_objects(acl_list)", "def access_controls(self):\n access = {'all': False, 'roles': [], 'hosts': []}\n for control in self.safeaccesscontrol_set.all():\n if control.all_hosts:\n access['all'] = True\n return access\n else:\n if type(control.acl_object) == Host:\n access['hosts'].append(control.acl_object)\n elif type(control.acl_object) == Role:\n access['roles'].append(control.acl_object)\n return access", "def all(self):\n centros = Centro.query.all()\n return centros", "def acl_settings(self):\n acl_settings = (\n self._sdk.AuthApi(self._api_client).get_settings_acls().acl_policy_settings\n )\n return {\n \"access\": acl_settings.access,\n \"calcmode\": acl_settings.calcmode,\n \"calcmode_group\": acl_settings.calcmode_group,\n \"calcmode_owner\": acl_settings.calcmode_owner,\n \"chmod\": acl_settings.chmod,\n \"chmod_007\": acl_settings.chmod_007,\n \"chmod_inheritable\": acl_settings.chmod_inheritable,\n \"chown\": acl_settings.chown,\n \"create_over_smb\": acl_settings.create_over_smb,\n \"dos_attr\": acl_settings.dos_attr,\n \"group_owner_inheritance\": acl_settings.group_owner_inheritance,\n \"rwx\": acl_settings.rwx,\n \"synthetic_denies\": acl_settings.synthetic_denies,\n \"utimes\": acl_settings.utimes,\n }", "def getACL(self, account):\n return EquipmentACLInfo.getRule(account, self)", "def 
get_all_admins(self):\n return self.get_items(TalosAdmin)", "def iter_acl(acl):\n return sorted(acl, key=lambda x: (getattr(x, 'principal', x).principal_order,\n not getattr(getattr(x, 'principal', x), 'is_local', None)))", "def _remove_all_acl_rules(self):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_ACCESS_CONTROLS, 3)\n max_acls_row = int(self.info['const_cfg_max_acl_rows'])\n\n delete_entries = 0\n total_acl = 1\n while delete_entries < 200 and total_acl > 0:\n if not self.s.is_checked(self.info['loc_cfg_acl_all_checkbox']):\n time.sleep(2)\n self.s.click_and_wait(self.info['loc_cfg_acl_all_checkbox'], 4)\n\n self.s.choose_ok_on_next_confirmation()\n self.s.click_and_wait(self.info['loc_cfg_acl_delete_button'], 4)\n if self.s.is_confirmation_present(5):\n self.s.get_confirmation()\n\n delete_entries += max_acls_row\n\n time.sleep(3)\n total_acl = int(self._get_total_number(self.info['loc_cfg_total_acls_span'], \"Access Controls\"))\n time.sleep(3)\n\n return total_acl", "def _get_acl_rules(self, address_type, acl_type, acl_name, seq_range):\n rules_list = []\n\n if address_type == 'mac':\n cmd = acl_template.show_l2_access_list\n elif address_type == 'ip':\n cmd = acl_template.show_ip_access_list\n elif address_type == 'ipv6':\n cmd = acl_template.show_ipv6_access_list\n else:\n raise ValueError('{} not supported'.format(address_type))\n\n t = jinja2.Template(cmd)\n config = t.render(acl_name_str=acl_name)\n config = ' '.join(config.split())\n\n output = self._callback(config, handler='cli-get')\n\n # Check if there is any error\n self._process_cli_output(inspect.stack()[0][3], config, output)\n\n if address_type == 'mac':\n rules_list = self._parse_l2_rule(output, seq_range)\n elif address_type == 'ip':\n if acl_type == 'standard':\n rules_list = self._parse_std_ip_rule(output, seq_range)\n elif acl_type == 'extended':\n rules_list = self._parse_ext_ip_rule(output, seq_range)\n elif address_type == 'ipv6':\n rules_list = self._parse_ext_ipv6_rule(output, seq_range)\n\n return rules_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the ACL specified by ID belonging to this instance.
def get(self, acl):
    return self._instance._client.acls.get(self._instance.name, acl)
[ "def get_network_acl_by_id(self, id):\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}?version={}&generation={}\".format(\n id, self.cfg[\"version\"], self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching network ACL with ID {}. {}\".format(\n id, error))\n raise", "def get_network_acl(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='get_network_acl')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/network_acls/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def acl(self):\n # type: () -> list[AclEntry]\n return self._acl", "def get_network_acl_rules_by_id(self, id):\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules?version={}\"\n \"&generation={}\".format(id, self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching rules for network ACL with ID\"\n \" {}. {}\".format(id, error))\n raise", "def getACL(self, account):\n return EquipmentACLInfo.getRule(account, self)", "def get_network_acl_rule_by_id(self, acl, id):\n # Retrieve network ACL to get the ID\n # (mostly useful if a name is provided)\n acl_info = self.get_network_acl(acl)\n if \"errors\" in acl_info:\n return acl_info\n\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules/{}?version={}\"\n \"&generation={}\".format(acl_info[\"id\"], id,\n self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching rule with ID {} for network ACL\"\n \"with ID {}. 
{}\".format(id, acl_info[\"id\"], error))\n raise", "def get_organization_acl(self, organization_id: str):\n response = self.synapse.restGET(f\"/schema/organization/{organization_id}/acl\")\n return response", "def getacl(self, mailbox):\n typ, dat = self._simple_command('GETACL', mailbox)\n return self._untagged_response(typ, dat, 'ACL')", "def get_role_by_id(conn: dict, id: str) -> dict:\n return get(conn, PCC_ROLES + \"/\" + id)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n acl_operation_type: Optional[pulumi.Input[str]] = None,\n acl_resource_name: Optional[pulumi.Input[str]] = None,\n acl_resource_pattern_type: Optional[pulumi.Input[str]] = None,\n acl_resource_type: Optional[pulumi.Input[str]] = None,\n host: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'SaslAcl':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _SaslAclState.__new__(_SaslAclState)\n\n __props__.__dict__[\"acl_operation_type\"] = acl_operation_type\n __props__.__dict__[\"acl_resource_name\"] = acl_resource_name\n __props__.__dict__[\"acl_resource_pattern_type\"] = acl_resource_pattern_type\n __props__.__dict__[\"acl_resource_type\"] = acl_resource_type\n __props__.__dict__[\"host\"] = host\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"username\"] = username\n return SaslAcl(resource_name, opts=opts, __props__=__props__)", "def get_subnet_network_acl(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='get_subnet_network_acl')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/subnets/{id}/network_acl'.format(**path_param_dict)\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def get_acl_type(self):\n return self.acl_type", "def get_network_acl_rule(self,\n network_acl_id: str,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if network_acl_id is None:\n raise ValueError('network_acl_id must be provided')\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='get_network_acl_rule')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['network_acl_id', 'id']\n path_param_values = self.encode_path_vars(network_acl_id, id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/network_acls/{network_acl_id}/rules/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response", "def get_sdc_by_id(self, id):\n for sdc in self.sdc:\n if sdc.id == id:\n return 
sdc\n raise KeyError(\"SDC with that ID not found\")", "def get(self, id):\r\n catergory = get_a_catergory(id)\r\n if not catergory:\r\n api.abort(404)\r\n else:\r\n return catergory", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n config_json: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n max_token_ttl: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n namespace_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AclAuthMethodNamespaceRuleArgs']]]]] = None,\n partition: Optional[pulumi.Input[str]] = None,\n token_locality: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None) -> 'AclAuthMethod':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AclAuthMethodState.__new__(_AclAuthMethodState)\n\n __props__.__dict__[\"config\"] = config\n __props__.__dict__[\"config_json\"] = config_json\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"max_token_ttl\"] = max_token_ttl\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"namespace\"] = namespace\n __props__.__dict__[\"namespace_rules\"] = namespace_rules\n __props__.__dict__[\"partition\"] = partition\n __props__.__dict__[\"token_locality\"] = token_locality\n __props__.__dict__[\"type\"] = type\n return AclAuthMethod(resource_name, opts=opts, __props__=__props__)", "def get_card(self, id):\n key = self.get_key(id)\n if key >= 0:\n return self.cards[key]\n\n return None", "def _get_acls(self, datapath):\n auth_acl = datapath.acls.get(self._auth_acl_name)\n noauth_acl = datapath.acls.get(self._noauth_acl_name)\n return (auth_acl, noauth_acl)", "def get_acls(self):\n return self.access_list_manager.get_objects()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The SHA1 of the file.
def sha1(self):
    filehash = sha1()
    with open(self.path, 'rb') as infile:
        while True:
            data = infile.read(BUF_SIZE)
            if not data:
                break
            filehash.update(data)
    return filehash.hexdigest()
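A standalone sketch of the same chunked-hashing pattern, with the module-level names the method relies on (sha1 from hashlib, a BUF_SIZE constant) spelled out; the buffer size here is an assumption, since the original constant is defined elsewhere:

import hashlib

BUF_SIZE = 65536  # assumed chunk size

def file_sha1(path):
    filehash = hashlib.sha1()
    with open(path, 'rb') as infile:
        # iter() with a sentinel reads until an empty bytes object is returned.
        for data in iter(lambda: infile.read(BUF_SIZE), b''):
            filehash.update(data)
    return filehash.hexdigest()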
[ "def sha1(self):\n return hashlib.sha1(self.get_bytes()).hexdigest()", "def get_hash_sha1(self):\n\n if sha1 is not None:\n return sha1( self.get_data() ).hexdigest()", "def SHA1Sum(klass, filename):\n return hashlib.sha1(path(filename).text()).hexdigest()[:8]", "def sha1_file(pathname):\n with open(pathname,'rb') as fl:\n return sha1_filelike(fl)", "def sha1_filelike(filelike):\n m = hashlib.sha1()\n while True:\n s = filelike.read()\n if len(s) == 0:\n break\n else:\n m.update(s)\n return m.hexdigest()", "def sha1(self) -> str:\r\n return sha1(self.string.encode()).hexdigest()", "def _get_v1_id(self):\n saved_posn = self.padfile.tell()\n self.padfile.seek(0)\n sha1 = hashlib.sha1()\n string = self.padfile.read(1024)\n sha1.update(string)\n self.padfile.seek(saved_posn)\n return sha1.hexdigest()", "def generate_hash(self, file):\n with open(file, 'rb') as afile:\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n return hasher.hexdigest()", "def checksum(self, filepath):\n command = f\"sha1sum {filepath}\"\n _, stdout, _ = self.exec_command(command)\n lines = stdout.readlines()\n return lines[0].strip()", "def get_hash(self):\r\n path = self.files[self.idx_image]\r\n filename = path.split(\"/\")[-1]\r\n with open(path,\"rb\") as f:\r\n hash_object = hashlib.sha512(f.read())\r\n hex_dig = hash_object.hexdigest()\r\n hash = filename + \" \"+ hex_dig\r\n return hash", "def entry_sha1(entry):\n if entry.kind == 'symlink':\n return osutils.sha_string(entry.symlink_target)\n else:\n return entry.text_sha1", "def _source_hash_file(source):\n h = hashlib.sha1()\n h.update(source.encode('utf-8'))\n return h.hexdigest()", "def filehash(self):\n # This is lazily evaluated as we can be sure that we can always\n # calculate it (unless the FS itself is unreadable)\n if self._filehash is None:\n s = hashlib.sha256()\n with self.wheel_file.open('rb') as f:\n while True:\n buf = f.read(65536)\n if buf:\n s.update(buf)\n else:\n break\n self._filehash = s.hexdigest().lower()\n return self._filehash", "def hash(self):\n if not self.__hash:\n self.__hash = compute_file_hash(str(self.value))\n return self.__hash", "def get_SHA1(variant_rec):\n h = hashlib.sha1()\n keys = ['seq', 'study', 'contig', 'start', 'ref', 'alt']\n h.update('_'.join([str(variant_rec[key]) for key in keys]).encode())\n return h.hexdigest().upper()", "def _Sha1Checksum(payload):\n m = hashlib.sha1()\n m.update(payload)\n return m.hexdigest()", "def hash_file(fname):\n try:\n with open(fname, 'rb') as f:\n sha1 = hashlib.sha1()\n while True:\n data = f.read(4096)\n if not data:\n break\n sha1.update(data)\n\n return sha1.hexdigest()\n except (OSError, IOError):\n return None", "def file_encrypt_sha1(filename):\n n = 2 \n name, ext = os.path.splitext(filename)\n m = hashlib.sha1()\n m.update(name)\n line = m.hexdigest()\n first, second, third, last = [line[i:i+n] for i in range(0, len(line), n) if i < 8]\n cut_dir = os.path.join(first, second, third, last).replace(\"\\\\\", \"/\")\n cut_name = line[8:] + ext\n result = {\n \"path\": os.path.join(cut_dir, cut_name),\n \"dir\": cut_dir,\n \"name\": cut_name,\n }\n\n return result", "def calculate_sha1_and_md5(self) -> None:\n self.sha1, self.md5 = calculate_sha1_and_md5(self.absolute_file_path)\n return self.sha1, self.md5" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Full crawl within the omniglot domain.
def crawl_omniglot(outputdir):
    homepage = urllib2.urlopen(OMNIGLOT).read()
    crawled = []
    for i in re.findall(AHREF_REGEX,homepage):
        if not i.startswith("http://") and not i.endswith("/") and \
        not i.startswith('https://'):
            if OMNIGLOT+i not in crawled:
                print OMNIGLOT+i
                x = urllib2.urlopen(OMNIGLOT+i).read()
                filename = (OMNIGLOT+i).rpartition('/')[2]
                print filename
                print>>codecs.open(outputdir+filename,'w','utf8'), x
                time.sleep(random.randrange(5,10))
                crawled.append(OMNIGLOT+i)
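A worked example of the filename extraction step used above; the URL shape is an assumption about what OMNIGLOT + i looks like, not a value taken from the dataset:

url = 'http://www.omniglot.com/writing/chinese.htm'  # assumed shape of OMNIGLOT + i
filename = url.rpartition('/')[2]
# filename == 'chinese.htm'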
[ "def crawl(self):\n counter = 0\n to_visit = [self.base_url]\n while counter != self.max_links:\n if to_visit[0] in self.visited_pages:\n to_visit.pop(0)\n \n else:\n w = WebPage(to_visit[0])\n for item in list(w.urls_set()):\n to_visit.append(item)\n self._all_urls = self._all_urls.union(w.urls_set()) \n self._all_emails = self._all_emails.union(w.emails_set()) \n self._all_phones = self._all_phones.union(w.phones_set()) \n self.visited_pages.append(to_visit[0])\n to_visit.pop(0)\n counter += 1", "def crawl_website():\n\n content=get_page_content(url)\n if content is None:\n logging.critical(\"Failed to get content from \"+url)\n sys.exit(1)\n\n category_list=get_category_list(content)\n\n for category in category_list:\n category_url, category_name=category\n category_url=url+category_url\n crawl_category(category_name, category_url)", "def crawl(self):\r\n #beging analyzer and controller thread(actually called their run())\r\n self.__analyzer.start()\r\n self.__controller.start()\r\n #block until controller thread terminate\r\n self.__controller.join(3600)\r\n self.__analyzer.setStopCondition(True)\r\n self.__siteQueueAndCond[1].acquire()\r\n self.__siteQueueAndCond[1].notifyAll()\r\n self.__siteQueueAndCond[1].release()\r\n #block until analyzer thread terminate\r\n self.__analyzer.join()\r\n print \"%d fetchers were useful\" % self.__controller.getNumFetchersUsed()\r\n print(\"%d out of %d sites were succesfully crawles\" %\r\n (len(self.__dbAndLock[0]['pages']),self.__maxPagesToCrawl))\r\n print \"The pages that were succesfully crawled:\"\r\n for s in self.__dbAndLock[0]['pages']:\r\n print self.__dbAndLock[0]['pages'][s].stringUrl\r\n\r\n self.__analyzer.report()\r\n\r\n self.__exporter.export(self.__dbAndLock[0])", "def crawl_website():\n\n\turl = \"http://books.toscrape.com/index.html\"\n\thost_name = \"books.toscrape.com\"\n\n\tcontent = get_page_content(url)\n\tif content is None:\n\t\tlogging.critical(\"Failed to get content from \" + url)\n\t\tsys.exit(1)\n\n\tcategory_list = get_category_list(content)\n\n\tfor category in category_list:\n\t\tcategory_url, category_name = category\n\t\tcategory_url = \"http://\" + host_name + \"/\" + category_url\n\t\t#print(category_url)\n\t\t#sys.exit(1)\n\t\tcrawl_category(category_name, category_url)", "def crawl_job():\n settings = get_project_settings()\n runner = CrawlerRunner(settings)\n return runner.crawl(GamesSpider)", "def crawlsite(self):\n try:\n while True:\n source, url = self.next_in_queue()\n self.logger.debug(\"GOT \" + url)\n if not self.seen[url]:\n self.logger.debug(url)\n self.seen[url] = True\n try:\n resp = self.head(url)\n except requests.exceptions.ConnectionError:\n self.logger.error(\"Connection Error: \" + url)\n self.check(resp, url, source)\n if self.is_crawlable(resp):\n self.crawlpage(url)\n self.logger.info(\"Crawled page \" + url)\n else:\n self.logger.debug(\"SEEN \" + url)\n self.done_with(url)\n except IndexError: # next_in_queue will raise when empty\n pass", "def test_scraping(self):\n self._scraper.scrape()", "def crawl():\n # blog crawler\n runner = CrawlerRunner(\n {\n 'FEED_FORMAT': 'json',\n 'FEED_URI': DATA_FILE,\n }\n )\n runner.crawl(GoogleBlog)\n runner.crawl(OpenAI)\n runner.crawl(DeepMind)\n runner.crawl(Uber)\n\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n\n reactor.run()", "def start_crawling(self):\n global domain\n global subdomain_dict\n global valid_set\n global max_outlinks_url\n global max_outlinks_num\n global previous_num\n \n while self.frontier.has_next_url():\n 
url = self.frontier.get_next_url()\n logger.info(\"Fetching URL %s ... Fetched: %s, Queue size: %s\", url, self.frontier.fetched, len(self.frontier))\n\n #To track maximum number of outlinks from a certain URL\n if max_outlinks_num < len(self.frontier) - previous_num:\n max_outlinks_num = len(self.frontier) - previous_num\n max_outlinks_url = url\n previous_num = len(self.frontier)\n \n url_data = self.fetch_url(url)\n for next_link in self.extract_next_links(url_data):\n if self.corpus.get_file_name(next_link) is not None:\n if self.is_valid(next_link):\n self.frontier.add_url(next_link)\n\n #To obtain links of valid downloaded/fetched links\n valid_set.add('Fetched URL:\\t{}\\n'.format(next_link))\n\n #To obtain subdomains and their frequencies\n url_subdomain_index = next_link.index(domain)\n subdomain = next_link[:(url_subdomain_index)]\n if 'https' in subdomain:\n subdomain = subdomain.strip('https://')\n elif 'http' in subdomain:\n subdomain = subdomain.strip('http://')\n subdomain_dict[subdomain] += 1\n else:\n #To obtain the links of traps\n traps_set.add('Trap:\\t{}\\n'.format(next_link))\n \n #File Creation for Subdomain Tracking \n subdomain_count_file = open(\"Subdomain Count.txt\", \"w\")\n subdomain_count_file.write(\"Subdomain: \\tCount\\n\")\n for subdomain in dict(subdomain_dict).keys():\n string_to_add = '{}\\t{}\\n'.format(subdomain[:-1], dict(subdomain_dict)[subdomain])\n subdomain_count_file.write(string_to_add) \n subdomain_count_file.close()\n\n #File Creation for Subdomain Creation\n traps_file = open(\"Traps.txt\", \"w\")\n traps_file.write(\"Trap: \\tUrl\\n\")\n for trap in traps_set:\n traps_file.write(trap)\n traps_file.close()\n\n #File Creation for Fetched/Downloaded URLs\n fetched_file = open(\"Fetched URLs.txt\", \"w\")\n fetched_file.write(\"Fetched: \\tUrl\\n\")\n for fetched in valid_set:\n fetched_file.write(fetched)\n fetched_file.close()\n\n #File Creation for Maximum Outlinks Tracker\n max_file = open(\"Max Outlinks.txt\", \"w\")\n max_file.write('URL with maximum outlinks: {}\\n'.format(max_outlinks_url))\n max_file.write('Number of outlinks: {}'.format(max_outlinks_num))\n max_file.close()", "async def nekogirl(self):\n res = tools.fetch('nekogirls')\n await self.client.say(res.url)", "def url_main(self):\n log = self.logger\n st = time.time()\n print(\"Running...\\nCheck logs/threat_hub.log for detailed events\")\n log.info(\"URL Threat intell hub started\")\n log.info(\"database cleanup started\")\n self.process.flush_db()\n log.info(\"database flushed successfully\")\n try:\n for url in self.crawl.url_targets:\n response = self.crawl.send_request(url=url)\n if not response:\n continue\n else:\n dataset = self.process.shape_up(raw_data=response)\n # self.first_run(dataset, url)\n self.normal_run(dataset, url)\n log.info(f\"Total {self.process.db.get_total_url()} url records present.\")\n log.info(f\"Total {self.process.db.get_phishing_count()} phishing urls present\")\n log.info(f\"Total {self.process.db.get_malware_count()} malware urls present\")\n log.info(f\"Total {self.process.db.get_ransom_count()} ransomware urls present\")\n\n except Exception as err:\n log.exception(err, exc_info=False)\n finally:\n end = time.time()\n log.info(\"Finished executing threat intell hub in {} seconds\".format(end - st))\n print(\"Finished...\")", "def schedule_crawler(self) :\n\t\tself.create_new_workspace()\n\t\t#self.add_query_keywords()\n\n\t\treq = urllib2.Request(self.url, json.dumps(self.search_terms), {\"Content-type\" : 
\"application/json\"})\n\n\t\ttry:\n\t\t\tresponse = urllib2.urlopen(req)\n\t\texcept IOError, e:\n\t\t print \"It looks like something went wrong in scheduling the crawl. Exiting...\"\n\t\t sys.exit(1)\n\n\t\tout = json.loads(response.read())\n\t\t\n\t\tself.job_id = out.keys()[0]\n\n\t\tprint \"Crawling in progress ...\";", "def go(self):\n \n self.setprop('crawl', crol.Crawl({\n 'seed_url' : self.registration.site,\n 'crawl_report' : crol.CrawlReport({'seed_url':self.registration.site}),\n 'log' : self.log,\n 'nofollow_patterns' : self.registration.nofollow_patterns,\n 'ignore_patterns' : self.registration.ignore_patterns\n }))\n \n self.log.filename = self.registration.department.name\n self.crawl.start(self.crawl.crawl_report.reportnode)\n self.log.reporttofile(self.crawl.crawl_report)\n if self.crawl.crawl_report.statistics['broken_count'] > 0: self.applyactions()", "def crawl(self, url, extra_crawl=False):\n\n # check for a schema\n if not url.startswith(\"https://\") and not url.startswith(\"http://\"):\n url = \"http://\" + url\n\n try:\n # make the request\n headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html\"}\n response = requests.get(url, headers=headers, verify=False, timeout=8)\n\n # check if the request was successful\n if response.status_code == 200:\n response_text = response.text\n\n # remove bad characters\n for badchar in (\">\", \":\", \"=\", \"<\", \"/\", \"\\\\\", \";\", \"&\", \"%3A\", \"%3D\", \"%3C\"):\n response_text = response_text.replace(badchar, \" \")\n \n # find all the emails\n self.emails += self.find_emails(response_text)\n\n # check if extra crawling should be done\n if extra_crawl:\n # parse all links in BeautifulSoup\n for link in BeautifulSoup(response.text, parse_only=SoupStrainer(\"a\"), features=\"lxml\"):\n try:\n # check if link has a destination\n if link.has_attr(\"href\"):\n # check it isn't an email link\n if \"mailto:\" in link[\"href\"] or \"@\" in link[\"href\"]:\n continue\n \n # is it a duplicate?\n if link[\"href\"] in self.urls or link[\"href\"] in self.extra_urls:\n continue\n\n # is it the full link?\n if link[\"href\"].startswith(\"/\"):\n link[\"href\"] = url + link[\"href\"]\n\n # check it relates to domain and there isn't too many extra urls\n if self.domain in link[\"href\"] and len(self.extra_urls) < 20:\n self.extra_urls.append(link[\"href\"])\n except:\n pass\n except Exception, ex:\n pass", "def run(self):\n while True:\n for crawler in self.crawlers:\n crawler.crawl()\n\n print 'Sleeping for %s seconds' % self.crawl_wait\n sleep(self.crawl_wait)", "def open_spider(self, spider):\n logging.info('open spider')", "def RUN_CRAWLER(crawler_):\n crawler_.crawl()", "def search(crawl_term):\n crawl_link = crawl_term.replace(' ', '+')\n site ='https://www.g2crowd.com/search/products?max=10&query=' + crawl_link\n hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n }\n req = urllib2.Request(site, headers=hdr)\n try:\n page = urllib2.urlopen(req)\n except urllib2.HTTPError as e:\n print(e)\n content = page.read()\n soup = BeautifulSoup(content, 'html.parser')\n results = soup.find_all('div', {'class':\"slat-right\"})\n\n if results:\n for result in results:\n product = result.a.text\n # If the search term is in the product name we have a match\n if crawl_term.lower() in product.lower():\n # Find the review page start link\n review_link = 
result.a['href']\n # Open review page and find last link\n site = 'https://www.g2crowd.com' + review_link\n hdr = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n }\n req = urllib2.Request(site, headers=hdr)\n try:\n page = urllib2.urlopen(req)\n except urllib2.HTTPError as e:\n print(e)\n content = page.read()\n soup = BeautifulSoup(content, 'html.parser')\n links = soup.find_all('a', {\"class\":\"pjax\"})\n for l in links:\n text = l.text\n if 'Last' in text:\n link = l['href'].split('/')[-1].split('?')[-1]\n last = [int(part.replace('page=','')) for part in link.split('&') if 'page=' in part][0]\n else:\n last = 0\n else:\n # If product not in any of the results, review link and last are null and 0\n review_link = \"\"\n last = 0\n else:\n # If the search returns nothing, review link and last are null and 0\n review_link = \"\"\n last = 0\n return review_link, last", "def start_crawl():\n self_configuration = get_self_configuration(exception_class=RuntimeError)\n self_node_identifier = self_configuration.node_identifier\n primary_validator = self_configuration.primary_validator\n\n primary_validator_address = format_address(\n ip_address=primary_validator.ip_address,\n port=primary_validator.port,\n protocol=primary_validator.protocol\n )\n\n crawl_banks(primary_validator_address=primary_validator_address, self_node_identifier=self_node_identifier)\n crawl_validators(primary_validator_address=primary_validator_address)\n\n send_connection_requests(node_class=Bank, self_configuration=self_configuration)\n send_connection_requests(node_class=Validator, self_configuration=self_configuration)\n\n cache.set(CRAWL_LAST_COMPLETED, str(timezone.now()), None)\n cache.set(CRAWL_STATUS, CRAWL_STATUS_NOT_CRAWLING, None)\n\n send_crawl_status_notification()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of linked pages from Omniglot's numbers page.
def get_num_pages():
    NUMBERS = "http://www.omniglot.com/language/numbers/"
    num = urllib2.urlopen(MULTILING_URLS['num']).read()
    return list(set([NUMBERS+str(re.findall(AHREF_REGEX,str(i))[0]) \
                     for i in bs(num).findAll('dd')]))
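The function above relies on two names defined elsewhere in the original source and not shown here: MULTILING_URLS, a dict of Omniglot index-page URLs, and AHREF_REGEX. A hedged Python 3 / BeautifulSoup 4 equivalent, with those names filled in as assumptions rather than the original definitions, could look like this:

import re

import requests
from bs4 import BeautifulSoup

# Assumed stand-ins for the module-level constants used by the original code.
MULTILING_URLS = {'num': 'http://www.omniglot.com/language/numbers/',
                  'babel': 'http://www.omniglot.com/babel/'}
AHREF_REGEX = r'href="([^"]+)"'

def get_num_pages_py3():
    """Return deduplicated links to the per-language number pages."""
    base = MULTILING_URLS['num']
    html = requests.get(base).text
    links = set()
    # Each <dd> element on the index page wraps one per-language link.
    for dd in BeautifulSoup(html, 'html.parser').find_all('dd'):
        match = re.search(AHREF_REGEX, str(dd))
        if match:
            links.add(base + match.group(1))
    return list(links)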
[ "def pageNumbers(self):\n\t pages = self.listFolderContents(contentFilter={\"portal_type\":\"Broadsheet\"})\n\t linkPages = []\n\t for page in pages:\n\t\tlinkPages.append(page.pageNumber())\n\n\t return linkPages", "def _wikipedia_Page_linkedPages(self):\n return [page for page in toolserver.Generators.getPagelinks(self)]", "def get_all_links_pages(total_pages):\n\tbase_url = 'http://torrentik.co'\n\tpage_part = '/page/'\n\tlinks_pages = []\n\tfor i in range(1, 2): # int(total_pages) + 1\n\t\turl = base_url + page_part + str(i)\n\t\tlinks_pages.append(url)\n\treturn links_pages", "def list_links(num=MAX_ITEMS):\n hn_links = load_links()\n print ''\n for i in range(num):\n j = i+1\n print_link(j, hn_links[i][0], hn_links[i][1])", "def list_pages(ctx):\n workspace = Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename)\n print(\"\\n\".join(workspace.mets.physical_pages))", "def createPagesLinks(self):\n for i in range(self.pageCount):\n link = self.pageLink + str(i + 1)\n self.linkList.append(link)", "def fetch_listing_pages():\n # startURL = u\"http://www.daft.ie/ireland/houses-for-rent\"\n startURL = u\"http://www.daft.ie/ireland/houses-for-rent/?s%5Bignored_agents%5D%5B0%5D=5732&s%5Bignored_agents%5D%5B1%5D=428&s%5Bignored_agents%5D%5B2%5D=1551&offset=1960\"\n totalpages = mop_listing_pages(startURL, count = 195)\n print(\"\".join([str(totalpages),\n u\" listing pages saved to disk.\"]).encode('utf-8'))", "def get_page(page_num):\n\n response = requests.get(f'https://jobs.github.com/positions.json?page={page_num}')\n\n return [f\"{job['company']} - {job['title']} [{job['location']}]\" for job in json.loads(response.text)]", "def get_pages() -> [List[Dict], List[int]]:\n site = wikipedia('en')\n pages = []\n modules_names = []\n error_pages = []\n # Asks 500 (max) per iteration lua modules pages for api\n for r in site.query(list='allpages', apnamespace=\"828\", aplimit=\"max\"):\n # Iterates in the results\n for page in r.allpages:\n # Check if a documentation file\n if \"/doc\" not in page.title and \"testcase\" not in page.title and \"Module:User:\" not in page.title \\\n and page.title.split(\"/\")[0] not in modules_names:\n try:\n # Not search submodules\n modules_names.append(page.title.split(\"/\")[0])\n # Get module lua content\n for module in site.iterate(\"parse\", pageid=page.pageid, prop=\"wikitext\"):\n data = {'title': module.title, 'pageid': module.pageid, 'size': len(module.wikitext)}\n pages.append(data)\n print(f\"{module.title} successfully added\")\n save_script(module)\n # Wait 1 second\n time.sleep(1)\n except:\n # Saves pages that have errors\n error_pages.append(page.pageid)\n print(f\"An error occurred while downloading the module: {module.title}\")\n return pages, error_pages", "def getPage(self, pageNum):\n pass", "def show_pages():\n\n pages_response = webcli_command('pages')\n for page in pages_response:\n print(page)", "def find_page_numbers(self, cand_lines_by_page):\r\n\r\n total_logical_pages = len(cand_lines_by_page)\r\n\r\n # step 1: assume each line is a line from a page\r\n # a page number suppose to be unique to the correspoding page\r\n # for a seen number, here to find which pages it appears in\r\n physical_to_logical_mapping = {} # physical pgn ==> a set of logical pgn\r\n logical_to_physical_mapping = {} # logical pgn ==> a sorted list of physical pgn\r\n for pgn, line_text in cand_lines_by_page.items():\r\n physical_numbers = re.findall(r\"\\d+\", line_text)\r\n physical_pgn_list = []\r\n if physical_numbers:\r\n # 
all physical numbers in this line\r\n for num_str in physical_numbers:\r\n num = int(num_str)\r\n physical_pgn_list.append(num)\r\n if num in physical_to_logical_mapping.keys():\r\n physical_to_logical_mapping[num].add(pgn)\r\n else:\r\n physical_to_logical_mapping[num] = {pgn}\r\n\r\n logical_to_physical_mapping[pgn] = sorted(physical_pgn_list)\r\n\r\n self.logger.debug(\"logical_to_physical_mapping:\")\r\n self.logger.debug(logical_to_physical_mapping)\r\n\r\n self.logger.debug(\"physical_to_logical_mapping:\")\r\n self.logger.debug(physical_to_logical_mapping)\r\n\r\n # step 2: the numbers appearing in only 1 page is likely the page number,\r\n # because a page number has to be unique across the pages (they should not appear more than once)\r\n # to allow some resilence to noices, a real page number might happen to occur more than once\r\n uniq_numbers = []\r\n max_occur = 3\r\n for num, pg_list in physical_to_logical_mapping.items():\r\n if len(pg_list) <= max_occur:\r\n uniq_numbers.append(num)\r\n\r\n self.logger.debug(\"len(uniq_numbers) = \" + str(len(uniq_numbers)))\r\n\r\n # step 3: sort the \"unique\" numbers so that they are in ascending order\r\n uniq_numbers.sort()\r\n\r\n self.logger.debug(\"sorted uniq_numbers:\")\r\n self.logger.debug(uniq_numbers)\r\n\r\n # step 4: find all candidate intervals that are roughly continuous and increasing\r\n cand_sequences = []\r\n i = 0\r\n j = 0\r\n n = len(uniq_numbers)\r\n min_len = max(3, round(total_logical_pages * 0.6)) # should cover >60% pages\r\n max_diff = 3 # diff btw two consecutive numbers should be no more than 3\r\n while i < n:\r\n j = i+1\r\n while j<n and uniq_numbers[j] - uniq_numbers[j-1] <= max_diff \\\r\n and self.can_be_valid_pair(\r\n physical_to_logical_mapping[uniq_numbers[j-1]], # logical pg number set1\r\n physical_to_logical_mapping[uniq_numbers[j]], # logical pg number set2\r\n max_diff\r\n ):\r\n j += 1\r\n\r\n if (j-i) > min_len:\r\n cand_sequences.append(uniq_numbers[i:j]) \r\n\r\n i = j\r\n\r\n self.logger.debug(\"cand_sequences:\")\r\n self.logger.debug(cand_sequences)\r\n\r\n # step 5: from the candidate sequence, pick the one whose sum-diff \r\n # (sum of diff of current pgn - previous pgn) is closest to the total logical pages. 
\r\n # e.g., if the pdf has 9 pages, then the sum-diff should ideally be 9-1 = 8.\r\n # if tie, pick the longer sequence (i.e., the one that covers more pages)\r\n closest_seq = []\r\n min_dist = 99999\r\n for seq in cand_sequences:\r\n sum_diff = seq[-1] - seq[0]\r\n dist = abs(sum_diff - total_logical_pages)\r\n if dist < min_dist:\r\n closest_seq = seq\r\n min_dist = dist\r\n elif dist == min_dist:\r\n # on tie, pick the one has smaller starting page#\r\n if seq[0] < closest_seq[0]:\r\n closest_seq = seq\r\n \r\n self.logger.debug(\"closest_seq:\")\r\n self.logger.debug(closest_seq)\r\n\r\n # step 6: turn list to dictionary\r\n pgn_pairs = {}\r\n\r\n prev_num = -1\r\n for pgn, physical_pgn_list in logical_to_physical_mapping.items():\r\n for num in closest_seq:\r\n if num in physical_pgn_list and num > prev_num:\r\n pgn_pairs[pgn] = num\r\n prev_num = num\r\n break\r\n\r\n self.logger.debug(\"pgn_pairs:\")\r\n self.logger.debug(pgn_pairs)\r\n\r\n return pgn_pairs", "def getNumberList(n):\n\tresult = []\n\ti = 0\n\twhile i < n:\n\t\tresult.append(i)\n\t\ti += 1\n\treturn result", "def pages():\n\n yield None", "def get_next_pages(self, driver):\n return driver.find_elements_by_xpath('//*[@class=\"PagerStyle\"]/td/table/tbody/tr/td/a')", "def listPages(self, space=None):\n return map(lambda i: i['name'], self.listPageInfo(space)) #pylint: disable=W0141", "def page_commented_novel_list(self, page=1):\n self.correct_page_num(page)\n return 'http://mypage.syosetu.com/mypagenovelhyoka/list/userid/' \\\n + self.id + '/index.php?p=' + str(page)", "def get_page(data, page):\n begin = page * 20\n end = page * 20 + 20\n if begin >= len(data):\n return []\n elif end >= len(data):\n return data[begin:]\n else:\n return data[begin:end]", "def get_url_list(start_url, n=2):\n # Remove root if included \n domain = \"https://www.tripadvisor.com\"\n \n if len(start_url.split(domain)) != 1:\n start_url = start_url.split(domain)[1]\n\n # Sets url to access 'reviews' portion of page\n if start_url[-8:] != \"#REVIEWS\":\n start_url = start_url +\"#REVIEWS\"\n \n # Five reviews are displayed per page, adjust range\n n = (n - 1) * 5\n \n pages = range(0,n, 5)\n\n # Start list with start_url\n page_urls = [start_url]\n \n # Split start url to insert page numbers\n url_split = start_url.split('Reviews')\n\n # Generate desired pages according to pattern\n for i in pages:\n page_num = 'Reviews-or' + str(5 + i)\n next_url = url_split[0] + page_num + url_split[1]\n page_urls.append(next_url)\n\n return page_urls" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of linked pages from Omniglot's babel page.
def get_babel_pages():
    BABEL = "http://www.omniglot.com/babel/"
    babel = urllib2.urlopen(MULTILING_URLS['babel']).read()
    return [(unicode(lang.text), BABEL+lang.get('href')) for lang in \
            bs(unicode(bs(babel).findAll('ol')[0])).findAll('a')]
[ "def _wikipedia_Page_linkedPages(self):\n return [page for page in toolserver.Generators.getPagelinks(self)]", "def crawl_babel_pages(outputdir=DATADIR+\"omniglot/babel/\"):\n babel = get_babel_pages()\n # Creates output directory if it doesn't exist.\n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n for lang, page in babel:\n html = urllib2.urlopen(page).read()\n if outputdir != None:\n with codecs.open(outputdir,'w','utf8') as fout:\n print>>fout, html\n time.sleep(random.randrange(5,10))", "def pages():\n\n yield None", "def iter_page_links(self) -> Iterable[str]:\n\n r = requests.get(self.starting_url)\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n nav_bar = soup.find_all('ul', attrs={'class': 'dropdown-menu'})\n\n # get target column of list items\n pub_list = nav_bar[3]\n\n # extract links\n for li_tag in pub_list.find_all('li')[1:-1]:\n\n link = li_tag.a\n page_text = requests.get(link['href']).content\n\n # find the number of pages in the table\n soup = bs4.BeautifulSoup(page_text, features=\"html.parser\")\n tables = soup.find_all('table', attrs={'class': 'dnnFormItem'})\n pages = tables[1]\n\n if pages.a is None:\n\n yield link['href']\n else:\n page_ext = pages.find_all('a')[-1]['href'][:-1]\n num_pages = pages.find_all('a')[-1]['href'][-1]\n for page in range(1, int(num_pages) + 1):\n\n yield link['href'] + page_ext + str(page)", "def _wikipedia_Page_templatePages(self):\n return [template for template in toolserver.Generators.getTemplatelinks(self)]", "def show_pages():\n\n pages_response = webcli_command('pages')\n for page in pages_response:\n print(page)", "def get_pages() -> [List[Dict], List[int]]:\n site = wikipedia('en')\n pages = []\n modules_names = []\n error_pages = []\n # Asks 500 (max) per iteration lua modules pages for api\n for r in site.query(list='allpages', apnamespace=\"828\", aplimit=\"max\"):\n # Iterates in the results\n for page in r.allpages:\n # Check if a documentation file\n if \"/doc\" not in page.title and \"testcase\" not in page.title and \"Module:User:\" not in page.title \\\n and page.title.split(\"/\")[0] not in modules_names:\n try:\n # Not search submodules\n modules_names.append(page.title.split(\"/\")[0])\n # Get module lua content\n for module in site.iterate(\"parse\", pageid=page.pageid, prop=\"wikitext\"):\n data = {'title': module.title, 'pageid': module.pageid, 'size': len(module.wikitext)}\n pages.append(data)\n print(f\"{module.title} successfully added\")\n save_script(module)\n # Wait 1 second\n time.sleep(1)\n except:\n # Saves pages that have errors\n error_pages.append(page.pageid)\n print(f\"An error occurred while downloading the module: {module.title}\")\n return pages, error_pages", "def list_pages(ctx):\n workspace = Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename)\n print(\"\\n\".join(workspace.mets.physical_pages))", "def retrieve_listing_page_urls(self) -> List[str]:\n return [\"https://fatabyyano.net/newsface/0/\"]", "def get_all_links_pages(total_pages):\n\tbase_url = 'http://torrentik.co'\n\tpage_part = '/page/'\n\tlinks_pages = []\n\tfor i in range(1, 2): # int(total_pages) + 1\n\t\turl = base_url + page_part + str(i)\n\t\tlinks_pages.append(url)\n\treturn links_pages", "def import_pages(self):\n return Tables.Import.PageTable(self.P.pages_dump).read()", "def pageNumbers(self):\n\t pages = self.listFolderContents(contentFilter={\"portal_type\":\"Broadsheet\"})\n\t linkPages = []\n\t for page in pages:\n\t\tlinkPages.append(page.pageNumber())\n\n\t 
return linkPages", "def _get_pages(self):\n print(\"Read pages.\")\n pages = []\n page_path = os.path.join(BASE_DIR, self.paths['page'])\n for path, subdirs, files in os.walk(page_path):\n for filename in files:\n if os.path.splitext(filename)[1] == '.md':\n page = os.path.join(path, filename)\n print(\".\", end=\"\")\n with open(page, 'r', encoding='utf-8') as f:\n pages.append(Page(f.read()))\n print(\" {} pages found.\\n\".format(len(pages)))\n return pages", "def read_list_page(page):\n\n page_content = read_page(page)\n list_ = page_content.split(\"\\n\")\n return list_", "def _parse_urls_from_page(content: str, debug=False) -> List[str]:\n n_pages = 0\n links = []\n try:\n html_parser = BeautifulSoup(content, \"html.parser\")\n for film_box in html_parser.findAll(\"a\", class_=\"film-small\"):\n links.append(film_box[\"href\"])\n return links;\n except Exception as e:\n if debug:\n print(\"_parse_urls_from_page()\", repr(e))\n return links", "def get_num_pages():\n NUMBERS = \"http://www.omniglot.com/language/numbers/\"\n num = urllib2.urlopen(MULTILING_URLS['num']).read()\n return list(set([NUMBERS+str(re.findall(AHREF_REGEX,str(i))[0]) \\\n for i in bs(num).findAll('dd')]))", "def list_pages():\n\n try:\n pages = helpers.read_json(PAGES_F_PATH)\n if type(pages) is list:\n for page in pages:\n print('%s, id: %s' % (page['title'], page['id']))\n elif type(pages) is bool:\n print(messages.NO_PAGES)\n except JSONDecodeError:\n print(messages.CORRUPTED_PAGES_F_ERR)\n except Exception as ex:\n print('ERROR: %s' % ex)", "def listPages(self, space=None):\n return map(lambda i: i['name'], self.listPageInfo(space)) #pylint: disable=W0141", "def fetch_card_detail_links(pages=1):\n\n links = []\n\n for num_page in range(1, pages + 1):\n\n # TODO: add try expect\n\n postfix = f'?searchString=&morphology=on&search-filter=Дате+размещения&savedSearchSettingsIdHidden=/' \\\n f'&sortBy=UPDATE_DATE/&pageNumber={num_page}&sortDirection=false&recordsPerPage=_10/' \\\n f'&showLotsInfoHidden=false&active=on&archival=on&ownerIdOrg=&customerFz94id=&customerTitle=/' \\\n f'&preselectionNumber=&selectedSubjectsIdHidden=&selectedSubjectsIdNameHidden=%7B%7D&worktypeIds=/' \\\n f'&worktypeNames=&worktypeIdsParent=&priceFrom=&priceTo=&entryRkpoDateFrom=&entryRkpoDateTo=/' \\\n f'entrySvrDateFrom=&entrySvrDateTo=&expirationRkpoDateFrom=&expirationRkpoDateTo=/' \\\n f'&expirationSvrDateFrom=&expirationSvrDateTo=&publishDateFrom=&publishDateTo=&updateDateFrom=/' \\\n f'&updateDateTo=&rejectReasonIdHidden=&rejectReasonIdNameHidden=%7B%7D&customerPlace=/' \\\n f'&customerPlaceCodes='\n\n url = URL + postfix\n\n req = requests.get(url=url, headers=HEADERS)\n soup = BS(req.text, 'lxml')\n\n for elem in soup.find_all(class_=\"search-registry-entry-block\"):\n elem = elem.find(class_=\"registry-entry__header-mid__number\").find(\"a\")\n link = HOST + elem.get('href')\n links.append(link)\n return links" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Crawls Omniglot for babel stories pages and saves them in outputdir.
def crawl_babel_pages(outputdir=DATADIR+"omniglot/babel/"):
    babel = get_babel_pages()
    # Creates output directory if it doesn't exist.
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    for lang, page in babel:
        html = urllib2.urlopen(page).read()
        if outputdir != None:
            with codecs.open(outputdir,'w','utf8') as fout:
                print>>fout, html
        time.sleep(random.randrange(5,10))
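Note that, as written, codecs.open() receives the directory path itself rather than a per-page file name, so every page would be written to the same path. A Python 3 sketch of the presumed intent follows; it takes the (language, URL) pairs returned by get_babel_pages() as an argument, and both the per-language file-naming scheme and the use of requests are assumptions, not the original behaviour.

import os
import random
import time

import requests

def crawl_babel_pages_py3(pages, outputdir):
    """Save each (language, URL) Babel page as <language>.html in outputdir."""
    os.makedirs(outputdir, exist_ok=True)
    for lang, page in pages:                  # pages: list of (language name, page URL)
        html = requests.get(page).text
        # Hypothetical naming scheme: one file per language.
        outfile = os.path.join(outputdir, lang.replace(' ', '_') + '.html')
        with open(outfile, 'w', encoding='utf8') as fout:
            fout.write(html)
        time.sleep(random.randrange(5, 10))   # polite delay between requests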
[ "def crawl_omniglot(outputdir):\n homepage = urllib2.urlopen(OMNIGLOT).read()\n crawled = []\n \n for i in re.findall(AHREF_REGEX,homepage): \n if not i.startswith(\"http://\") and not i.endswith(\"/\") and \\\n not i.startswith('https://'): \n if OMNIGLOT+i not in crawled:\n print OMNIGLOT+i\n x = urllib2.urlopen(OMNIGLOT+i).read()\n filename = (OMNIGLOT+i).rpartition('/')[2]\n print filename\n print>>codecs.open(outputdir+filename,'w','utf8'), x\n time.sleep(random.randrange(5,10))\n crawled.append(OMNIGLOT+i)", "def main():\n \n url = 'https://en.wikipedia.org/wiki/2020_NBA_playoffs' \n new_url, data = get_html(url) # Read html text file\n \n plot_stats(data)", "def crawl_page(self, pvm):\n # Crawl the web page here\n saved_filename = step1(settings.DUMP_LOCATION, pvm.investigate_url.analysis_url)\n# saved_filename = crawlHelper.go(pvm.investigate_url.analysis_url)\n # Change the PhishVerdictModel state\n pvm.analysis_stage = 1\n pvm.internal_download_path = os.path.abspath(saved_filename)\n pvm.modified_date = datetime.date.today()\n pvm.save()", "def main():\n logger = logging.getLogger(__name__)\n logger.info('scrap artists names')\n artist_file = open(\"artists.txt\", \"w\")\n artists_list = scrap_artists()\n for artist in artists_list:\n artist_file.write(artist + \"\\n\")\n artist_file.close()\n\n logger.info('scrap songs lyrics')\n lyrics_file = open(\"lyrics.xt\", \"w\")\n lyrics_dict = scrap_songs_lyrics()\n for song_info, lyrics in lyrics_dict.items():\n lyrics_file.write(song_info[0] + \" |\" + song_info[1] + \"|\" + lyrics + \"|| \\n\")", "def generatedocs():\n fe_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../fastestimator')\n save_dir = os.path.join(tempfile.gettempdir(), 'fe')\n #insert project path to system path to later detect the modules in project\n sys.path.insert(0, fe_path)\n #parent directory where all the markdown files will be stored\n\n for subdirs, dirs, files in os.walk(fe_path, topdown=True):\n for f in files:\n fname, ext = os.path.splitext(os.path.basename(f))\n if not f.startswith('_') and ext == '.py':\n #if f == 'pggan.py':\n f_path = os.path.join(subdirs, f)\n mod_dir = os.path.relpath(f_path, fe_path)\n mod = mod_dir.replace('/', '.')\n if subdirs == fe_path:\n save_path = os.path.join(*[save_dir, 'fe'])\n else:\n save_path = os.path.join(*[save_dir, os.path.relpath(subdirs, fe_path)])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n mdtexts = extractmarkdown(mod, save_path)\n return save_dir", "def apollo15_lfj_scrape_index():\n lfj_base_link = 'https://web.archive.org/web/20171225232132/https://history.nasa.gov/afj/ap15fj/'\n\n headers = {'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'}\n\n # Make a soup from the page HTML\n r = requests.get(lfj_base_link, headers = headers)\n html_doc = r.text\n soup = BeautifulSoup(html_doc,\"lxml\")\n\n # Extract each link to a \"Day X\" page\n log_links = []\n \n a_s = soup.find_all('a')\n\n for a_ in a_s:\n link_text = a_.get_text()\n if 'Day ' in link_text \\\n or 'Launch and Reaching' in link_text \\\n or 'Earth Orbit' in link_text \\\n or 'Transposition' in link_text \\\n or 'SPS Troubleshooting' in link_text:\n page_name = a_.attrs['href']\n link_name = lfj_base_link + page_name\n log_links.append(link_name)\n\n if not os.path.exists(SCRAPE_DIR):\n os.mkdir(SCRAPE_DIR)\n\n # Follow those links!!!\n # Save each page to disk\n for i,link in enumerate(log_links):\n\n dest = os.path.join(SCRAPE_DIR, os.path.basename(link))\n\n if not 
os.path.exists(dest):\n\n print(\"Scraping...\")\n print(\" Link: %s\"%(link))\n print(\" Target file: %s\"%(dest))\n\n r = requests.get(link, headers=headers)\n html_doc = r.content.decode('utf-8')\n soup = BeautifulSoup(html_doc, \"lxml\")\n\n with open(dest,'w') as f:\n f.write(soup.text)\n\n print(\"Done.\\n\")\n\n else:\n\n print(\"Skipping %s, file already exists...\"%(dest))\n\n print(\"Done scraping Apollo 15 Lunar Flight Journals.\")", "def apollo15_lsj_scrape_index():\n lsj_base_link = 'https://www.hq.nasa.gov/alsj/a15/'\n lsj_base_page = lsj_base_link+'a15.html'\n\n headers = {'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'}\n\n # Make a soup from the page HTML\n r = requests.get(lsj_base_page, headers = headers)\n html_doc = r.text\n soup = BeautifulSoup(html_doc,\"lxml\")\n\n # Extract everything under \"<h2>The Journal</h2>\"\n log_links = []\n\n stuff = soup.find_all(['h2','a'])\n \n switch = False\n for s in stuff:\n if s.name=='h2':\n if s.text=='The Journal':\n switch=True\n else:\n switch=False\n if s.name=='a' and switch:\n if 'Flight Journal' not in s.text:\n link_loc = lsj_base_link+\"/\"+s.attrs['href'] \n print(link_loc)\n print(\"Found link:\")\n print(\" Target: %s\"%(link_loc))\n log_links.append(link_loc)\n\n if not os.path.exists(SCRAPE_DIR):\n os.mkdir(SCRAPE_DIR)\n\n # Follow those links!!!\n # Save each page to disk\n for i,link in enumerate(log_links):\n\n dest = os.path.join(SCRAPE_DIR, os.path.basename(link))\n\n if not os.path.exists(dest):\n\n print(\"Scraping...\")\n print(\" Link: %s\"%(link))\n print(\" Target file: %s\"%(dest))\n\n r = requests.get(link, headers=headers)\n html_doc = r.content.decode('ISO-8859-1')\n soup = BeautifulSoup(html_doc, \"lxml\")\n\n with open(dest,'w') as f:\n f.write(soup.text)\n\n print(\"Done.\\n\")\n\n else:\n\n print(\"Skipping %s, file already exists...\"%(dest))\n\n print(\"Done scraping Apollo 15 Lunar Surface Journals.\")", "def wikimedia_to_json(mediawiki_in: pathlib.Path, jsonl_out: pathlib.Path, sub_process_count: int) -> None:\n\n if jsonl_out.exists():\n jsonl_out.unlink()\n\n worker = mpb.EPTS(\n extract = _collect_articles, extract_args = (mediawiki_in),\n transform = _parse_article,\n save = _save_articles_to_jsonl, save_args = (jsonl_out),\n worker_count = sub_process_count,\n show_progress = True)\n worker.start()\n worker.join()", "def prepare_visualization_directory():\n src = html_source_path\n dst = os.path.abspath(os.path.expanduser(\"~/.netwulf/\"))\n\n # always copy source files to the subdirectory\n copy_tree(src, dst)", "def procedure():\n\n copy_content()\n generate_HTML(images=generate_imgs_src())", "def main():\n print(\"Saving single objective network figures...\")\n set_fig_style()\n\n config = get_config()\n networks_path = get_single_obj_filepath(config)\n results = load_jsonpickle(networks_path)\n\n figs_dir, extension = get_figures_params(config)\n\n thetas, n_sensors = get_all_optimisation_params(config)\n _, all_groups = get_objectives(config)\n fig_single_obj(thetas, n_sensors, results, all_groups, figs_dir, extension)\n\n theta, n_sensors = get_default_optimisation_params(config)\n fig_coverage_vs_sensors(results, theta, n_sensors, all_groups, figs_dir, extension)", "def save_page(soup, subdir=\"\"):\n page_title = soup.title.string.strip()\n\n if not subdir:\n course_title = re.search(r\"Course: ([\\w\\W]+)\", page_title)[1]\n course_path = os.path.join(\"output\", course_title)\n else:\n course_title = re.search(r\"Topic: ([\\w\\W]+)\", page_title)[1]\n 
course_path = os.path.join(\"output\", subdir, \"resources\", course_title)\n\n course_path = re.sub(r\"[~#%&*{}:<>?|\\\"-]\", \"\", course_path)\n course_path = re.sub(r\" +\", \" \", course_path)\n\n course_title = re.sub(r\"[~#%&*{}:<>?|\\\"-]\", \"\", course_title)\n course_title = re.sub(r\" +\", \" \", course_title)\n\n print(f\"Found course: {course_title}\")\n\n # Only overwrite if no subdir\n if os.path.exists(course_path):\n if not subdir:\n print(\"Course folder already found: recreating...\")\n print(\"\")\n shutil.rmtree(course_path)\n else:\n print(f\"Subdir already exists: {course_path}\")\n return course_title\n\n # Create the folder\n os.makedirs(os.path.join(course_path, \"resources\"))\n os.makedirs(os.path.join(course_path, \"img\"))\n os.makedirs(os.path.join(course_path, \"css\"))\n os.makedirs(os.path.join(course_path, \"js\"))\n\n # Save the raw course page for checking\n save_soup(soup, os.path.join(\n course_path, \"resources\", \"index_raw.html\"))\n\n # Select all links, and filter to only include links to a /resource/ suburl\n all_links = soup.find_all('a')\n resource_links = []\n\n print(\"Parsing links in file...\")\n for link in tqdm(all_links):\n try:\n if re.search(r\"/resource/|/page/|course[\\w\\W]+section=\\d+\", link.get('href')):\n resource_links.append(link)\n except TypeError:\n # The link has no destination, ignore it\n pass\n\n # Now do the same for inline images\n all_images = soup.find_all('img')\n resource_images = all_images\n\n print(\"\")\n print(\n f\"Found {len(resource_links)} resources and {len(resource_images)} images to fetch!\")\n\n print(\"Downloading external links...\")\n for resource in tqdm(resource_links):\n url = resource.get('href')\n\n # Check if a subpage instead of a normal file (only recurse once)\n is_section = re.search(r\"course[\\w\\W]+section=\\d+\", url)\n\n if subdir or not is_section:\n file_id = re.search(r\"id=(\\d+)\", url)[1]\n\n r = a.s.get(url)\n\n # Converts response headers mime type to an extension (may not work with everything)\n ext = r.headers['content-type'].split('/')[-1]\n ext = correct_extension_mimetype(ext)\n\n new_path = os.path.join(\"resources\", f\"{file_id}.{ext}\")\n\n # Open the file to write as binary - replace 'wb' with 'w' for text files\n success = False\n while not success:\n try:\n with io.open(os.path.join(course_path, new_path), 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n success = True\n\n except PermissionError:\n import time\n time.sleep(1)\n\n resource.attrs['href'] = new_path\n\n # Recurse to download new page\n else:\n q = urlparse(url).query\n q_id = re.search(r\"id=(\\d+)\", q)[1]\n q_section = re.search(r\"section=(\\d+)\", q)[1]\n\n sub_soup = fetch_course(q_id, q_section)\n sub_page_title = save_page(\n sub_soup, subdir=os.path.join(course_title))\n\n sub_path = os.path.join(\n \"resources\", sub_page_title, f\"{sub_page_title}.html\")\n resource.attrs['href'] = sub_path\n\n print(\"Downloading inline images...\")\n for resource in tqdm(resource_images):\n\n skip_request = False\n\n # Fix for url schema\n if resource.get('src').startswith(\"data:\"):\n # Source contains image data, no need to make request\n data = resource.get('src')\n skip_request = True\n\n elif resource.get('src').startswith(\"https:\") or resource.get('src').startswith(\"http:\"):\n url = resource.get('src')\n\n else:\n url = \"https:\" + resource.get('src')\n\n if not skip_request:\n r = a.s.get(url)\n\n # Try to get the filename from response headers\n filename = 
get_filename_from_cd(\n r.headers.get('content-disposition'))\n\n if skip_request:\n from binascii import a2b_base64\n\n data_separate = data.split(',')\n binary_data = a2b_base64(data_separate[1])\n ext = re.search(r\"image/(\\w+);\", data_separate[0])[1]\n\n from uuid import uuid4\n filename = str(uuid4()) + \".\" + ext\n\n if filename is None:\n from uuid import uuid4\n\n # Converts response headers mime type to an extension (may not work with everything)\n ext = r.headers['content-type'].split('/')[-1]\n ext = correct_extension_mimetype(ext)\n\n filename = str(uuid4()) + \".\" + ext\n\n new_path = os.path.join(\"img\", filename)\n\n # Open the file to write as binary - replace 'wb' with 'w' for text files\n if not os.path.exists(os.path.join(course_path, new_path)):\n with io.open(os.path.join(course_path, new_path), 'wb') as f:\n\n if not skip_request:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n\n else:\n f.write(binary_data)\n\n resource.attrs['src'] = \"img/\" + filename\n\n print(\"Getting remaining resources (CSS, favicon etc)\")\n\n # Download favicon\n icon_link = soup.find(\"link\", rel=\"shortcut icon\")\n r = a.s.get(icon_link['href'])\n path = os.path.join(\"img\", \"favicon.ico\")\n with io.open(os.path.join(course_path, path), 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n\n icon_link['href'] = \"img/favicon.ico\"\n\n # Get JS files\n for resource in soup.find_all(\"script\"):\n if resource.attrs.get(\"src\"):\n # if the tag has the attribute 'src'\n url = urljoin(url, resource.attrs.get(\"src\"))\n\n r = a.s.get(url)\n\n # Try to get the filename from response headers\n filename = get_filename_from_cd(\n r.headers.get('content-disposition'))\n\n if filename is None:\n from uuid import uuid4\n filename = str(uuid4()) + \".js\"\n\n new_path = os.path.join(\"js\", filename)\n\n # Open the file to write as binary - replace 'wb' with 'w' for text files\n if not os.path.exists(os.path.join(course_path, new_path)):\n with io.open(os.path.join(course_path, new_path), 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n\n resource.attrs['src'] = \"js/\" + filename\n\n # Get CSS files\n for resource in soup.find_all(\"link\"):\n if resource.attrs.get(\"href\"):\n # if the link tag has the 'href' attribute\n url = urljoin(url, resource.attrs.get(\"href\"))\n\n r = a.s.get(url)\n\n if r.status_code != 200:\n continue\n\n from uuid import uuid4\n filename = str(uuid4()) + \".css\"\n\n new_path = os.path.join(\"css\", filename)\n\n # Open the file to write as binary - replace 'wb' with 'w' for text files\n if not os.path.exists(os.path.join(course_path, new_path)):\n with io.open(os.path.join(course_path, new_path), 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n\n resource.attrs['href'] = \"css/\" + filename\n\n save_soup(soup, os.path.join(course_path, f\"{course_title}.html\"))\n\n return course_title", "def make_output_paths(self):\n if jedli_global.ask_output_filename:\n self.new_file = asksaveasfilename(title= \"Choose a name for the output file\",\n initialdir=jedli_global.output_folder,\n filetypes=[(\"html files\", \".html\")])\n if not self.new_file.endswith(\".html\"):\n self.new_file += \".html\"\n self.main_dir, self.base_filename = os.path.split(self.new_file)\n self.base_filename = os.path.splitext(self.base_filename)[0]\n else:\n if self.checklist_name is not None:\n self.base_filename = self.checklist_name.split(\"/\")[-1].split(\".\")[0]\n else:\n self.base_filename = re.sub(\"\\W+\", \"\", 
self.words[0])\n self.main_dir = self.make_dir(jedli_global.output_folder, self.base_filename)\n \n if self.index_type == \"Context Search\":\n self.new_file = os.path.join(self.main_dir, \"{}_in_context.html\".format(self.base_filename)) \n else:\n self.new_file = os.path.join(self.main_dir, \"index_{}.html\".format(self.base_filename))\n \n self.data_dir = self.make_dir(self.main_dir, \"data\")\n self.json_pth = os.path.join(self.data_dir, \"{}_datecount.json\".format(self.base_filename))\n self.graph_pth = os.path.join(self.data_dir, \"{}_graph.html\".format(self.base_filename))\n self.table_pth = os.path.join(self.data_dir, \"{}_table.html\".format(self.base_filename))", "def main(self):\n rwkos.make_dir(self.dst_path)\n self.gdoc_finder.main()\n self.gdoc_down()\n self.nb_finder.main()\n self.copy_nbs()\n self.clean_up_md_convert()\n self.pdf_finder.main()\n self.copy_pdfs()", "def begin_preview(self, output_dir: str):", "def animate_datapack(datapack,output_folder,num_processes,**kwargs):\n try:\n os.makedirs(output_folder)\n except:\n pass\n\n if num_processes is None: \n num_processes = psutil.cpu_count()\n\n if isinstance(datapack,DataPack):\n datapack = datapack.filename\n\n# with DataPack(datapack) as datapack_fix:\n# datapack_fix.add_antennas(DataPack.lofar_array)\n\n args = []\n for i in range(num_processes):\n args.append((datapack,slice(i,None,num_processes),kwargs,output_folder))\n with futures.ProcessPoolExecutor(max_workers=num_processes) as executor:\n jobs = executor.map(_parallel_plot,args)\n results = list(jobs)\n plt.close('all')\n make_animation(output_folder,prefix='fig',fps=4)", "def test_export_notebook(self):\n (output, resources) = HTMLExporter(template_name=\"lab\").from_filename(self._get_notebook())\n assert len(output) > 0", "def wiki_page_source(request, slug):\n try:\n page = WikiPage.objects.get(slug=slug)\n except WikiPage.DoesNotExist:\n raise Http404\n \n response = HttpResponse(mimetype=\"text\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % slug\n\n response.write(page.content)\n\n return response", "def serial_process_story(in_dir, out_dir, batch_size=1000):\n # 92,579 stories\n stories = os.listdir(in_dir)\n ct = 0\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n res = []\n fname_ct = 0\n for key in stories:\n ct += 1\n story_file = os.path.join(in_dir, key)\n article, summary = get_art_abs(story_file)\n res.append((key, article, summary))\n if ct % batch_size == 0:\n fname_ct += 1\n fname = os.path.join(out_dir, \"%s.pkl\" % fname_ct)\n print('writing %s stories to %s' % (ct, fname))\n with open(fname, 'wb') as f:\n pickle.dump(res, f)\n res = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields source and translation sentences from the clean Omniglot tarball.
def phrases(intarfile=parentddir+'/data/omniglot/omniglotphrases.tar', \
            onlysource=False):
    for infile in read_tarfile(intarfile):
        language = infile.split('/')[-1].split('-')[1].split('.')[0].split('_')[0]
        with codecs.open(infile,'r','utf8') as fin:
            for line in fin.readlines():
                sentence, translation = line.strip().split('\t')
                if onlysource and sentence:
                    yield language, sentence.strip()
                else:
                    yield language, sentence, translation
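read_tarfile() is not defined anywhere in this section. A plausible minimal helper, offered purely as an assumption about what it does (extract the archive and yield the member file paths), could look like this:

import os
import tarfile
import tempfile

def read_tarfile(intarfile):
    """Extract a tar archive to a temp directory and yield paths of its regular files."""
    tmpdir = tempfile.mkdtemp()
    with tarfile.open(intarfile) as tar:
        tar.extractall(tmpdir)
        members = tar.getnames()
    for member in members:
        path = os.path.join(tmpdir, member)
        if os.path.isfile(path):
            yield path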
[ "def iter_sentences(self):\n _, filename_tail = split(self.filename)\n filename_base, _ = splitext(splitext(filename_tail)[0])\n with tarfile.open(self.filename, \"r:gz\") as tar:\n sentence_filename = join(filename_base, filename_base +\n '-sentences.txt')\n try:\n fid = tar.extractfile(sentence_filename)\n except KeyError:\n # Try another name\n sentence_filename = filename_base[:-5] + '-sentences.txt'\n fid = tar.extractfile(sentence_filename)\n\n for line in fid:\n yield line.decode('utf-8').split('\\t')[1].strip()", "def texts(self):\n for _, element in etree.iterparse(self.wiki_dump_file):\n if 'text' in element.tag and type(element.text) == str:\n yield self.tokenize_lemmatize(element.text)\n element.clear()\n else:\n element.clear()", "def preprocess(self):\n for key in self.markdown.keys():\n # data goes to this file \n f = open(key + \".txt\", \"wb\")\n # clean the data up before writing to file\n largeString = \"\\n\".join(self.markdown[key])\n sentences = self.get_sentences(largeString)\n for sentence in sentences:\n x = self.remove_chars(sentence) \n y = self.tokenize_punc(x)\n # write data to file sentence by sentence\n f.write(y.lstrip() + '\\n')\n f.close()", "def vrt2lists():\n corpus_folder = os.path.join('data', 'corpora', 'ylenews-sv-2012-2018-s-vrt',\n 'vrt')\n corpus = []\n tag_corpus = []\n files = list(os.walk(corpus_folder))[0][2]\n for file in files:\n with open(os.path.join(corpus_folder, file), encoding='utf8') as f:\n data = f.read().split('</sentence>')\n for sent in data:\n sentence = []\n tag_sentence = []\n items = [element.split('\\t') for element in sent.split('\\n')]\n for item in items:\n if len(item) == 8:\n word = item[0]\n tag = item[3]\n #sentence.append((word, tag))\n sentence.append(word)\n tag_sentence.append(tag)\n if len(sentence) > 1 and len(sentence) == len(tag_sentence):\n corpus.append(sentence)\n tag_corpus.append(tag_sentence)\n\n \n # Save the corpora\n with open(os.path.join('data','corpora','Yle_sv.pkl'), 'wb') as f:\n pickle.dump(corpus, f, 4)\n \n with open(os.path.join('data','corpora','Yle_sv_pos.pkl'), 'wb') as f:\n pickle.dump(tag_corpus, f, 4)\n\n #with open(os.path.join('data','corpora','Yle_sv_words_tags.pkl'), 'wb') as f:\n #pickle.dump(corpus, f, 4)", "def clean_data():\n print('Cleaning data...')\n files = listdir(raw_data_path)\n files.remove('Readme.txt')\n\n # Remove any exported clean files if they exist\n if os.path.exists(clean_data_path):\n rmtree(clean_data_path)\n os.mkdir(clean_data_path)\n\n omit_symbols = ('*', '[t]', '##')\n\n # To detect one or more annotations like [-1]\n pattern = re.compile('\\[[+-]{1}[0-9]\\]')\n data = ''\n labels = ''\n\n for file in files:\n with open(os.path.join(raw_data_path, file), 'rb') as in_file:\n for i, line_bytes in enumerate(in_file):\n line = line_bytes.decode('utf-8')\n # Omit comment lines or review titles\n if len(line) < 2 or line.startswith(omit_symbols):\n continue\n\n # A review has one or multiple <opinion>[(+/-)<strength>]\n # followed by ##<review>. 
Find beginning of review\n beginning = line.find('##')\n opinion = line[:beginning]\n review = line[beginning + 2:]\n\n # Extract opinion strengths\n values = re.findall(pattern, opinion)\n\n # Some (very few, ~ 3) reviews have annotation mistakes\n if len(values) == 0:\n continue\n\n # Sum all opinion strengths and binarize result\n net_value = sum(map(lambda x: int(x[1:-1]), values))\n sentiment = 1 if net_value >= 0 else 0\n\n # Convert to lower case before saving\n data += review.lower()\n labels += str(sentiment) + '\\n'\n\n if i == 10:\n break\n\n data_file = os.path.join(clean_data_path, 'data.txt')\n with open(data_file, 'w') as out_file:\n out_file.write(data)\n print('Saved {}'.format(data_file))\n\n labels_file = os.path.join(clean_data_path, 'labels.txt')\n with open(labels_file, 'w') as out_file:\n out_file.write(labels)\n print('Saved {}'.format(labels_file))\n\n return data_file, labels_file", "def iter_sentences(self):\n self.download()\n for filename in FILENAMES:\n full_filename = join(self.data_directory(), filename)\n lcc_file = LCCFile(full_filename)\n for sentence in lcc_file.iter_sentences():\n yield sentence", "def Extraction (self):\n with open(self.corpus, 'r') as f:\n line = True\n while line:\n line = f.readline()\n if TAG_START_PAGE in line:\n line = f.readline()\n if ':' not in line:\n #valid page\n word = line[line.index(TAG_START_TITLE) + len(TAG_START_TITLE):line.index(TAG_END_TITLE)] \n #loop until found start tag\n while TAG_START_TRAD not in line and TAG_END_PAGE not in line:\n line = f.readline ()\n# print (line)\n if TAG_END_PAGE in line:\n continue\n #Now start extracting traductions\n while line.strip() != '':\n if line.startswith(TAG_START_LANG) and TAG_END_LANG in line:\n lang = line[len(TAG_START_LANG):line.index(TAG_END_LANG)]\n if '|' in lang:\n lang = lang[:lang.index('|')]\n #first hyper filter\n line = re.sub(HYPER_FILTER,']]',line)\n #traductions extraction\n trad = [t[2:-2] for l in line.split(',') for t in re.findall(PATTERN_TRAD, l) if len(t.split()) > 0]\n #fine filter\n traductions = []\n for t in trad: \n if t.startswith('[['):\n t = t[2:]\n if ']]' in t:\n while ']]' in t and '[[' in t:\n traductions.append(t[:t.index(']]')])\n t = t[t.index('[[')+2:]\n if ']]' in t:\n traductions.append(t[:t.index(']]')])\n elif '[[' in t:\n traductions.append(t[t.index('[[')+2:])\n else:\n traductions.append(t)\n else:\n traductions.append(t) \n #clear non-traductions\n for t in traductions:\n for exclude in self.exclude_Tags :\n if exclude in t:\n traductions.remove(t)\n break\n print (word, self.lang, lang, traductions)\n with open(self.csv, 'a') as csv:\n for t in traductions:\n if len(t.strip()) > 0:\n line = ''.join([self.lang, SEP_CSV, word, SEP_CSV, lang, SEP_CSV, t]) + '\\n'\n csv.write (line)\n line = f.readline ()\n continue", "def clean_about_files(\n dest_dir=THIRDPARTY_DIR,\n):\n local_packages = get_local_packages(directory=dest_dir)\n for local_package in local_packages:\n for local_dist in local_package.get_distributions():\n local_dist.load_about_data(dest_dir=dest_dir)\n local_dist.set_checksums(dest_dir=dest_dir)\n\n if \"classifiers\" in local_dist.extra_data:\n local_dist.extra_data.pop(\"classifiers\", None)\n local_dist.save_about_and_notice_files(dest_dir)", "def showSomeTransformedSentences(data_pipe):\n for sources,targets in data_pipe:\n if sources[0][-1] != 0:\n continue # Just to visualize padding of shorter sentences\n for i in range(4):\n source = \"\"\n for token in sources[i]:\n source += \" \" + 
source_index_to_string[token]\n target = \"\"\n for token in targets[i]:\n target += \" \" + target_index_to_string[token]\n print(f\"Source: {source}\")\n print(f\"Traget: {target}\")\n break", "def load_generator_coca_corpus(ignore_stopwords, coca_path=\"data/static/COCA/corpus_files/\"):\n eos_regex = re.compile(r\"(!|\\.|\\?)\")\n ignore_regex = re.compile(r\"@\")\n later_re = r\"\\d+(_\\w+)+\\.txt\"\n \n if ignore_stopwords:\n stop_ws = set(stopwords.words('english'))\n ignore = stop_ws.union(set([\"#\"]))\n else:\n ignore = set([])\n\n for filename in os.listdir(coca_path):\n if \".txt\" not in filename:\n continue\n with open(coca_path+filename, 'r', encoding='ascii', errors=\"ignore\") as coca_file:\n sentence = []\n for line in coca_file:\n split_line = line.strip().split('\\t')\n if re.match(later_re, filename):\n if len(split_line) < 5: \n continue\n token = split_line[2].lower()\n pos_tag = split_line[4].strip()\n else:\n if len(split_line) < 3: \n continue\n token = split_line[0].lower()\n pos_tag = split_line[2].strip()\n \n if not re.match(ignore_regex, token) and token not in ignore: \n sentence.append((token, pos_tag))\n if re.match(eos_regex, token):\n sentence = LineWithPos(sentence)\n yield sentence \n sentence = []", "def get_sentences_from_unparsed_text(doc, save_in_dir):\n\n # Delete previous parsing results (if existing)\n if os.path.exists(config.PARSE_RESULTS_PATH+'/coref.conll'):\n os.remove(config.PARSE_RESULTS_PATH+'/coref.conll')\n if os.path.exists(config.PARSE_RESULTS_PATH+'/coref.html'):\n os.remove(config.PARSE_RESULTS_PATH+'/coref.html')\n \n input_type = 'cat'\n CorZu_type = 'CorZu'\n \n # Command line string\n # Parse document and store results in /CorZu_results \n cmd = \"%(type)s %(filename)s | \" \\\n \"python %(parse_path)s/ParZu_NEW/parzu -q -o conll > \"\\\n \"%(parse_res_path)s/parsed.conll && \"\\\n \"python %(parse_path)s/%(corzu)s/extract_mables_from_conll.py \"\\\n \"%(parse_res_path)s/parsed.conll > \"\\\n \"%(parse_res_path)s/markables.txt && \"\\\n \"python %(parse_path)s/%(corzu)s/corzu.py \"\\\n \"%(parse_res_path)s/markables.txt \"\\\n \"%(parse_res_path)s/parsed.conll > \"\\\n \"%(parse_res_path)s/coref.conll \"\\\n \"&& python %(parse_path)s/%(corzu)s/conll_to_html.py \"\\\n \"%(parse_res_path)s/coref.conll > \"\\\n \"%(parse_res_path)s/coref.html\" % {'corzu':CorZu_type, 'type':input_type, 'filename': doc,'parse_path':config.PARSER_PATH, 'parse_res_path':config.PARSE_RESULTS_PATH}\n \n\n # Execute\n process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n stdout, stderr = process.communicate()\n \n # Catch parsing errors from ParZu or CorZu by checking for output files\n if not os.path.isfile(config.PARSE_RESULTS_PATH+'/parsed.conll'):\n raise IOError('Sorry, CorZu failed. Coref.conll file does not exist. Try another document.')\n else:\n with open(config.PARSE_RESULTS_PATH+'/parsed.conll', \"r\") as infile:\n infile = infile.read()\n if len(infile)<1:\n raise IOError('Sorry, ParZu failed. No parsing results.')\n \n if not os.path.isfile(config.PARSE_RESULTS_PATH+'/coref.conll'):\n raise IOError('Sorry, CorZu failed. Coref.conll file does not exist. 
Try another document.')\n \n \n # Open the parsed result file, split at sentence boarders and get single sentences\n with open(config.PARSE_RESULTS_PATH+'/coref.conll', \"r\") as infile:\n infile = infile.read()\n sentences = infile.split('\\n\\n')[:-1]\n \n # If filename for saving is given, save parsing results\n if save_in_dir:\n shutil.copy2(config.PARSE_RESULTS_PATH+'/coref.conll', save_in_dir)\n \n return sentences", "def create_graphs_for_texts(source_dir, target_dir):\n \n # Get directory\n directory = source_dir \n \n print('Source file:', source_dir)\n \n # Get list of all files in dir\n files = os.listdir(\"./\"+directory)\n \n # Create target dir if it does not exist yet \n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n \n is_parsed = True\n \n # Set cunks of N sentences\n N_SENTS = 50\n \n # Max number of chunk samples per file\n MAX_SAMPLES = 50\n \n \n for f in files:\n \n # Remove file ending (.txt)\n short_filename = target_dir+f[:-4]\n short_filename = re.sub(r'\\..$','', short_filename) \n \n # Full filename with directory\n filename = directory+f\n \n print('\\t'+f)\n \n # Get all sentences from path\n sentences = get_sentences(filename, is_parsed)\n n_sentences = len(sentences)\n\n # If sentence chunk number is not met, discard\n if n_sentences<N_SENTS:\n continue\n \n # Number of possible chunk splits \n splits = n_sentences//N_SENTS\n \n # For each split\n for i in range(splits):\n \n # Break if max number of samples is reached\n if i >= MAX_SAMPLES:\n break\n \n # Sentences for this chunk \n chunk_sents = sentences[i*N_SENTS:(i+1)*N_SENTS]\n\n # Create filename for this chunk\n split_filename = short_filename+'_'+str(i+1)\n\n # Compute coherence measure and save entity graph as numpy array\n get_coherence_measure(chunk_sents, return_details=False, filename=split_filename)", "def generatedocs():\n fe_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../fastestimator')\n save_dir = os.path.join(tempfile.gettempdir(), 'fe')\n #insert project path to system path to later detect the modules in project\n sys.path.insert(0, fe_path)\n #parent directory where all the markdown files will be stored\n\n for subdirs, dirs, files in os.walk(fe_path, topdown=True):\n for f in files:\n fname, ext = os.path.splitext(os.path.basename(f))\n if not f.startswith('_') and ext == '.py':\n #if f == 'pggan.py':\n f_path = os.path.join(subdirs, f)\n mod_dir = os.path.relpath(f_path, fe_path)\n mod = mod_dir.replace('/', '.')\n if subdirs == fe_path:\n save_path = os.path.join(*[save_dir, 'fe'])\n else:\n save_path = os.path.join(*[save_dir, os.path.relpath(subdirs, fe_path)])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n mdtexts = extractmarkdown(mod, save_path)\n return save_dir", "def processFiles(fileList):\n all_text = []\n for iFile in fileList:\n with gzip.open(iFile) as f:\n tree = etree.fromstring(f.read())\n text = tree.xpath(\"//DOC[@type='story']/TEXT/P/text()\")\n text = [p for p in text if p]\n all_text = all_text + text\n print(*all_text, sep = '\\n')", "def clean_up_output():\n yield\n if os.path.isdir('output'):\n rmtree('output')", "def each_cy_in_bento(bento_file='bento.info'):\n with open(bento_file) as f:\n for line in f:\n line = line.strip()\n if line.startswith('Extension:'):\n path = line.lstrip('Extension:').strip()\n yield path", "def gutenberg_preprocess():\n if not check_local_data():\n raise FileNotFoundError(\"The dataset does not exist in .data.\" +\n \" Call download_gutenberg, or relocate file to .data\")\n\n 
metadata = readmetadata()\n\n cleaned = dict()\n\n for k, v in metadata.items():\n \n if v[\"language\"] is None:\n continue\n elif \"en\" not in v[\"language\"]:\n continue\n \n if v[\"title\"] is None:\n continue\n\n book_info = dict()\n book_info[\"title\"] = v[\"title\"]\n book_info[\"subjects\"] = v[\"subjects\"]\n cleaned[k] = book_info\n \n import pickle\n\n with open(op.join(op.dirname(__file__), \"data\", \"titles.pickle\"), \"wb+\") as w:\n pickle.dump(cleaned, w)", "def signature_lines(cls, package_lines):\n sig = re.compile(\"[Ss]ignature\")\n for line in package_lines:\n if sig.search(line):\n yield line.split(\"#\", 1)[-1].lstrip()", "def vrt2lists_fi():\n corpus_folder = os.path.join('data', 'corpora', 'wikipedia-fi-2017-src',\n 'wikipedia-fi-2017-src')\n corpus = []\n tag_corpus = []\n files = list(os.walk(corpus_folder))[0][2]\n for file in files:\n with open(os.path.join(corpus_folder, file), encoding='utf8') as f:\n data = f.read().split('</sentence>')\n for sent in data:\n sentence = []\n tag_sentence = []\n items = [element.split('\\t') for element in sent.split('\\n')]\n for item in items:\n if len(item) == 10:\n word = item[1]\n tag = item[3]\n #sentence.append((word, tag))\n sentence.append(word)\n tag_sentence.append(tag)\n if len(sentence) > 1 and len(sentence) == len(tag_sentence):\n corpus.append(sentence)\n tag_corpus.append(tag_sentence)\n\n \n # Save the corpora\n with open(os.path.join('data','corpora','Wikipedia_fi_2017.pkl'), 'wb') as f:\n pickle.dump(corpus, f, 4)\n \n with open(os.path.join('data','corpora','Wikipedia_fi_2017_pos.pkl'), 'wb') as f:\n pickle.dump(tag_corpus, f, 4)\n\n #with open(os.path.join('data','corpora','Wikipedia_fi_2017_words_tags.pkl'), 'wb') as f:\n #pickle.dump(corpus, f, 4)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of languages available from original data source.
def num_languages(): return len(languages())
[ "def available_languages(self):\r\n return Language.objects.filter(\r\n id__in=RLStats.objects.by_resource(\r\n self\r\n ).order_by().values('language').query\r\n )", "def get_country_count():\r\n\r\n lines = country_pop.split('\\n')\r\n return len(lines)-1\r\n return len(country_pop.split('\\n'))-1", "def get_num_alternatives(self):\n return len(self.alternatives.all())", "def get_countries_page_count():\n return count_query_rows(\"SELECT COUNT(*) AS 'rows' FROM paises ORDER BY nombre\")", "def get_published_languages(self):", "def number_of_players_retrieved_per_locale() -> dict:\n logging.debug('number_of_players_retrieved_per_locale()')\n\n result = {}\n for region in locations:\n for locale in locations[region]:\n DB_LOCALE_PATH = os.path.join(DB_BASE_PATH, region, locale)\n CHARACTER_PATH = os.path.join(DB_LOCALE_PATH, 'character')\n result[locale] = len(os.listdir(CHARACTER_PATH))\n return result", "def _calculate_translated_wordcount(self):\r\n wc = 0\r\n translated = SourceEntity.objects.filter(\r\n id__in=Translation.objects.filter(language=self.language,\r\n resource=self.resource, rule=5).values_list(\r\n 'source_entity_id', flat=True))\r\n wordcount = Translation.objects.filter(source_entity__in=translated,\r\n language=self.resource.source_language).aggregate(Sum('wordcount'))['wordcount__sum']\r\n self.translated_wordcount = wordcount or 0", "def available_languages(self):\n data = self._run(\n action=\"available_languages\"\n )\n return data.get('list', {})", "def get_locale_usage(locale):\n num_pages = Page.objects.filter(locale=locale).exclude(depth=1).count()\n\n num_others = 0\n\n for model in get_translatable_models():\n if model is Page:\n continue\n\n num_others += model.objects.filter(locale=locale).count()\n\n return num_pages, num_others", "def lang_count(row, lang):\n count = 0\n try:\n if \"Other\" in row['Other' + lang]:\n count += 1\n except TypeError:\n pass\n for i in courses:\n try:\n if \"(\" + lang + \")\" in row[i]:\n count += 1\n except TypeError:\n pass\n return count", "def countLang(file):\n n = 0\n with open(file, \"r\") as tweet_corpus:\n lang_count = {}\n \n for line in tweet_corpus.readlines():\n tweet = json.loads(line)\n lang_count[\"lang\"] = lang_count.setdefault(tweet[\"lang\"], 0)\n lang_count[tweet[\"lang\"]] += 1\n n += 1\n print (n)\n return lang_count", "def test_resource_available_languages(self):\r\n self.assertEqual(len(self.resource.available_languages), 3)\r\n self.assertEqual(len(self.resource.available_languages_without_teams), 2)", "def numberOfResource(self):\r\n return len(self.catalogue)", "def number_of_players_retrieved_per_region() -> dict:\n logging.debug('number_of_players_retrieved_per_region()')\n\n result = {'EU': 0, 'KR': 0, 'TW': 0, 'US': 0}\n for region in locations:\n for locale in locations[region]:\n DB_LOCALE_PATH = os.path.join(DB_BASE_PATH, region, locale)\n CHARACTER_PATH = os.path.join(DB_LOCALE_PATH, 'character')\n result[region] += len(os.listdir(CHARACTER_PATH))\n return result", "def getNbLignes(jeu):\n return len(getPlateau(jeu))", "def languages(self):\r\n return Language.objects.filter(\r\n rlstats__resource__in=self.resources.all()\r\n ).exclude(code=self.source_language.code).order_by(\r\n '-rlstats__translated').distinct()", "def GetNumberOfEventSources(self):\n return self._store.GetNumberOfEventSources()", "def test_available_langs_per_resource(self):\r\n self.assertEqual(type(self.resource.available_languages.count()), int)\r\n for user in ['anonymous', 'registered','team_member', 
'maintainer']:\r\n resp = self.client[user].get(self.urls['resource'])\r\n self.assertContains(\r\n resp, \"Available languages (%s)\" % (\r\n self.resource.available_languages.count()\r\n ))", "def test_num_datasource(provider):\n datasources_count = 0\n feeds = provider.inventory.list_feed()\n for feed in feeds:\n datasources_count += len(provider.inventory.list_server_datasource(feed_id=feed.id))\n num_datasource = provider.inventory._stats_available['num_datasource'](provider.inventory)\n assert num_datasource == datasources_count, \"Number of datasources is wrong\"", "def get_europe_page_count():\n return count_query_rows(\"SELECT COUNT(*) AS 'rows' FROM paises WHERE continente = 'Europa' ORDER BY nombre\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse data. Returns list of dicts with prices for each day in downloaded period.
def parse_data(self, initial_rates: List[bs4.element.Tag]) -> List[Dict[str, str]]: rates = [] for rate in initial_rates: day_prices = {} for value, column in zip(rate.find_all("td"), COLUMN_TITLES): text = value.text.split("$")[1] if value.text.startswith("$") else value.text if column == "date": day_prices[column] = self.convert_date(text) else: day_prices[column] = text.replace(",", "") rates.append(day_prices) return rates
[ "def _collect_price_time_series(self):\n r = requests.get(self.GRAPH_URL)\n #dictionary of 2 dictionaries, \"daily\" and \"average\"\n response = r.json()\n daily_series = TimeSeries.from_dictionary(response[\"daily\"])\n average_series = TimeSeries.from_dictionary(response[\"average\"])\n return (daily_series, average_series)", "def parse_weather(weather_data_raw):\n\n parsed_weather = {}\n parsed_weather['sunrise'] = dt.fromtimestamp(weather_data_raw.get(\"city\").get(\"sunrise\")).time()\n parsed_weather['sunset'] = dt.fromtimestamp(weather_data_raw.get(\"city\").get(\"sunset\")).time()\n\n\n for period in weather_data_raw['list']:\n # limiting the parsed weather data to weather for the next day\n if dt.fromtimestamp(period.get(\"dt\")).date() == dt.today().date() + timedelta(days=1):\n time_period = dt.fromtimestamp(period.get(\"dt\"))\n # the dict key for each period is a 2-dight 24-hour time, e.g 15 for 3.00pm\n parsed_weather[str(time_period.time())[:2]] = [\n str(time_period.time())[:2],\n round(period.get(\"main\").get(\"temp\")),\n period.get(\"weather\")[0].get(\"main\").center(15),\n str(period.get(\"clouds\").get(\"all\")).zfill(3),\n str(round(period.get(\"wind\").get(\"speed\"))).zfill(3)\n ]\n return parsed_weather", "def DownloadIntradayData(symbol, days):\n\n link = Links.MorningstarIntradayLink(symbol, days)\n print(link)\n f = urlopen(link)\n fStr = f.read()\n f.close()\n jStr = getInnerJSON(fStr)\n jObj = json.loads(jStr).get('data')\n stockData = []\n for day in jObj:\n date = getDate(day.get('date'))\n vols = getVolumeList(day.get('volume'))\n prices = getPriceList(day.get('lastPrice'))\n startMinute = day.get('startTime')\n priceVals = []\n for i in range(len(prices)):\n nMin = startMinute + i\n nPrice = prices[i]\n nVol = vols[i]\n nElem = IntradayElement(nMin, nPrice, nVol)\n priceVals.append(nElem)\n nextData = DailyData(date, priceVals)\n stockData.append(nextData)\n return HistoricalData(symbol, stockData)", "def data_scraper(interval_to_update, crypto=''):\n\n cryptos_info = crypto.split('_')\n crypto_data = pd.DataFrame()\n warnings = []\n for date in tqdm(interval_to_update):\n try:\n # Scraping data\n dataset = pd.read_csv(\n f'https://s3-eu-west-1.amazonaws.com/public-testnet.bitmex.com/data/trade/{date}.csv.gz')\n # Cleaning the name of the cryptocurrency to use it as a filter\n crypto = [crypt for crypt in cryptos_info if crypt in dataset['symbol'].unique()][0]\n crypto_data = pd.concat([crypto_data, dataset[dataset['symbol'] == crypto]])\n except:\n # Adding dates we cannot get data and return it for warnings\n warnings.append(date)\n return crypto_data, warnings, crypto", "def _get_data_post2006(date):\r\n \r\n # build the url based on date & create data container\r\n url = '{}/{}/{}/'.format(BASE_URL, date.year, str(date).replace('-','_'))\r\n data = dict(Air_Temp = [], Barometric_Press = [], Wind_Speed = [])\r\n\r\n print('Fetching online data for {}'.format(date)) \r\n for key in data.keys():\r\n try:\r\n data[key] = request.urlopen('{}{}'.format(url, key)).read().decode(encoding='utf_8').split('\\r\\n')\r\n except:\r\n raise ValueError(date) # error accessing website\r\n else:\r\n data[key].pop() # remove last item which will be an empty string \r\n\r\n # verify lengths of 3 files are equal\r\n lengths = []\r\n for k in data.keys():\r\n lengths.append(len(data[k]))\r\n if lengths[1:] != lengths[:-1]:\r\n raise ValueError(date) # file lengths do not match\r\n \r\n for i in range(len(data['Air_Temp'])):\r\n \r\n # verify timestamps are equal for 
every related entry in 3 files\r\n timestamps = []\r\n for k in data.keys():\r\n timestamps.append(data[k][i].split()[1])\r\n if timestamps[1:] != timestamps[:-1]:\r\n raise ValueError(date) # timestamps for fields do not line up\r\n \r\n yield dict(Date = data['Air_Temp'][i].split()[0],\r\n Time = data['Air_Temp'][i].split()[1],\r\n Status = 'PARTIAL' if date == date.today() else 'COMPLETE', # assume data from today is incomplete\r\n Air_Temp = data['Air_Temp'][i].split()[2],\r\n Barometric_Press = data['Barometric_Press'][i].split()[2],\r\n Wind_Speed = data['Wind_Speed'][i].split()[2])", "def getPriceBarDataLines():\n\n log.debug(\"futuresSymbol == {}\".format(futuresSymbol))\n log.debug(\"interval == {}\".format(interval))\n log.debug(\"intradayBarSize == {}\".format(intradayBarSize))\n log.debug(\"startTimestampStr == {}\".format(startTimestampStr))\n log.debug(\"endTimestampStr == {}\".format(endTimestampStr))\n\n formattedStartTimestamp = None\n if startTimestampStr != None and startTimestampStr != \"\":\n formattedStartTimestamp = \\\n convertDateStrToUrlVariableStr(startTimestampStr)\n\n formattedEndTimestamp = None\n if endTimestampStr != None and endTimestampStr != \"\":\n formattedEndTimestamp = \\\n convertDateStrToUrlVariableStr(endTimestampStr)\n\n url = \"http://barchart.com/chart.php?\" + \\\n \"sym={}\".format(futuresSymbol) + \\\n \"&style=technical\" + \\\n \"&p={}\".format(interval) + \\\n \"&d=X\"\n\n if formattedStartTimestamp != None:\n url += \"&sd={}\".format(formattedStartTimestamp)\n \n if formattedEndTimestamp != None:\n url += \"&ed={}\".format(formattedEndTimestamp)\n \n if interval == \"I\":\n url += \"&im={}\".format(intradayBarSize)\n\n url += \"&log=0\" + \\\n \"&t=BAR\" + \\\n \"&v=2\" + \\\n \"&g=1\"\n \n log.info(\"Obtaining futures price data by accessing URL: {}\".format(url))\n \n opener = urllib.request.build_opener()\n request = urllib.request.Request(url)\n opener.addheaders = defaultHttpHeaders\n \n log.info(\"Opening HTTP request.\")\n response = opener.open(request)\n \n log.info(\"Reading HTTP response.\")\n data = response.read().decode()\n \n log.info(\"Processing and reformatting the data ...\")\n \n log.debug(\" Data read from {} is: ***{}***\".format(url, data))\n\n # The data is within the map tag.\n mapOpenTagPos = data.find(\"<map\")\n mapCloseTagPos = data.find(\"</map>\")\n\n if mapOpenTagPos == -1 or mapCloseTagPos == -1 or \\\n mapOpenTagPos >= mapCloseTagPos:\n\n log.error(\"Could not get the pricebar data because \" + \\\n \"a valid <map> open and closing tag could not \" + \\\n \"be found in the HTML. \" + \\\n \"Please investigate why.\")\n shutdown(1)\n\n # Get the text between the map tag.\n mapText = data[mapOpenTagPos:mapCloseTagPos]\n \n # Class holding info related to a PriceBar as retrieved from\n # Barchart. We need this because in the HTML/Javascript, the\n # OHLC data is separate from the volume.\n class PriceBar:\n def __init__(self):\n self.timestampStr = None\n self.symbolStr = None\n self.openStr = None\n self.highStr = None\n self.lowStr = None\n self.closeStr = None\n self.volumeStr = \"0\"\n self.openIntStr = \"0\"\n \n # PriceBars. 
\n priceBars = []\n\n # Get the text between the parenthesis of the function\n # showOHLCTooltip() in the HTML/Javascript.\n # This will have the OHLC data we are seeking.\n matchesOHLC = re.findall(r\"\"\"showOHLCTooltip\\((.*?)\\)\"\"\", mapText)\n \n # Extract OHLC data and put it into PriceBar objects.\n for match in matchesOHLC:\n args = match.split(\",\")\n \n pb = PriceBar()\n pb.timestampStr = (args[2] + args[3]).strip(\" '[]\")\n pb.symbolStr = args[4].strip(\" '\")\n pb.openStr = args[5].strip(\" '\")\n pb.highStr = args[6].strip(\" '\")\n pb.lowStr = args[7].strip(\" '\")\n pb.closeStr = args[8].strip(\" '\")\n\n priceBars.append(pb)\n\n # Get the text between the parenthesis of the function\n # showStudyTooltip() in the HTML/Javascript. This will have\n # the either the volume data or the open interest data.\n matchesStudy = re.findall(r\"\"\"showStudyTooltip\\((.*?)\\)\"\"\", mapText)\n \n # Extract the volume data and add it to the PriceBar object\n # that has the same timestamp.\n for match in matchesStudy:\n args = match.split(\",\")\n\n \n timestampStr = (args[2] + args[3]).strip(\" '[]\")\n studyNameStr = args[4].strip(\" '\")\n studyValueStr = args[5].strip(\" '\")\n\n log.debug(\"timestampStr == {}, studyNameStr == {}, studyValueStr == {}\".\\\n format(timestampStr, studyNameStr, studyValueStr))\n \n if studyNameStr != \"Volume\" and studyNameStr != \"Interest\":\n continue\n elif studyNameStr == \"Volume\":\n # Flag that says we found the PriceBar that has a\n # matching timestamp and we stored the volume in it.\n storedVolumeFlag = False\n \n # Find the PriceBar with a matching timestampStr.\n for pb in priceBars:\n if pb.timestampStr == timestampStr:\n # Found the matching PriceBar.\n # Store the volumeStr.\n pb.volumeStr = studyValueStr\n \n storedVolumeFlag = True\n break\n \n if storedVolumeFlag == False:\n log.warn(\"Couldn't find a matching timestamp to \" + \\\n \"store the volume. Volume timestampStr == {}\".\\\n format(timestampStr))\n #shutdown(1)\n \n elif studyNameStr == \"Interest\":\n # Flag that says we found the PriceBar that has a\n # matching timestamp and we stored the open interest in it.\n storedOpenIntFlag = False\n \n # Find the PriceBar with a matching timestampStr.\n for pb in priceBars:\n if pb.timestampStr == timestampStr:\n # Found the matching PriceBar.\n # Store the openIntStr.\n pb.openIntStr = studyValueStr\n \n storedOpenIntFlag = True\n break\n \n if storedOpenIntFlag == False:\n log.warn(\"Couldn't find a matching timestamp to \" + \\\n \"store the openInt. \" + \\\n \"OpenInt timestampStr == {}\".\\\n format(timestampStr))\n #shutdown(1)\n \n # At this point, 'priceBars' now has a list of PriceBar\n # objects. 
We need to convert these objects into the str\n # CSV lines we wanted originally.\n \n reformattedLines = []\n for pb in priceBars:\n \n # Do some checks on the data.\n try:\n openFloat = float(pb.openStr)\n except ValueError as e:\n log.error(\"Open price str is not a number.\" + \\\n \" pb.openStr == {}\".format(pb.openStr))\n shutdown(1)\n \n try:\n highFloat = float(pb.highStr)\n except ValueError as e:\n log.error(\"High price str is not a number.\" + \\\n \" pb.highStr == {}\".format(pb.highStr))\n shutdown(1)\n \n try:\n lowFloat = float(pb.lowStr)\n except ValueError as e:\n log.error(\"Low price str is not a number.\" + \\\n \" pb.lowStr == {}\".format(pb.lowStr))\n shutdown(1)\n \n try:\n closeFloat = float(pb.closeStr)\n except ValueError as e:\n log.error(\"Close price str is not a number.\" + \\\n \" pb.closeStr == {}\".format(pb.closeStr))\n shutdown(1)\n\n try:\n volumeFloat = float(pb.volumeStr)\n except ValueError as e:\n log.error(\"Volume str is not a number.\" + \\\n \" pb.volumeStr == {}\".format(pb.volumeStr))\n shutdown(1)\n \n try:\n openIntFloat = float(pb.openIntStr)\n except ValueError as e:\n log.error(\"OpenInt str is not a number.\" + \\\n \" pb.openIntStr == {}\".format(pb.openIntStr))\n shutdown(1)\n\n # Convert the timestamp str to the format we want it in.\n timestampStr = reformatBarchartTimestampField(pb.timestampStr)\n\n # Create the CSV line.\n line = \"{},{},{},{},{},{},{}\".\\\n format(timestampStr,\n pb.openStr,\n pb.highStr,\n pb.lowStr,\n pb.closeStr,\n pb.volumeStr,\n pb.openIntStr)\n\n # Append the line.\n reformattedLines.append(line)\n \n # Sort the lines by the timestamp field.\n reformattedLines.sort(key=cmp_to_key(compLines))\n \n log.info(\"Obtained a total of {} price bars.\".format(len(reformattedLines)))\n \n if len(reformattedLines) > 0:\n log.info(\"Earliest PriceBar is: {}\".format(reformattedLines[0]))\n log.info(\"Latest PriceBar is: {}\".format(reformattedLines[-1]))\n \n return reformattedLines", "def make_prices_vecs(data):\n\n\toutput = []\n\n\t# We have the same set of dates (keys) for all companies,\n\t# so we have to sort keys only once (keys from whatever element).\n\n\tsome_el = data[0]\n\tsorted_keys = some_el[1].keys()\n\tsorted_keys.sort()\n\n\tfor elem in data:\n\t\tdate_price_dict = elem[1]\n\t\toutput.append([date_price_dict[key] for key in sorted_keys])\n\t\n\treturn output;", "def get_business_price_data():\n file_name = 'yelp_academic_dataset_business.json'\n check_data_file(file_name)\n with open(data_dir / file_name, 'rb') as file:\n return dict(get_price(line) for line in file)", "def get_prices(ticker_list, start, stop, price_types=['Close'], logger=logger):\n\n price_array = []\n num = 1\n total = len(ticker_list)\n for stock in ticker_list:\n logger.info(f'Scraping {stock} - {num} out of {total} tickers')\n try:\n price_array.append(web.DataReader(stock, 'yahoo', start, stop))\n except: # noqa\n price_array.append('NA')\n num += 1\n price_df = dict(zip(ticker_list, price_array))\n dels = []\n for key in price_df.keys():\n if type(price_df[key]) == str:\n dels.append(key)\n for key in dels:\n price_df.pop(key, None)\n price_df = pd.concat(price_df)\n price_df = price_df[['Close']].reset_index()\n price_df.columns = ['ticker', 'date'] + [i.lower() for i in ['Close']]\n return price_df", "def daily():\n\tsymbol = request.args.get(\"symbol\")\n\tif symbol == None:\n\t\traise RuntimeError(\"MissingArgument\")\n\tsymbol = symbol.upper()\n\tdata = helpers.history(symbol, \"DAILY\")\n\tif data == 
\"Error\":\n\t\treturn jsonify(\"Error\")\n\tkeys = map(int, data.keys())\n\tdates = []\n\tfor keys in data[\"Time Series (Daily)\"]:\n\t\tclose = float(data[\"Time Series (Daily)\"][keys][\"4. close\"])\n\t\tdates.append([keys, close])\n\treturn jsonify(dates)", "def prices(ticker):\n\n # Contact API\n api_key = os.environ.get(\"FMP_API_KEY\")\n url = f\"https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?serietype=line&apikey={api_key}\"\n response = urlopen(url)\n prices = response.read().decode(\"utf-8\")\n return json.loads(prices)", "def yield_json_data(url):\n r = requests.get(url)\n json_data = json.loads(r.text) \n parsed_json_data = json_data[\"series\"][0][\"data\"]\n for row in parsed_json_data:\n date = string_to_date(row[0]) \n price = float(row[1])\n yield date, price", "def data(self):\n if self._cache_data is None:\n xml_string = self._xml_file.read()\n raw_data = xmltodict.parse(xml_string)\n\n readings_entry = raw_data['feed']['entry'][3]\n\n href = readings_entry['link']['@href']\n source_id = re.sub(r'/v./', '', href)\n\n readings = readings_entry['content']['IntervalBlock']['IntervalReading']\n\n type, unit, multiplier = self._parse_type_and_unit(raw_data)\n\n if type and unit:\n self._cache_data = [\n {\n 'start_time': int(reading['timePeriod']['start']),\n 'source_id': source_id,\n 'duration': int(reading['timePeriod']['duration']),\n 'Meter Type': type,\n 'Usage Units': unit,\n 'Usage/Quantity': float(reading['value']) * multiplier,\n }\n for reading\n in readings\n ]\n else:\n self._cache_data = []\n\n return self._cache_data", "def _read_prices(self, prices):\n data = prices\n list_prices = []\n for k, v in data.get('products').items():\n if v.get('attributes').get('currentGeneration') == 'Yes':\n self.products.append({k: {\"instance_type\": v.get(\"attributes\").\n get(\"instanceType\"), \"storage\": v.get(\"attributes\").get(\n \"storage\")}})\n\n for sku_id, hourly_price in data.get('terms').get(self.AWS_DEFAULT_TERMS).items():\n for hourly_price_id, price_rates in data.get('terms').get(self.AWS_DEFAULT_TERMS).get(\n sku_id).items():\n rates = \"{}.{}.{}\".format(sku_id, self.AWS_HOURLY_TERMS_CODE, self.AWS_RATE_CODE)\n if \"{}.{}\".format(sku_id, self.AWS_HOURLY_TERMS_CODE):\n if sku_id == price_rates.get('sku'):\n if rates in price_rates.get('priceDimensions'):\n list_prices.append({\"sku\": sku_id,\n \"price\": price_rates.get('priceDimensions').get(\n rates).get('pricePerUnit').get('USD')})\n\n return list_prices", "def deserialize_gsod_daily_temp_data(self, data):\n return deserialize_gsod_daily_temp_data(data)", "def make_prices_vec_by_company(data, company_name):\n\n\t# Find stock prices for company company_name.\n\n\telem = filter(lambda x: x[0].strip() == company_name.strip(), data)\n\tsorted_keys = elem[0][1].keys()\n\tsorted_keys.sort()\n\n\tdate_price_dict = elem[0][1]\n\n\treturn [date_price_dict[key] for key in sorted_keys]", "def _grab_daily_historical_prices(self) -> StockFrame:\n\n new_prices = []\n\n # Loop through each position.\n for symbol in self.positions:\n\n # Grab the historical prices.\n historical_prices_response = self.td_client.get_price_history(\n symbol=symbol,\n period_type='year',\n period=1,\n frequency_type='daily',\n frequency=1,\n extended_hours=True\n )\n\n # Loop through the chandles.\n for candle in historical_prices_response['candles']:\n\n new_price_mini_dict = {}\n new_price_mini_dict['symbol'] = symbol\n new_price_mini_dict['open'] = candle['open']\n new_price_mini_dict['close'] = 
candle['close']\n new_price_mini_dict['high'] = candle['high']\n new_price_mini_dict['low'] = candle['low']\n new_price_mini_dict['volume'] = candle['volume']\n new_price_mini_dict['datetime'] = candle['datetime']\n new_prices.append(new_price_mini_dict)\n\n # Create and set the StockFrame\n self._stock_frame_daily = StockFrame(data=new_prices)\n self._stock_frame_daily.create_frame()\n\n return self._stock_frame_daily", "def parse_data():\n txt = read_data()\n lines = txt.splitlines()\n return list(DictReader(lines))", "def get_daily_historical_data_yahoo(ticker, start_date=(2000,1,1), end_date=datetime.date.today().timetuple()[0:3]):\n # Construct the Yahoo URL with the correct integer query parameters for start and end dates.\n # Note: Some parameters are zero-based!\n ticker_tuple = (\n ticker, \n start_date[1]-1, start_date[2], start_date[0],\n end_date[1]-1, end_date[2], end_date[0]\n )\n \n yahoo_url = \"http://ichart.finance.yahoo.com/table.csv\"\n yahoo_url += \"?s=%s&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s\"\n yahoo_url = yahoo_url % ticker_tuple\n \n # Try connecting to Yahoo Finance and obtaining the data\n # On failure, print and error message\n prices = []\n\n try:\n yf_data = requests.get(yahoo_url).text.split(\"\\n\")[1:-1]\n for y in yf_data:\n p = y.strip().split(\",\")\n prices.append(\n (datetime.datetime.strptime(p[0], '%Y-%m-%d'),\n p[1], p[2], p[3], p[4], p[5], p[6]\n ))\n except Exception as e:\n print(\"Could not download the Yahoo data: %s\" % e)\n return prices" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a devicegroup and optionally add devices to it
def create_device_group(self, devicegroup, devices=None): self._logger.debug("Create device-group: %s" % (devicegroup,)) if devices is not None: self.set_device_group(devicegroup, devices, exclusive=True) else: self.xapi.set(pandevice.XPATH_DEVICE_GROUPS + "/entry[@name='%s']" % (devicegroup,))
[ "def test_create_services_device_groups_device_group_by_device_group_name(self):\n pass", "def add_to_group(self, device_group_id):\n\n # Conditionally setup the message body, fields which have not been set will not be sent to the API.\n # This avoids null fields being rejected and allows the default value to be used.\n body_params = {}\n if self._id.value_set:\n body_params[\"device_id\"] = self._id.to_api()\n\n return self._client.call_api(\n method=\"post\",\n path=\"/v3/device-groups/{device-group-id}/devices/add/\",\n content_type=\"application/json\",\n path_params={\"device-group-id\": fields.StringField(device_group_id).to_api()},\n body_params=body_params,\n unpack=self,\n )", "def createfsgroup(self, groupname, gid=None, memberlist=None):", "def create_group():\r\n new_group = input(\"| Enter the name of the Group |\")\r\n adgroup.ADGroup.create(new_group, security_enabled=True, scope='GLOBAL')\r\n return \"| Group created |\"", "def create(self, group):\n self.request.mongo_connection.shinken.hostgroups.insert(\n group.as_dict()\n )", "def create_group(self, group, **kwargs):\n\n status, data = self.run_gerrit_command('create-group', group, **kwargs)\n\n return status, data", "def test_get_device_group(self):\n pass", "def test_retrieve_services_device_groups_device_group_device_group(self):\n pass", "def create_group(c, runner, group):\n if group_exists(c, group, runner=runner):\n return True\n\n cmd = \"groupadd {}\".format(group)\n return runner(cmd, hide=True, warn=True).ok", "def set_device_group(self, devicegroup, devices, exclusive=False):\n # TODO: Implement 'exclusive'\n self._logger.debug(\"Set device-group to '%s'\" % (devicegroup))\n if issubclass(devices.__class__, pandevice.base.PanDevice):\n devices = [devices]\n device_refresh_needed = False\n for device in devices:\n if device.serial is None or device.devicegroup is None:\n device_refresh_needed = True\n break\n if device_refresh_needed:\n self.refresh_devices_from_panorama(devices)\n # All devices have serial numbers now, so start setting devicegroup\n for device in devices:\n # If the device was in a group, and that group changed, pull it out of the current group\n if device.devicegroup != devicegroup and \\\n device.devicegroup is not None:\n self._logger.debug(\"Moving device %s out of device-group %s\" % (device.hostname, device.devicegroup))\n self.set_config_changed()\n self.xapi.delete(\n pandevice.XPATH_DEVICE_GROUPS +\n \"/entry[@name='%s']/devices\"\n \"/entry[@name='%s']\"\n % (device.devicegroup, device.serial)\n )\n device.devicegroup = None\n # If assigning device to a new group\n if devicegroup is not None:\n self.set_config_changed()\n self._logger.debug(\"Moving device %s into device-group %s\" % (device.hostname, devicegroup))\n self.xapi.set(\n pandevice.XPATH_DEVICE_GROUPS +\n \"/entry[@name='%s']/devices\" % (devicegroup,),\n \"<entry name='%s'/>\" % (device.serial,)\n )\n device.devicegroup = devicegroup", "def test_create_services_network_group_by_network_group_name(self):\n pass", "def test_update_device_group(self):\n pass", "def do_portgroup_create(cc, args):\n field_list = ['address', 'extra', 'node_uuid', 'name', 'uuid',\n 'standalone_ports_supported', 'mode', 'properties']\n fields = dict((k, v) for (k, v) in vars(args).items()\n if k in field_list and not (v is None))\n fields = utils.args_array_to_dict(fields, 'extra')\n fields = utils.args_array_to_dict(fields, 'properties')\n portgroup = cc.portgroup.create(**fields)\n\n data = dict([(f, getattr(portgroup, f, '')) for f in 
field_list])\n cliutils.print_dict(data, wrap=72, json_flag=args.json)", "def create_default_group():\n group_entry = CommandGroupEntry.objects.create()\n return group_entry", "async def create_group(self, userid, gameid):\n raise NotImplementedError()", "def create_group(self):\n group_name = self.line_grp.text().strip() # removes whitespaces from left and right\n\n if group_name == '':\n display_msg(MsgIcon.WARNING, \"Warning\", \"Please choose a group name\")\n return\n\n self.line_grp.setText(\"\")\n if self.db.insert_group(group_name): # if creation was successful:\n self.list_grp.addItem(group_name) # adds new group to the list.\n self.db.notify_stats() # update stats tab", "def create_projects_group():\n sudo('addgroup projects')", "def add_device(self, **kwargs):\n return self._make_request(\"devices/\", type=POST, **kwargs)", "def group_create_handler(sender, **kwargs):\n if kwargs[\"created\"]:\n Configuration.objects.create(group=kwargs[\"instance\"])\n Rule.objects.create(group=kwargs[\"instance\"])\n Variable.objects.create(group=kwargs[\"instance\"])", "def make_group():\n try:\n poll_id = request.args.get('poll_id')\n answer = request.args.get('answer')\n name = request.args.get('name')\n group_id = poll_id + \":\" + str(answer)\n service.create_new_group(group_id, poll_id, name)\n users = answer_service.get_users_by_answer(poll_id, answer)\n group_users_service.insert_users_to_group(users, group_id)\n except Exception as e:\n print(\"ERROR: in make_group():\")\n print(e)\n\n return make_response(\n \"Failed to make_group\\n\"\n \"poll id: \" + poll_id, 500)\n return make_response(\n \"OK, in make_group:\\n\"\n \"poll id: \" + poll_id, 200)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For Panorama, set the device group for a device
def set_device_group(self, devicegroup, devices, exclusive=False): # TODO: Implement 'exclusive' self._logger.debug("Set device-group to '%s'" % (devicegroup)) if issubclass(devices.__class__, pandevice.base.PanDevice): devices = [devices] device_refresh_needed = False for device in devices: if device.serial is None or device.devicegroup is None: device_refresh_needed = True break if device_refresh_needed: self.refresh_devices_from_panorama(devices) # All devices have serial numbers now, so start setting devicegroup for device in devices: # If the device was in a group, and that group changed, pull it out of the current group if device.devicegroup != devicegroup and \ device.devicegroup is not None: self._logger.debug("Moving device %s out of device-group %s" % (device.hostname, device.devicegroup)) self.set_config_changed() self.xapi.delete( pandevice.XPATH_DEVICE_GROUPS + "/entry[@name='%s']/devices" "/entry[@name='%s']" % (device.devicegroup, device.serial) ) device.devicegroup = None # If assigning device to a new group if devicegroup is not None: self.set_config_changed() self._logger.debug("Moving device %s into device-group %s" % (device.hostname, devicegroup)) self.xapi.set( pandevice.XPATH_DEVICE_GROUPS + "/entry[@name='%s']/devices" % (devicegroup,), "<entry name='%s'/>" % (device.serial,) ) device.devicegroup = devicegroup
[ "def test_update_device_group(self):\n pass", "def set_group(self, group: t.Optional[jank.graphics.Group]):", "def setGroups(self): \n deviceCounterFile = pyscript.app_config['devive_counter_file']\n deviceCounterMap = readYaml(deviceCounterFile) \n #Set key suffix (in order to write to the correct key in the yaml file) \n if self.user != 'default': \n keySuffix = f'_{self.user}' \n else: \n keySuffix = ''\n #Add the entities to the repective groups\n if self.lightGroup != []: \n deviceCounterMap[f'all_light_entities{keySuffix}']['entities'] = self.lightGroup\n \n writeToYaml(deviceCounterMap, deviceCounterFile)", "def test_get_device_group(self):\n pass", "def set_preset_group(self, group: params.PresetGroup, /) -> GoProResp:", "def test_create_services_device_groups_device_group_by_device_group_name(self):\n pass", "def setGroup(self, new_group):\n if new_group.screen == self:\n return\n elif new_group.screen:\n # g1 <-> s1 (self)\n # g2 (new_group)<-> s2 to\n # g1 <-> s2\n # g2 <-> s1\n g1 = self.group\n s1 = self\n g2 = new_group\n s2 = new_group.screen\n\n s2.group = g1\n g1._setScreen(s2)\n s1.group = g2\n g2._setScreen(s1)\n else:\n if self.group is not None:\n self.group._setScreen(None)\n self.group = new_group\n new_group._setScreen(self)\n hook.fire(\"setgroup\")\n hook.fire(\"focus_change\")\n hook.fire(\"layout_change\",\n self.group.layouts[self.group.currentLayout])", "def setNetGroup(addr): #status: Done, not tested\n saveNvParam(5,addr) #determine addr?\n saveNvParam(6,addr) #determine addr?\n reboot()", "def setNetGroup(addr): #status: Done, not tested\r\n saveNvParam(5,addr) #determine addr?\r\n saveNvParam(6,addr) #determine addr?\r\n reboot()", "def set_group(self, group):\n return _pal.lib.body_base_set_group(self._body_base, c.c_int(group))", "def set_group(self, group):\n try:\n supports_group = self.supports_group(group)\n if not supports_group:\n self.get_logger().error(f\"{self.name} does not support {group}!\")\n else:\n self._group = group\n except NotImplementedError:\n self.get_logger().warning(f\"{self.name} does not support restricting on groups!\")", "def ap_group(self, ap_group):\n\n self._ap_group = ap_group", "def set_bandwidth_group_for_device(self, name, controller_port, device, bandwidth_group):\n if not isinstance(name, basestring):\n raise TypeError(\"name can only be an instance of type basestring\")\n if not isinstance(controller_port, baseinteger):\n raise TypeError(\"controller_port can only be an instance of type baseinteger\")\n if not isinstance(device, baseinteger):\n raise TypeError(\"device can only be an instance of type baseinteger\")\n if not isinstance(bandwidth_group, IBandwidthGroup):\n raise TypeError(\"bandwidth_group can only be an instance of type IBandwidthGroup\")\n self._call(\"setBandwidthGroupForDevice\",\n in_p=[name, controller_port, device, bandwidth_group])", "def set_device(self, device):\n self.device = device\n self.model = self.model.to(device)", "def set_port_group(session, logger, dbinterface, port_group_name,\n check_pg_consistency=True):\n if \"port_group_name\" not in dbinterface.extra_fields:\n raise ArgumentError(\"The port group cannot be set for %s interfaces.\" %\n dbinterface.interface_type)\n\n if not port_group_name:\n if dbinterface.port_group:\n dbinterface.port_group = None\n else:\n dbinterface.port_group_name = None\n return\n\n session = object_session(dbinterface)\n\n if dbinterface.hardware_entity.model.model_type.isVirtualMachineType():\n set_port_group_vm(session, logger, dbinterface, 
port_group_name)\n else:\n set_port_group_phys(session, dbinterface, port_group_name)\n\n if check_pg_consistency:\n dbinterface.check_pg_consistency(logger=logger)", "def setiaiGroups():\n bpy.types.Scene.iai_groups = PointerProperty(type=groups_collection)", "def group(self) -> str: # TODO: Same as multiroom master?\n self._logger.info(\"Retrieving device group name...\")\n return self._device_info().get(\"GroupName\")", "def test_retrieve_services_device_groups_device_group_device_group(self):\n pass", "def set_group_properties(self,iGroupID,aGroupData):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that 'rm -q' outputs no progress indications.
def test_rm_quiet(self): bucket_uri = self.CreateBucket() key_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo') stderr = self.RunGsUtil(['-q', 'rm', suri(key_uri)], return_stderr=True) self.assertEqual(stderr.count('Removing '), 0)
[ "def test_qual_del(self):\n self.check_fails(\"Quality/error_qual_del.fastq\", 3)\n self.check_general_passes(\"Quality/error_qual_del.fastq\", 5)", "def clear_test_result(self, test):", "def __del__(self):\n # Check first that project Q garbage collector not already removed qubits\n self.eng.flush()\n if not len(self.eng.backend.cheat()[0]) == 0:\n for _ in range(self.activeQubits):\n self.measure_qubit(0)", "def test_optimize_single_reset_in_diff_qubits(self):\n qr = QuantumRegister(2, 'qr')\n circuit = QuantumCircuit(qr)\n circuit.reset(qr)\n dag = circuit_to_dag(circuit)\n\n expected = QuantumCircuit(qr)\n\n pass_ = RemoveResetInZeroState()\n after = pass_.run(dag)\n\n self.assertEqual(circuit_to_dag(expected), after)", "def test_999_delete_queue(self):\n url = self.cfg.base_url + '/queues/qtestqueue'\n\n result = http.delete(url, self.header)\n self.assertEqual(result.status_code, 204)", "def test_run_quiet(self):\n cmd = GreenTestCommand(Distribution())\n cmd.quiet = True\n cmd.ensure_finalized()\n cmd.run()\n self.assertThat(green.cmdline.sys.argv, Not(Contains(\"-vvv\")))", "def test(self):\r\n old_queue = SOCO.get_queue()\r\n track_to_remove = old_queue[-1]\r\n SOCO.remove_from_queue(len(old_queue))\r\n wait()\r\n new_queue = SOCO.get_queue()\r\n self.assertNotEqual(old_queue, new_queue, 'No difference between '\r\n 'queues before and after removing the last item')\r\n self.assertEqual(len(new_queue), len(old_queue) - 1, 'The length of '\r\n 'queue after removing a track is not lenght before - '\r\n '1')\r\n # Clean up\r\n SOCO.add_to_queue(track_to_remove['uri'])\r\n wait()\r\n self.assertEqual(old_queue, SOCO.get_queue(), 'Clean up unsuccessful')", "def test_non_existing_progress(self):\n delete_progress(pid=1337)\n self.assertEqual(Progress.objects.count(), 1)", "def test_flush_empties(queue):\n queue.flush()\n assert queue.empty()", "def test_no_qual(self):\n self.check_fails(\"Quality/error_no_qual.fastq\", 0)\n self.check_general_fails(\"Quality/error_no_qual.fastq\", 0)", "def test_parallel_multijob(self):\n dummy_file = os.path.abspath('foobar')\n with open(dummy_file, 'w') as fp:\n fp.write('delete me')\n\n result = exec_call('-n', '2', '-q', '--', 'mpirun', 'rm', dummy_file)\n self.assertNotEqual(result.returncode, EXIT_OK)", "def test_empty_dequeue(empty_q):\n assert empty_q.dequeue() is False", "def test_status_no_eligible_operations(self):\n project = self.mock_project()\n with redirect_stdout(StringIO()):\n with redirect_stderr(StringIO()):\n project.print_status()", "def clear_mutiprocess_queue(q):\n while not q.empty():\n q.get()", "def test_request_clearmemory(self):\n qaobject = Interface()\n qaobject.ask(\"What color is the cow?\")\n qaobject.teach(\"brown\")\n result = qaobject.ask(\"What color is the cow?\")\n self.assertEqual(result, 'brown')\n qaobject.request(\"Please clear memory\")\n result = qaobject.teach('blue')\n self.assertEqual(result, \"Please ask a question first\")\n result = qaobject.ask(\"What color is the cow?\")\n self.assertEqual(result, \"I don't know, please provide the answer\")", "def test_queue_remove(self):\n q1 = self.party.enqueue_song(self.user, 't123')\n q2 = self.party.enqueue_song(self.user, 't456')\n q2.upvote(self.user2)\n next_entry = self.party.dequeue_next_song()\n self.assertEquals(next_entry, q2)\n self.party.save(self.redis)\n p = Party.get(self.redis, self.party.id)\n self.assertEquals(p.queue[0].id, q1.id)", "def test_rm_trials_by_status(single_with_trials):\n trials = setup_storage()._fetch_trials({})\n n_broken = 
sum(trial.status == \"broken\" for trial in trials)\n assert n_broken > 0\n execute(\"db rm -f test_single_exp --status broken\")\n assert len(setup_storage()._fetch_trials({})) == len(trials) - n_broken", "def test_silent_no_job(capsys):\n output = Silent()\n output.log(OutputMethod.NO_JOB)\n captured = capsys.readouterr()\n assert not captured.out", "async def test_remove_all_outputs():\n src = create_channel()\n out1 = create_channel()\n out2 = create_channel()\n m = create_multiple(src)\n assert m.add_output(out1)\n assert m.add_output(out2)\n await asyncio.sleep(0.05)\n m.remove_all_outputs()\n x = 'x'\n src.offer(x)\n await asyncio.sleep(0.05)\n assert src.empty()\n assert out1.empty()\n assert out2.empty()\n src.close()\n await asyncio.sleep(0.05)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test ask_question method with password.
def test_ask_question__password(self, _): input_value = self.user_manager.ask_question('field', password=True) self.assertEqual(input_value, 'password')
[ "def test_prompting(self):\n pass", "def vqa_prompt(self, question, answer=None) -> str:", "def test_check_guess(self):\n question = Question(\"Test\", \"correct\", [\"correct\", \"incorrect\"])\n self.assertTrue(question.check_guess(\"correct\"))\n self.assertFalse(question.check_guess(\"incorrect\"))\n self.assertFalse(question.check_guess(\"not an option\"))", "def test_password_verifier_works(password):\n (input, result) = password\n print '\\n'\n print 'Inputs->' , input\n print 'Request->', result\n assert check_password(input) == result", "def test_password_prompt(self, fake_getpass, fake_stderr):\n cli_args = ['--clusters', 'myCluster', '--location', '/foo', '--username', 'pat']\n\n iiqtools_cluster_backup.parse_args(cli_args)\n\n fake_getpass.assert_called()", "def prompt_loop(self):\n user_input = \"\"\n\n while True:\n user_input = input(\"> \").lower()\n if user_input in ('exit', 'q'):\n break\n elif user_input == 'hint':\n # give_hint() ???\n pass\n else:\n self.check_password(password=user_input)\n print(\"Goodbye.\")", "def test_ask_reset_password_token(self):\n pass", "def step_see_prompt(context):\n context.cli.expect('wharfee> ')", "def askpass_main():\n\n verbose = os.getenv('PSSH_ASKPASS_VERBOSE')\n\n # It's not documented anywhere, as far as I can tell, but ssh may prompt\n # for a password or ask a yes/no question. The command-line argument\n # specifies what is needed.\n if len(sys.argv) > 1:\n prompt = sys.argv[1]\n if verbose:\n sys.stderr.write('pssh-askpass received prompt: \"%s\"\\n' % prompt)\n if not (prompt.strip().lower().endswith('password:') or 'enter passphrase for key' in prompt.strip().lower()):\n sys.stderr.write(prompt)\n sys.stderr.write('\\n')\n sys.exit(1)\n else:\n sys.stderr.write('Error: pssh-askpass called without a prompt.\\n')\n sys.exit(1)\n\n address = os.getenv('PSSH_ASKPASS_SOCKET')\n if not address:\n sys.stderr.write(textwrap.fill(\"pssh error: SSH requested a password.\"\n \" Please create SSH keys or use the -A option to provide a\"\n \" password.\"))\n sys.stderr.write('\\n')\n sys.exit(1)\n\n sock = socket.socket(socket.AF_UNIX)\n try:\n sock.connect(address)\n except socket.error:\n _, e, _ = sys.exc_info()\n message = e.args[1]\n sys.stderr.write(\"Couldn't bind to %s: %s.\\n\" % (address, message))\n sys.exit(2)\n\n try:\n password = sock.makefile().read()\n except socket.error:\n sys.stderr.write(\"Socket error.\\n\")\n sys.exit(3)\n\n print(password)", "def test_not_ask_password_when_asked_but_password_is_set(mock_factory, getpass_mock):\n # force ask the password\n config = {\n 'ssh': {\n 'passwd': '',\n 'ask_passwd': True,\n },\n }\n\n task = ssh_passwd.SSHPassword(mock_factory(), config)\n\n # trigger action\n task.pre_start()\n\n getpass_mock.assert_not_called()", "def step_expect_prompt(context):\n context.cli.expect('wharfee> ')", "def proquest(askquestions):\n\tif askquestions:\n\t\tanswer = raw_input(\"Tell me, do you want to go on ? (yes/no) \")\n\t\tif answer[:3] != \"yes\":\n\t\t\tsys.exit(\"Ok, bye.\")\n\t\tprint \"\"\t# to skip one line after the question.", "def test_request_prompts():\n def run(txt, prompts):\n with settings(prompts=prompts):\n # try to fulfil the OutputLooper interface, only want to test\n # _get_prompt_response. 
(str has a method upper)\n ol = OutputLooper(str, 'upper', None, list(txt), None)\n return ol._get_prompt_response()\n\n prompts = {\"prompt2\": \"response2\",\n \"prompt1\": \"response1\",\n \"prompt\": \"response\"\n }\n\n eq_(run(\"this is a prompt for prompt1\", prompts), (\"prompt1\", \"response1\"))\n eq_(run(\"this is a prompt for prompt2\", prompts), (\"prompt2\", \"response2\"))\n eq_(run(\"this is a prompt for promptx:\", prompts), (None, None))\n eq_(run(\"prompt for promp\", prompts), (None, None))", "def practice(aString):\n\tattempt = input(\"Password: \")\n\twhile True:\n\t\tprint(\"\\t{}\".format(attempt == aString))\n\t\tif \"exit\" == attempt:\n\t\t\tbreak\n\t\tattempt = input(\"Password: \")", "def test_not_ask_password_when_not_explicitly_asked(mock_factory, getpass_mock):\n # force NOT to ask the password\n config = {\n 'ssh': {\n 'ask_passwd': 'whatever',\n },\n }\n\n task = ssh_passwd.SSHPassword(mock_factory(), config)\n\n # trigger action\n task.pre_start()\n\n getpass_mock.assert_not_called()", "def test_check_pw_success(dbtransaction, auth_env):\n from .. security import check_password\n password = 'muniri'\n assert check_password(password)", "def input(label, is_password=False, answers=None, default=None):\n def print_question():\n answers_str = (' [%s]' % (','.join(answers)) if answers else '')\n default_str = (' (default=%s)' % default if default else '')\n sys.stdout.write(colored(' %s%s%s: ' % (label, answers_str, default_str), 'cyan'))\n sys.stdout.flush()\n\n def read_answer():\n value = ''\n print_question()\n\n while True:\n c = sys.stdin.read(1)\n\n # Enter pressed\n if c in ('\\r', '\\n') and (value or default):\n sys.stdout.write('\\r\\n')\n break\n\n # Backspace pressed\n elif c == '\\x7f' and value:\n sys.stdout.write('\\b \\b')\n value = value[:-1]\n\n # Valid character\n elif ord(c) in range(32, 127):\n sys.stdout.write(colored('*' if is_password else c, attrs=['bold']))\n value += c\n\n elif c == '\\x03': # Ctrl-C\n raise NoInput\n\n sys.stdout.flush()\n\n # Return result\n if not value and default:\n return default\n else:\n return value\n\n with std.raw_mode(sys.stdin):\n while True:\n value = read_answer()\n\n # Return if valid anwer\n if not answers or value in answers:\n return value\n\n # Otherwise, ask again.\n else:\n sys.stdout.write('Invalid answer.\\r\\n')\n sys.stdout.flush()", "def test_confirm_password_field(self):\n\n rv = self.client.get('/register')\n assert 'Confirm Password' in rv.data", "def ask_and_evaluate(self):\n answer = raw_input(self.question + \" > \")\n if answer == self.correct_answer:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up and run one demo controller for Vic
def runDemo(vcfile, remote=5621, expire=0.0): secrets = [ 'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY' 'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw', 'AKuYMe09COczwf2nIoD5AE119n7GLFOVFlNLxZcKuswc', 'A1-QxDkso9-MR1A8rZz_Naw6fgaAtayda8hrbkRVVu1E', 'Alntkt3u6dDgiQxTATr01dy8M72uuaZEf9eTdM-70Gk8', 'AcwFTk-wgk3ZT2buPRIbK-zxgPx-TKbaegQvPEivN90Y', 'A6zz7M08-HQSFq92sJ8KJOT2cZ47x7pXFQLPB0pckB3Q', 'ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc', ] doers = setupController(secrets=secrets, remotePort=remote, indirect=True, vcfile=vcfile) directing.runController(doers=doers, expire=expire)
[ "def test_demo(self):\n self.cbct.run_demo(show=False)", "def test_demo(self):\n self.run_in_dir(\"/experiments/demo/**/\", start_idx=0)", "def main():\n pv_simulator = PVSimulator()\n pv_simulator.consume()", "def run(self):\n segments = self.controller.split('.')\n controller_class = reduce(getattr, segments[1:],\n __import__('.'.join(segments[:-1])))\n cmd_line = ['-f']\n if self.configuration is not None:\n cmd_line.extend(['-c', self.configuration])\n args = parser.get().parse_args(cmd_line)\n controller_instance = controller_class(args, platform)\n try:\n controller_instance.start()\n except KeyboardInterrupt:\n controller_instance.stop()", "def initControllerSetup(self):\r\n # Set the front motors to be the followers of the rear motors\r\n self.frontLeft.set(WPI_TalonSRX.ControlMode.Follower, DRIVETRAIN_REAR_LEFT_MOTOR)\r\n self.frontRight.set(WPI_TalonSRX.ControlMode.Follower, DRIVETRAIN_REAR_RIGHT_MOTOR)\r\n\r\n # Set the neutral output mode to Brake/Coast/\r\n self.leftTalon.setNeutralMode(WPI_TalonSRX.NeutralMode.Brake)\r\n self.rightTalon.setNeutralMode(WPI_TalonSRX.NeutralMode.Brake)\r\n\r\n # Diable the motor-safety\r\n self.diffDrive.setSafetyEnabled(False)\r\n\r\n # Set the feedback sensor phases\r\n self.leftTalon.setSensorPhase(True)\r\n self.rightTalon.setSensorPhase(True)\r\n\r\n # Setup the Pigeon IMU and Talon Mag Encoders\r\n self.initPigeonIMU()\r\n self.initQuadratureEncoder()\r\n\r\n # Set the voltage compensation to 12V and disable it for now\r\n self.leftTalon.configVoltageCompSaturation(12.0, 10)\r\n self.leftTalon.enableVoltageCompensation(False)\r\n self.rightTalon.configVoltageCompSaturation(12.0, 10)\r\n self.rightTalon.enableVoltageCompensation(False)\r\n\r\n # PIDF slot index 0 is for autonomous wheel postion\r\n self.leftTalon.config_kP(0, 0.8, 10)\r\n self.leftTalon.config_kI(0, 0.0, 10)\r\n self.leftTalon.config_kD(0, 0.0, 10)\r\n self.leftTalon.config_kF(0, 1023 / 12, 10) # 10-bit ADC units / 12 V\r\n self.rightTalon.config_kP(0, 0.8, 10)\r\n self.rightTalon.config_kI(0, 0.0, 10)\r\n self.rightTalon.config_kD(0, 0.0, 10)\r\n self.rightTalon.config_kF(0, 1023 / 12, 10) # 10-bit ADC units / 12 V\r\n\r\n # PIDF slot index 1 is for autonomous heading postion\r\n self.leftTalon.config_kP(1, 1.0, 10)\r\n self.leftTalon.config_kI(1, 0, 10)\r\n self.leftTalon.config_kD(1, 0, 10)\r\n self.leftTalon.config_kF(1, 0, 10)\r\n self.rightTalon.config_kP(1, 1.0, 10)\r\n self.rightTalon.config_kI(1, 0, 10)\r\n self.rightTalon.config_kD(1, 0, 10)\r\n self.rightTalon.config_kF(1, 0, 10)", "def serve(opt, env, debug):\n if opt.model is None:\n raise click.UsageError(\n \"The global parameter --model must either be given when calling the serve\"\n \" command or --model-type must be given when creating the model.\"\n )\n click.echo(\"Starting server for model version {}.\".format(env.model_version))\n predictor = opt.model().predictor(env)\n predictor.setup()\n app.predictor = predictor\n atexit.register(predictor.teardown)\n app.run(host=\"0.0.0.0\", port=8080, debug=debug)\n click.echo(\"Done.\")", "def examples(command):\n print(\n \"\"\"\nPanel serves the example notebooks.\n===================================\n\"\"\"\n )\n apps = _get_apps()\n command.run(f\"panel serve {' '.join(apps)} --auto --show\", echo=True)", "def test_otoroshi_controllers_adminapi_templates_controller_initiate_simple_admin(self):\n pass", "def demo(context):\n LOG.info(\"Running scout setup demo\")\n institute_name = context.obj['institute_name']\n user_name = context.obj['user_name']\n 
user_mail = context.obj['user_mail']\n\n adapter = context.obj['adapter']\n\n setup_scout(\n adapter=adapter,\n institute_id=institute_name,\n user_name=user_name,\n user_mail = user_mail,\n demo=True\n )", "def main():\n DropController()", "def start(self, **kw):\n\t\tsuper(webapp_enhanced, self).__init__(self._controller_map, **kw)", "def run_sample():\n from autumn.projects.covid_19.vaccine_optimisation.sample_code import run_sample_code\n\n run_sample_code()", "def main():\n\n # automaticky_testovac()\n\n riadic()", "def setUpClass(cls) -> None:\n with allure.step(\"Get args from CLI\"):\n # cls.args = get_args()\n pass\n\n with allure.step(\"Set test URL\"):\n cls.URL = 'http://127.0.0.1:5000'\n\n with allure.step(\"Start REST API Service\"):\n print(\"\\nOS: {}\\n\".format(platform.system()))\n\n if platform.system() == 'Windows':\n os.system(\"start /B start cmd.exe @cmd /k python ../api/cars_app.py\")\n time.sleep(5)\n\n if platform.system() == 'Linux':\n # os.system(\"python ../cars_app.py &\")\n pass", "def trigger_management_tests():\n ts.start_torchserve(\n ncs=True, model_store=MODEL_STORE_DIR, log_file=TS_CONSOLE_LOG_FILE\n )\n EXIT_CODE = os.system(\n f\"newman run -e {POSTMAN_ENV_FILE} {POSTMAN_COLLECTION_MANAGEMENT} -d {POSTMAN_MANAGEMENT_DATA_FILE} -r cli,htmlextra --reporter-htmlextra-export {ARTIFACTS_MANAGEMENT_DIR}/{REPORT_FILE} --verbose\"\n )\n ts.stop_torchserve()\n move_logs(TS_CONSOLE_LOG_FILE, ARTIFACTS_MANAGEMENT_DIR)\n cleanup_model_store()\n return EXIT_CODE", "def startup(self):\n core.openflow.addListeners(self)\n core.openflow_discovery.addListeners(self)\n log.info('Controller initialized')", "def main():\r\n params = get_configuration(print_diagnostics=True, with_neptune=True, inject_parameters_to_gin=True)\r\n LOG_PATH = os.path.join(params.BASE_PATH, 'tests', params.GAME)\r\n runner = RolloutsRunner(LOG_PATH,create_rainbow_rollouts_agent)\r\n runner.run_experiment()", "def _main():\n volttron_home = _os.path.normpath(expandall(\n _os.environ.get('VOLTTRON_HOME', '~/.volttron')))\n _os.environ['VOLTTRON_HOME'] = volttron_home\n if not _os.path.exists(volttron_home):\n _os.makedirs(volttron_home, 0o755)\n\n y_or_n = ('Y', 'N', 'y', 'n')\n y = ('Y', 'y')\n n = ('N', 'n')\n print('\\nYour VOLTTRON_HOME currently set to: {}'.format(volttron_home))\n t = ('\\nIs this the volttron you are attempting to setup? [Y]',\n y_or_n,\n 'Y')\n if not prompt_response(t) in y:\n print(\n '\\nPlease execute with VOLTRON_HOME=/your/path volttron-cfg to '\n 'modify VOLTTRON_HOME.\\n')\n return\n t = ('\\nIs this instance discoverable (Y/N)? [N] ', y_or_n, 'N')\n _explain_discoverable()\n is_discoverable = prompt_response(t) in y\n\n if is_discoverable:\n t = ('\\nWhat is the external ipv4 address for this instance? '\n '[127.0.0.1]: ', None, '127.0.0.1')\n external_ip = prompt_response(t)\n t = ('What is the vip port this instance? [22916] ',)\n vip_port = prompt_response(t)\n if not vip_port:\n vip_port = 22916\n\n t = ('\\nWhat is the port for discovery? [8080] ',)\n external_port = prompt_response(t)\n if not external_port:\n external_port = 8080\n t = (\n '\\nWhich IP addresses are allowed to discover this instance? '\n '[/127.*/] ', None, '/127.*/')\n ip_allowed_to_discover = prompt_response(t)\n AuthFile().add(AuthEntry(address=ip_allowed_to_discover,\n credentials='/CURVE:.*/'))\n\n t = ('\\nIs this instance a volttron central (Y/N)? 
[N] ', y_or_n, 'N')\n do_install_vc = prompt_response(t) in y\n do_vc_autostart = True\n do_platform_autostart = True\n if do_install_vc:\n t = ('\\nShould volttron central autostart(Y/N)? [Y] ',\n y_or_n, 'Y')\n do_vc_autostart = prompt_response(t) in y\n\n t = ('\\nInclude volttron central platform agent on '\n 'volttron central? [Y]', y_or_n, 'Y')\n do_install_platform = prompt_response(t) in y\n else:\n do_install_platform = True\n t = ('\\nAddress of volttron central? [127.0.0.1]: ', None,\n '127.0.0.1')\n vc_ipaddress = prompt_response(t)\n should_resolve = True\n first = True\n t = ('Port of volttron central? [8080] ',)\n vc_port = prompt_response(t)\n if not vc_port:\n vc_port = 8080\n while not _resolvable(vc_ipaddress, vc_port) and should_resolve:\n print(\"Couldn't resolve {}:{}\".format(vc_ipaddress, vc_port))\n t2 = (\n '\\nShould volttron central be resolvable now? [Y] ', y_or_n,\n 'Y')\n if first:\n should_resolve = prompt_response(t2) in ('y', 'Y')\n first = False\n\n if should_resolve:\n t = ('\\nAddress of volttron central? ',)\n vc_ipaddress = prompt_response(t)\n t = ('\\nPort of volttron central? ',)\n vc_port = prompt_response(t)\n\n if do_install_platform:\n t = ('\\nShould platform agent autostart(Y/N)? [Y] ', y_or_n, 'Y')\n do_platform_autostart = prompt_response(t) in y\n\n external_uri = \"tcp://{}:{}\".format(external_ip, vip_port)\n bind_web_address = \"http://{}:{}\".format(external_ip,\n external_port)\n try:\n vc_web_address = \"http://{}:{}\".format(vc_ipaddress, vc_port)\n _make_configuration(external_uri, bind_web_address,\n vc_web_address)\n\n # if vc_ipaddres isn't defined\n # only happens on volttron central.\n except UnboundLocalError:\n _make_configuration(external_uri, bind_web_address)\n\n t = ('\\nShould install sqlite platform historian? [N]', y_or_n, n)\n do_install_platform_historian = prompt_response(t) in y\n\n do_historian_autostart = True\n if do_install_platform_historian:\n t = ('\\nShould historian agent autostart(Y/N)? 
[Y] ', y_or_n, 'Y')\n do_historian_autostart = prompt_response(t) in y\n\n # in order to install agents we need to start the platform.\n _start_platform()\n _install_agents((do_install_vc, do_vc_autostart),\n (do_install_platform, do_platform_autostart),\n (do_install_platform_historian,\n do_historian_autostart))\n _shutdown_platform()\n print('Finished configuration\\n')\n print('You can now start you volttron instance.\\n')\n print('If you need to change the instance configuration you can edit')\n print('the config file at {}/{}\\n'.format(volttron_home, 'config'))", "def main():\n global config, mqtt\n description = f\"Power Device Monitoring Api, version: {__version__}\"\n parser = ArgumentParser(description=description)\n\n parser.add_argument(\n \"-C\",\n \"--configFile\",\n type=str,\n help=\"Full location of yaml config file\",\n default=\"./powermon-api.yaml\",\n )\n args = parser.parse_args()\n\n # Build configuration from config file etc\n log.info(\"Using config file: %s\", args.configFile)\n config = {\"configFile\": args.configFile}\n # build config with details from config file\n config.update(read_yaml_file(args.configFile))\n\n log.info(f\"{config=}\")\n # if \"mqttbroker\" in config:\n # mqtt_config = MQTTConfig(\n # host=config[\"mqttbroker\"].get(\"name\"),\n # port=config[\"mqttbroker\"].get(\"port\"),\n # keepalive=60,\n # username=config[\"mqttbroker\"].get(\"username\"),\n # password=config[\"mqttbroker\"].get(\"password\"),\n # )\n # try:\n # mqtt = FastMQTT(config=mqtt_config)\n # mqtt.init_app(app)\n # except Exception:\n # print(\"Unable to connect to mqttbroker\")\n # mqtt = None\n\n uvicornConfig = uvicorn.Config(\n \"powermon_api.api:app\",\n host=config[\"api\"].get(\"host\", \"0.0.0.0\"),\n port=config[\"api\"].get(\"port\", 5000),\n log_level=config[\"api\"].get(\"log_level\", \"info\"),\n reload=True,\n )\n server = uvicorn.Server(uvicornConfig)\n server.run()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints an 'instaterecord' (input state record): the state of input at a given level of the input stack.
def printInStateRecord(x): # print(x) assert type(x) == gdb.Value assert str(x.type) == 'instaterecord' assert x.type == gdb.lookup_type('instaterecord') t = x.type assert t.name == 'instaterecord' assert t.code == gdb.TYPE_CODE_TYPEDEF field_names = ['statefield', 'indexfield', 'startfield', 'locfield', 'limitfield', 'namefield'] assert [field.name for field in t.fields()] == field_names # for field in t.fields(): # value = x[field.name] # # print(field.name, value, field.type, int(value)) # print(field.name, int(value)) TOKEN_LIST = 0 MID_LINE = 1 SKIP_BLANKS = 17 NEW_LINE = 33 state = int(x['statefield']) assert state in {TOKEN_LIST, MID_LINE, SKIP_BLANKS, NEW_LINE}, 'Unexpected state: %s' % state if state == TOKEN_LIST: startNode = x['startfield'] tokenList = showTokenList(startNode if x['indexfield'] < MACRO else link(startNode), x['locfield']) return { # See §307 for an explanation of these 'statefield': int(x['statefield']), # is just going to be 0, indicating this is a token list 'indexfield': int(x['indexfield']), # tokenListType 'startfield': int(x['startfield']), # startNode 'locfield': int(x['locfield']), # currentNodeLoc 'limitfield': int(x['limitfield']), # where params start, if MACRO 'namefield': int(x['namefield']), # where in eqtb, if MACRO 'tokens': tokenList, # the actual tokens in the token list! } else: start = int(x['startfield']) limit = int(x['limitfield']) buffer_slice = [int(gdb.parse_and_eval('buffer[%d]' % i)) for i in range(start, limit + 1)] return { # See §303 for an explanation of these 'statefield': int(x['statefield']), # Scanner state 'indexfield': int(x['indexfield']), # Index (files open depth) 'startfield': int(x['startfield']), # where current line starts in buffer 'locfield': int(x['locfield']), # next char to read in buffer (or > limit meaning buffer is read) 'limitfield': int(x['limitfield']), # where current line ends in buffer 'namefield': int(x['namefield']), # file name 'filename': gettexstring(x['namefield']), 'buffertext': buffer_slice, }
[ "def __repr__(self, level = 0, visited = []):\n\t\toffset = \" \" * level\n\t\trep = offset + str(id(self)) + \"\\n\"\n\n\t\tif id(self) not in visited:\n\t\t\t# We add the id of the state to the list of visited states.\n\t\t\tvisited.append(id(self))\n\t\t\tfor (value, state) in self.connections:\n\t\t\t\t# The id of each state will be indented based on its distance from \n\t\t\t\t# the initial state.\n\t\t\t\trep += value + state.__repr__(level + 1, visited)\n\n\t\t# We clear the visited list for future function calls.\n\t\tvisited[:] = []\n\n\t\treturn rep", "def print_state(self, player: int):\n state = self.game.get_state(player)\n print_state(state)", "def printState(self, state, info):\n\n print(\"Current value of state: %s\" % (info))\n for y in range(5):\n line=[]\n for x in range(5):\n line.append(hex(state[x][y]))\n print('\\t%s' % line)", "def print_stacks(self):\n print(self.operand_stack)\n print(self.type_stack)\n print(self.operator_stack)", "def print_state(self):\n print(self.board.get_other_player_name(self.board.current_player.name) + \n \" player action: \" + self.last_command.strip()) \n print(self.board)\n self.print_metadata()", "def print_state(self):\n p1_board = self.board[0:6]\n p2_board = self.board[7:13]\n p2_board.reverse()\n p1_purse = self.board[6]\n p2_purse = self.board[13]\n\n print('\\n')\n print(\"Player 1 Score: {}\".format(self.p1_score))\n print(\"Player 2 Score: {}\".format(self.p2_score))\n print('\\n')\n print(\"Active Player: {}\".format(self.active_player))\n print(\"Actions: \", self.get_legal_actions())\n print(\"Game Over: {}\".format(self.is_over))\n print('\\n')\n print('\\t ' + ' '.join(map(str, p2_board)))\n print('\\t' + str(p2_purse) + '\\t\\t' + str(p1_purse))\n print('\\t ' + ' '.join(map(str, p1_board)))\n print('\\n')\n print(\"=\"*50)", "def print_local_state(self, local_state):\n print(local_state, end='')", "def show(self, state, stream=sys.stdout):\n \n for i in range(self.n):\n fmtstr = []\n for j in range(self.n-1):\n fmtstr.append( \" %s |\"%TicTacToe.Chrs[state.board[i*self.n+j]])\n fmtstr.append(\" %s \"%TicTacToe.Chrs[state.board[(i+1)*self.n-1]])\n line = \"\".join(fmtstr)\n print(line, file=stream)\n if i < self.n-1:\n print('-'*len(line), file=stream)", "def show(self):\n traverse = self.head\n\n if self.top <= -1:\n print(\" Stack Underflow\")\n return\n if traverse is None:\n print(\"Stack is empty\")\n return\n\n while traverse.next is not None:\n print(traverse.data)\n traverse = traverse.next\n print(traverse.data)", "def show(self, internal=False):\n if internal:\n info = ''\n for i in range(0, self.size):\n name = self.name_list[i]\n interface = ''\n iface = self.agents[name]\n for s in iface:\n interface += s + '{' + iface[s]['state'] + '}' + '[' + iface[s]['bond'] + '] '\n info += self.info[name]['sID'] + name + '(' + interface[:-1] + '), '\n print(info[:-2]) # remove last comma+blank\n else:\n print(self.kappa_expression())", "def show_state(self):\n\n pass", "def display_expr_tree(expr):\n \n from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat\n \n stack = [{expand_to_fullexpr(expr): 0}]\n \n while len(stack) > 0:\n sub_expr, level = list(stack.pop().items())[0]\n \n print(\"-\" + 4*level*\"-\",sub_expr)\n if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or \n isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or\n isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):\n for arg in reversed(sub_expr.args): # TODO: Why do we need to 
reverse?\n stack.append({arg: level+1})", "def print_flow_state(flow, active_step_name=None, file=sys.stdout):\n # show flow name\n print(\"[{}]\".format(flow.Meta.name).center(40, \"~\"), file=file)\n # show flow global state\n needs_header = True\n for f_k, f_v in flow.__dict__.items():\n # private stuff is private\n if f_k.startswith(\"_\"):\n continue\n # steps are handled later\n if (isinstance(f_v, Step) or\n (isinstance(f_v, type) and issubclass(f_v, Step))):\n continue\n # skip Meta\n if f_k == 'Meta':\n continue\n if needs_header:\n print(\"STATE:\", file=file)\n needs_header = False\n print(\"{indent}{key}: {value!r}\".format(\n indent=\" \" * 4, key=f_k, value=f_v\n ), file=file)\n # show a list of all the steps, their state as well as a marker that\n # shows where we actively are\n print(\"STEPS:\", file=file)\n for name in flow.Meta.steps.keys():\n step = getattr(flow, name)\n flags = []\n if step.Meta.accepting:\n flags.append('A')\n if step.Meta.initial == name:\n flags.append('I')\n if flags:\n rendered_flags = \" ({})\".format(''.join(flags))\n else:\n rendered_flags = \"\"\n if step.Meta.name == active_step_name:\n indent = \" => \"\n else:\n indent = \" \"\n print(\"{indent}{step}{flags:4}\".format(\n indent=indent, flags=rendered_flags, step=step.Meta.label\n ), file=file)\n needs_header = False\n for s_k, s_v in step.__dict__.items():\n if s_k.startswith(\"_\"):\n continue\n # skip Meta\n if s_k == 'Meta':\n continue\n if needs_header:\n print(\"STATE:\", file=file)\n needs_header = False\n print(\"{indent}{key}: {value!r}\".format(\n indent=\" \" * 8, key=s_k, value=s_v\n ), file=file)\n print(\".\" * 40, file=file)", "def print_machine(machine):\n\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Transition table:\\n\")\n sys.stdout.write(\"\\n\")\n\n TTable = machine.trans_table\n\n sys.stdout.write(\" \")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"+-----\")\n sys.stdout.write(\"+\\n\")\n\n sys.stdout.write(\" \")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"| %d \" % j)\n sys.stdout.write(\"|\\n\")\n\n sys.stdout.write(\" +---\")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"+-----\")\n sys.stdout.write(\"+\\n\")\n\n for i in xrange(len(TTable)):\n sys.stdout.write(\" | %c \" % states[i])\n for j in xrange(len(TTable[i])):\n sys.stdout.write(\"| \")\n if TTable[i][j][0] == -1 and \\\n TTable[i][j][1] == -1 and \\\n TTable[i][j][2] == -1:\n sys.stdout.write(\"--- \")\n else:\n sys.stdout.write(\"%c\" % symbols[TTable[i][j][0]])\n sys.stdout.write(\"%c\" % dirs [TTable[i][j][1]])\n sys.stdout.write(\"%c \" % states [TTable[i][j][2]])\n sys.stdout.write(\"|\\n\")\n\n sys.stdout.write(\" +---\")\n for j in xrange(len(TTable[0])):\n sys.stdout.write(\"+-----\")\n sys.stdout.write(\"+\\n\")\n\n sys.stdout.write(\"\\n\")\n\n sys.stdout.flush()", "def __str__(self):\n\t\tret = self.name + \"\\n\"\n\t\tfor k,v in self.states.items():\n\t\t\tret += v.__str__() + \"\\n\"\n\t\treturn ret", "def print_tree(self, level=None):\n levels = range(self.tree_levels) if level is None else [level]\n for k in levels:\n for j in range(2 ** k - 1, 2 ** (k + 1) - 1):\n print(self.tree[j], end=' ')\n print()", "def pprint(self, tag=None):\n if tag is not None:\n if tag not in self.db:\n return\n idx = self.db.index(tag)\n tags = self.db[idx:idx+1]\n else:\n tags = self.db\n d = {0: \"OUT\", 1: \"IN\"}\n for tag, sessionobj in tags.iteritems():\n print(\"TAG: {0}\".format(tag))\n for callid, ifaceids in sessionobj.callids.iteritems():\n print(\" CALL-ID: 
{0}\".format(callid))\n for ifaceid, infos in ifaceids.iteritems():\n print(\" IFACEID: {0}\".format(ifaceid))\n for i, info in enumerate(infos):\n print(\" {0:>3}: {1}\".format(d[i], info))\n print(\" SUM: {0}\\n\".format(self.db[tag]))", "def Print(self, indentionLevel, visited): \n\t\ts=''\n\t\tfor i in range(indentionLevel):\n\t\t\ts+=\"-\"\n#\t\tif self.id in visited:\n#\t\t\tprint \"^-%s(*%s*, %s, %s [%s])\" %(s, self.id, self.name, self.desc, self.type)\n#\t\t\treturn\n\t\tvisited.add(self.id)\n\t\tprint \"%s(*%s*, %s, %s [%s])\" %(s, self.id, self.name, self.desc, self.type)\n\t\tprint \"Genes: \", self.genes\n\t\t\n\t\tfor child in self.children:\n\t\t\tchild.Print(indentionLevel+1, visited)", "def log_state(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate that the edge descriptors do not reference nonexistent vertices or columns.
def validate_edge_descriptors(vertex_name_to_table, direct_edges): for edge_name, direct_edge_descriptor in six.iteritems(direct_edges): for vertex_name, column_name in ((direct_edge_descriptor.from_vertex, direct_edge_descriptor.from_column), (direct_edge_descriptor.to_vertex, direct_edge_descriptor.to_column)): if vertex_name not in vertex_name_to_table: raise InvalidSQLEdgeError("SQL edge {} with edge descriptor {} references a " "non-existent vertex {}".format(edge_name, direct_edge_descriptor, vertex_name)) if column_name not in vertex_name_to_table[vertex_name].columns: raise InvalidSQLEdgeError("SQL edge {} with edge descriptor {} references a " "non-existent column {}".format(edge_name, direct_edge_descriptor, column_name))
[ "def _validate_graph(self, G):\n for (v1, v2) in G.edges():\n if 'object' not in G.edges[v1, v2].keys():\n raise ValueError(\"edge_object for ({}, {}) is missing\".format(v1, v2))\n edge_object = G.edges[v1, v2]['object']\n if 'col' not in edge_object.keys():\n raise ValueError(\"edge_object for ({}, {}) is missing the 'col' field\".format(v1, v2))\n if 'lookup' not in edge_object.keys():\n raise ValueError(\"edge_object for ({}, {}) is missing the 'lookup' field\".format(v1, v2))\n if 'field' not in edge_object.keys():\n raise ValueError(\"edge_object for ({}, {}) is missing the 'field' field\".format(v1, v2))", "def _validate_edge(self, edge: Edge):\n\n # Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly)\n try:\n from_node = self.get_node(edge.source.node_id)\n to_node = self.get_node(edge.destination.node_id)\n except NodeNotFoundError:\n raise InvalidEdgeError(\"One or both nodes don't exist: {edge.source.node_id} -> {edge.destination.node_id}\")\n\n # Validate that an edge to this node+field doesn't already exist\n input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)\n if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):\n raise InvalidEdgeError(\n f\"Edge to node {edge.destination.node_id} field {edge.destination.field} already exists\"\n )\n\n # Validate that no cycles would be created\n g = self.nx_graph_flat()\n g.add_edge(edge.source.node_id, edge.destination.node_id)\n if not nx.is_directed_acyclic_graph(g):\n raise InvalidEdgeError(\n f\"Edge creates a cycle in the graph: {edge.source.node_id} -> {edge.destination.node_id}\"\n )\n\n # Validate that the field types are compatible\n if not are_connections_compatible(from_node, edge.source.field, to_node, edge.destination.field):\n raise InvalidEdgeError(\n f\"Fields are incompatible: cannot connect {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if iterator output type matches iterator input type (if this edge results in both being set)\n if isinstance(to_node, IterateInvocation) and edge.destination.field == \"collection\":\n if not self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source):\n raise InvalidEdgeError(\n f\"Iterator input type does not match iterator output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if iterator input type matches output type (if this edge results in both being set)\n if isinstance(from_node, IterateInvocation) and edge.source.field == \"item\":\n if not self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination):\n raise InvalidEdgeError(\n f\"Iterator output type does not match iterator input type:, {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if collector input type matches output type (if this edge results in both being set)\n if isinstance(to_node, CollectInvocation) and edge.destination.field == \"item\":\n if not self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source):\n raise InvalidEdgeError(\n f\"Collector output type does not match collector input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if collector output type matches input type (if this edge results in both being set)\n if isinstance(from_node, 
CollectInvocation) and edge.source.field == \"collection\":\n if not self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination):\n raise InvalidEdgeError(\n f\"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )", "def _validate_schema_node_set_references(schema: schema_pb2.GraphSchema):\n for set_name, edge_set in schema.edge_sets.items():\n for feature_name in edge_set.source, edge_set.target:\n if feature_name not in schema.node_sets:\n raise ValidationError(\n \"Edge set '{}' referencing unknown node set '{}'\".format(\n set_name, feature_name))", "def checkDegenerateFaces(self):\n print(\"Checking mesh for degenerate faces...\")\n\n for face in self.faces:\n\n seenPos = set()\n vList = []\n for v in face.adjVerts():\n pos = tuple(v.pos.tolist()) # need it as a hashable type\n if pos in seenPos:\n raise ValueError(\"ERROR: Degenerate mesh face has repeated vertices at position: \" + str(pos))\n else:\n seenPos.add(pos)\n vList.append(v.pos)\n\n # Check for triangular faces with colinear vertices (don't catch other such errors for now)\n if(len(vList) == 3):\n v1 = vList[1] - vList[0]\n v2 = vList[2]-vList[0]\n area = norm(cross(v1, v2))\n if area < 0.0000000001*max((norm(v1),norm(v2))):\n raise ValueError(\"ERROR: Degenerate mesh face has triangle composed of 3 colinear points: \\\n \" + str(vList))\n\n\n print(\" ...test passed\")", "def _check_descriptor_dependencies(self, session, descriptor):\n if session[\"force\"]:\n return\n member_vnfd_index = {}\n if descriptor.get(\"constituent-vnfd\") and not session[\"force\"]:\n for vnf in descriptor[\"constituent-vnfd\"]:\n vnfd_id = vnf[\"vnfd-id-ref\"]\n filter_q = self._get_project_filter(session)\n filter_q[\"id\"] = vnfd_id\n vnf_list = self.db.get_list(\"vnfds\", filter_q)\n if not vnf_list:\n raise EngineException(\"Descriptor error at 'constituent-vnfd':'vnfd-id-ref'='{}' references a non \"\n \"existing vnfd\".format(vnfd_id), http_code=HTTPStatus.CONFLICT)\n # elif len(vnf_list) > 1:\n # raise EngineException(\"More than one vnfd found for id='{}'\".format(vnfd_id),\n # http_code=HTTPStatus.CONFLICT)\n member_vnfd_index[vnf[\"member-vnf-index\"]] = vnf_list[0]\n\n # Cross references validation in the descriptor and vnfd connection point validation\n for vld in get_iterable(descriptor.get(\"vld\")):\n for referenced_vnfd_cp in get_iterable(vld.get(\"vnfd-connection-point-ref\")):\n # look if this vnfd contains this connection point\n vnfd = member_vnfd_index.get(referenced_vnfd_cp[\"member-vnf-index-ref\"])\n for vnfd_cp in get_iterable(vnfd.get(\"connection-point\")):\n if referenced_vnfd_cp.get(\"vnfd-connection-point-ref\") == vnfd_cp[\"name\"]:\n break\n else:\n raise EngineException(\n \"Error at vld[id='{}']:vnfd-connection-point-ref[member-vnf-index-ref='{}']:vnfd-\"\n \"connection-point-ref='{}' references a non existing conection-point:name inside vnfd '{}'\"\n .format(vld[\"id\"], referenced_vnfd_cp[\"member-vnf-index-ref\"],\n referenced_vnfd_cp[\"vnfd-connection-point-ref\"], vnfd[\"id\"]),\n http_code=HTTPStatus.UNPROCESSABLE_ENTITY)", "def test_edge_dirc_spec(self):\n with self.assertRaises(ValueError):\n ed.Edge(\"O\",\"1\")", "def test_undirected_graph_edge_already_exists_exception(self):\n g = UndirectedGraph()\n g.add_vertex(v_val='v0')\n g.add_vertex(v_val='v1')\n g.add_edge(('v0', 'v1'))\n\n with self.assertRaises(ValueError):\n g.add_edge(('v1', 'v0'))", "def 
check_edge_correctness(graph_long_edges, edge):\n\n if not graph_long_edges:\n return True\n\n bad_edges = [graph_edge for graph_edge in graph_long_edges if\n is_under(edge, graph_edge) or is_under(graph_edge, edge)]\n\n return not bad_edges", "def test_identify_eds_not_adjacent(self):\n # Setup\n first = Edge(None, 0, 1, None, None)\n second = Edge(None, 2, 3, None, None)\n\n # Please, note that we passed the index, copula_name and copula_theta as None\n # To show they are no going to be used in the scope of this test.\n\n # Run / Check\n # As they are not adjacent, we can asure calling _identify_eds_ing will raise a ValueError.\n assert not first.is_adjacent(second)\n\n error_msg = r'too many values to unpack \\(expected 2\\)'\n with pytest.raises(ValueError, match=error_msg):\n Edge._identify_eds_ing(first, second)", "def test_directed_graph_edge_already_exists_exception(self):\n g = DirectedGraph()\n g.add_vertex(v_val='v0')\n g.add_vertex(v_val='v1')\n g.add_edge(('v0', 'v1'))\n\n with self.assertRaises(ValueError):\n g.add_edge(('v0', 'v1'))", "def test_edge_not_match_type(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"P\",\"T\")\n self.assertFalse(e1.matches(e2))", "def _validate_schema_reserved_feature_names(schema: schema_pb2.GraphSchema):\n node_set_dicts = [(\"nodes\", name, node_set.features)\n for name, node_set in schema.node_sets.items()]\n edge_set_dicts = [(\"edges\", name, edge_set.features)\n for name, edge_set in schema.edge_sets.items()]\n for set_type, set_name, feature_dict in node_set_dicts + edge_set_dicts:\n if const.SIZE_NAME in feature_dict:\n raise ValidationError(\n \"Feature '{}' from {} set '{}' is reserved\".format(\n const.SIZE_NAME, set_type, set_name))\n for set_type, set_name, feature_dict in edge_set_dicts:\n for name in const.SOURCE_NAME, const.TARGET_NAME:\n # Invalidate reserved feature names.\n if name in feature_dict:\n raise ValidationError(\n \"Feature '{}' from {} set '{}' is reserved\".format(\n name, set_type, set_name))\n\n # TODO(blais): Make this compulsory after we remove the hardcoded\n # feature names from the sampler.\n for set_type, set_name, feature_name, feature in su.iter_features(schema):\n if const.RESERVED_REGEX.match(feature_name):\n logging.error(\"Invalid %s feature name '%s' on set '%s': reserved names \"\n \"are not allowed\", set_type, feature_name, set_name)", "def h3_unidirectional_edge_is_valid(edge):\n try:\n e = _in_scalar(edge)\n return _cy.is_edge(e)\n except (ValueError, TypeError):\n return False", "def _validate_constraint_columns(self, table_data):\n if any(col not in table_data.columns for col in self._constraint_columns):\n raise MissingConstraintColumnError()", "def test_add_edge_data_bad_args():\n from cugraph.experimental import PropertyGraph\n\n transactions = dataset1[\"transactions\"]\n transactions_df = cudf.DataFrame(columns=transactions[0],\n data=transactions[1])\n\n pG = PropertyGraph()\n with pytest.raises(TypeError):\n pG.add_edge_data(42,\n type_name=\"transactions\",\n vertex_col_names=(\"user_id\", \"merchant_id\"),\n property_columns=None)\n with pytest.raises(TypeError):\n pG.add_edge_data(transactions_df,\n type_name=42,\n vertex_col_names=(\"user_id\", \"merchant_id\"),\n property_columns=None)\n with pytest.raises(ValueError):\n pG.add_edge_data(transactions_df,\n type_name=\"transactions\",\n vertex_col_names=(\"user_id\", \"bad_column\"),\n property_columns=None)\n with pytest.raises(ValueError):\n pG.add_edge_data(transactions_df,\n type_name=\"transactions\",\n 
vertex_col_names=(\"user_id\", \"merchant_id\"),\n property_columns=[\"bad_column_name\", \"time\"])\n with pytest.raises(TypeError):\n pG.add_edge_data(transactions_df,\n type_name=\"transactions\",\n vertex_col_names=(\"user_id\", \"merchant_id\"),\n property_columns=\"time\")", "def test_edge_not_match_direction(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"O\",\"B\")\n self.assertFalse(e1.matches(e2))", "def _validate_schema_descriptions(schema: schema_pb2.GraphSchema):\n # This seems to be a common error.\n name_fields = []\n for set_type, set_name, feature_name, feature in su.iter_features(schema):\n if feature.HasField(\"description\"):\n continue\n for dim in feature.shape.dim:\n if dim.name:\n name_fields.append((set_type, set_name, feature_name))\n if name_fields:\n field_names = \",\".join([str(ntuple) for ntuple in name_fields])\n raise ValidationError(\n \"The following features are incorrectly locating the description on \"\n \"the shape dimensions 'name' field: {}; use the 'description' field of \"\n \"the feature instead\".format(field_names))", "def _validate_schema_context_references(schema: schema_pb2.GraphSchema):\n for set_name, node_set in schema.node_sets.items():\n for feature in node_set.context:\n if feature not in schema.context.features:\n raise ValidationError(\"Context feature '{}' does not exist \"\n \"(from node set '{}')\".format(feature, set_name))\n for set_name, edge_set in schema.edge_sets.items():\n for feature in edge_set.context:\n if feature not in schema.context.features:\n raise ValidationError(\"Context feature '{}' does not exist \"\n \"(from edge set '{}')\".format(feature, set_name))", "def has_edge(self, u, v):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the physical dimensions of the context's viewport.
async def get_physical_viewport_dimensions(bidi_session, context): viewport = await get_viewport_dimensions(bidi_session, context) dpr = await get_device_pixel_ratio(bidi_session, context) return (floor(viewport["width"] * dpr), floor(viewport["height"] * dpr))
[ "def getViewportSizePixels(self) -> \"SbVec2s const &\":\n return _coin.SbViewportRegion_getViewportSizePixels(self)", "def viewport (self):\n return self._viewport", "def getViewportSize(self) -> \"SbVec2f const &\":\n return _coin.SbViewportRegion_getViewportSize(self)", "def _get_viewport(self) -> \"adsk::core::Ptr< adsk::core::Viewport >\" :\n return _core.CameraEventArgs__get_viewport(self)", "def _get_viewport(self) -> \"adsk::core::Ptr< adsk::core::Viewport >\" :\n return _core.MouseEventArgs__get_viewport(self)", "def get_screen_size(self) -> Tuple[int, int]:\n return self.get_window_geometry(window=self.root)[2:]", "def window_size() -> Vec2:\n return Vec2(_canvas.GetSize())", "def _get_width(self) -> \"int\" :\n return _core.Viewport__get_width(self)", "def get_viewport_size(driver):\n # noinspection PyBroadException\n try:\n width = extract_viewport_width(driver)\n height = extract_viewport_height(driver)\n return {'width': width, 'height': height}\n except:\n logger.info('Failed to get viewport size. Only window size is available')\n browser_size = driver.get_window_size()\n return {'width': browser_size['width'], 'height': browser_size['height']}", "def get_dimensions(self):\n\n\t\treturn (self._x, self._y, self._w, self._h)", "def get_screen_size(self):\n\n return [self._rows, self._columns]", "def _get_plot_dimensions(self) -> Tuple[int, int]:\n return self._width - AXIS_SPACE_PX, self._height - AXIS_SPACE_PX", "def getViewportAspectRatio(self) -> \"float\":\n return _coin.SbViewportRegion_getViewportAspectRatio(self)", "def get_dimensions(self):\t\t\n\t\t\n\t\treturn (self.x, self.y, self.w, self.h)", "def physical_size_mm(self) -> tuple[int, int]:\n return self._ptr.phys_width, self._ptr.phys_height", "def getWindowSize(self) -> \"SbVec2s const &\":\n return _coin.SbViewportRegion_getWindowSize(self)", "def getPixelsPerInch(self) -> \"float\":\n return _coin.SbViewportRegion_getPixelsPerInch(self)", "def canvas_size() -> Vec2:\n return _canvas.realsize", "def _get_viewportPosition(self) -> \"adsk::core::Ptr< adsk::core::Point2D >\" :\n return _core.MouseEventArgs__get_viewportPosition(self)", "def _get_height(self) -> \"int\" :\n return _core.Viewport__get_height(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from cursors. Can raise ConnectionFailure, OperationFailure, etc.
def run_operation( self, conn: Connection, operation: Union[_Query, _GetMore], read_preference: _ServerMode, listeners: Optional[_EventListeners], unpack_res: Callable[..., List[_DocumentOut]], ) -> Response: duration = None assert listeners is not None publish = listeners.enabled_for_commands if publish: start = datetime.now() use_cmd = operation.use_command(conn) more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come if more_to_come: request_id = 0 else: message = operation.get_message(read_preference, conn, use_cmd) request_id, data, max_doc_size = self._split_message(message) if publish: cmd, dbn = operation.as_command(conn) assert listeners is not None listeners.publish_command_start( cmd, dbn, request_id, conn.address, service_id=conn.service_id ) start = datetime.now() try: if more_to_come: reply = conn.receive_message(None) else: conn.send_message(data, max_doc_size) reply = conn.receive_message(request_id) # Unpack and check for command errors. if use_cmd: user_fields = _CURSOR_DOC_FIELDS legacy_response = False else: user_fields = None legacy_response = True docs = unpack_res( reply, operation.cursor_id, operation.codec_options, legacy_response=legacy_response, user_fields=user_fields, ) if use_cmd: first = docs[0] operation.client._process_response(first, operation.session) _check_command_response(first, conn.max_wire_version) except Exception as exc: if publish: duration = datetime.now() - start if isinstance(exc, (NotPrimaryError, OperationFailure)): failure: _DocumentOut = exc.details # type: ignore[assignment] else: failure = _convert_exception(exc) assert listeners is not None listeners.publish_command_failure( duration, failure, operation.name, request_id, conn.address, service_id=conn.service_id, ) raise if publish: duration = datetime.now() - start # Must publish in find / getMore / explain command response # format. if use_cmd: res: _DocumentOut = docs[0] elif operation.name == "explain": res = docs[0] if docs else {} else: res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] if operation.name == "find": res["cursor"]["firstBatch"] = docs else: res["cursor"]["nextBatch"] = docs assert listeners is not None listeners.publish_command_success( duration, res, operation.name, request_id, conn.address, service_id=conn.service_id, ) # Decrypt response. client = operation.client if client and client._encrypter: if use_cmd: decrypted = client._encrypter.decrypt(reply.raw_command_response()) docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) response: Response if client._should_pin_cursor(operation.session) or operation.exhaust: conn.pin_cursor() if isinstance(reply, _OpMsg): # In OP_MSG, the server keeps sending only if the # more_to_come flag is set. more_to_come = reply.more_to_come else: # In OP_REPLY, the server keeps sending until cursor_id is 0. more_to_come = bool(operation.exhaust and reply.cursor_id) if operation.conn_mgr: operation.conn_mgr.update_exhaust(more_to_come) response = PinnedResponse( data=reply, address=self._description.address, conn=conn, duration=duration, request_id=request_id, from_command=use_cmd, docs=docs, more_to_come=more_to_come, ) else: response = Response( data=reply, address=self._description.address, duration=duration, request_id=request_id, from_command=use_cmd, docs=docs, ) return response
[ "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def query( self, path_suffix, get_params ):\n #\n # Do sleep for delay query if necessary...\n if self.query_interval is not None:\n while time.time() < self.earliest_query_time:\n sleep_dur = self.earliest_query_time - time.time()\n time.sleep( sleep_dur )\n #~ Potential for scheduler thrashing if time difference\n # is tiny? Near-zero millis rounded down => repeated looping?\n \n #\n # Build & issue request...\n params = copy.copy( get_params )\n token = self.access_tokens[self.next_access_token_index]\n params['oauth_token'] = token\n \n path_suffix = path_suffix.lstrip( '/' )\n \n url = self.api_base_url + '/' + path_suffix + \"?\" + urllib.urlencode( params )\n \n try:\n response = urllib2.urlopen( url )\n except urllib2.HTTPError, e:\n raise e\n except urllib2.URLError, e:\n raise e\n \n raw_data = response.read()\n py_data = json.loads( raw_data )\n \n # Request error handling...\n response_code = int( py_data['meta']['code'] )\n if response_code != 200:\n error_type = py_data['meta']['errorType'][0]\n error_detail = py_data['meta']['errorDetail'][0]\n if error_type == 'rate_limit_exceeded':\n raise RateLimitExceededError( response_code, error_type, \n error_detail )\n \n raise FoursquareRequestError( response_code, error_type, \n error_detail )\n \n #\n # Prep for next call...\n self.next_access_token_index = \\\n ( self.next_access_token_index + 1 ) % len( self.access_tokens )\n \n if self.query_interval is not None:\n self.earliest_query_time = time.time() + self.query_interval\n \n #\n # Fin\n return py_data", "def call(self, max_retries=0):\n # type: (int) -> BxQueryResponse\n max_retries -= 1\n try:\n raw_result = self._call(self.request_url, self.parameters)\n result = BxQueryResponse(raw_result)\n except ce.QueryLimitExceededException:\n if max_retries >= 0:\n time.sleep(2)\n result = self.call(max_retries)\n else:\n raise\n return result", "def _api_query(self, command: str, req: Optional[Dict] = None) -> Union[Dict, List]:\n if req is None:\n req = {}\n log.debug(\n 'Poloniex API query',\n command=command,\n post_data=req,\n )\n\n tries = QUERY_RETRY_TIMES\n while tries >= 0:\n try:\n response = self._single_query(command, req)\n except requests.exceptions.ConnectionError as e:\n raise RemoteError(f'Poloniex API request failed due to {str(e)}')\n\n if response is None:\n if tries >= 1:\n backoff_seconds = 20 / tries\n log.debug(\n f'Got a recoverable poloniex error. 
'\n f'Backing off for {backoff_seconds}',\n )\n gevent.sleep(backoff_seconds)\n tries -= 1\n continue\n else:\n break\n\n if response is None:\n raise RemoteError(\n f'Got a recoverable poloniex error and did not manage to get a '\n f'request through even after {QUERY_RETRY_TIMES} '\n f'incremental backoff retries',\n )\n\n result: Union[Dict, List]\n try:\n if command == 'returnLendingHistory':\n result = rlk_jsonloads_list(response.text)\n else:\n # For some reason poloniex can also return [] for an empty trades result\n if response.text == '[]':\n result = {}\n else:\n result = rlk_jsonloads_dict(response.text)\n result = _post_process(result)\n except JSONDecodeError:\n raise RemoteError(f'Poloniex returned invalid JSON response: {response.text}')\n\n if isinstance(result, dict) and 'error' in result:\n raise RemoteError(\n 'Poloniex query for \"{}\" returned error: {}'.format(\n command,\n result['error'],\n ))\n\n return result", "def _query(self, rs):\n url = self._url(rs)\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n response = requests.get(url, headers)\n\n if response.ok:\n return response.json()\n else:\n print('{} status code for \"{}\"'.format(response.status_code, rs))\n return None", "def query(self, **kwargs):\n return self.iterate('query', **kwargs)", "def _perform_single_query(khoros_object, query, fields=None, cursor=None):\n # Construct the entire LiQL query\n cursor = '' if not cursor else liql.structure_cursor_clause(cursor)\n query = f\"{query} {cursor}\" if cursor else query\n\n # Perform the API call and retrieve the data\n response = liql.perform_query(khoros_object, liql_query=query)\n data = liql.get_returned_items(response)\n\n # Get the cursor when present\n cursor = None\n if response.get('data').get('next_cursor'):\n cursor = response['data'].get('next_cursor')\n\n # Add missing columns to message data as needed\n data = _add_missing_cols(data, fields)\n try:\n data = sorted(data, key=itemgetter(*tuple(data[0].keys())))\n except KeyError as missing_key:\n logger.error(f'Could not sort the message data because the \\'{missing_key}\\' key was missing.')\n\n # Return the user data and cursor\n return data, cursor", "def GetApiQueryResponseFromDb(api_query):\r\n status = 400\r\n content = co.DEFAULT_ERROR_MESSAGE\r\n\r\n if api_query and api_query.is_active:\r\n try:\r\n query_response = api_query.api_query_responses.get()\r\n\r\n if query_response:\r\n status = 200\r\n content = query_response.content\r\n else:\r\n status = 400\r\n content = {\r\n 'error': co.ERROR_INACTIVE_QUERY,\r\n 'code': status,\r\n 'message': co.ERROR_MESSAGES[co.ERROR_INACTIVE_QUERY]}\r\n except db.BadKeyError:\r\n status = 400\r\n content = {\r\n 'error': co.ERROR_INVALID_QUERY_ID,\r\n 'code': status,\r\n 'message': co.ERROR_MESSAGES[co.ERROR_INVALID_QUERY_ID]}\r\n\r\n response = {\r\n 'status': status,\r\n 'content': content\r\n }\r\n\r\n return response", "def execute(self):\n response = self.client.get_item(**self.build())\n return GetResponse(response, self.reconstructor)", "def _Dynamic_RunQuery(self, request, response):\n runquery_response = datastore_pb.QueryResult()\n self.__call('datastore_v3', 'RunQuery', request, runquery_response)\n if runquery_response.result_size() > 0:\n response.CopyFrom(runquery_response)\n return\n\n next_request = datastore_pb.NextRequest()\n next_request.mutable_cursor().CopyFrom(runquery_response.cursor())\n next_request.set_count(request.limit())\n self.__call('datastore_v3', 'Next', next_request, response)", "def 
_execute(sql, operation, cursor):\n result = None\n try:\n ts = datetime.now().timestamp()\n cursor.execute(sql)\n if operation == 'read':\n result = [row for row in cursor]\n query_time = datetime.now().timestamp() - ts\n app.logger.debug(f'LRS query returned {len(result)} rows in {query_time} seconds:\\n{sql}')\n else:\n result = cursor.statusmessage\n query_time = datetime.now().timestamp() - ts\n app.logger.debug(f'LRS query returned status {result} in {query_time} seconds:\\n{sql}')\n except psycopg2.Error as e:\n error_str = str(e)\n if e.pgcode:\n error_str += f'{e.pgcode}: {e.pgerror}\\n'\n error_str += f'on SQL: {sql}'\n app.logger.warning({'message': error_str})\n return result", "def execute(\r\n self,\r\n els_client = None,\r\n get_all = False,\r\n use_cursor = False,\r\n view = None,\r\n count = 25,\r\n fields = []\r\n ):\r\n ## TODO: add exception handling\r\n url = self._uri\r\n if use_cursor:\r\n url += \"&cursor=*\"\r\n if view:\r\n url += \"&view={}\".format(view)\r\n api_response = els_client.exec_request(url)\r\n self._tot_num_res = int(api_response['search-results']['opensearch:totalResults'])\r\n self._results = api_response['search-results']['entry']\r\n if get_all is True:\r\n while (self.num_res < self.tot_num_res) and not self._upper_limit_reached():\r\n for e in api_response['search-results']['link']:\r\n if e['@ref'] == 'next':\r\n next_url = e['@href']\r\n api_response = els_client.exec_request(next_url)\r\n self._results += api_response['search-results']['entry']\r\n with open('dump.json', 'w') as f:\r\n f.write(json.dumps(self._results))\r\n self.results_df = recast_df(pd.DataFrame(self._results))", "def _api_query(\n self,\n endpoint: str,\n options: Optional[Dict[str, Any]] = None,\n pagination_next_uri: str = None,\n ignore_pagination: bool = False,\n ) -> List[Any]:\n request_verb = \"GET\"\n if pagination_next_uri:\n request_url = pagination_next_uri\n else:\n request_url = f'/{self.apiversion}/{endpoint}'\n if options:\n request_url += urlencode(options)\n\n timestamp = str(int(time.time()))\n message = timestamp + request_verb + request_url\n\n signature = hmac.new(\n self.secret,\n message.encode(),\n hashlib.sha256,\n ).hexdigest()\n log.debug('Coinbase API query', request_url=request_url)\n\n self.session.headers.update({\n 'CB-ACCESS-SIGN': signature,\n 'CB-ACCESS-TIMESTAMP': timestamp,\n 'CB-ACCESS-KEY': self.api_key,\n # This is needed to guarantee the up to the given date\n # API version response.\n 'CB-VERSION': '2019-08-25',\n })\n full_url = self.base_uri + request_url\n try:\n response = self.session.get(full_url)\n except requests.exceptions.ConnectionError as e:\n raise RemoteError(f'Coinbase API request failed due to {str(e)}')\n\n if response.status_code == 403:\n raise CoinbasePermissionError(f'API key does not have permission for {endpoint}')\n\n if response.status_code != 200:\n raise RemoteError(\n f'Coinbase query {full_url} responded with error status code: '\n f'{response.status_code} and text: {response.text}',\n )\n\n try:\n json_ret = rlk_jsonloads_dict(response.text)\n except JSONDecodeError:\n raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}')\n\n if 'data' not in json_ret:\n raise RemoteError(f'Coinbase json response does not contain data: {response.text}')\n\n final_data = json_ret['data']\n\n # If we got pagination and this is the first query, gather all the subsequent queries\n if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination:\n if 'next_uri' not in 
json_ret['pagination']:\n raise RemoteError('Coinbase json response contained no \"next_uri\" key')\n\n next_uri = json_ret['pagination']['next_uri']\n if not next_uri:\n # As per the docs: https://developers.coinbase.com/api/v2?python#pagination\n # once we get an empty next_uri we are done\n return final_data\n\n additional_data = self._api_query(\n endpoint=endpoint,\n options=options,\n pagination_next_uri=next_uri,\n )\n final_data.extend(additional_data)\n\n return final_data", "def cursor(self, *args, **kwargs):\n return self.connection.cursor(*args, **kwargs)", "def __iter__(self):\n return self._perform_query()", "def _cursor_retry_wrapper(self, method_name, query_string, param_tuple):\n # logging.info(query_string)\n # logging.info(param_tuple)\n\n if not self.retry_on_error:\n getattr(self.cursor, method_name)(query_string, param_tuple)\n return\n\n final_exception = None\n tries = 0\n while True:\n try:\n # Either execute or execute_many\n getattr(self.cursor, method_name)(query_string, param_tuple)\n break # call succeeded, don't try again\n except self.exceptions as e:\n # Log the error and try again. Close the problematic connection\n # and make a new one.\n logging.warning(\"MySQLApi exception on query; will retry.\")\n logging.warning(e)\n final_exception = e\n self.connection.close()\n self.connect_to_db()\n tries += 1\n if tries >= self.num_tries:\n # That's enough tries, just throw an error.\n logging.info(query_string)\n logging.info(param_tuple)\n logging.error(\"Recurrent exception, gave up querying.\")\n raise final_exception\n self.sleep_for_backoff_interval(tries)", "def __query(self, sql, con=None, ret_dict=0):\n if con:\n # execute only without closing the connection\n try:\n cursor = con.getCursor(ret_dict=ret_dict)\n #self.log(\"cursor: %s\"%(cursor))\n cursor.execute(sql)\n #self.log(\"returning results.\")\n return cursor.fetchall()\n except:\n self.log( \"Exception in query: %s\\nException: %s\" % \\\n (sql, traceBack()), logging.ERROR )\n else:\n try:\n try:\n con = self.__proof.getConnection(self.__db_name)\n cursor = con.getCursor(ret_dict=ret_dict)\n cursor.execute(sql)\n return cursor.fetchall()\n except:\n self.log( \"Exception in query: %s\" % (traceBack()), logging.ERROR )\n finally:\n self.__proof.closeConnection(con)", "def _api_query_request(self, endpoint, data=None):\n auth_header = {\n 'Authorization': f'Bearer {self._access_token}'\n }\n response = requests.get(f'{self._api_url}{endpoint}', headers=auth_header, params=data)\n response_data = response.json()\n\n if response.ok:\n return response_data\n elif response.status_code == 401 and response_data['error']['message'] == ERROR_MSG_TOKEN_EXPIRED:\n self._refresh_authorisation()\n return self._api_query_request(endpoint, data)\n elif response.status_code == 429:\n timeout = int(response.headers['Retry-After']) + 10\n print(f'Rate limited. Waiting: {timeout} seconds')\n time.sleep(timeout)\n return self._api_query_request(endpoint, data)\n else:\n print('An unexpected error has occurred')\n print(response.text)\n exit(1)", "def executeQueryFetch(self, query = None):\n\t\tif query == None:\n\t\t\tquery = self.query\n\t\tself.cursor.execute(query)\n\t\tself.result = self.cursor.fetchall()\n\t\treturn self.result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Code to apply a Savitzky-Golay (savgol) filter to the recorded body pose.
def savgol_filter(body_3D_pose, left_hand_3D_pose,right_hand_3D_pose, threshold = 0.2): #Define properties of the savgol filter window_length, polyorder = 11, 2 #iterate over the hands for hand_pose in right_hand_3D_pose, left_hand_3D_pose: #iterate over the joints in each hand for joint in HAND: #Apply savgol filter to x, y and z position lists of that joint seperately x_filtered = signal.savgol_filter(list(zip(*hand_pose[joint.value]))[0], window_length, polyorder) y_filtered = signal.savgol_filter(list(zip(*hand_pose[joint.value]))[1], window_length, polyorder) z_filtered = signal.savgol_filter(list(zip(*hand_pose[joint.value]))[2], window_length, polyorder) #Define a list of whether the point we have found is believed to be valid, for use in the step below lost_track_list = list(zip(*hand_pose[joint.value]))[3] #Define a list of (x,y,z) points using the filtered list above smoothed_list = [list(elem) for elem in list(zip(x_filtered,y_filtered,z_filtered, lost_track_list))] #Update the hand_pose list for that joint with the new smoothed list of said joint's positions hand_pose[joint.value] = smoothed_list #iterate over the body's joints for joint in BODY: #Apply savgol filter to x, y and z position lists of that joint seperately x_filtered = signal.savgol_filter(list(zip(*body_3D_pose[joint.value]))[0], window_length, polyorder) y_filtered = signal.savgol_filter(list(zip(*body_3D_pose[joint.value]))[1], window_length, polyorder) z_filtered = signal.savgol_filter(list(zip(*body_3D_pose[joint.value]))[2], window_length, polyorder) #Define a list of whether the point we have found is believed to be valid, for use in the step below lost_track_list = list(zip(*body_3D_pose[joint.value]))[3] #Define a list of (x,y,z) points using the filtered list above smoothed_list = [list(elem) for elem in list(zip(x_filtered,y_filtered,z_filtered, lost_track_list))] #Update the hand_pose list for that joint with the new smoothed list of said joint's positions body_3D_pose[joint.value] = smoothed_list return body_3D_pose, left_hand_3D_pose,right_hand_3D_pose
[ "def get_arithmetic_mean_filter(self, kernel):\n kernel= np.array([[-1,-1,-1],[-1, 9,-1],[-1,-1,-1]])\n sharpened_img = cv2.filter2D(sp_05, -1, kernel_sharpening) \n return sharpened_img", "def apply(self, sed):\n\t\tWaveLength = np.array(sed['wavelength'])\n\t\tFluxLam = np.array(sed['flux'])\n\t\tif ('zeropoint' in sed):\n\t\t\tZeroPoint = np.array(sed['zeropoint'])\n\t\telse:\n\t\t\tZeroPoint = np.full(len(WaveLength),3.63e-5)\n\t\t\n\t\tApplyFilter = np.interp(WaveLength, self.wavelength, self.throughput, left=0.0, right=0.0)\n\t\tindex, = np.where(ApplyFilter > 0.0) # Range of wavelengths over which the filter is non-zero\n\t\t\n\t\tif len(index) == 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\tintslice = slice(index.min(),index.max())\n\t\t\t\n\t\t\tif (self.format == 'energy'):\n\t\t\t\tFilterFlux = integrate.trapz(ApplyFilter[intslice]*FluxLam[intslice],WaveLength[intslice])\n\t\t\t\tFilterNorm = integrate.trapz(ApplyFilter[intslice]*ZeroPoint[intslice],WaveLength[intslice])\n\t\t\telse:\n\t\t\t\tFilterFlux = integrate.trapz(ApplyFilter[intslice]*WaveLength[intslice]*FluxLam[intslice],WaveLength[intslice])\n\t\t\t\tFilterNorm = integrate.trapz(ApplyFilter[intslice]*WaveLength[intslice]*ZeroPoint[intslice],WaveLength[intslice])\n\t\t\n\t\t\treturn FilterFlux/FilterNorm", "def filter_frame(frame):\r\n\r\n denoised = ndimage.gaussian_filter(frame,0.03)\r\n \r\n return denoised", "def preprocess(frame):\n return cv2.GaussianBlur(frame, (5, 5), 2)", "def sg_smooth(array, num_joints=25, dim=2): \n for i in range(num_joints):\n joint = array[:, dim*i:dim*i+dim] \n filtered_joint = savgol_filter(joint, 15, 2, axis=0) # window length: 15, poly degree: 2\n array[:, dim*i:dim*i+dim] = filtered_joint\n return array", "def smooth(processed):\n smoothed = savgol_filter(processed, 45, 6)\n # For future this could be a window that you type the order and the\n # number of points into, and then it will plot it to show you the\n #smooth before moving on\n return smoothed", "def gaussfiltering(img, sigma):\n\n return np.array(smooth_img)", "def naive_smoother(img):\n\t\t# type: (Image) -> Image\n\t\treturn img.filter(ImageFilter.GaussianBlur())", "def sample_then_blur(self,pts_t,text_1d,mag,psf_x,psf_y,x,y):\n\t\t# propagate the coordinate of each sensor pixel onto the \n\t\t# texture plane\n\t\tX_s = (np.arange(self.cfg['szx_sensor']\\\n\t\t\t+psf_x.shape[1]-1+psf_y.shape[1]-1)\\\n\t\t\t-self.cfg['x_prinpts']-(psf_x.shape[1]-1)/2\\\n\t\t\t-(psf_y.shape[1]-1)/2)/mag\\\n\t\t\t-x/self.img_cfg['pix_in_m']\n\t\tY_s = (np.arange(self.cfg['szy_sensor']\\\n\t\t\t+psf_x.shape[0]-1+psf_y.shape[0]-1)\\\n\t\t\t-self.cfg['y_prinpts']-(psf_y.shape[0]-1)/2\\\n\t\t\t-(psf_x.shape[0]-1)/2)/mag\\\n\t\t\t-y/self.img_cfg['pix_in_m']\n\n\t\t# As we are using round padding, we need to mod the X_s\n\t\t# and Y_s to make them within the range of XX and YY\n\t\tX_s = np.remainder(X_s, self.img_cfg['res'][1]-1)\n\t\tY_s = np.remainder(Y_s, self.img_cfg['res'][0]-1)\n\n\t\tX_s, Y_s = np.meshgrid(\n\t\t\tX_s, Y_s\n\t\t)\n\t\tpts_s = np.concatenate(\n\t\t\t(\n\t\t\t\tnp.reshape(X_s,(-1,1)),\n\t\t\t\tnp.reshape(Y_s,(-1,1))\n\t\t\t),\n\t\t\taxis = 1\n\t\t)\n\t\t# the sharp image captured by camera can be approximated\n\t\t# as the interpolation of the sensor coordinates onto\n\t\t# the texture coordinate map\n\t\tP = interpolate.griddata(pts_t, text_1d, pts_s, method = 'linear')\n\t\tP = np.reshape(P,X_s.shape)\n\t\t\n\t\t# We then convolve the sharp image with the blur kernel\n\t\ttemp = 
signal.convolve2d(P,psf_x,mode='valid')\n\t\treturn signal.convolve2d(temp,psf_y,mode='valid')", "def post_process(ps_volume, inv_depth, accum_count):\n\n mask = np.ones(ps_volume.shape[1:], dtype=np.bool)\n inv_depth_image = np.zeros(ps_volume.shape[1:], dtype=np.float64)\n \"\"\" YOUR CODE STARTS HERE \"\"\"\n inv_depth_image = compute_depths(ps_volume, inv_depth)\n mask = inv_depth_image <= np.mean(\n inv_depth_image) + (2.5 * np.std(inv_depth_image))\n inv_depth_image = scipy.ndimage.gaussian_filter(inv_depth_image, 2)\n\n print(mask)\n \"\"\" YOUR CODE ENDS HERE \"\"\"\n\n return inv_depth_image, mask", "def run_SE(self, updates, no_conv):\n filts = self.par_info['filt']\n\n # get list of images, all of which will be run in dual image mode\n if no_conv:\n images = self.par_info['image']\n else:\n images = self.par_info['convim']\n\n # detection image\n wdet = np.where(self.par_info['filt'] == self.detect_filt)\n detim = images[wdet][0]\n\n for i in range(len(images)):\n image = images[i]\n\n rms = self.par_info['rms'][i]\n cat = os.path.join(self.outdir, '%s_cat.fits' % filts[i]) \n options = self.Config.options(filts[i])\n params = {}\n for option in options:\n params[option] = self.Config.get(filts[i], option)\n # detection parameters should be from detection image section\n for option in ['-detect_minarea', '-detect_thresh', '-filter',\\\n '-filter_name', '-deblend_nthresh', \\\n '-deblend_mincont', '-back_filtersize', \\\n '-thresh_type', '-back_size']:\n params[option] = self.Config.get(self.detect_filt, option)\n\n # update rms maps for dual image mode\n params['-weight_image'] = '%s,%s' % (self.detect_rms, rms)\n \n params.update(updates)\n \n # fix filenames of SExtractor required files\n for key in ['-c', '-parameters_name', '-filter_name', \\\n '-starnnw_name']:\n params[key] = os.path.join(self.scriptdir, params[key])\n\n # set parameters specific to this image\n params['-gain'] = '%.1f' % self.par_info['exptime'][i]\n params['-mag_zeropoint'] = '%.4f' % self.par_info['zp'][i]\n params['-catalog_name'] = cat\n # segmentation map, filtered and background images\n if i == wdet[0][0]:\n seg = os.path.join(self.outdir, '%s_seg.fits' % filts[i]) \n bck = os.path.join(self.outdir, '%s_bck.fits' % filts[i]) \n fil = os.path.join(self.outdir, '%s_fil.fits' % filts[i]) \n aps = os.path.join(self.outdir, '%s_aps.fits' % filts[i])\n checkstr = 'SEGMENTATION,BACKGROUND,FILTERED,APERTURES'\n params['-checkimage_type'] = checkstr\n params['-checkimage_name'] = '%s,%s,%s,%s' % (seg,bck,fil,aps)\n\n # set up SE parameters\n args = [os.path.join(self.scriptdir, 'sex'), detim, image]\n for key,value in params.iteritems():\n args.append(key)\n args.append(value)\n subprocess.check_call(args)", "def test_process_slcwa_scores_filtered(self):\n positive_scores = torch.rand(self.batch_size, 1, requires_grad=True)\n negative_scores = torch.rand(self.batch_size, self.num_neg_per_pos, requires_grad=True)\n batch_filter = torch.rand(self.batch_size, self.num_neg_per_pos) < 0.5\n self.help_test_process_slcwa_scores(\n positive_scores=positive_scores,\n negative_scores=negative_scores[batch_filter],\n batch_filter=batch_filter,\n )", "def boxfiltering(img, filter_size):\n\n return np.array(smooth_img)", "def p_s(self, p, avg, mke):\r\n p_s1 = self.up * self.up * mke.dudx + self.vp * self.up * mke.dvdx + self.w_uvp * self.up * mke.dwdx #uv grid\r\n p_s2 = self.up * self.vp * mke.dudy + self.vp * self.vp * mke.dvdy + self.w_uvp * self.vp * mke.dwdy #uv grid\r\n p_s3 = self.up * self.w_uvp * mke.dudz + 
self.vp * self.w_uvp * mke.dvdz + self.w_uvp * self.w_uvp * mke.dwdz #uv grid\r\n #p_s3=interp_w_uv(p,p_s3_w) #uv grid\r\n self.P_s = -(p_s1 + p_s2 + p_s3)", "def sar_bm3d_filter(self, image, image_name, L=50):\n width, height = image.shape\n out_file = '{}.mat'.format(image_name)\n out_file_python = os.path.join(self.OUT_DIR_PYTHON, out_file)\n out_file_matlab = os.path.join('temp', out_file)\n\n image = image.astype(np.double) # The MATLAB SAR-BM3D filter wants the image encoded as a double.\n savemat(out_file_python, {'image_data': image}) # Encode the numpy ndarray as a MATLAB .mat file\n FILTERED_IMAGE = self.eng.SARBM3D_Python_Helper(out_file_matlab, L)\n FILTERED_IMAGE = np.array(FILTERED_IMAGE._data) # Convert from mlarray.double into numpy ndarray\n # Rescale back to [-1, 1]\n FILTERED_IMAGE = FILTERED_IMAGE.astype(np.float32) # Convert back to float32\n FILTERED_IMAGE = FILTERED_IMAGE.reshape((width, height), order='F') # Reshape into original width and height\n FILTERED_IMAGE = cv2.normalize(FILTERED_IMAGE, None, alpha=-1, beta=1, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_32F)\n os.remove(out_file_python) # Delete the .mat file\n return FILTERED_IMAGE", "def get_mean_negative_weight(self, incoming_projection):", "def get_mean_positive_weight(self, incoming_projection):", "def evolve(self, stage):", "def SignalFilter_LPFBessel(\n signal: np.ndarray,\n LPF: float,\n samplefreq: float,\n NPole: int = 8,\n reduce: bool = False,\n) -> np.ndarray:\n\n if debugFlag:\n print(\"sfreq: %f LPF: %f HPF: %f\" % (samplefreq, LPF))\n flpf = float(LPF)\n sf = float(samplefreq)\n wn = [flpf / (sf / 2.0)]\n reduction = 1\n if reduce:\n if LPF <= samplefreq / 2.0:\n reduction = int(samplefreq / LPF)\n if debugFlag is True:\n print(\n \"signalfilter: samplef: %f wn: %f, lpf: %f, NPoles: %d \"\n % (sf, wn, flpf, NPole)\n )\n filter_b, filter_a = spSignal.bessel(NPole, wn, btype=\"low\", output=\"ba\")\n if signal.ndim == 1:\n sm = np.mean(signal)\n w = spSignal.lfilter(\n filter_b, filter_a, signal - sm\n ) # filter the incoming signal\n w = w + sm\n if reduction > 1:\n w = spSignal.resample(w, reduction)\n return w\n if signal.ndim == 2:\n sh = np.shape(signal)\n for i in range(0, np.shape(signal)[0]):\n sm = np.mean(signal[i, :])\n w1 = spSignal.lfilter(filter_b, filter_a, signal[i, :] - sm)\n w1 = w1 + sm\n if reduction == 1:\n w1 = spSignal.resample(w1, reduction)\n if i == 0:\n w = np.empty((sh[0], np.shape(w1)[0]))\n w[i, :] = w1\n return w\n if signal.ndim == 3:\n sh = np.shape(signal)\n for i in range(0, np.shape(signal)[0]):\n for j in range(0, np.shape(signal)[1]):\n sm = np.mean(signal[i, j, :])\n w1 = spSignal.lfilter(filter_b, filter_a, signal[i, j, :] - sm)\n w1 = w1 + sm\n if reduction == 1:\n w1 = spSignal.resample(w1, reduction)\n if i == 0 and j == 0:\n w = np.empty((sh[0], sh[1], np.shape(w1)[0]))\n w[i, j, :] = w1\n return w\n if signal.ndim > 3:\n print(\"Error: signal dimesions of > 3 are not supported (no filtering applied)\")\n return signal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to overwrite a specific invalid point with its last good value, defining the last valid point and computing the replacement point from the corresponding offset joint.
def overwrite_position(joint, point_bad, frame_num, last_good_list, pose, body_3D_pose, last_good_body_list, offset_joint = False): #If the point is not invalid, just update the last good list if point_bad == False: last_good_list[joint.value] = [pose[joint.value][frame_num][0], pose[joint.value][frame_num][1], pose[joint.value][frame_num][2], pose[joint.value][frame_num][3], frame_num] #If the point is invalid and we have a previous valid point elif point_bad == True and last_good_list[joint.value] != []: #Treat the offsets from the wrists seperately as they are stored on seperate lists to the 'pose' list if we call for them from the left or right hand pose lists if offset_joint == BODY.LEFT_WRIST or offset_joint == BODY.RIGHT_WRIST: #If we have a previous offset valid point if last_good_body_list[offset_joint.value] != []: #Define the frame that the previous valid point is from offset_original_frame = last_good_list[joint.value][4] #Find the previous valid offset point location offset_position = body_3D_pose[offset_joint.value][offset_original_frame] #Find said location in relation to the offset joint old_pos_from_offset = np.subtract(last_good_list[joint.value][0:3], offset_position[0:3]) #Use the current position of the offset joint to find the position to overwrite the invalid point with new_pos = np.add(old_pos_from_offset[0:3], last_good_body_list[offset_joint.value][0:3]) #Update the invalid point on the pose list with the valid point pose[joint.value][frame_num] = [new_pos[0], new_pos[1], new_pos[2], True] #If you have no previous valid point, do nothing else: pass #For any other offset joint elif offset_joint != False: if last_good_list[offset_joint.value] != []: #Define the frame that the previous valid point is from offset_original_frame = last_good_list[joint.value][4] #Find the previous valid offset point location offset_position = pose[offset_joint.value][offset_original_frame] #Find said location in relation to the offset joint old_pos_from_offset = np.subtract(last_good_list[joint.value][0:3], offset_position[0:3]) #Use the current position of the offset joint to find the position to overwrite the invalid point with new_pos = np.add(old_pos_from_offset[0:3], last_good_list[offset_joint.value][0:3]) #Update the invalid point on the pose list with the valid point pose[joint.value][frame_num] = [new_pos[0], new_pos[1], new_pos[2], True] #If we do not have a previous valid point else: pass #If we call this without an offset joint, just use the last good value elif offset_joint == False: pose[joint.value][frame_num] = last_good_list[joint.value] #If we do not have a previous valid point else: pass return last_good_list, pose
[ "def error_correction(y_original, ind, labels, threshold):\n\n# WARNING, this function has not been tested and may not improve the\n# accuracy of the reconstruction, use with caution\n\n y = y_original.copy()\n ind = np.array(ind)\n # gives the position of the points that have a label == 1\n ind_var = [i for i in range(len(ind)) if labels[i]==1]\n for i in ind_var:\n # last known point before the point with label==1\n ref_point = y[ind[i-1]]\n # infimum of interval where the original point should be\n inf = ref_point - np.abs(ref_point)*threshold\n # supremum of interval where the original point should be\n sup = ref_point + np.abs(ref_point)*threshold\n # position of reconstructed points to correct,\n # between the 2 known points\n x = np.arange(ind[i-1]+1, ind[i],1)\n if x.size == 0:\n continue\n else:\n for pt in x:\n if y[pt]< inf:\n y[pt] = ref_point*(1-threshold)\n elif y[pt]> sup:\n y[pt]= ref_point*(1+threshold)\n else :\n continue\n return y", "def adjust_move_node(self,i,new_pnt,nbrs):\n\n # HERE -- not compatible with pure python code.\n \n # find existing constrained edges\n # for each constrained edge:\n # will the updated edge still be valid?\n # if not, update new_pnt to be halfway between the old and the new,\n # and loop again.\n\n for shorten in range(15): # maximum number of shortenings allowed\n all_good = True\n\n # Create a probe vertex so we can call check_line_is_clear()\n # sort of winging it here for a measure of close things are.\n if abs(self.points[i] - new_pnt).sum() / (1.0+abs(new_pnt).max()) < 1e-8:\n log.warning(\"adjust_move_node: danger of roundoff issues\")\n all_good = False\n break\n\n all_good=self.check_line_is_clear_batch(p1=new_pnt,n2=nbrs)\n if all_good:\n break\n else:\n new_pnt = 0.5*(self.points[i]+new_pnt)\n log.debug('adjust_move_node: adjusting') \n if all_good:\n return new_pnt\n else:\n return self.points[i]", "def pointing_constant_offset(self,pointing_rms, pointing_timescale,PB_FWHM230):\n self.PB_FWHM = PB_FWHM230 / (self.chan_freq.mean() / 230e9) # convert 230 GHz PB to current obs frequency\n self.num_mispoint_epochs = max(1, int(np.floor(self.obslength / (pointing_timescale * 60.)))) # could be number of scans, for example\n self.mjd_per_ptg_epoch = (self.mjd_obs_end - self.mjd_obs_start) / self.num_mispoint_epochs\n self.mjd_ptg_epoch_timecentroid = np.arange(self.mjd_obs_start,self.mjd_obs_end,\n self.mjd_per_ptg_epoch) + (self.mjd_per_ptg_epoch/2.)\n\n \n self.pointing_offsets = pointing_rms.reshape(self.Nant,1) * np.random.randn(self.Nant,self.num_mispoint_epochs) # units: arcsec\n for ant in range(self.Nant):\n ind = (self.mjd_ptg_epoch_timecentroid < self.mjd_ant_rise[ant]) \\\n | (self.mjd_ptg_epoch_timecentroid > self.mjd_ant_set[ant])\n\n self.pointing_offsets[ant,ind] = np.nan # this masks out pointing offsets for stowed antennas\n\n\n\n PB_model = ['gaussian']*self.Nant # primary beam model set in input config file. Hardwired to Gaussian for now. 
\n\n amp_errors = np.zeros([self.Nant,self.num_mispoint_epochs])\n for ant in range(self.Nant):\n if PB_model[ant] == 'consine3':\n amp_errors[ant,:] = np.cos(self.pointing_offsets[ant,:]/206265.)**3 #placeholder, incorrect\n\n elif PB_model[ant] == 'gaussian':\n amp_errors[ant,:] = np.exp(-0.5*(self.pointing_offsets[ant,:]/(self.PB_FWHM[ant]/2.35))**2) \n\n \n self.pointing_amp_errors = amp_errors", "def adjust_alignment_points(\n self, adjust_point=2, axis=2, variable=\"implant_length\"\n ):\n to_adjust = self.reference_pads_positions[adjust_point - 1][axis - 1]\n to_adjust += float(self.sensor_pad_file[\"additional_params\"].get(variable, 0))\n new_coord = list(self.reference_pads_positions[adjust_point - 1])\n new_coord[axis - 1] = to_adjust\n self.reference_pads_positions[adjust_point - 1] = tuple(new_coord)", "def test_set_final_point_fails_if_scalar():\n point = 42\n v.final_point = point", "def constrain_origin(self, new_origin):\n # type: (Point) -> Point\n return Point(0, 0)", "def test_position_adjustment_returns_0_on_regular_spot(self):\n b = cs.Board()\n position = 2\n zero = b.position_adjustment(position)\n assert zero == 0", "def set_zero_point(self):\n self.current_position = 0.0\n self.goal_position = 0.0", "def undo(self, guess_coord):\n coord = None\n while coord != guess_coord:\n coord = self.coord_changes.pop()\n value = self.solver.solution[coord]\n del self.solver.solution[coord]", "def _reset_pt(pt, value, pt_range):\n pt = int(np.clip(pt, a_min=pt_range[0], a_max=pt_range[1]) if pt >= 0 else value)\n return pt", "def _clean_up_coordinates(self):\n self.data.fillna(method='bfill', inplace=True)", "def adjust_fgmax_1d(x1_desired, x2_desired, x1_domain, dx):\n\n i1 = numpy.floor((x1_desired-x1_domain - 0.5*dx)/dx)\n x1_new = x1_domain + (i1 + 0.5)*dx\n i2 = numpy.floor((x2_desired-x1_domain + 0.5*dx)/dx)\n x2_new = x1_domain + (i2 + 0.5)*dx\n npoints = int(i2 - i1) + 1\n return x1_new, x2_new, npoints", "def fixConstraintViolations(self, proposed, previous, fixInfo):\n # DESIGN\n # While not okay:\n # 1. See if cutting the step will fix it.\n # 2. If not, try rotating towards a random perpendicular. Repeat 1.\n # 3. If not, try a new random perpendicular. Repeat 1. Repeat N times.\n # TODO should this be specific to step manipulators, or something else?\n # TODO updating opt point in place! Is this safe?\n minStepSize = fixInfo['minStepSize']\n stepVector = dict((var, proposed[var] - previous[var]) for var in self._optVars)\n stepDistance, stepDirection, _ = mathUtils.calculateMagnitudeAndVersor(list(stepVector.values()))\n if 'originalStepSize' not in fixInfo:\n fixInfo['originalStepSize'] = stepDistance\n if 'perpDir' in fixInfo:\n perpDir = fixInfo['perpDir']\n # if not done cutting step, start cutting\n if stepDistance > minStepSize:\n # cut step again\n stepSize = 0.5 * stepDistance # TODO user option?\n for v, var in enumerate(stepVector):\n proposed[var] = previous[var] + stepSize * stepDirection[v]\n print(' ... cutting step ...') # norm step to {}, new norm opt {}'.format(stepSize, proposed))\n return proposed, stepSize, fixInfo\n else:\n ### rotate vector and restore full step size\n stepSize = fixInfo['originalStepSize']\n # store original direction\n if 'originalDirection' not in fixInfo:\n fixInfo['originalDirection'] = np.atleast_1d(stepDirection)\n # if this isn't the first time, check if there's angle left to rotate through; reset if not\n if 'perpDir' in fixInfo:\n ang = mathUtils.angleBetweenVectors(stepDirection, fixInfo['perpDir'])\n print(' ... 
trying angle:', ang)\n if ang < self._minRotationAngle:\n del fixInfo['perpDir']\n\n if 'perpDir' not in fixInfo:\n # find perpendicular vector\n perp = randomUtils.randomPerpendicularVector(fixInfo['originalDirection'])\n # NOTE we could return to point format, but no reason to\n # normalize perpendicular to versor and resize\n rotations = fixInfo.get('numRotations', 0)\n if rotations > self._numRandomPerp:\n raise NoConstraintResolutionFound\n _, perpDir, _ = mathUtils.calculateMagnitudeAndVersor(perp)\n fixInfo['perpDir'] = perpDir\n fixInfo['numRotations'] = rotations + 1\n # END fixing perpendicular direction\n # rotate vector halfway towards perpendicular\n perpDir = fixInfo['perpDir']\n\n # rotate towards selected perpendicular\n splitVector = {} # vector that evenly divides stepDirection and perp\n for v, var in enumerate(self._optVars):\n splitVector[var] = stepDirection[v] + perpDir[v]\n #splitVector[var] = - stepDirection[v] + perpDir[v]\n _, splitDir, _ = mathUtils.calculateMagnitudeAndVersor(list(splitVector.values()))\n for v, var in enumerate(self._optVars):\n proposed[var] = previous[var] + stepSize * splitDir[v]\n print(' ... rotating step ...') #ed norm direction to {}, new norm opt {}'.format(splitDir, proposed))\n return proposed, stepSize, fixInfo", "def fix_pos(i, j, ii, jj):\n return (i, j) if pos_illegal(ii, jj) else (ii, jj)", "def test_remap_position_outside_expected_range(self):\n coordinate = {\"chromosome\": \"1\", \"position\": 35, \"reference\": \"A\"}\n align_tuples = [\n (100,\"1\",100,300,\"2\"),\n (300,\"2\",200,20,\"7\") \n ]\n new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])\n self.assertEqual(new_mapping, None)\n coordinate = {\"chromosome\": \"1\", \"position\": 201, \"reference\": \"A\"}\n align_tuples = [\n (100,\"1\",100,300,\"2\"),\n (300,\"2\",200,20,\"7\") \n ]\n new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])\n self.assertEqual(new_mapping, None)", "def extrapolate(self):\n # Check if a copy of the parameters was already made.\n is_empty = len(self.params_copy) == 0\n for group in self.param_groups:\n for param in group[\"params\"]:\n u = self.update(param, group)\n if is_empty:\n self.params_copy.append(param.data.clone()) # save w[t]\n\n param.data += u # w[t + .5] = w[t] - eta * F(w[t])", "def imputeNaN(data, newValue):\n\tdata[np.isnan(data)] = newValue; # Se asigno este valor de manera arbitraria para que no marcara un error de validacion por valores muy grandes", "def __validate_point(self, point):\n\n if point.x() < 0:\n point.setX(0)\n\n if point.y() < 0:\n point.setY(0)\n\n img_width = self._data.shape[1] - 1\n if point.x() > img_width:\n point.setX(img_width)\n\n img_height = self._data.shape[0] - 1\n if point.y() > img_height:\n point.setY(img_height)\n\n return point", "def reset_position(self):\n self.xyz = self.xyz + self.tfm" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to crop a pose list to a certain length
def crop(listtocrop, length, start = 0):
    croppedlist = []
    for row in listtocrop:
        croppedlist.append(row[start:length+start])
    return croppedlist
[ "def cropImage():", "def crop_receptive(batch, crop_size):\n n,hx,wx,_ = batch.shape\n hy,wy = crop_size\n dhq, dhr = (hx-hy)//2, (hx-hy)%2\n dwq, dwr = (wx-wy)//2, (wx-wy)%2\n return batch[:, dhq: hx - (dhq + dhr), dwq: wx - (dwq + dwr) ]", "def crop(image):\r\n return image[60:-25, :, :] # remove the sky and the car front\r", "def crop_on_annotations():\n #if len(os.listdir(cropped_output)) == 0:\n annotations = load_annotations()\n image_list = create_image_list(annotations)\n crop_images(image_list, annotations)", "def crop_to_shape(x: torch.Tensor, shape: Tuple[int, int]) -> torch.Tensor:\n h, w = x.shape[-2:]\n\n if h > shape[0]:\n x = x[:, :, : shape[0], :]\n if w > shape[1]:\n x = x[:, :, :, : shape[1]]\n return x", "def crop_first(signal: np.array, cut_size: int=14400) -> np.array:\n # cut the last signal part of length cut\n return signal[cut_size:]", "def crop(a, n):\n start = random.randint(0, len(a) - n - 1)\n return a[start:start+n]", "def crop_landmark2(image, landmarks, part, show_crop=False):\n dims = np.load('landmark_dims.npy')\n\n if (part == \"left eyebrow\" or part == 0):\n rango = range(17, 22)\n w, h = dims[0] // 2\n elif (part == \"right eyebrow\" or part == 1):\n rango = range(22, 27)\n w, h = dims[1] // 2\n elif (part == \"nose\" or part == 2):\n rango = range(27, 36)\n w, h = dims[5] // 2\n elif (part == \"left eye\" or part == 3):\n rango = range(36, 42)\n w, h = dims[2] // 2\n elif (part == \"right eye\" or part == 4):\n rango = range(42, 48)\n w, h = dims[3] // 2\n elif (part == \"mouth\" or part == 5):\n rango = range(48, 68)\n w, h = dims[4] // 2\n\n landmarks = np.array(landmarks)\n rango = np.array(rango)\n x_max = int(landmarks[rango, 0].max())\n x_min = int(landmarks[rango, 0].min())\n y_max = int(landmarks[rango, 1].max())\n y_min = int(landmarks[rango, 1].min())\n\n X = int(np.mean((x_min, x_max)).round(0))\n Y = int(np.mean((y_min, y_max)).round(0))\n\n landmark = _crop_image(image, X, Y, w, h)\n if show_crop:\n cv2.imshow(\"Image\", landmark)\n cv2.waitKey(15000)\n # cv2.waitKey(0)\n return landmark", "def random_crop(self, length, n_crops=1):\n length = parse_depth(length, check_positive=True, var_name=\"length\")\n self._check_segment_lengths(length)\n wells = self.iter_level(-2)\n p = np.array([sum([segment.length for segment in item]) for item in wells])\n random_wells = Counter(np.random.choice(wells, n_crops, p=p/sum(p)))\n for well in wells:\n if well in random_wells:\n n_well_crops = random_wells[well]\n p = np.array([item.length for item in well])\n random_segments = Counter(np.random.choice(well.segments, n_well_crops, p=p/sum(p)))\n well.segments = [\n Well(segments=segment.random_crop(length, n_segment_crops))\n for segment, n_segment_crops in random_segments.items()\n ]\n else:\n well.segments = []\n return self.prune()", "def crop(img, side):\n side = side // 2\n y = img.shape[0] // 2\n x = img.shape[1] // 2\n print(y, x)\n print(img.shape)\n return img[y - side:y +side, x - side : x+side]", "def crop(F, T = 0.0, B = 0.0, L = 0.0, R = 0.0):\n# Create assertions for parameter values \n assert (0<=T<1), 'Error: T must be in [0,1)'\n assert (0<=B<1), 'Error: B must be in [0,1)'\n assert (0<=L<1), 'Error: L must be in [0,1)'\n assert (0<=R<1), 'Error: R must be in [0,1)' \n assert (T+B<1), 'Error: Ensure T + B < 1'\n assert (L+R<1), 'Error: Ensure L+R<1'\n\n# Define crop boundaries\n m, n, k = F.shape\n Rt = int(np.floor(T*m))\n Rb = int(np.floor(B*m))\n Rl = int(np.floor(L*n))\n Rr = int(np.floor(R*n))\n F = F[ Rt : m-Rb , Rl : n-Rr, 
:]\n\n# Normalise F\n big = F.max()\n F = (1/big)*F\n\n return F", "def remove_slices(data_to_crop, slices_of_interest):\n # Parse numbers based on delimiter: ' or :\n slices_list = parse_num_list(slices_of_interest)\n # Remove slices that are not wanted (+1 is to include the last selected slice as Python \"includes -1\"\n data_cropped = data_to_crop[..., slices_list]\n return data_cropped, slices_list", "def crop_boxes(boxes, crop_shape):\n\n crop_x1 = crop_shape[0]\n crop_y1 = crop_shape[1]\n crop_x2 = crop_shape[2]\n crop_y2 = crop_shape[3]\n\n l0 = boxes[:, 0] >= crop_x1\n l1 = boxes[:, 1] >= crop_y1\n l2 = boxes[:, 2] <= crop_x2\n l3 = boxes[:, 3] <= crop_y2\n\n L = l0 * l1 * l2 * l3\n cropped_boxes = boxes[L, :]\n\n cropped_boxes[:, 0] = cropped_boxes[:, 0] - crop_x1\n cropped_boxes[:, 1] = cropped_boxes[:, 1] - crop_y1\n cropped_boxes[:, 2] = cropped_boxes[:, 2] - crop_x1\n cropped_boxes[:, 3] = cropped_boxes[:, 3] - crop_y1\n\n return cropped_boxes", "def crop_to_moves(img):\n\n img = cv2.resize(img, (256, 224))\n\n x1, y1 = (195, 59) # Top Left\n x2, y2 = (x1 + 42, y1 + 48)\n\n return img[y1:y2, x1:x2]", "def crop_last(signal: np.array, cut_size: int=14400) -> np.array:\n # cut the last signal part of length cut\n return signal[len(signal)-cut_size:]", "def slices_padding(slice_list):\n max_size = torch.tensor([torch.tensor(slice.shape[1:]).max().item() for slice in slice_list]).max()\n return [slice_padding(slice, max_size) for slice in slice_list]", "def crop(image, tl_row, tl_col, target_size):\n rows = image.size()[0]\n cols = image.size()[1]\n x = [[[image.pixels[i][j][k] for k in range(cols) \\\n if ((j >= tl_row) and (j <= tl_row + target_size[0])) and \\\n ((k >= tl_col) and (k <= tl_col + target_size[1]))]\n for j in range(rows)] for i in range(len(image.pixels))]\n my_x = list(map(lambda lst: list(filter(lambda val: \\\n True if len(val) > 0 else False, lst)), x))\n return RGBImage(my_x)", "def crop_points_np(points, max_z, min_z):\n\n # crop the point cloud with roi \n z = points[:, 2]\n idx = np.where((z > min_z) & (z < max_z))\n idx = np.array(idx)\n crop_pts = np.squeeze(np.take(points, idx, axis=0))\n \n return crop_pts", "def center_crop(audio, frame_size):\n pad_amount = int(frame_size // 2) # Symmetric even padding like librosa.\n return audio[:, pad_amount:-pad_amount]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the percent change of daily close price.
def change_price_precent(self):
    stock_firstday = self.closeprice[0]
    self.dataframe['stock_%chg'] = (self.closeprice - stock_firstday)/stock_firstday
    change_price_precent = self.dataframe['stock_%chg']
    return change_price_precent
[ "def daily_pct_change(self, save=False):\n logging.info('daily_pct_change() called')\n byYear = pd.DataFrame(self.group_by_year())\n dailyPctChange = pd.DataFrame()\n for year in byYear:\n # Here I use numpy to calculate the pct change\n dailyPctChange[year] = round(np.log(byYear[year]).diff()*100, 3)\n dailyPctChange = dailyPctChange[1:]\n if save:\n dailyPctChange.to_csv(f'Daily_Pct_Change_{self.ticker}.csv')\n return dailyPctChange", "def pct_change():\n original_value = bank_of_rick.original_value\n current_total_value = sum(total_value())\n return 100 * (current_total_value - original_value) / original_value", "def pct_chg(data, period):\n change = {'change_'+str(period) : data['Close'].diff(period)}\n return pd.DataFrame(list(change.values())[0]).rename(columns = {'Close': str(list(change.keys())[0])})", "def rate_of_change(close, n=1):\n return (close[n:]-close[:len(close)-n])/close[:len(close)-n]", "def _get_percent_returns(self, cumulative=False):\n if cumulative is True:\n return self._daily['adj_close'] / (\n self._daily['adj_close'].ix[0, :]) - 1.0\n else:\n return self._daily['adj_close'].pct_change()", "def yearly_pct_calc(self):\r\n\t\tyield_loss_share = self.current_value - self.purchase_price\r\n\t\tyield_loss_pct = yield_loss_share/self.purchase_price\r\n\t\tcurrent_date = datetime.today()\r\n\t\tpurchase_date = datetime.strptime(self.purchase_date, '%m/%d/%Y')\r\n\t\tday_diff = (current_date - purchase_date).days\r\n\t\tyear_diff = round((day_diff/365), 2)\r\n\t\tself.yearly_earning_loss = \"{:.2%}\".format(yield_loss_pct/year_diff)\r\n\t\treturn self.yearly_earning_loss", "def set_price_changes(self):\n self.market_data['pricechange'] = self.market_data['adj_close'].diff(1)\n self.market_data['percentchange'] = (np.log(self.market_data['adj_close']) - np.log(self.market_data['adj_close'].shift(1))).fillna(0)", "def test_price_diff_percentage(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n diff_percentage = diff / price_was\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff_percentage, diff_percentage)", "def profit_loss_percentage(df: pd.DataFrame, days=None) -> float:\n df_days = StockAnalysis.filter_days(df, days)\n close_price = df_days.Close_Price.values\n\n if len(close_price) == 0:\n return 0 # BUGFIX: IndexError: index -1 is out of bounds for axis 0 with size 0\n else:\n profit = (close_price[-1] - close_price[0]) / close_price[-1]\n profit_pc = profit * 100 # QUESTION: Should this return as 0.x fractional percentage or 0-100%\n return round(profit_pc, 2) # return as percentage", "def percent_price_reduction(change):\n \n upcoming_price_changes(change)\n\n # TODO do you wish to continue?\n\n sql_update = \"\"\"\n update `tabItem Price` ip\n \n left join `tabItem` it\n on ip.item_code = it.item_code\n \n set ip.price_list_rate = ip.price_list_rate + (ip.price_list_rate * %s / 100.0)\n\n where ip.selling = 1\n and it.ebay_id REGEXP '[0-9]'\n \n and it.modified < now() - interval 10 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change)\n\n frappe.db.sql(sql_update, auto_commit=True)\n \n sql_update_it = \"\"\"\n update 
`tabItem` it\n\n set \n it.standard_rate = it.standard_rate + (it.standard_rate * %s / 100.0),\n it.vat_inclusive_price = it.vat_inclusive_price + (it.vat_inclusive_price * %s / 100.0)\n \n where \n it.ebay_id REGEXP '[0-9]'\n and it.modified < now() - interval 30 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change, change)\n\n frappe.db.sql(sql_update_it, auto_commit=True)\n\n print(\"Price reduction completed\")", "def cagr(df_original):\n df = df_original.copy()\n df['daily_return'] = df_original['Adj Close'].pct_change()\n df['cumulative_return'] = (1 + df['daily_return']).cumprod()\n n = len(df) / 252 # Number of trading days in a year (UK)\n cagr = (df['cumulative_return'][-1])**(1/n) - 1\n return cagr", "def get_final_price(price, discount_percentage=10):\n return price-( price* discount_percentage / 100)", "def make_pct(self, X):\n return 100.0*X/self.ev.pop", "def Ret_everyndays(DF,n):\n df = DF.copy().drop('Return', axis = 1).iloc[::n, :]\n ret = df['Adj Close'].pct_change().to_list()\n return ret", "def Percentage_Yield():\n ActualYield = float(app.question('Actual Yield',\"Please enter as a number (e.g '1.890') the actual yield\"))\n TheoreticalYield = float(app.question('Theoretical Yield',\"Please enter as a number (e.g '1.890') the theoretical yield\"))\n Perc_Yield = (ActualYield/TheoreticalYield)*100\n #Percentage yeild is calculated by dividing the actual yeild by the theoretical yeild and timesing it by 100\n print(\"Percentage Yield\",Perc_Yield)\n return Perc_Yield", "def calc_dividend_yield(self):\n if self._price is None:\n return 0\n if self._type is Type.common:\n return self._last_dividend / self._price\n return (self._fixed_dividend * self._par_value) / self._price", "def apply_percent_coupon(self):\r\n return self.price - self.price*self.coupon.percent_amount", "def final_price(self):\n return self.price - self.price * self.discount", "def _cur_close(self):\n open = self._prices.open[self._offset]\n rel_close = self._prices.close[self._offset] # so close is rel ?\n return open * (1.0 + rel_close)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare and plot the percent change of the stock close price and that of the actual market over time.
def plot_changeprice_comparison(self):
    fig = plt.figure()
    self.change_price_precent().plot(color = 'b',label = self.stock)
    market = Market(self.starttime,self.endtime)
    market.change_price_precent().plot(color = 'r',label = 'market')
    plt.legend()
    plt.xticks(rotation=45)
    plt.title('The Comparison between {} and market close price '.format(self.stock))
    return fig
[ "def set_price_changes(self):\n self.market_data['pricechange'] = self.market_data['adj_close'].diff(1)\n self.market_data['percentchange'] = (np.log(self.market_data['adj_close']) - np.log(self.market_data['adj_close'].shift(1))).fillna(0)", "def plot_stock(df):\n fig,ax = plt.subplots(3)\n stz, = ax[0].plot(df['date'],df[df.columns[1]])\n stz.set_label(df.columns[1])\n other, = ax[0].plot(df['date'],df[df.columns[3]])\n other.set_label(df.columns[3])\n ax[0].xaxis.set_major_locator(plt.MaxNLocator(10))\n ax[0].legend()\n stz, = ax[1].plot(df['date'],df[df.columns[2]])\n stz.set_label(df.columns[2])\n other, = ax[1].plot(df['date'],df[df.columns[4]])\n other.set_label(df.columns[4])\n ax[1].xaxis.set_major_locator(plt.MaxNLocator(10))\n ax[1].legend()\n diffP, = ax[2].plot(df['date'],df['price_diff'])\n ax2 = ax[2].twinx()\n diffC, = ax2.plot(df['date'], df['poc_diff'], color='orange')\n ax[2].xaxis.set_major_locator(plt.MaxNLocator(10))\n ax2.legend((diffP, diffC),('price_diff', 'poc_diff'))\n plt.show()", "def stock_chart(symbol):\r\n\r\n data_frame = wb.DataReader(symbol, data_source='yahoo', start='2000-1-1')['Adj Close']\r\n data_frame.iloc[-15:].plot(figsize=(10, 10))\r\n return plt.show()", "def plot(self):\n\n df = pandas.DataFrame(self.trade_history)\n\n ylim1_min = min(df['stock_price'])\n ylim1_max = max(df['stock_price'])\n ylim2_min = min(df['profit_percent'])\n ylim2_max = max(df['profit_percent'])\n\n # Stock price\n plt.subplot(211)\n plt.ylabel('Stock price')\n plt.xlim([self.dates[0], self.dates[-1]])\n plt.ylim([ylim1_min, ylim1_max])\n plt.plot(self.dates, df['stock_price'], color='b')\n\n # Percentage profit\n plt.subplot(212).cla()\n plt.xlim([self.dates[0], self.dates[-1]])\n plt.ylim([ylim2_min, ylim2_max])\n plt.ylabel('Percentage profit')\n plt.xlabel('Date')\n plt.plot(self.dates, df['profit_percent'], color='b')\n\n plt.show()", "def test_price_diff_percentage(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n diff_percentage = diff / price_was\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff_percentage, diff_percentage)", "def plot_buy_sell_points(executor, stock):\n assert stock in executor.stocks, f\"Need to choose a valid stock\"\n assert len(executor.portfolio.historical) > 0, \"Must first run the backtest\"\n\n # Get all specified stocks in the portfolio historical holdings\n port_holdings = list(filter(lambda h: h.stock==stock, executor.portfolio.historical))\n buys = [(h.buy_date, h.buy_price) for h in port_holdings]\n buy_dates, buy_price = zip(*buys)\n buy_dates = plot_date_transform(buy_dates)\n\n sells = [(h.sell_date, h.sell_price) for h in port_holdings]\n sell_dates, sell_price = zip(*sells)\n sell_dates = plot_date_transform(sell_dates)\n\n # Stock price over time\n all_dates = executor.market.dates\n valid_dates = [d for d in all_dates if executor.market.data[d].get(stock)!=None]\n stock_values = [executor.market.data[d][stock]['open'] for d in valid_dates if executor.market.data[d][stock].get('open')!=None]\n valid_dates = plot_date_transform(valid_dates)\n\n plt.plot_date(valid_dates, stock_values, fmt=\"m\", color='blue')\n plt.plot_date(sell_dates, sell_price, fmt=\"m\", color='red', linestyle=\"\", marker='o')\n plt.plot_date(buy_dates, buy_price, fmt=\"m\", color='green', linestyle=\"\", marker='o')\n plt.show()", "def 
close_highlow_graph(comp_name, filter_data, cmp_tick, from_date_s, to_date_s):\r\n # close vs high vs low graph\r\n plt.figure(figsize=(15,6))\r\n plt.title(\"Close v/s High v/s Low of Stock prices for {} -> {} from {} - {}\".format(cmp_tick, comp_name, from_date_s, to_date_s))\r\n plt.plot(filter_data.Close, \"b\", label = \"Closing Price\")\r\n plt.plot(filter_data.High, \"g--\", label = \"High Price\")\r\n plt.plot(filter_data.Low, \"r--\", label = \"Low Price\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Stock Price\") \r\n plt.legend(loc = \"best\")\r\n plt.show()", "def rel_comparison_plot_df(df1, df2, variable='Close', lb1='ref', lb2='other', ax=None, xDimensionless=False):\n I,L,x,p1,p2,t1 = rel_comparison(df1,df2, more_out=True, variable=variable)\n print('Comparison integral: ',I)\n print('Comparison last : ',L)\n\n if not xDimensionless:\n from pandas.plotting import register_matplotlib_converters\n register_matplotlib_converters()\n x=t1\n\n if ax is None:\n import matplotlib.pyplot as plt\n fig,ax = plt.subplots(1,1)\n ax.plot(x, p1,'k-', label=lb1)\n ax.plot(x, p2, label=lb2)\n ax.plot(x, p2-p1, '--', label='delta')\n if not xDimensionless:\n ax.set_xlabel('Time')\n else:\n ax.set_xlabel('Dimensionless time [-]')\n ax.set_ylabel('Change in price from period start [%]')\n ax.legend()\n return ax", "def change_price_precent(self):\n stock_firstday = self.closeprice[0]\n self.dataframe['stock_%chg'] = (self.closeprice - stock_firstday)/stock_firstday\n change_price_precent = self.dataframe['stock_%chg']\n return change_price_precent", "def plotTrade(id, indexes, result):\n # individual trade graphs with volatility\n global current_directory, folder_name\n plt.clf()\n plt.plot(data['index'], data['implied_volatility'], label = 'impl_volatility', color = 'yellow')\n plt.plot(data['index'], data['historical_volatility'], label = 'hist_volatility', color = 'cyan')\n\n clr = ''\n if result == 'profit':\n clr = 'green'\n plt.plot([data.loc[i, 'index'] for i in indexes], [data.loc[i, 'implied_volatility'] for i in indexes], color = clr, label = 'profit_trade')\n else:\n clr = 'red'\n plt.plot([data.loc[i, 'index'] for i in indexes], [data.loc[i, 'implied_volatility'] for i in indexes], color = clr, label = 'loss_trade')\n plt.xlabel('index')\n plt.ylabel('volatility in decimal')\n plt.legend(loc = 'best')\n plt.savefig(current_directory + '/output/{}/graphs/volatility/trade-data{}.svg'.format(folder_name, id), format = 'svg', dpi = 1200)\n\n # individual trade graphs with vega x vol_diff\n plt.clf()\n plt.plot(data['index'], (data['vega'] * (data['implied_volatility'] - data['historical_volatility'])), label = 'vega_x_(IV-HV)', color = 'yellow')\n\n clr = ''\n if result == 'profit':\n clr = 'green'\n plt.plot([data.loc[i, 'index'] for i in indexes], [data.loc[i, 'vega'] * (data.loc[i, 'implied_volatility'] - data.loc[i, 'historical_volatility']) for i in indexes], color = clr, label = 'profit_trade')\n else:\n clr = 'red'\n plt.plot([data.loc[i, 'index'] for i in indexes], [data.loc[i, 'vega'] * (data.loc[i, 'implied_volatility'] - data.loc[i, 'historical_volatility']) for i in indexes], color = clr, label = 'loss_trade')\n plt.xlabel('index')\n plt.ylabel('vega_x_diff(HV, IV) in decimal')\n plt.legend(loc = 'best')\n plt.savefig(current_directory + '/output/{}/graphs/vega/trade-data{}.svg'.format(folder_name, id), format = 'svg', dpi = 1200)", "def plot_orders(self):\n fig, axis = plt.subplots(2)\n\n #Only showing last s values for all values\n s = -1*self.show_times\n\n #Plot all 
checked prices and times, this leaves out the current incomplete closing price\n axis[0].plot_date(plt_dates.date2num(self.checked_times[s:]), self.checked_prices[s:], xdate=True, fmt='c-')\n\n #Plot bollinger bands\n axis[0].plot_date(plt_dates.date2num(self.bb['time'][s:]), self.bb['sma'][s:], xdate=True, fmt='k-') #-1 since sma not calculate for recent incomplete closing price\n axis[0].plot_date(plt_dates.date2num(self.bb['time'][s:]), self.bb['upper_band'][s:], xdate=True, fmt='k--')\n axis[0].plot_date(plt_dates.date2num(self.bb['time'][s:]), self.bb['lower_band'][s:], xdate=True, fmt='k--')\n\n #Plot stoch_rsi (%K_Slow, %D_Slow) in different subplot from time when buy_sell algorithm starts\n axis[1].plot_date(plt_dates.date2num(self.k_slow_array['time'][s:]), self.k_slow_array['k_slow'][s:], xdate=True, fmt='g-')\n axis[1].plot_date(plt_dates.date2num(self.d_slow_array['time'][s:]), self.d_slow_array['d_slow'][s:], xdate=True, fmt='b-')\n\n #Plot stoch_rsi user set upper and lower limits\n upper_limit = np.ones(shape=np.asarray(self.k_slow_array['time'][s:]).shape) * self.stoch_upper\n lower_limit = np.ones(shape=np.asarray(self.k_slow_array['time'][s:]).shape) * self.stoch_lower\n axis[1].plot_date(plt_dates.date2num(self.k_slow_array['time'][s:]), upper_limit, xdate=True, fmt='r--')\n axis[1].plot_date(plt_dates.date2num(self.k_slow_array['time'][s:]), lower_limit, xdate=True, fmt='r--')\n\n #Plot buy and sell orders\n orders = np.asarray(self.orders)\n buy_times = np.where( (np.asarray(self.orders['time']) > self.d_slow_array['time'][s]) & (np.asarray(self.orders['order_type']) == 'buy') )\n sell_times = np.where( (np.asarray(self.orders['time']) > self.d_slow_array['time'][s]) & (np.asarray(self.orders['order_type']) == 'sell') )\n if len(buy_times[0] > 0):\n axis[0].plot_date(plt_dates.date2num(np.take(self.orders['time'], buy_times[0])), np.take(self.orders['order_limit'], buy_times[0]), xdate=True, fmt='go')\n if len(sell_times[0] > 0):\n axis[0].plot_date(plt_dates.date2num(np.take(self.orders['time'], sell_times[0])), np.take(self.orders['order_limit'], sell_times[0]), xdate=True, fmt='ro')\n\n #Plot attributes\n axis[0].legend(labels=('Closing Prices', 'SMA', 'Bolling Upper Band', 'Bollinger Lower Band', 'Buy Orders', 'Sell Order'), loc='upper right', prop={'size':5})\n axis[1].legend(labels=('%k slow', '%d slow', 'user limits'), loc='upper right', prop={'size': 5})\n\n axis[0].set_xlabel('Date')\n axis[0].set_ylabel('USD')\n\n axis[1].set_xlabel('Date')\n axis[1].set_ylabel('%')\n\n fig.autofmt_xdate() #Auto aligns dates on x axis\n plt.show()", "def plotTrading(self):\n\n bw = self.bandwith\n t0 = self.slowMA.period\n T = self.df.shape[0]\n zscore_MA = (self.df[self.sym]\n .rolling(window=self.zscore.period)\n .mean())\n\n plt.subplot(311)\n self.df.loc[t0:T, self.sym].plot(label=self.sym)\n self.slowMA.values.loc[t0:T].plot(label=self.slowMA.name)\n zscore_MA.loc[t0:T].plot(label=self.zscore.name)\n if self.fastMA.period > 1:\n self.fastMA.values.loc[t0:T].plot(label=self.fastMA.name)\n plt.ylabel('{}/BTC'.format(self.sym))\n [plt.axvline(x, c='g', lw=0.5, ls='--') for x in self.opentimes]\n [plt.axvline(x, c='r', lw=0.5, ls='--') for x in self.closetimes]\n plt.legend()\n\n plt.subplot(312)\n self.zscore.values.loc[t0:T].plot()\n plt.plot([t0, T], [bw, bw], c='k', ls='--', lw=0.5)\n plt.plot([t0, T], [-bw, -bw], c='k', ls='--', lw=0.5)\n plt.plot([t0, T], [0, 0], c='k', ls='--', lw=0.5)\n [plt.axvline(x, c='g', lw=0.5, ls='--') for x in self.opentimes]\n 
[plt.axvline(x, c='r', lw=0.5, ls='--') for x in self.closetimes]\n plt.ylabel('Z Score')\n\n plt.subplot(313)\n returns = self.df.loc[t0:T, 'returns'].cumsum()*100\n returns.plot()\n plt.ylabel('Returns (%)')\n plt.xlabel('Hours')\n plt.show()", "def ticker_bar():\n popular_ticker_list = [\"SPY\", \"QQQ\", \"DIA\", \"AAPL\", \"GME\", \"AMC\", \"TSLA\", \"NIO\", \"PLTR\", \"NVDA\"]\n popular_name_list = [\"S&P500 ETF\", \"NASDAQ-100\", \"Dow ETF\", \"Apple\", \"GameStop\", \"AMC\", \"Tesla\", \"Nio\",\n \"Palantir\", \"NVIDIA\"]\n\n price_list = list()\n for ticker in popular_ticker_list:\n \n ticker = yf.Ticker(ticker)\n price_df = ticker.history(period=\"3d\")['Close']\n opening_price = float(price_df.iloc[1])\n closing_price = float(price_df.iloc[2])\n price_change = round(closing_price - opening_price, 2)\n\n percentage_change = round(((price_change / opening_price) * 100), 2)\n if percentage_change >= 0:\n price_change = '+' + str(price_change)\n percentage_change = '+' + str(percentage_change)\n\n price_list.append([round(closing_price, 2), price_change, percentage_change])\n return popular_ticker_list, popular_name_list, price_list", "def pct_change():\n original_value = bank_of_rick.original_value\n current_total_value = sum(total_value())\n return 100 * (current_total_value - original_value) / original_value", "def pct_chg(data, period):\n change = {'change_'+str(period) : data['Close'].diff(period)}\n return pd.DataFrame(list(change.values())[0]).rename(columns = {'Close': str(list(change.keys())[0])})", "def test_get_prices_diff(self):\n stock_day2 = StockDayFactory(\n company=self.stock_day.company,\n created_date=date(2018, 12, 6),\n open_price=Decimal('115'),\n close_price=Decimal('116'),\n high_price=Decimal('117'),\n low_price=Decimal('118'),\n )\n result = self.stock_day.get_prices_diff(stock_day2)\n self.assertEqual(\n stock_day2.open_price - self.stock_day.open_price,\n result['open_price']\n )\n self.assertEqual(\n stock_day2.close_price - self.stock_day.close_price,\n result['close_price']\n )\n self.assertEqual(\n stock_day2.high_price - self.stock_day.high_price,\n result['high_price']\n )\n self.assertEqual(\n stock_day2.low_price - self.stock_day.low_price,\n result['low_price']\n )", "def durationImbalancePrices(dfimbalance, fromdate, todate, price, incl_zero):\r\n #fromdatepd=pd.to_datetime(fromdate,format=\"%Y-%m-%d\")\r\n #todatepd=pd.to_datetime(todate,format=\"%Y-%m-%d\")\r\n dfmaskimbalance=dfimbalance.loc[(dfimbalance.index >= fromdate) & (dfimbalance.index < todate)]\r\n\r\n calcneg = dfmaskimbalance.copy()\r\n calcneg[calcneg >= 0] = np.nan\r\n calcneg=calcneg[\"POS in euro/MWh\"]\r\n negtime = (calcneg.count()/(dfmaskimbalance[\"POS in euro/MWh\"].count()))*100\r\n\r\n if (price == \"NEG\") & (incl_zero == True):\r\n dfmaskimbalance[dfmaskimbalance > 0.0] = np.nan\r\n if (price == \"NEG\") & (incl_zero == False):\r\n dfmaskimbalance[dfmaskimbalance >= 0.0] = np.nan\r\n if (price == \"POS\") & (incl_zero == True):\r\n dfmaskimbalance[dfmaskimbalance < 0.0] = np.nan\r\n if (price == \"POS\") & (incl_zero == False):\r\n dfmaskimbalance[dfmaskimbalance <= 0.0] = np.nan\r\n\r\n POS=dfmaskimbalance[\"POS in euro/MWh\"]\r\n POS=POS.sort_values()\r\n POS=POS.reset_index()\r\n POS=POS.drop(\"Timestamp\",axis=1)\r\n\r\n fig, ax = plt.subplots()\r\n POS.plot(figsize=(16,6), ax=ax,kind=\"line\", style='-',grid=True)\r\n ax.set_xlabel(\"# of quarters\")\r\n ax.set_ylabel(\"price in euro/MWh\")\r\n\r\n fig.suptitle(\"Duration curve POS \\n from \" + fromdate + \" to 
\" + todate + \"\\n negative for \" + \"{0:.2f}\".format(float(negtime)) + \" % of the total time\")\r\n if (price == \"NEG\") & (incl_zero == True):\r\n fig.suptitle(\"Duration curve negative POS including zero \\n from \" + fromdate + \" to \" + todate + \"\\n negative for \" + \"{0:.2f}\".format(float(negtime)) + \" % of the total time\")\r\n if (price == \"NEG\") & (incl_zero == False):\r\n fig.suptitle(\"Duration curve negative POS excluding zero \\n from \" + fromdate + \" to \" + todate + \"\\n negative for \" + \"{0:.2f}\".format(float(negtime)) + \" % of the total time\")\r\n if (price == \"POS\") & (incl_zero == True):\r\n fig.suptitle(\"Duration curve positive POS including zero \\n from \" + fromdate + \" to \" + todate + \"\\n negative for \" + \"{0:.2f}\".format(float(negtime)) + \" % of the total time\")\r\n if (price == \"POS\") & (incl_zero == False):\r\n fig.suptitle(\"Duration curve positive POS excluding zero \\n from \" + fromdate + \" to \" + todate + \"\\n negative for \" + \"{0:.2f}\".format(float(negtime)) + \" % of the total time\")", "def plot_stock_balance(data, data_name, balance=False) -> None:\n supply_sites = data['Supply Site Code'].unique()\n max_metric = []\n reorder_metric = []\n for i in range(len(supply_sites)):\n supply_site = supply_sites[i]\n supply_site_rows = (data['Supply Site Code'] == supply_site)\n supply_site_data = data[supply_site_rows]\n skus = supply_site_data['SKU'].unique()\n for j in range(len(skus)):\n sku_rows = (data['SKU'] == skus[j])\n grid_rows = supply_site_rows & sku_rows\n grid = data[grid_rows]\n balanced_max = 0\n balanced_reorder = 0\n for row in grid.itertuples():\n if balance:\n stock_index = data.columns.to_list().index('x_opt')\n else:\n stock_index = data.columns.to_list().index('Closing Stock')\n\n max_stock_index = data.columns.to_list().index('MaxDOC (Hl)')\n reorder_stock_index = data.columns.to_list().index('Reorder Point (Hl)')\n\n current_stock = row[stock_index + 1]\n max_stock = row[max_stock_index + 1]\n reorder_stock = row[reorder_stock_index + 1]\n if max_stock >= current_stock:\n balanced_max = balanced_max + 1\n if current_stock >= reorder_stock:\n balanced_reorder = balanced_reorder + 1\n max_percentage = (balanced_max / len(grid)) * 100\n reorder_percentage = (balanced_reorder / len(grid)) * 100\n max_metric.append(max_percentage)\n reorder_metric.append(reorder_percentage)\n plt.hist(max_metric, bins=20)\n plt.title('Estoques balanceados no grid (max)')\n plt.xlabel('Porcentagem')\n plt.ylabel('Quantidade de grids')\n plt.xlim(0, 100)\n plt.tight_layout()\n plt.savefig('figures/' + data_name + '_max_stock.png')\n plt.figure()\n\n plt.hist(reorder_metric, bins=20)\n plt.title('Estoques balanceados no grid (reorder)')\n plt.xlabel('Porcentagem')\n plt.ylabel('Quantidade de grids')\n plt.xlim(0, 100)\n plt.tight_layout()\n plt.savefig('figures/' + data_name + '_reorder_stock.png')\n plt.figure()", "def rel_comparison_plot_tick(tick1, tick2, ts_start=None, ts_end=None, period=None, interval='1d', variable='Close', ax=None, xDimensionless=False):\n #\n symb1 = Symbol(tick1)\n symb2 = Symbol(tick2)\n df1= symb1.download_history(ts_start=ts_start, ts_end=ts_end, period=period, interval=interval)\n df2= symb2.download_history(ts_start=ts_start, ts_end=ts_end, period=period, interval=interval)\n return rel_comparison_plot_df(df1, df2, variable=variable, lb1=tick1, lb2=tick2, ax=ax, xDimensionless=xDimensionless)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that divides equally and a positive number of lists to divide in to returns 3 lists all with 3 elements.
def test_equally_divisible_list_and_positive_int(self):
    result = split_list(self.equally_divisible_list, self.positive_int)
    self.assertEqual(len(result), 3)
    self.assertEqual(len(result[0]), 3)
    self.assertEqual(len(result[1]), 3)
    self.assertEqual(len(result[2]), 3)
[ "def testListDivide():\n test_a = listDivide([1, 2, 3, 4, 5])\n test_b = listDivide([2, 4, 6, 8, 10])\n test_c = listDivide([30, 54, 63, 98, 100], divide=10)\n test_d = listDivide([])\n test_e = listDivide([1, 2, 3, 4, 5], 1)\n\n tests = (test_a, test_b, test_c, test_d, test_e)\n while test_a == int(2):\n while test_b == int(5):\n while test_c == int(2):\n while test_d == int(0):\n while test_e == int(5):\n return tests\n else:\n raise ListDivideException('Exception: a number is incorrect')", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException('Fail!')\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([])\n if result != 0:\n raise ListDivideException('Fail!')\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException('Fail!')", "def testListDivide():\n \n result = listDivide([1,2,3,4,5])\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([2,4,6,8,10])\n if result != 5:\n raise ListDivideException(\"Failed\")\n result = listDivide([30,54,63,98,100], divide=10)\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Failed\")\n result = listDivide([1,2,3,4,5],1)\n if result != 5:\n raise ListDivideException(\"Failed\")", "def test_equally_divisible_list_and_1(self):\n result = split_list(self.equally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.equally_divisible_list)", "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.unequally_divisible_list)", "def test_equally_divisible_list_and_zero(self):\n result = split_list(self.equally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def listDivide(numbers, divide=2):\n\n num = []\n for x in numbers:\n if x % divide == 0:\n num.append(numbers)\n return len(num)", "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def divide(intList, num, step): #4\n newIntList = []\n thingsToAdd = []\n for index in range(0, len(intList), step):\n thingsToAdd.append(index)\n for index, item in enumerate(intList):\n if index in thingsToAdd:\n newIntList.append(item / float(num))\n else:\n newIntList.append(item)\n return newIntList", "def test_first_triangle_number_to_have_over_3_divisors(self):\n\t\tself.assertEqual(6, first_triangle_number_over_n_divisors(3))", "def test_division(self):\n Mod5 = IntegersModP(5)\n Mod11 = IntegersModP(11)\n\n polysOverQ = polynomials_over(Fraction).factory\n polysMod5 = polynomials_over(Mod5).factory\n polysMod11 = polynomials_over(Mod11).factory\n for p in [polysOverQ, polysMod5, polysMod11]:\n # division\n assert p([1,1,1,1,1,1]) == p([-1,0,0,0,0,0,1]) / p([-1,1])\n assert p([-1,1,-1,1,-1,1]) == p([1,0,0,0,0,0,1]) / p([1,1])\n assert p([]) == p([]) / p([1,1])\n assert p([1,1]) == p([1,1]) / p([1])\n assert p([1,1]) == p([2,2]) / p([2])", "def test_compute_divisors_quantities_correctly(self):\n\t\tself.assertEqual(1, compute_divisor_quantity(1))\n\t\tself.assertEqual(2, compute_divisor_quantity(3))\n\t\tself.assertEqual(4, compute_divisor_quantity(15))\n\t\tself.assertEqual(6, 
compute_divisor_quantity(28))", "def div3(a, vec, result):\n\tresult[0] = vec[0] / a\n\tresult[1] = vec[1] / a\n\tresult[2] = vec[2] / a", "def percentageDivide(percentage, subjectsList, randomFlag): \n\n # Number of subjects\n L = len(subjectsList)\n indexes = np.array(range(L))\n\n # Shuffling the list of subjects\n if randomFlag: \n shuffle(indexes)\n\n\n per = int(percentage * L)\n if per == 0 and L > 1: \n per = 1\n\n # list of validation subjects\n TestVal_List = [subjectsList[i] for i in indexes[:per]]\n\n # list of training subjects\n Train_List = [subjectsList[i] for i in indexes[per:]]\n\n return Train_List, TestVal_List", "def list_element_wise_division(a, b):\n return numpy.divide(a, b, out=numpy.zeros_like(a), where=b != 0.)", "def test_4():\n assert multiples_of_3_and_5(8456) == 16687353", "def test_3():\n assert multiples_of_3_and_5(1000) == 233168", "def nucdivind(tuplelist, numsnps):\n\tif numsnps == 1:\n\t\tnucdivlist = []\n\t\tfor pop in tuplelist:\n\t\t\talleledic1 = {}\n\t\t\tzipped = zip(pop[0].split('/'), pop[1].split('/'))\n\t\t\tfor allele in zipped:\n\t\t\t\tif allele[0] in alleledic1:\n\t\t\t\t\talleledic1[allele[0]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic1[allele[0]] = int(allele[1])\n\t\t\tsumalleles = sum([alleleidepth for allelei, alleleidepth in alleledic1.items()])\n\t\t\tnucdiv = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic1.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdivlist.append(((nucdiv / 42), sumalleles))\n\t\treturn nucdivlist\n\telif numsnps == 2:\n\t\tnucdivlist = []\n\t\tfor pop in tuplelist:\n\t\t\talleledic1 = {}\n\t\t\talleledic2 = {}\n\t\t\tzipped = zip(pop[0].split('/'), pop[1].split('/'))\n\t\t\tfor allele in zipped:\n\t\t\t\tif allele[0][0] in alleledic1:\n\t\t\t\t\talleledic1[allele[0][0]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic1[allele[0][0]] = int(allele[1])\n\t\t\t\tif allele[0][1] in alleledic2:\n\t\t\t\t\talleledic2[allele[0][1]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic2[allele[0][1]] = int(allele[1])\n\t\t\tsumalleles = sum([alleleidepth for allelei, alleleidepth in alleledic1.items()])\n\t\t\tnucdiv1 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic1.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdiv2 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic2.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdivlist.append((((nucdiv1 + nucdiv2) / 42), sumalleles))\n\t\treturn nucdivlist\n\telif numsnps == 3:\n\t\tnucdivlist = []\n\t\tfor pop in tuplelist:\n\t\t\talleledic1 = {}\n\t\t\talleledic2 = {}\n\t\t\talleledic3 = {}\n\t\t\tzipped = zip(pop[0].split('/'), pop[1].split('/'))\n\t\t\tfor allele in zipped:\n\t\t\t\tif allele[0][0] in alleledic1:\n\t\t\t\t\talleledic1[allele[0][0]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic1[allele[0][0]] = int(allele[1])\n\t\t\t\tif allele[0][1] in alleledic2:\n\t\t\t\t\talleledic2[allele[0][1]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic2[allele[0][1]] = int(allele[1])\n\t\t\t\tif allele[0][2] in alleledic3:\n\t\t\t\t\talleledic3[allele[0][2]] += int(allele[1])\n\t\t\t\telse:\n\t\t\t\t\talleledic3[allele[0][2]] = int(allele[1])\n\t\t\tsumalleles = sum([alleleidepth for allelei, alleleidepth in alleledic1.items()])\n\t\t\tnucdiv1 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic1.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdiv2 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic2.items()]) / 
binomial(sumalleles, 2))\n\t\t\tnucdiv3 = 1 - (sum([binomial(alleleidepth, 2) for allelei, alleleidepth in alleledic3.items()]) / binomial(sumalleles, 2))\n\t\t\tnucdivlist.append((((nucdiv1 + nucdiv2 + nucdiv3) / 42), sumalleles))\n\t\treturn nucdivlist", "def lists_are_multiple(l1, l2):\n # can we compare these two lists?\n if len(l1) != len(l2):\n return False\n for a, b in [(l1, l2), (l2, l1)]: # check the reverse of the lists too\n # see if all a are perfectly divisible by all b\n if all([(x % y == 0) for x, y in zip(a, b)]):\n # see if all have the same multiple\n if len(set([x/y for x, y in zip(a, b)])) == 1:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that divides equally and number of lists to divide in to set as 1 returns 1 list with the same elements as the original list.
def test_equally_divisible_list_and_1(self):
    result = split_list(self.equally_divisible_list, 1)
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], self.equally_divisible_list)
[ "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.unequally_divisible_list)", "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException('Fail!')\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([])\n if result != 0:\n raise ListDivideException('Fail!')\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException('Fail!')", "def testListDivide():\n \n result = listDivide([1,2,3,4,5])\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([2,4,6,8,10])\n if result != 5:\n raise ListDivideException(\"Failed\")\n result = listDivide([30,54,63,98,100], divide=10)\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Failed\")\n result = listDivide([1,2,3,4,5],1)\n if result != 5:\n raise ListDivideException(\"Failed\")", "def test_equally_divisible_list_and_zero(self):\n result = split_list(self.equally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def testListDivide():\n test_a = listDivide([1, 2, 3, 4, 5])\n test_b = listDivide([2, 4, 6, 8, 10])\n test_c = listDivide([30, 54, 63, 98, 100], divide=10)\n test_d = listDivide([])\n test_e = listDivide([1, 2, 3, 4, 5], 1)\n\n tests = (test_a, test_b, test_c, test_d, test_e)\n while test_a == int(2):\n while test_b == int(5):\n while test_c == int(2):\n while test_d == int(0):\n while test_e == int(5):\n return tests\n else:\n raise ListDivideException('Exception: a number is incorrect')", "def test_equally_divisible_list_and_positive_int(self):\n result = split_list(self.equally_divisible_list, self.positive_int)\n self.assertEqual(len(result), 3)\n self.assertEqual(len(result[0]), 3)\n self.assertEqual(len(result[1]), 3)\n self.assertEqual(len(result[2]), 3)", "def listDivide(numbers, divide=2):\n\n num = []\n for x in numbers:\n if x % divide == 0:\n num.append(numbers)\n return len(num)", "def halvesies(number_list):\n is_halved = []\n number_list[:] = [float(item) / 2 for item in number_list]\n # for each item in number_list, convert item to float, divide by 2, iterate.\n is_halved = number_list\n # this may be unnecessary but I like having a special var for this function\n return is_halved", "def allEqual(self, list):\n return not list or list == [list[0]] * len(list)", "def lists_are_multiple(l1, l2):\n # can we compare these two lists?\n if len(l1) != len(l2):\n return False\n for a, b in [(l1, l2), (l2, l1)]: # check the reverse of the lists too\n # see if all a are perfectly divisible by all b\n if all([(x % y == 0) for x, y in zip(a, b)]):\n # see if all have the same multiple\n if len(set([x/y for x, y in zip(a, b)])) == 1:\n return True\n return False", "def test_list(self):\n\n test = ['test', 1, list()]\n\n result = hashiter(test)\n\n self.assertEqual(\n result,\n hash(list) +\n (hash('test') + 1) * 1 +\n (hash(1) + 1) * 2 + (hashiter([]) + 1) * 3\n )", "def getSizePlueOneItemSet(Klist):\n candidate = list()\n for e in Klist:\n for f in Klist:\n a = e.union(f)\n if len(a) == len(e)+1:\n 
candidate.append(a)\n #print(candidate)\n #print(len(candidate))\n newlist = []\n for i in candidate:\n if i not in newlist:\n newlist.append(i)\n candidate = newlist\n #print(candidate)\n \"\"\" here is the normal pruning process \"\"\"\n newlist = []\n for e in candidate:\n counter = 0\n for f in globOriginalList:\n if(f.issuperset(e)):\n counter = counter+ 1\n if((counter/float(globNumberOfTransactions)) >= globMinSup):\n newlist.append(e)\n #print(len(candidate))\n return newlist", "def divide(intList, num, step): #4\n newIntList = []\n thingsToAdd = []\n for index in range(0, len(intList), step):\n thingsToAdd.append(index)\n for index, item in enumerate(intList):\n if index in thingsToAdd:\n newIntList.append(item / float(num))\n else:\n newIntList.append(item)\n return newIntList", "def list_element_wise_division(a, b):\n return numpy.divide(a, b, out=numpy.zeros_like(a), where=b != 0.)", "def test_division(self):\n Mod5 = IntegersModP(5)\n Mod11 = IntegersModP(11)\n\n polysOverQ = polynomials_over(Fraction).factory\n polysMod5 = polynomials_over(Mod5).factory\n polysMod11 = polynomials_over(Mod11).factory\n for p in [polysOverQ, polysMod5, polysMod11]:\n # division\n assert p([1,1,1,1,1,1]) == p([-1,0,0,0,0,0,1]) / p([-1,1])\n assert p([-1,1,-1,1,-1,1]) == p([1,0,0,0,0,0,1]) / p([1,1])\n assert p([]) == p([]) / p([1,1])\n assert p([1,1]) == p([1,1]) / p([1])\n assert p([1,1]) == p([2,2]) / p([2])", "def test_set(self):\n\n test = set([1, 2, 3])\n\n result = hashiter(test)\n\n self.assertEqual(\n result,\n hash(set) +\n (hash(1) + 1) * 1 + (hash(2) + 1) * 2 + (hash(3) + 1) * 3\n )", "def moyenne(listOfNumber):\n\n #result:float\n result = 0.0\n\n #number:number\n for number in listOfNumber:\n result = result + number\n return result/len(listOfNumber)", "def test_list(self):\n x = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)\n assert_array_almost_equal_nulp(z, x, 10)\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)\n assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that divides equally and number of lists to divide in to set as 0 returns an empty list.
def test_equally_divisible_list_and_zero(self):
    result = split_list(self.equally_divisible_list, 0)
    self.assertEqual(len(result), 0)
[ "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.unequally_divisible_list)", "def test_equally_divisible_list_and_1(self):\n result = split_list(self.equally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.equally_divisible_list)", "def testListDivide():\n test_a = listDivide([1, 2, 3, 4, 5])\n test_b = listDivide([2, 4, 6, 8, 10])\n test_c = listDivide([30, 54, 63, 98, 100], divide=10)\n test_d = listDivide([])\n test_e = listDivide([1, 2, 3, 4, 5], 1)\n\n tests = (test_a, test_b, test_c, test_d, test_e)\n while test_a == int(2):\n while test_b == int(5):\n while test_c == int(2):\n while test_d == int(0):\n while test_e == int(5):\n return tests\n else:\n raise ListDivideException('Exception: a number is incorrect')", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException('Fail!')\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([])\n if result != 0:\n raise ListDivideException('Fail!')\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException('Fail!')", "def testListDivide():\n \n result = listDivide([1,2,3,4,5])\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([2,4,6,8,10])\n if result != 5:\n raise ListDivideException(\"Failed\")\n result = listDivide([30,54,63,98,100], divide=10)\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Failed\")\n result = listDivide([1,2,3,4,5],1)\n if result != 5:\n raise ListDivideException(\"Failed\")", "def test_empty_lists_case():\n assert check_sum_of_four([], [], [], []) == 0", "def test_equally_divisible_list_and_positive_int(self):\n result = split_list(self.equally_divisible_list, self.positive_int)\n self.assertEqual(len(result), 3)\n self.assertEqual(len(result[0]), 3)\n self.assertEqual(len(result[1]), 3)\n self.assertEqual(len(result[2]), 3)", "def test_empty_lists():\n check_sum_of_four([], [], [], []) == 0", "def listDivide(numbers, divide=2):\n\n num = []\n for x in numbers:\n if x % divide == 0:\n num.append(numbers)\n return len(num)", "def test_zero_lists():\n arr = [0, 0, 0]\n check_sum_of_four(arr, arr, arr, arr) == len(arr) ** 4", "def list_element_wise_division(a, b):\n return numpy.divide(a, b, out=numpy.zeros_like(a), where=b != 0.)", "def test_division(self):\n Mod5 = IntegersModP(5)\n Mod11 = IntegersModP(11)\n\n polysOverQ = polynomials_over(Fraction).factory\n polysMod5 = polynomials_over(Mod5).factory\n polysMod11 = polynomials_over(Mod11).factory\n for p in [polysOverQ, polysMod5, polysMod11]:\n # division\n assert p([1,1,1,1,1,1]) == p([-1,0,0,0,0,0,1]) / p([-1,1])\n assert p([-1,1,-1,1,-1,1]) == p([1,0,0,0,0,0,1]) / p([1,1])\n assert p([]) == p([]) / p([1,1])\n assert p([1,1]) == p([1,1]) / p([1])\n assert p([1,1]) == p([2,2]) / p([2])", "def test_empty(self):\n s = IntegerSet()\n self.assertEqual(s.cardinality(), 0)\n self.assertNotIn(1337, s)\n L = list(s)\n self.assertEqual(L, [])\n self.assertFalse(s)\n self.assertEqual(0, len(s))", "def 
test_empty_list(self):\n empty = []\n self.assertEqual(max_integer(empty), None)", "def test_compute_divisors_quantities_correctly(self):\n\t\tself.assertEqual(1, compute_divisor_quantity(1))\n\t\tself.assertEqual(2, compute_divisor_quantity(3))\n\t\tself.assertEqual(4, compute_divisor_quantity(15))\n\t\tself.assertEqual(6, compute_divisor_quantity(28))", "def test_returns_zero_if_list_is_empty(self):\n result = island_counter([])\n self.assertEqual(result, 0)", "def testEmpty(self):\n assert Iter.foldl(self.f, 23, self.empty()) == 23\n assert Iter.foldr(self.f, 32, self.empty()) == 32", "def test_good_lists(self):\n input_numbers_list = []\n valid_numbers_list = []\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [1, 2, 3]\n valid_numbers_list = [1, 2, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [\"1\", 2, 3]\n valid_numbers_list = [1, 2, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [\"1\", 2.1, 3]\n valid_numbers_list = [1, 2.1, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that does not divide equally, with the number of lists to divide into set to 1, returns a single list with the same elements as the original list.
def test_unequally_divisible_list_and_1(self):
    result = split_list(self.unequally_divisible_list, 1)
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0], self.unequally_divisible_list)
[ "def test_equally_divisible_list_and_1(self):\n result = split_list(self.equally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.equally_divisible_list)", "def test_unequally_divisible_list_and_zero(self):\n result = split_list(self.unequally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException('Fail!')\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([])\n if result != 0:\n raise ListDivideException('Fail!')\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException('Fail!')", "def testListDivide():\n \n result = listDivide([1,2,3,4,5])\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([2,4,6,8,10])\n if result != 5:\n raise ListDivideException(\"Failed\")\n result = listDivide([30,54,63,98,100], divide=10)\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Failed\")\n result = listDivide([1,2,3,4,5],1)\n if result != 5:\n raise ListDivideException(\"Failed\")", "def test_equally_divisible_list_and_zero(self):\n result = split_list(self.equally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def testListDivide():\n test_a = listDivide([1, 2, 3, 4, 5])\n test_b = listDivide([2, 4, 6, 8, 10])\n test_c = listDivide([30, 54, 63, 98, 100], divide=10)\n test_d = listDivide([])\n test_e = listDivide([1, 2, 3, 4, 5], 1)\n\n tests = (test_a, test_b, test_c, test_d, test_e)\n while test_a == int(2):\n while test_b == int(5):\n while test_c == int(2):\n while test_d == int(0):\n while test_e == int(5):\n return tests\n else:\n raise ListDivideException('Exception: a number is incorrect')", "def test_equally_divisible_list_and_positive_int(self):\n result = split_list(self.equally_divisible_list, self.positive_int)\n self.assertEqual(len(result), 3)\n self.assertEqual(len(result[0]), 3)\n self.assertEqual(len(result[1]), 3)\n self.assertEqual(len(result[2]), 3)", "def listDivide(numbers, divide=2):\n\n num = []\n for x in numbers:\n if x % divide == 0:\n num.append(numbers)\n return len(num)", "def allEqual(self, list):\n return not list or list == [list[0]] * len(list)", "def getSizePlueOneItemSet(Klist):\n candidate = list()\n for e in Klist:\n for f in Klist:\n a = e.union(f)\n if len(a) == len(e)+1:\n candidate.append(a)\n #print(candidate)\n #print(len(candidate))\n newlist = []\n for i in candidate:\n if i not in newlist:\n newlist.append(i)\n candidate = newlist\n #print(candidate)\n \"\"\" here is the normal pruning process \"\"\"\n newlist = []\n for e in candidate:\n counter = 0\n for f in globOriginalList:\n if(f.issuperset(e)):\n counter = counter+ 1\n if((counter/float(globNumberOfTransactions)) >= globMinSup):\n newlist.append(e)\n #print(len(candidate))\n return newlist", "def halvesies(number_list):\n is_halved = []\n number_list[:] = [float(item) / 2 for item in number_list]\n # for each item in number_list, convert item to float, divide by 2, iterate.\n is_halved = number_list\n # this may be unnecessary but I like having a special var for this function\n return is_halved", "def lists_are_multiple(l1, l2):\n # can we compare these two lists?\n if len(l1) != len(l2):\n return False\n for a, b in [(l1, l2), 
(l2, l1)]: # check the reverse of the lists too\n # see if all a are perfectly divisible by all b\n if all([(x % y == 0) for x, y in zip(a, b)]):\n # see if all have the same multiple\n if len(set([x/y for x, y in zip(a, b)])) == 1:\n return True\n return False", "def test_good_lists(self):\n input_numbers_list = []\n valid_numbers_list = []\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [1, 2, 3]\n valid_numbers_list = [1, 2, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [\"1\", 2, 3]\n valid_numbers_list = [1, 2, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [\"1\", 2.1, 3]\n valid_numbers_list = [1, 2.1, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)", "def test_list(self):\n\n test = ['test', 1, list()]\n\n result = hashiter(test)\n\n self.assertEqual(\n result,\n hash(list) +\n (hash('test') + 1) * 1 +\n (hash(1) + 1) * 2 + (hashiter([]) + 1) * 3\n )", "def divide(intList, num, step): #4\n newIntList = []\n thingsToAdd = []\n for index in range(0, len(intList), step):\n thingsToAdd.append(index)\n for index, item in enumerate(intList):\n if index in thingsToAdd:\n newIntList.append(item / float(num))\n else:\n newIntList.append(item)\n return newIntList", "def test_list(self):\n x = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)\n assert_array_almost_equal_nulp(z, x, 10)\n z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)\n assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)", "def test_cyclic_permutations_one():\n lst = [1]\n assert _cyclic_permutations(lst) == [[1]]", "def test_set(self):\n\n test = set([1, 2, 3])\n\n result = hashiter(test)\n\n self.assertEqual(\n result,\n hash(set) +\n (hash(1) + 1) * 1 + (hash(2) + 1) * 2 + (hash(3) + 1) * 3\n )", "def check_divisor(input_lst, n):\n\n output_lst = []\n for element in input_lst:\n if not element % n:\n output_lst.append('yes')\n else:\n output_lst.append('no')\n\n ''' List comprehension way to do this.\n\n output_lst = ['yes' if not element % n else 'no' for element in input_lst]\n\n '''\n\n return output_lst" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a list that does not divide equally, with the number of lists to divide into set to 0, returns an empty list.
def test_unequally_divisible_list_and_zero(self):
    result = split_list(self.unequally_divisible_list, 0)
    self.assertEqual(len(result), 0)
[ "def test_equally_divisible_list_and_zero(self):\n result = split_list(self.equally_divisible_list, 0)\n self.assertEqual(len(result), 0)", "def test_unequally_divisible_list_and_1(self):\n result = split_list(self.unequally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.unequally_divisible_list)", "def test_empty_lists_case():\n assert check_sum_of_four([], [], [], []) == 0", "def test_empty_lists():\n check_sum_of_four([], [], [], []) == 0", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException('Fail!')\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException('Fail!')\n result = listDivide([])\n if result != 0:\n raise ListDivideException('Fail!')\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException('Fail!')", "def testListDivide():\n \n result = listDivide([1,2,3,4,5])\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([2,4,6,8,10])\n if result != 5:\n raise ListDivideException(\"Failed\")\n result = listDivide([30,54,63,98,100], divide=10)\n if result != 2:\n raise ListDivideException(\"Failed\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Failed\")\n result = listDivide([1,2,3,4,5],1)\n if result != 5:\n raise ListDivideException(\"Failed\")", "def testListDivide():\n test_a = listDivide([1, 2, 3, 4, 5])\n test_b = listDivide([2, 4, 6, 8, 10])\n test_c = listDivide([30, 54, 63, 98, 100], divide=10)\n test_d = listDivide([])\n test_e = listDivide([1, 2, 3, 4, 5], 1)\n\n tests = (test_a, test_b, test_c, test_d, test_e)\n while test_a == int(2):\n while test_b == int(5):\n while test_c == int(2):\n while test_d == int(0):\n while test_e == int(5):\n return tests\n else:\n raise ListDivideException('Exception: a number is incorrect')", "def test_equally_divisible_list_and_1(self):\n result = split_list(self.equally_divisible_list, 1)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.equally_divisible_list)", "def test_equally_divisible_list_and_positive_int(self):\n result = split_list(self.equally_divisible_list, self.positive_int)\n self.assertEqual(len(result), 3)\n self.assertEqual(len(result[0]), 3)\n self.assertEqual(len(result[1]), 3)\n self.assertEqual(len(result[2]), 3)", "def listDivide(numbers, divide=2):\n\n num = []\n for x in numbers:\n if x % divide == 0:\n num.append(numbers)\n return len(num)", "def test_zero_lists():\n arr = [0, 0, 0]\n check_sum_of_four(arr, arr, arr, arr) == len(arr) ** 4", "def test_empty(self):\n s = IntegerSet()\n self.assertEqual(s.cardinality(), 0)\n self.assertNotIn(1337, s)\n L = list(s)\n self.assertEqual(L, [])\n self.assertFalse(s)\n self.assertEqual(0, len(s))", "def test_empty_list(self):\n empty = []\n self.assertEqual(max_integer(empty), None)", "def list_element_wise_division(a, b):\n return numpy.divide(a, b, out=numpy.zeros_like(a), where=b != 0.)", "def test_returns_zero_if_list_is_empty(self):\n result = island_counter([])\n self.assertEqual(result, 0)", "def testEmpty(self):\n assert Iter.foldl(self.f, 23, self.empty()) == 23\n assert Iter.foldr(self.f, 32, self.empty()) == 32", "def test_division(self):\n Mod5 = IntegersModP(5)\n Mod11 = IntegersModP(11)\n\n polysOverQ = polynomials_over(Fraction).factory\n polysMod5 = polynomials_over(Mod5).factory\n polysMod11 = 
polynomials_over(Mod11).factory\n for p in [polysOverQ, polysMod5, polysMod11]:\n # division\n assert p([1,1,1,1,1,1]) == p([-1,0,0,0,0,0,1]) / p([-1,1])\n assert p([-1,1,-1,1,-1,1]) == p([1,0,0,0,0,0,1]) / p([1,1])\n assert p([]) == p([]) / p([1,1])\n assert p([1,1]) == p([1,1]) / p([1])\n assert p([1,1]) == p([2,2]) / p([2])", "def suppr0(liste):\r\n return [n for n in liste if n!=0]", "def test_good_lists(self):\n input_numbers_list = []\n valid_numbers_list = []\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [1, 2, 3]\n valid_numbers_list = [1, 2, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [\"1\", 2, 3]\n valid_numbers_list = [1, 2, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)\n\n input_numbers_list = [\"1\", 2.1, 3]\n valid_numbers_list = [1, 2.1, 3]\n self.function_verify_cleaning(input_numbers_list, valid_numbers_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns key information about the current state of the ricepile. Returns the tuple (L,t,z,z_c) if single == False. If single == i for any i in {0,1,2,3}, returns (L,t,z,z_c)[i].
def info(self,single = False):
    if single not in [False,1,2,3,4]:
        raise ValueError("single must take value in [False,1,2,3,4]")
    data = (self.__L, self.__t, self.__z, self.__z_c)
    if single == False:
        return data
    else:
        return data[single]
[ "def utility(self, state):\n line_sums = self._calculate_line_sums(state)\n\n # Player 1 wins\n if np.any(line_sums == 4):\n return {1: 1, 2: -1}\n\n # Player 2 wins\n if np.any(line_sums == -4):\n return {1: -1, 2: 1}\n\n # Draw, if all squares are filled and no-one has four-in-a-row.\n if np.all(state):\n return {1: 0, 2: 0}\n\n # Otherwise the state is non-terminal and the utility cannot be calculated\n raise ValueError(\"Utility cannot be calculated for a \"\n \"non-terminal state.\")", "def getMouseState(self):\n\n state = pygame.mouse.get_pressed()\n statedict = {'mouse1': False, 'mouse2': False, 'mouse3': False}\n if state[0] is True:\n statedict['mouse1'] = True\n if state[1] is True:\n statedict['mouse2'] = True\n if state[2] is True:\n statedict['mouse3'] = True\n return statedict", "def get_current_state(self):\n active = self.state[self.state[:,2] == 1]\n return {'remain_time': np.transpose(self.state[:,0:1])[0], 'remain_energy': np.transpose(self.state[:, 1:2])[0]}", "def state(self, as_tuple = False):\n if as_tuple:\n return self.current_state\n else:\n return self.legal_states.index(self.current_state)", "def single(self):\r\n assert len(self) == 1, \"Wallet contains more than one thing.\"\r\n c, a = self.iteritems().next()\r\n return (a, c)", "def get_state(self) -> StatePiston:\n for name, value in StatePiston.__members__.items():\n if self._solenoid_l.get() == value.value[0] and self._solenoid_r.get() == value.value[1]:\n return value\n raise LookupError(\"Could not retrieve Piston State!\")", "def c3_state(self):\n return self.counter_get_state('C3')", "def state(self):\n if not self.sublayers:\n return self._state\n else:\n return tuple(layer.state if s is None else s\n for (layer, s) in zip(self.sublayers, self._state))", "def get_state(self):\n return self. 
_state, self._state_since", "def get_info_state(self):\n\n info_state = (InformationState(player=0), InformationState(player=1))\n\n for player in [0, 1]:\n for record in self:\n if record.action: # not the first record in the history\n # Append the action if player is the current player\n if record.action.player == player:\n info_state[player].append(record.action)\n else:\n info_state[player].append(None)\n info_state[player].append((record.obs[player], record.obs[-1]))\n\n return info_state", "def cp_key(c, ring):\n return (lbp_key(lbp(c[0], ring.zero, Num(c[2]))), lbp_key(lbp(c[3], ring.zero, Num(c[5]))))", "def getArmyFromState(state):\n return 1 if '1' in state else 2", "def get_state(self):\n return self._skuld.cmd(SkuldCmd(name='get_state',\n args=None, block=True))", "def extract_state(grid):\n grid.computeSlackPower()\n listP = [p for p in grid.pqBusesP]\n listP.insert(0, grid.slackPower[0])\n listQ = [q for q in grid.pqBusesQ]\n listQ.insert(0, grid.slackPower[1])\n\n grid.computeCurrents()\n lineCurrents = maximum(absolute(grid.forwardCurrents), absolute(grid.backwardCurrents)).tolist()\n\n # convert real and imaginary to magnitude and angle (degrees)\n Vm = []\n Va = []\n for i in range(len(grid.realV)):\n complexVoltage = complex(grid.realV[i], grid.imagV[i])\n Vm.append(absolute(complexVoltage))\n Va.append(angle(complexVoltage, deg=True))\n\n return {\n 'P': listP,\n 'Q': listQ,\n 'Vm': Vm,\n 'Va': Va,\n 'LineCurrents': lineCurrents\n }", "def get_states(self):\n self.H.__sendByte__(CP.DIN)\n self.H.__sendByte__(CP.GET_STATES)\n s = self.H.__getByte__()\n self.H.__get_ack__()\n return {'ID1': (s & 1 != 0), 'ID2': (s & 2 != 0), 'ID3': (s & 4 != 0), 'ID4': (s & 8 != 0)}", "def check_current_state(game_state):\n # Check horizontals in first row\n if (game_state[0][0] == game_state[0][1] and game_state[0][1] == game_state[0][2] and game_state[0][0] != ' '):\n return game_state[0][0], \"Done\"\n # Check horizontals in second row\n if (game_state[1][0] == game_state[1][1] and game_state[1][1] == game_state[1][2] and game_state[1][0] != ' '):\n return game_state[1][0], \"Done\"\n # Check horizontals in third row\n if (game_state[2][0] == game_state[2][1] and game_state[2][1] == game_state[2][2] and game_state[2][0] != ' '):\n return game_state[2][0], \"Done\"\n\n # Check verticals in first column\n if (game_state[0][0] == game_state[1][0] and game_state[1][0] == game_state[2][0] and game_state[0][0] != ' '):\n return game_state[0][0], \"Done\"\n # Check verticals in second column\n if (game_state[0][1] == game_state[1][1] and game_state[1][1] == game_state[2][1] and game_state[0][1] != ' '):\n return game_state[0][1], \"Done\"\n # Check verticals in third column\n if (game_state[0][2] == game_state[1][2] and game_state[1][2] == game_state[2][2] and game_state[0][2] != ' '):\n return game_state[0][2], \"Done\"\n\n # Check left diagonal\n if (game_state[0][0] == game_state[1][1] and game_state[1][1] == game_state[2][2] and game_state[0][0] != ' '):\n return game_state[1][1], \"Done\"\n # Check right diagonal\n if (game_state[0][2] == game_state[1][1] and game_state[1][1] == game_state[2][0] and game_state[0][2] != ' '):\n return game_state[1][1], \"Done\"\n\n # Check for draw\n draw_flag = 0\n for i in range(3):\n for j in range(3):\n if game_state[i][j] == ' ':\n draw_flag = 1\n\n if draw_flag == 0:\n return None, \"Draw\"\n\n return None, \"Not Done\"", "def get_state_keys(self):\r\n from .support import get_state_keys\r\n return get_state_keys()", "def get_data_state(self, user, 
input_key):\n return self._data_map[input_key]['state']", "def getGameState(self):\n ### student code goes here\n\n\n output = ([],[],[])\n\n bindings_list = self.kb.kb_ask(parse_input('fact: (on ?disk ?peg)'))\n\n for binding in bindings_list:\n num1 = int(binding['?peg'][3])-1\n num2 = int(binding['?disk'][4])\n output[num1].append(num2)\n\n for index in output:\n index.sort()\n\n return tuple(tuple(e) for e in output)", "def _state(self) -> Dict[str, Any]:\n return self._fetcher.get_cached_state(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of a selection of binary operators.
def test_compile_binary_operators(self):
    op_map = {
        operators.and_: ' AND ',
        operators.or_: ' OR ',
        operators.add: ' + ',
        operators.mul: ' * ',
        operators.sub: ' - ',
        operators.div: ' / ',
        operators.mod: ' MOD ',
        operators.truediv: ' / ',
        operators.lt: ' < ',
        operators.le: ' <= ',
        operators.ne: ' <> ',
        operators.gt: ' > ',
        operators.ge: ' >= ',
        operators.eq: ' = ',
        operators.concat_op: ' || ',
        operators.like_op: ' LIKE ',
        operators.is_: ' IS ',
        operators.isnot: ' IS NOT '
    }

    for op in op_map.keys():
        self.td_engine.execute(op(self.table.c.c1, text('arg')))
        assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')
[ "def test_compile_any_all_operators(self):\n op_map = {\n operators.any_op: 'ANY ',\n operators.all_op: 'ALL ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(\n op(sql.select([self.table.c.c1]).as_scalar()))\n\n assert(self.last_compiled ==\n op_map[op] + '(SELECT t_test.c1 \\nFROM t_test)')", "def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)", "def test_compile_unary_operators(self):\n op_map = {\n operators.distinct_op: 'DISTINCT ',\n operators.inv: 'NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == op_map[op] + 't_test.c1')", "def test_compile_in_operators(self):\n op_map = {\n operators.in_op: ' IN ',\n operators.notin_op: ' NOT IN ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, (0, 0)))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')", "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')", "def test_compile_modifier_operators(self):\n op_map = {\n operators.desc_op: ' DESC',\n operators.asc_op: ' ASC',\n operators.nullsfirst_op: ' NULLS FIRST',\n operators.nullslast_op: ' NULLS LAST',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op])", "def test_operator_get_all_operators(self):\n pass", "def test_mix_of_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A and (B or C and D) and not (C or not D and not E)',\n 'A and (B or C) and (B or D) and not C and (D or E)')\n self.assert_to_cnf_transformation(\n '(A and B and C) or not (A and D) or (A and (B or C) or '\n '(D and (E or F)))',\n '(C or not A or not D or B or E or F) and '\n '(B or not A or not D or C or E or F)')", "def test_chained_unary_operators_with_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'and', 'not'])\n self.assertFalse(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(\n ['A', 'B', 'or',\n 'C', 'D', 'E', 'not', 'and', 'and', 'not',\n 'xor'])\n self.assertFalse(t.is_really_unary)\n self.assertFalse(t.l_child.is_really_unary)\n self.assertFalse(t.r_child.is_really_unary)\n self.assertTrue(t.r_child.l_child.r_child.l_child.is_really_unary)", "def test_operator_literals():\n TestScanner._run(**{\n 'name': 'Operator Alpha Literals',\n 'expressions': {\n 'concat': ['.'],\n 'alt': ['|'],\n 'star': ['*'],\n 'question': ['?'],\n 'plus': ['+'],\n 'slash': ['\\\\'],\n 'lparen': ['('],\n 'rparen': [')'],\n 'lbracket': ['['],\n 'rbracket': [']']\n },\n 'DFA': {\n 'Q': set(['S', 'F', 'Err']),\n 'V': set('.|*?+\\\\()[]'),\n # pylint: disable=bad-whitespace\n 'T': [\n [' ', 'S', 'F', 'Err'],\n ['.', 'F', 'Err', 'Err'],\n ['|', 'F', 'Err', 'Err'],\n ['*', 'F', 'Err', 'Err'],\n ['?', 'F', 'Err', 'Err'],\n ['+', 'F', 'Err', 'Err'],\n ['\\\\', 'F', 'Err', 'Err'],\n ['(', 'F', 'Err', 'Err'],\n [')', 'F', 'Err', 'Err'],\n ['[', 'F', 'Err', 'Err'],\n [']', 'F', 'Err', 'Err']\n ],\n # pylint: enable=bad-whitespace\n 'S': 'S',\n 'F': set(['F']),\n 'G': {\n 'concat': set(['F']),\n 'alt': set(['F']),\n 'star': set(['F']),\n 'question': set(['F']),\n 'plus': set(['F']),\n 'slash': set(['F']),\n 'lparen': set(['F']),\n 'rparen': set(['F']),\n 'lbracket': set(['F']),\n 'rbracket': set(['F']),\n '_sink': set(['Err'])\n }\n }\n })", 
"def test_operator_get_operator(self):\n pass", "def test_operator_create_operator(self):\n pass", "def add_binary_comparison_operator(self, operator):\n operator = utils.ascii(operator)\n if not isinstance(operator, str):\n raise TypeError(\"expected operator name as string\")\n if operator not in ['==', '!=', '<', '<=', '>', '>=']:\n raise ValueError(\"The operator %r is invalid or not yet supported by PyBindGen\" % (operator,))\n self.binary_comparison_operators.add(operator)", "def test_mix_of_non_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A xor (B -> C -> D) nand (E iff F)',\n '(not A or ~B or ~C or D or not E or not F) and '\n '(A or B or not E or not F) and '\n '(A or C or not E or not F) and '\n '(A or not D or not E or not F) and '\n '(not A or ~B or ~C or D or E or F) and '\n '(A or B or E or F) and '\n '(A or C or E or F) and '\n '(A or not D or E or F)')\n self.assert_to_cnf_transformation(\n '(A nand B) -> (C nor D) -> (E iff F)',\n r'(A \\/ C \\/ D \\/ F or not E) /\\ (A \\/ C \\/ D \\/ E or not F) /\\ '\n r'(B \\/ C \\/ D \\/ F or not E) /\\ (B \\/ C \\/ D \\/ E or not F)')", "def visit_BinaryOp(self, node):\n op = node.op\n\n if op == '+':\n return self.visit(node.left) + self.visit(node.right)\n elif op == '-':\n return self.visit(node.left) - self.visit(node.right)\n elif op == '/':\n return self.visit(node.left) / self.visit(node.right)\n elif op == '*':\n return self.visit(node.left) * self.visit(node.right)\n elif op == '%':\n return self.visit(node.left) % self.visit(node.right)\n elif op == '*':\n return self.visit(node.left) * self.visit(node.right)\n elif op == '<':\n return self.visit(node.left) < self.visit(node.right)\n elif op == '>':\n return self.visit(node.left) > self.visit(node.right)\n elif op == '>=':\n return self.visit(node.left) >= self.visit(node.right)\n elif op == '<=':\n return self.visit(node.left) <= self.visit(node.right)\n elif op == '&&':\n return self.visit(node.left) and self.visit(node.right)\n elif op == '||':\n return self.visit(node.left) or self.visit(node.right)\n elif op == '==':\n return self.visit(node.left) == self.visit(node.right)\n elif op == '!=':\n return self.visit(node.left) != self.visit(node.right)", "def test_deeply_nested_primitive_operators(self):\n self.assert_to_cnf_transformation(\n '(A or (B and (C or (D and (E or (F and (G or (H and I))))))))',\n '(A or B) and (A or C or D) and (A or C or E or F) and '\n '(A or C or E or G or H) and (A or C or E or G or I)')\n self.assert_to_cnf_transformation(\n '(((((((((A or B) and C) or D) and E) or F) and G) or H) and I) '\n 'or J)',\n '((((A or B or D or F or H or J) and (C or D or F or H or J)) and '\n '(E or F or H or J)) and (G or H or J)) and (I or J)')\n self.assert_to_cnf_transformation(\n '((A and (B or not (C and D)) and E) or (F and G)) and ((A or B) '\n 'and (C or (D and E)))',\n '(A or F) and (B or not C or not D or F) and (E or F) and '\n '(A or G) and (B or not C or not D or G) and (E or G) and '\n '(A or B) and (C or D) and (C or E)')", "def has_operator(query):\n for char in query:\n if char in OPERATORS:\n return True\n return False", "def transform_binary_operator(self, node):\n # get all the tokens of assignment\n # and store it in the tokens list\n tokens = list(node.get_tokens())\n\n # supported operators list\n operators_list = ['+', '-', '*', '/', '%','=',\n '>', '>=', '<', '<=', '==', '!=', '&&', '||', '+=', '-=',\n '*=', '/=', '%=']\n\n # this stack will contain variable content\n # and type of variable in the rhs\n 
combined_variables_stack = []\n\n # this stack will contain operators\n # to be processed in the rhs\n operators_stack = []\n\n # iterate through every token\n for token in tokens:\n # token is either '(', ')' or\n # any of the supported operators from the operator list\n if token.kind == cin.TokenKind.PUNCTUATION:\n\n # push '(' to the operators stack\n if token.spelling == '(':\n operators_stack.append('(')\n\n elif token.spelling == ')':\n # keep adding the expression to the\n # combined variables stack unless\n # '(' is found\n while (operators_stack\n and operators_stack[-1] != '('):\n if len(combined_variables_stack) < 2:\n raise NotImplementedError(\n \"Unary operators as a part of \"\n \"binary operators is not \"\n \"supported yet!\")\n rhs = combined_variables_stack.pop()\n lhs = combined_variables_stack.pop()\n operator = operators_stack.pop()\n combined_variables_stack.append(\n self.perform_operation(\n lhs, rhs, operator))\n\n # pop '('\n operators_stack.pop()\n\n # token is an operator (supported)\n elif token.spelling in operators_list:\n while (operators_stack\n and self.priority_of(token.spelling)\n <= self.priority_of(\n operators_stack[-1])):\n if len(combined_variables_stack) < 2:\n raise NotImplementedError(\n \"Unary operators as a part of \"\n \"binary operators is not \"\n \"supported yet!\")\n rhs = combined_variables_stack.pop()\n lhs = combined_variables_stack.pop()\n operator = operators_stack.pop()\n combined_variables_stack.append(\n self.perform_operation(\n lhs, rhs, operator))\n\n # push current operator\n operators_stack.append(token.spelling)\n\n # token is a bitwise operator\n elif token.spelling in ['&', '|', '^', '<<', '>>']:\n raise NotImplementedError(\n \"Bitwise operator has not been \"\n \"implemented yet!\")\n\n # token is a shorthand bitwise operator\n elif token.spelling in ['&=', '|=', '^=', '<<=',\n '>>=']:\n raise NotImplementedError(\n \"Shorthand bitwise operator has not been \"\n \"implemented yet!\")\n else:\n raise NotImplementedError(\n \"Given token {} is not implemented yet!\"\n .format(token.spelling))\n\n # token is an identifier(variable)\n elif token.kind == cin.TokenKind.IDENTIFIER:\n combined_variables_stack.append(\n [token.spelling, 'identifier'])\n\n # token is a literal\n elif token.kind == cin.TokenKind.LITERAL:\n combined_variables_stack.append(\n [token.spelling, 'literal'])\n\n # token is a keyword, either true or false\n elif (token.kind == cin.TokenKind.KEYWORD\n and token.spelling in ['true', 'false']):\n combined_variables_stack.append(\n [token.spelling, 'boolean'])\n else:\n raise NotImplementedError(\n \"Given token {} is not implemented yet!\"\n .format(token.spelling))\n\n # process remaining operators\n while operators_stack:\n if len(combined_variables_stack) < 2:\n raise NotImplementedError(\n \"Unary operators as a part of \"\n \"binary operators is not \"\n \"supported yet!\")\n rhs = combined_variables_stack.pop()\n lhs = combined_variables_stack.pop()\n operator = operators_stack.pop()\n combined_variables_stack.append(\n self.perform_operation(lhs, rhs, operator))\n\n return combined_variables_stack[-1][0]", "def test_get_combinator_sql_all_union_sql_generated(self):\n\n qs1 = Number.objects.filter(num__lte=1).values(\"num\")\n qs2 = Number.objects.filter(num__gte=8).values(\"num\")\n qs4 = qs1.union(qs2)\n\n compiler = SQLCompiler(qs4.query, self.connection, \"default\")\n sql_compiled, params = compiler.get_combinator_sql(\"union\", True)\n self.assertEqual(\n sql_compiled,\n [\n \"SELECT 
tests_number.num FROM tests_number WHERE \"\n + \"tests_number.num <= %s UNION ALL SELECT tests_number.num \"\n + \"FROM tests_number WHERE tests_number.num >= %s\"\n ],\n )\n self.assertEqual(params, [1, 8])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of the IN and NOT IN binary operators.
def test_compile_in_operators(self):
    op_map = {
        operators.in_op: ' IN ',
        operators.notin_op: ' NOT IN ',
    }

    for op in op_map.keys():
        self.td_engine.execute(op(self.table.c.c1, (0, 0)))
        assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')
[ "def query_in(self, params):\n def make_list(value):\n if not isinstance(value, (tuple, list)):\n return [value]\n return value\n\n return self.get_conditions(params, 'in', '$in', make_list)", "def in_(field: FieldProxyAny, sequence: Sequence) -> QueryExpression:\n return _cmp_expression(field, \"$in\", sequence)", "def test_is_in(a, b, result):\n from is_in import is_in\n assert is_in(a, b) == result", "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n operators.mod: ' MOD ',\n operators.truediv: ' / ',\n operators.lt: ' < ',\n operators.le: ' <= ',\n operators.ne: ' <> ',\n operators.gt: ' > ',\n operators.ge: ' >= ',\n operators.eq: ' = ',\n operators.concat_op: ' || ',\n operators.like_op: ' LIKE ',\n operators.is_: ' IS ',\n operators.isnot: ' IS NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, text('arg')))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')", "def test_in(self):\n self.assertIn('k1', self.record.data_values)\n self.assertIn('k2', self.record.data_values)\n self.assertIn('k3', self.record.data_values)\n self.assertNotIn('no_such_key', self.record.data_values)", "def not_in(field: FieldProxyAny, sequence: Sequence) -> QueryExpression:\n return _cmp_expression(field, \"$nin\", sequence)", "def test_execute_value_subqueries_4(self):\n querying.execute_value_subqueries(self.mock_engine, \n self.mock_executable,\n self.mock_in_column,\n self.values)\n\n self.mock_in_column.in_.assert_called_with(self.values)", "def is_in(sequence):\n return IsIn(sequence)", "def test_compile_unary_operators(self):\n op_map = {\n operators.distinct_op: 'DISTINCT ',\n operators.inv: 'NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == op_map[op] + 't_test.c1')", "def assertIn(a, b):\n assert a in b", "def test_query_not_in_subset_operator():\n rally = Rally(server=RALLY, user=RALLY_USER, password=RALLY_PSWD, project=COLD_PROJECT)\n qualifier = 'ScheduleState !in Defined,Completed'\n #qualifier = '((ScheduleState != Defined) AND (ScheduleState != Completed))'\n response = rally.get('Story', fetch=True, query=qualifier, pagesize=100, limit=100, projectScopeDown=True)\n assert response.status_code == 200\n assert len(response.errors) == 0\n assert len(response.warnings) == 0\n\n items = [item for item in response]\n sched_states = [item.ScheduleState for item in items]\n ss = list(set(sorted(sched_states)))\n assert len(ss) == 2\n\n defined = [item for item in items if item.ScheduleState == 'Defined']\n inprog = [item for item in items if item.ScheduleState == 'In-Progress']\n completed = [item for item in items if item.ScheduleState == 'Completed']\n accepted = [item for item in items if item.ScheduleState == 'Accepted']\n\n assert len(defined) == 0\n assert len(completed) == 0\n assert len(inprog) > 0\n assert len(accepted) > 0", "def has_intel_query(self, operator: Enum, has_intel_query: int | list):\n if isinstance(has_intel_query, list) and operator not in self.list_types:\n raise RuntimeError(\n 'Operator must be CONTAINS, NOT_CONTAINS, IN'\n 'or NOT_IN when filtering on a list of values.'\n )\n\n self._tql.add_filter('hasIntelQuery', operator, has_intel_query, TqlType.INTEGER)", "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n 
operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')", "def test_get_combinator_sql_difference_all_sql_generated(self):\n qs1 = Number.objects.filter(num__lte=1).values(\"num\")\n qs2 = Number.objects.filter(num__gte=8).values(\"num\")\n qs4 = qs1.difference(qs2)\n\n compiler = SQLCompiler(qs4.query, self.connection, \"default\")\n sql_compiled, params = compiler.get_combinator_sql(\"difference\", True)\n\n self.assertEqual(\n sql_compiled,\n [\n \"SELECT tests_number.num FROM tests_number WHERE \"\n + \"tests_number.num <= %s EXCEPT ALL SELECT tests_number.num \"\n + \"FROM tests_number WHERE tests_number.num >= %s\"\n ],\n )\n self.assertEqual(params, [1, 8])", "def _check_rule_not_in(self, data_sources, conditions):\n return not self._check_rule_in(data_sources, conditions)", "def in_and_not_in(identifier, collection, kwargs):\n return identifier in kwargs[\"url\"] and collection not in kwargs", "def ifnotin(parser, token):\n return do_ifin(parser, token, True)", "def use_in_expression(self) -> bool:", "def _has_infix(self, statement):\n tokens = statement.split(' ')\n for ix, token in enumerate(tokens):\n for op, _ in ArithmeticHandler.INFIX_OPS:\n if op == token:\n return self._is_number_in(tokens[ix:]) and self._is_number_in(tokens[:ix])\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of a selection of unary operators.
def test_compile_unary_operators(self):
    op_map = {
        operators.distinct_op: 'DISTINCT ',
        operators.inv: 'NOT '
    }

    for op in op_map.keys():
        self.td_engine.execute(op(self.table.c.c1))
        assert(self.last_compiled == op_map[op] + 't_test.c1')
[ "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n operators.mod: ' MOD ',\n operators.truediv: ' / ',\n operators.lt: ' < ',\n operators.le: ' <= ',\n operators.ne: ' <> ',\n operators.gt: ' > ',\n operators.ge: ' >= ',\n operators.eq: ' = ',\n operators.concat_op: ' || ',\n operators.like_op: ' LIKE ',\n operators.is_: ' IS ',\n operators.isnot: ' IS NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, text('arg')))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')", "def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)", "def test_chained_unary_operators_ending_in_operand(self):\n t = ExpressionTreeNode.build_tree(['A', '~'])\n self.assertTrue(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(['A', '~', '~'])\n self.assertTrue(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(['A', '~', '~', '~'])\n self.assertTrue(t.is_really_unary)", "def test_chained_unary_operators_with_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'and', 'not'])\n self.assertFalse(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(\n ['A', 'B', 'or',\n 'C', 'D', 'E', 'not', 'and', 'and', 'not',\n 'xor'])\n self.assertFalse(t.is_really_unary)\n self.assertFalse(t.l_child.is_really_unary)\n self.assertFalse(t.r_child.is_really_unary)\n self.assertTrue(t.r_child.l_child.r_child.l_child.is_really_unary)", "def test_only_unary_operand_expression(self):\n self.assert_to_cnf_transformation('not A', 'not A')\n self.assert_to_cnf_transformation('~A', '~A')\n self.assert_to_cnf_transformation('~~A', 'A')\n self.assert_to_cnf_transformation('~~~A', '~A')\n self.assert_to_cnf_transformation('~~~~~~~~~~~~~~~~~~~~~~~~A', 'A')\n self.assert_to_cnf_transformation(\n '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~A', '~A')", "def test_single_operand(self):\n for token in ('0', '1', 'token'):\n t = ExpressionTreeNode.build_tree([token])\n self.assertTrue(t.is_really_unary)", "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? 
AND t_test.c1 MOD ?')", "def test_compile_negative_operator(self):\n self.td_engine.execute(operators.neg(self.table.c.c1))\n\n assert(self.last_compiled == '-t_test.c1')", "def test_compile_in_operators(self):\n op_map = {\n operators.in_op: ' IN ',\n operators.notin_op: ' NOT IN ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, (0, 0)))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')", "def test_compile_any_all_operators(self):\n op_map = {\n operators.any_op: 'ANY ',\n operators.all_op: 'ALL ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(\n op(sql.select([self.table.c.c1]).as_scalar()))\n\n assert(self.last_compiled ==\n op_map[op] + '(SELECT t_test.c1 \\nFROM t_test)')", "def _has_unary(self, statement):\n tokens = statement.split(' ')\n for ix, token in enumerate(tokens):\n for op, _ in ArithmeticHandler.UNARY_OPS:\n if op == token:\n return self._is_number_in(tokens[ix:])\n return False", "def __parse_unary_operators(self, parse_expr: list) -> list:\n index = 0\n while index < len(parse_expr):\n # Check if the operator can be unary\n if parse_expr[index] in UNARY_OPERATORS:\n # Check if there if the operator is unary\n if index == 0 or parse_expr[index - 1] == self.__open_parenthesis:\n sign = parse_expr[index]\n parse_expr = parse_expr[:index] + [float(sign + ONLY_SIGN_AFFECTED_NUMBER), MUL_SIGN] + parse_expr[\n index + 1:]\n index += 2\n continue\n index += 1\n return parse_expr", "def unaryop_expr(expr, whitelist, scope):\n if isinstance(expr.op, ast.Not):\n return '!({})'.format(pystmt2vega(expr.operand, whitelist, scope))\n if isinstance(expr.op, ast.USub):\n return '-{}'.format(pystmt2vega(expr.operand, whitelist, scope))\n if isinstance(expr.op, ast.UAdd):\n return '+{}'.format(pystmt2vega(expr.operand, whitelist, scope))\n\n raise RuntimeError('Unsupported {} operator, only a subset of Python is supported'.format(str(expr.op)))", "def test_compile_modifier_operators(self):\n op_map = {\n operators.desc_op: ' DESC',\n operators.asc_op: ' ASC',\n operators.nullsfirst_op: ' NULLS FIRST',\n operators.nullslast_op: ' NULLS LAST',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op])", "def test_mix_of_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A and (B or C and D) and not (C or not D and not E)',\n 'A and (B or C) and (B or D) and not C and (D or E)')\n self.assert_to_cnf_transformation(\n '(A and B and C) or not (A and D) or (A and (B or C) or '\n '(D and (E or F)))',\n '(C or not A or not D or B or E or F) and '\n '(B or not A or not D or C or E or F)')", "def test_mix_of_non_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A xor (B -> C -> D) nand (E iff F)',\n '(not A or ~B or ~C or D or not E or not F) and '\n '(A or B or not E or not F) and '\n '(A or C or not E or not F) and '\n '(A or not D or not E or not F) and '\n '(not A or ~B or ~C or D or E or F) and '\n '(A or B or E or F) and '\n '(A or C or E or F) and '\n '(A or not D or E or F)')\n self.assert_to_cnf_transformation(\n '(A nand B) -> (C nor D) -> (E iff F)',\n r'(A \\/ C \\/ D \\/ F or not E) /\\ (A \\/ C \\/ D \\/ E or not F) /\\ '\n r'(B \\/ C \\/ D \\/ F or not E) /\\ (B \\/ C \\/ D \\/ E or not F)')", "def test_operator_literals():\n TestScanner._run(**{\n 'name': 'Operator Alpha Literals',\n 'expressions': {\n 'concat': ['.'],\n 'alt': ['|'],\n 'star': ['*'],\n 'question': ['?'],\n 'plus': ['+'],\n 'slash': ['\\\\'],\n 
'lparen': ['('],\n 'rparen': [')'],\n 'lbracket': ['['],\n 'rbracket': [']']\n },\n 'DFA': {\n 'Q': set(['S', 'F', 'Err']),\n 'V': set('.|*?+\\\\()[]'),\n # pylint: disable=bad-whitespace\n 'T': [\n [' ', 'S', 'F', 'Err'],\n ['.', 'F', 'Err', 'Err'],\n ['|', 'F', 'Err', 'Err'],\n ['*', 'F', 'Err', 'Err'],\n ['?', 'F', 'Err', 'Err'],\n ['+', 'F', 'Err', 'Err'],\n ['\\\\', 'F', 'Err', 'Err'],\n ['(', 'F', 'Err', 'Err'],\n [')', 'F', 'Err', 'Err'],\n ['[', 'F', 'Err', 'Err'],\n [']', 'F', 'Err', 'Err']\n ],\n # pylint: enable=bad-whitespace\n 'S': 'S',\n 'F': set(['F']),\n 'G': {\n 'concat': set(['F']),\n 'alt': set(['F']),\n 'star': set(['F']),\n 'question': set(['F']),\n 'plus': set(['F']),\n 'slash': set(['F']),\n 'lparen': set(['F']),\n 'rparen': set(['F']),\n 'lbracket': set(['F']),\n 'rbracket': set(['F']),\n '_sink': set(['Err'])\n }\n }\n })", "def supports_aux_operators(self) -> bool:\n return True", "def test_operator_get_all_operators(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of the ANY and ALL unary operators.
def test_compile_any_all_operators(self):
    op_map = {
        operators.any_op: 'ANY ',
        operators.all_op: 'ALL ',
    }

    for op in op_map.keys():
        self.td_engine.execute(
            op(sql.select([self.table.c.c1]).as_scalar()))
        assert(self.last_compiled ==
               op_map[op] + '(SELECT t_test.c1 \nFROM t_test)')
[ "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n operators.mod: ' MOD ',\n operators.truediv: ' / ',\n operators.lt: ' < ',\n operators.le: ' <= ',\n operators.ne: ' <> ',\n operators.gt: ' > ',\n operators.ge: ' >= ',\n operators.eq: ' = ',\n operators.concat_op: ' || ',\n operators.like_op: ' LIKE ',\n operators.is_: ' IS ',\n operators.isnot: ' IS NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, text('arg')))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')", "def test_compile_unary_operators(self):\n op_map = {\n operators.distinct_op: 'DISTINCT ',\n operators.inv: 'NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == op_map[op] + 't_test.c1')", "def any_(expr: _ColumnExpressionArgument[_T]) -> CollectionAggregate[bool]:\n return CollectionAggregate._create_any(expr)", "def all_(expr: _ColumnExpressionArgument[_T]) -> CollectionAggregate[bool]:\n return CollectionAggregate._create_all(expr)", "def test_compile_in_operators(self):\n op_map = {\n operators.in_op: ' IN ',\n operators.notin_op: ' NOT IN ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, (0, 0)))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')", "def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)", "def test_any():\n assert not hug.validate.any(\n hug.validate.contains_one_of(\"last\", \"year\"), hug.validate.contains_one_of(\"first\", \"place\")\n )(TEST_SCHEMA)\n assert hug.validate.any(\n hug.validate.contains_one_of(\"last\", \"year\"), hug.validate.contains_one_of(\"no\", \"way\")\n )(TEST_SCHEMA)", "def test_single_operand(self):\n for token in ('0', '1', 'token'):\n t = ExpressionTreeNode.build_tree([token])\n self.assertTrue(t.is_really_unary)", "def __or__(expr):", "def logical_all(*args):\n out = np.ones_like(args[0]).astype(bool)\n for arg in args:\n out = np.logical_and(out, arg)\n\n return out", "def test_any_of(self):\n self.assertTrue(any_of(lambda x: x == 1, [1, 2, 3, 4, 5]))\n self.assertTrue(any_of(lambda x: x == 2, [1, 2, 3, 4, 5]))\n self.assertTrue(any_of(lambda x: x == 5, [1, 2, 3, 4, 5]))\n\n self.assertFalse(any_of(lambda x: x == 0, [1, 2, 3, 4, 5]))\n self.assertFalse(any_of(lambda x: x == 6, [1, 2, 3, 4, 5]))", "def test_any_in_any():\n assert any_in_any([1, 2, 3, 9], [6, 7, 8, 9]) is True\n assert any_in_any([1, 2, 3, 4], [6, 7, 8, 9]) is False", "def test_has_and_any_suboperators(self):\r\n # create test computers\r\n computer1 = self.Computer(name=u'c1', vendor=u'foo')\r\n computer2 = self.Computer(name=u'c2', vendor=u'bar')\r\n computer3 = self.Computer(name=u'c3', vendor=u'bar')\r\n computer4 = self.Computer(name=u'c4', vendor=u'bar')\r\n computer5 = self.Computer(name=u'c5', vendor=u'foo')\r\n computer6 = self.Computer(name=u'c6', vendor=u'foo')\r\n self.session.add_all((computer1, computer2, computer3, computer4,\r\n computer5, computer6))\r\n self.session.commit()\r\n # add the computers to three test people\r\n person1, person2, person3 = self.people[:3]\r\n person1.computers = [computer1, computer2, computer3]\r\n person2.computers = [computer4]\r\n person3.computers = [computer5, computer6]\r\n self.session.commit()\r\n # test 'any'\r\n val = dict(name='vendor', op='like', val=u'%o%')\r\n d = 
dict(filters=[dict(name='computers', op='any', val=val)])\r\n result = search(self.session, self.Person, d)\r\n assert result.count() == 2\r\n # test 'has'\r\n val = dict(name='name', op='like', val=u'%incol%')\r\n d = dict(filters=[dict(name='owner', op='has', val=val)])\r\n result = search(self.session, self.Computer, d)\r\n assert result.count() == 3", "def test_operator_get_all_operators(self):\n pass", "def test_mix_of_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A and (B or C and D) and not (C or not D and not E)',\n 'A and (B or C) and (B or D) and not C and (D or E)')\n self.assert_to_cnf_transformation(\n '(A and B and C) or not (A and D) or (A and (B or C) or '\n '(D and (E or F)))',\n '(C or not A or not D or B or E or F) and '\n '(B or not A or not D or C or E or F)')", "def __and__(expr):", "def test_get_combinator_sql_all_union_sql_generated(self):\n\n qs1 = Number.objects.filter(num__lte=1).values(\"num\")\n qs2 = Number.objects.filter(num__gte=8).values(\"num\")\n qs4 = qs1.union(qs2)\n\n compiler = SQLCompiler(qs4.query, self.connection, \"default\")\n sql_compiled, params = compiler.get_combinator_sql(\"union\", True)\n self.assertEqual(\n sql_compiled,\n [\n \"SELECT tests_number.num FROM tests_number WHERE \"\n + \"tests_number.num <= %s UNION ALL SELECT tests_number.num \"\n + \"FROM tests_number WHERE tests_number.num >= %s\"\n ],\n )\n self.assertEqual(params, [1, 8])", "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')", "def get_boolean_functions_from_truth_table_logical_oracle():\n variables = ['a', 'b', 'c', 'd']\n table = get_truth_table()\n OR = ' | '\n AND = ' & '\n NOT = ' ~'\n truth_string = boolean_function_builder(AND, NOT, OR, table, variables)\n return truth_string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of a selection of modifier operators.
def test_compile_modifier_operators(self):
    op_map = {
        operators.desc_op: ' DESC',
        operators.asc_op: ' ASC',
        operators.nullsfirst_op: ' NULLS FIRST',
        operators.nullslast_op: ' NULLS LAST',
    }

    for op in op_map.keys():
        self.td_engine.execute(op(self.table.c.c1))
        assert(self.last_compiled == 't_test.c1' + op_map[op])
[ "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')", "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n operators.mod: ' MOD ',\n operators.truediv: ' / ',\n operators.lt: ' < ',\n operators.le: ' <= ',\n operators.ne: ' <> ',\n operators.gt: ' > ',\n operators.ge: ' >= ',\n operators.eq: ' = ',\n operators.concat_op: ' || ',\n operators.like_op: ' LIKE ',\n operators.is_: ' IS ',\n operators.isnot: ' IS NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, text('arg')))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')", "def test_compile_any_all_operators(self):\n op_map = {\n operators.any_op: 'ANY ',\n operators.all_op: 'ALL ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(\n op(sql.select([self.table.c.c1]).as_scalar()))\n\n assert(self.last_compiled ==\n op_map[op] + '(SELECT t_test.c1 \\nFROM t_test)')", "def test_compile_in_operators(self):\n op_map = {\n operators.in_op: ' IN ',\n operators.notin_op: ' NOT IN ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, (0, 0)))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')", "def test_operator_get_all_operators(self):\n pass", "def test_compile_unary_operators(self):\n op_map = {\n operators.distinct_op: 'DISTINCT ',\n operators.inv: 'NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == op_map[op] + 't_test.c1')", "def test_operator_literals():\n TestScanner._run(**{\n 'name': 'Operator Alpha Literals',\n 'expressions': {\n 'concat': ['.'],\n 'alt': ['|'],\n 'star': ['*'],\n 'question': ['?'],\n 'plus': ['+'],\n 'slash': ['\\\\'],\n 'lparen': ['('],\n 'rparen': [')'],\n 'lbracket': ['['],\n 'rbracket': [']']\n },\n 'DFA': {\n 'Q': set(['S', 'F', 'Err']),\n 'V': set('.|*?+\\\\()[]'),\n # pylint: disable=bad-whitespace\n 'T': [\n [' ', 'S', 'F', 'Err'],\n ['.', 'F', 'Err', 'Err'],\n ['|', 'F', 'Err', 'Err'],\n ['*', 'F', 'Err', 'Err'],\n ['?', 'F', 'Err', 'Err'],\n ['+', 'F', 'Err', 'Err'],\n ['\\\\', 'F', 'Err', 'Err'],\n ['(', 'F', 'Err', 'Err'],\n [')', 'F', 'Err', 'Err'],\n ['[', 'F', 'Err', 'Err'],\n [']', 'F', 'Err', 'Err']\n ],\n # pylint: enable=bad-whitespace\n 'S': 'S',\n 'F': set(['F']),\n 'G': {\n 'concat': set(['F']),\n 'alt': set(['F']),\n 'star': set(['F']),\n 'question': set(['F']),\n 'plus': set(['F']),\n 'slash': set(['F']),\n 'lparen': set(['F']),\n 'rparen': set(['F']),\n 'lbracket': set(['F']),\n 'rbracket': set(['F']),\n '_sink': set(['Err'])\n }\n }\n })", "def test_operator_get_operator(self):\n pass", "def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)", "def has_operator(query):\n for char in query:\n if char in OPERATORS:\n return True\n return False", "def test_operator_create_operator(self):\n pass", "def test_operator_delete_operator(self):\n pass", "def test_get_combinator_sql_difference_all_sql_generated(self):\n qs1 = Number.objects.filter(num__lte=1).values(\"num\")\n qs2 = Number.objects.filter(num__gte=8).values(\"num\")\n qs4 = qs1.difference(qs2)\n\n compiler = SQLCompiler(qs4.query, self.connection, \"default\")\n sql_compiled, 
params = compiler.get_combinator_sql(\"difference\", True)\n\n self.assertEqual(\n sql_compiled,\n [\n \"SELECT tests_number.num FROM tests_number WHERE \"\n + \"tests_number.num <= %s EXCEPT ALL SELECT tests_number.num \"\n + \"FROM tests_number WHERE tests_number.num >= %s\"\n ],\n )\n self.assertEqual(params, [1, 8])", "def test_compile_negative_operator(self):\n self.td_engine.execute(operators.neg(self.table.c.c1))\n\n assert(self.last_compiled == '-t_test.c1')", "def test_operator_update_operator(self):\n pass", "def test_operator_get_operator_groups_for_operator(self):\n pass", "def verify_modifiers(self,command,afmt,**kwargs):\n # optomux command format contains 'modifiers'\n if 'modifiers' in afmt:\n if 'modifiers' in kwargs:\n if isinstance(kwargs['modifiers'],tuple):\n if len(kwargs['modifiers']) == 1:\n return self.verify_single_modifier(command,kwargs['modifiers'][0])\n elif len(kwargs['modifiers']) == 2:\n return self.verify_double_modifier(command,kwargs['modifiers'])\n elif isinstance(kwargs['modifiers'],str) \\\n or isinstance(kwargs['modifiers'],int):\n return self.verify_single_modifier(command,kwargs['modifiers'])\n return ('E',-23)\n return (0,'')", "def __or__(expr):", "def test_tsql_select(self):\n expected_query = self.select_query\n actual_query = self.dictable.select_query_syntax()\n self.assertEqualQueries(expected_query, actual_query)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of the negative operator.
def test_compile_negative_operator(self):
    self.td_engine.execute(operators.neg(self.table.c.c1))

    assert(self.last_compiled == '-t_test.c1')
[ "def test_negation_interaction(self):\n query, sort = beets.library.parse_query_string('-bar+',\n beets.library.Item)\n self.assertEqual(len(query.subqueries), 1)\n self.assertTrue(isinstance(query.subqueries[0],\n dbcore.query.TrueQuery))\n self.assertTrue(isinstance(sort, dbcore.query.SlowFieldSort))\n self.assertEqual(sort.field, '-bar')", "def test_negate_operator(self):\n actual = search_queries.negate_operator('=')\n self.assertEqual('!=', actual)\n\n actual = search_queries.negate_operator('!=')\n self.assertEqual('=', actual)\n\n actual = search_queries.negate_operator('<')\n self.assertEqual('>=', actual)\n\n actual = search_queries.negate_operator('<=')\n self.assertEqual('>', actual)\n\n actual = search_queries.negate_operator('>')\n self.assertEqual('<=', actual)\n\n actual = search_queries.negate_operator('>=')\n self.assertEqual('<', actual)", "def test_invalid(self):\n\n expression = \"- 1 + 3\" # Invalid syntax\n\n self.assertNotEqual(eval(expression), PrefixOperation(expression).evaluate_expression())", "def test_negative_mult(self) -> None:\n self.assertEqual(8, self.parse(self.arithmetic_lexer.lex(\"(0-2) * (0- 4)\")))\n self.assertEqual(8, self.parse(self.arithmetic_lexer.lex(\"(0-2) * 2 * (0-2)\")))\n self.assertEqual(-5, self.parse(self.arithmetic_lexer.lex(\"1 + (0-2) * 3\")))", "def test_compile_unary_operators(self):\n op_map = {\n operators.distinct_op: 'DISTINCT ',\n operators.inv: 'NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == op_map[op] + 't_test.c1')", "def test_negation(\n large_game_roles: tuple[Role, ...], example_statement: Statement\n ) -> None:\n expected = Statement(\n \"NOT - test\",\n ((2, const.ROLE_SET - frozenset({Role.ROBBER})),),\n speaker=Role.ROBBER,\n )\n\n result = example_statement.negation\n\n assert str(result) == str(expected)", "def test_score_equation_is_not_negative(self):\n self.assertTrue(views.score_equation(0, -10, -30) >= 0)", "def is_op_not_subtract(char):\n return char in OPS_WITHOUT_SUBTRACTION", "def test_compile_nested_operators(self):\n self.td_engine.execute(\n operators.and_(\n operators.ne(self.table.c.c1, 0),\n operators.mod(self.table.c.c1, 0)))\n\n assert(self.last_compiled == 't_test.c1 <> ? 
AND t_test.c1 MOD ?')", "def negate(self):\r\n self.negative = not self.negative\r\n self.update_predicate_string()", "def __notexpr(self):\n if self.__token.category == Token.NOT:\n self.__advance()\n self.__relexpr()\n right = self.__operand_stack.pop()\n self.__operand_stack.append(not right)\n else:\n self.__relexpr()", "def logicalNegation(cell: 'Cell', /) -> 'WordConstructor': # noqa: E225\n return WordConstructor._unary_operator(\n cell=cell,\n prefix_path=['operations', 'logical-negation', 'prefix'],\n suffix_path=['operations', 'logical-negation', 'suffix']\n )", "def test_exec_flow_control_jump_when_neg_moves_pointer_when_popping_neg():\n from esolang_whitespace import SpaceInterpreter\n i = SpaceInterpreter('\\t\\t\\t\\n\\t\\n ')\n p, _ = i.exec_flow_control('\\t\\t\\t\\n\\t\\n ', {'\\t': 9}, [-5], [0])\n assert p == 9", "def negation(self):\n if self.is_negation():\n return Literal(self.value[1:])\n return Literal(NOT + self.value)", "def test_not(self):\n crit = qml.BooleanFn(lambda x: x < 4)\n ncrit = ~crit\n assert crit(-2) and not ncrit(-2)\n assert not crit(10) and ncrit(10)", "def checkNot(query):\n # if odd count of NOT then negate operator\n if query['wherenot'] == False:\n return query\n else:\n if query['whereop'] == \"=\":\n query['whereop'] = \"!\"\n elif query['whereop'] == \"<\":\n query['whereop'] = '>'\n elif query['whereop'] == \">\":\n query['whereop'] = \"<\"\n elif query['whereop'] == \"CONTAINS\":\n query['whereop'] = \"NCONTAINS\"\n return query", "def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)", "def test_negate_returns_not_predicate(self):\n with patch('uncertainty.conditions.NotPredicate') as not_predicate_mock:\n self.assertEqual(not_predicate_mock.return_value, -self.predicate)", "def test_negative_number(self) -> None:\n val = self.parse(self.arithmetic_lexer.lex(\"0-13\"))\n self.assertEqual(-13, val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests SQL compilation of nested operators.
def test_compile_nested_operators(self): self.td_engine.execute( operators.and_( operators.ne(self.table.c.c1, 0), operators.mod(self.table.c.c1, 0))) assert(self.last_compiled == 't_test.c1 <> ? AND t_test.c1 MOD ?')
[ "def test_deeply_nested_primitive_operators(self):\n self.assert_to_cnf_transformation(\n '(A or (B and (C or (D and (E or (F and (G or (H and I))))))))',\n '(A or B) and (A or C or D) and (A or C or E or F) and '\n '(A or C or E or G or H) and (A or C or E or G or I)')\n self.assert_to_cnf_transformation(\n '(((((((((A or B) and C) or D) and E) or F) and G) or H) and I) '\n 'or J)',\n '((((A or B or D or F or H or J) and (C or D or F or H or J)) and '\n '(E or F or H or J)) and (G or H or J)) and (I or J)')\n self.assert_to_cnf_transformation(\n '((A and (B or not (C and D)) and E) or (F and G)) and ((A or B) '\n 'and (C or (D and E)))',\n '(A or F) and (B or not C or not D or F) and (E or F) and '\n '(A or G) and (B or not C or not D or G) and (E or G) and '\n '(A or B) and (C or D) and (C or E)')", "def test_deeply_nested_mixed_operators(self):\n self.assert_to_cnf_transformation(\n '(A nand (B impl (D or E or F))) iff ~~~(A nor B nor C)',\n '(A or not B) and (A or not C) and '\n '(A or not B or D or E or F) and '\n r'(A \\/ not C or not B or D or E or F) and '\n '(not A or B) and (not A or not D) and (not A or not E) and '\n '(not A or not F) and (not A or B or C) and '\n '(not A or not D or B or C) and (not A or not E or B or C) and '\n '(not A or not F or B or C)')\n self.assert_to_cnf_transformation(\n '(A nand ((B or C) iff (D nor E) iff (F or G or H)) nand C) nor D',\n 'A and (not B or D or E or not F or not C) and '\n '(not C or D or E or not F) and '\n '(not B or D or E or not G or not C) and '\n '(not C or D or E or not G) and '\n '(not B or D or E or not H or not C) and '\n '(not C or D or E or not H) and '\n '(not B or not D or F or G or H or not C) and '\n '(not C or not D or F or G or H) and '\n '(not B or not E or F or G or H or not C) and '\n '(not C or not E or F or G or H) and not D')", "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n operators.mod: ' MOD ',\n operators.truediv: ' / ',\n operators.lt: ' < ',\n operators.le: ' <= ',\n operators.ne: ' <> ',\n operators.gt: ' > ',\n operators.ge: ' >= ',\n operators.eq: ' = ',\n operators.concat_op: ' || ',\n operators.like_op: ' LIKE ',\n operators.is_: ' IS ',\n operators.isnot: ' IS NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, text('arg')))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')", "def test_has_and_any_nested_suboperators(self):\r\n # create test computers\r\n computer1 = self.Computer(name=u'c1', vendor=u'foo')\r\n computer2 = self.Computer(name=u'c2', vendor=u'bar')\r\n computer3 = self.Computer(name=u'c3', vendor=u'bar')\r\n computer4 = self.Computer(name=u'c4', vendor=u'bar')\r\n computer5 = self.Computer(name=u'c5', vendor=u'foo')\r\n computer6 = self.Computer(name=u'c6', vendor=u'foo')\r\n self.session.add_all((computer1, computer2, computer3, computer4,\r\n computer5, computer6))\r\n self.session.commit()\r\n # add the computers to three test people\r\n person1, person2, person3 = self.people[:3]\r\n person1.computers = [computer1, computer2, computer3]\r\n person2.computers = [computer4]\r\n person3.computers = [computer5, computer6]\r\n self.session.commit()\r\n # test 'any'\r\n innerval = dict(name='name', op='like', val=u'%incol%')\r\n val = dict(name='owner', op='has', val=innerval)\r\n d = dict(filters=[dict(name='computers', op='any', val=val)])\r\n result = search(self.session, 
self.Person, d)\r\n assert result.count() == 1\r\n # test 'has'\r\n innerval = dict(name='vendor', op='like', val=u'%o%')\r\n val = dict(name='computers', op='any', val=innerval)\r\n d = dict(filters=[dict(name='owner', op='has', val=val)])\r\n result = search(self.session, self.Computer, d)\r\n assert result.count() == 5", "def test_compile_modifier_operators(self):\n op_map = {\n operators.desc_op: ' DESC',\n operators.asc_op: ' ASC',\n operators.nullsfirst_op: ' NULLS FIRST',\n operators.nullslast_op: ' NULLS LAST',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op])", "def test_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'or'])\n self.assertFalse(t.is_really_unary)", "def test_compile_in_operators(self):\n op_map = {\n operators.in_op: ' IN ',\n operators.notin_op: ' NOT IN ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, (0, 0)))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + '(?, ?)')", "def test_compile_unary_operators(self):\n op_map = {\n operators.distinct_op: 'DISTINCT ',\n operators.inv: 'NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1))\n\n assert(self.last_compiled == op_map[op] + 't_test.c1')", "def test_compile_any_all_operators(self):\n op_map = {\n operators.any_op: 'ANY ',\n operators.all_op: 'ALL ',\n }\n\n for op in op_map.keys():\n self.td_engine.execute(\n op(sql.select([self.table.c.c1]).as_scalar()))\n\n assert(self.last_compiled ==\n op_map[op] + '(SELECT t_test.c1 \\nFROM t_test)')", "def test_chained_unary_operators_with_binary_operator(self):\n t = ExpressionTreeNode.build_tree(['A', 'B', 'and', 'not'])\n self.assertFalse(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(\n ['A', 'B', 'or',\n 'C', 'D', 'E', 'not', 'and', 'and', 'not',\n 'xor'])\n self.assertFalse(t.is_really_unary)\n self.assertFalse(t.l_child.is_really_unary)\n self.assertFalse(t.r_child.is_really_unary)\n self.assertTrue(t.r_child.l_child.r_child.l_child.is_really_unary)", "def test_operator_get_all_operators(self):\n pass", "def test_operator_create_operator(self):\n pass", "def test_advanced_math(self):\n exp = \"m{(10+10)+10+10}\"\n self.assertEqual(self.engine.Process(exp), \"40\", \"adds complex nested math\")", "def test_operator_get_operator(self):\n pass", "def test_operator_literals():\n TestScanner._run(**{\n 'name': 'Operator Alpha Literals',\n 'expressions': {\n 'concat': ['.'],\n 'alt': ['|'],\n 'star': ['*'],\n 'question': ['?'],\n 'plus': ['+'],\n 'slash': ['\\\\'],\n 'lparen': ['('],\n 'rparen': [')'],\n 'lbracket': ['['],\n 'rbracket': [']']\n },\n 'DFA': {\n 'Q': set(['S', 'F', 'Err']),\n 'V': set('.|*?+\\\\()[]'),\n # pylint: disable=bad-whitespace\n 'T': [\n [' ', 'S', 'F', 'Err'],\n ['.', 'F', 'Err', 'Err'],\n ['|', 'F', 'Err', 'Err'],\n ['*', 'F', 'Err', 'Err'],\n ['?', 'F', 'Err', 'Err'],\n ['+', 'F', 'Err', 'Err'],\n ['\\\\', 'F', 'Err', 'Err'],\n ['(', 'F', 'Err', 'Err'],\n [')', 'F', 'Err', 'Err'],\n ['[', 'F', 'Err', 'Err'],\n [']', 'F', 'Err', 'Err']\n ],\n # pylint: enable=bad-whitespace\n 'S': 'S',\n 'F': set(['F']),\n 'G': {\n 'concat': set(['F']),\n 'alt': set(['F']),\n 'star': set(['F']),\n 'question': set(['F']),\n 'plus': set(['F']),\n 'slash': set(['F']),\n 'lparen': set(['F']),\n 'rparen': set(['F']),\n 'lbracket': set(['F']),\n 'rbracket': set(['F']),\n '_sink': set(['Err'])\n }\n }\n })", "def test_chained_unary_operators_ending_in_operand(self):\n t = 
ExpressionTreeNode.build_tree(['A', '~'])\n self.assertTrue(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(['A', '~', '~'])\n self.assertTrue(t.is_really_unary)\n\n t = ExpressionTreeNode.build_tree(['A', '~', '~', '~'])\n self.assertTrue(t.is_really_unary)", "def test_mix_of_primitive_operators(self):\n self.assert_to_cnf_transformation(\n 'A and (B or C and D) and not (C or not D and not E)',\n 'A and (B or C) and (B or D) and not C and (D or E)')\n self.assert_to_cnf_transformation(\n '(A and B and C) or not (A and D) or (A and (B or C) or '\n '(D and (E or F)))',\n '(C or not A or not D or B or E or F) and '\n '(B or not A or not D or C or E or F)')", "def test_nested_parenthesized_expression(self) -> None:\n # Parsing:\n # \"( 4 + ( 1 + 2 * 3 * ( 4 + 5 ) + 6 ) ) * 7 + 8\"\n tokens = [\n postfix.LeftParen(\"(\"),\n Integer(\"4\"),\n Add(\"+\"),\n postfix.LeftParen(\"(\"),\n Integer(\"1\"),\n Add(\"+\"),\n Integer(\"2\"),\n Mult(\"*\"),\n Integer(\"3\"),\n Mult(\"*\"),\n postfix.LeftParen(\"(\"),\n Integer(\"4\"),\n Add(\"+\"),\n Integer(\"5\"),\n postfix.RightParen(\")\"),\n Add(\"+\"),\n Integer(\"6\"),\n postfix.RightParen(\")\"),\n postfix.RightParen(\")\"),\n Mult(\"*\"),\n Integer(\"7\"),\n Add(\"+\"),\n Integer(\"8\"),\n ]\n\n postfix_tokens = postfix.tokens_to_postfix(tokens)\n res = postfix.parse_postfix_tokens(postfix_tokens)\n self.assertEqual((4 + (1 + 2 * 3 * (4 + 5) + 6)) * 7 + 8, res)", "def test_build_onclause_2(self):\n graph = querying.build_graph(self.metadata) \n \n table = querying.build_onclause(graph, \"phage\", \"phage\")\n\n self.assertEqual(table, self.phage)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the stream of messages for symbol. An empty list is returned if the corresponding JSON file doesn't exist yet.
def read_stream(symbol): try: with open("stream{}.json".format(symbol), "r", encoding="utf-8") as f: stream = json.load(f) return(stream) except FileNotFoundError: print("Stream not found for {}.".format(symbol)) return([])
[ "def load_received_messages(username):\n lst = []\n\n # Open each json file in messages directory\n for file in glob('messages/*.json'):\n # Load message, append to list if 'to' matches username\n dict = _load_message(file)\n if dict['to'] == username:\n lst.append(dict)\n else:\n continue\n\n # Sort the list by the time key from most to least recent.\n lst = sorted(lst, key=lambda x: x['time'], reverse=True)\n\n return lst", "def load_sent_messages(username):\n lst = []\n\n # Open each json file in messages directory\n for file in glob('messages/*.json'):\n # Load message, append to list if 'from' matches username\n dict = _load_message(file)\n if dict['from'] == username:\n lst.append(dict)\n else:\n continue\n\n # Sort the list by the time key from most to least recent.\n lst = sorted(lst, key=lambda x: x['time'], reverse=True)\n\n return lst", "def load_all_messages():\n lst = []\n\n # Open each json file in messages directory\n for file in glob('messages/*.json'):\n\n # Load message into dict and append to list\n dict = _load_message(file)\n lst.append(dict)\n\n # Sort the list by the time key from most to least recent.\n lst = sorted(lst, key=lambda x: x['time'], reverse=True)\n\n return lst", "def get_streams(self) -> List[Stream]:\n p = http_get(self.get_play_data()[1])\n streams = []\n curr_res = None\n for line in p.splitlines():\n if line.startswith(b\"#EXT-X-STREAM-INF:\"):\n curr_res = re.search(rb'RESOLUTION=(\\d+x\\d+)', line).group(1)\n elif not line.startswith(b\"#\") and line.strip():\n streams.append(Stream(curr_res.decode(\"utf-8\"), line.decode(\"utf-8\")))\n return sorted(streams, reverse=True)", "def read_messages(self) -> List[DltMessage]:\r\n return [message for message in self.__iter__()]", "def find_add_audio_messages(self):\n return [msg for msg in self.messages_received if not isinstance(msg, dict)]", "async def get_msgs(self) -> t.List[t.Dict[str, t.Any]]: # type:ignore[override]\n msgs = []\n while True:\n try:\n msgs.append(await self.get_msg())\n except Empty:\n break\n return msgs", "def list_streams(self)->List[Metadata]:\n rows = self.session.query(Stream.stream_metadata).filter(Stream.study_name == self.study_name).all()\n results = []\n if rows:\n for row in rows:\n results.append(Metadata().from_json_file(row.stream_metadata))\n return results\n else:\n return results", "def load():\n with open(\"profiler.json\", \"r\") as f:\n streamerlist = json.load(f)\n return streamerlist", "def entries(self):\n return self._streams", "def get_messages(self):\n if self.id:\n json = self._connection._make_request(\n 'stops/%s/messages/' % self.id\n )\n obj_list = [BusMessage(j) for j in json.get(\"items\")]\n else:\n obj_list = []\n return obj_list", "def get_public_all_symbols(self):\n client = self.__create_http_client()\n client.request('GET', '/api/v1/public/symbol', None, self.__get_http_public_headers())\n response = json.loads(self.__decode_response(client.getresponse()))\n return response", "def fetch_messages(data):\n # type: (Dict[str, Dict[str, str]]) -> List[str]\n ret = [] # type: List[str]\n if not data:\n return ret\n entries = data.values()\n for entry in entries:\n if 'messages' not in entry: # No debug info\n continue\n ret += [msg for msg in entry['messages'] if msg.find('DebugInfo') == -1]\n return ret", "def get_all():\n global buffer\n messages = buffer\n buffer = []\n logger.info('Returning contents and cleared buffer, current count: %d'%count())\n return messages", "def message_cache(self) -> List[StarboardMessage]:\n return 
list(self._cache.values())", "def read_messages (file_of_messages):\n line = file_of_messages.readline()\n collection_of_messages = []\n while (line != \"\"):\n collection_of_messages.append(line.strip())\n line = file_of_messages.readline()\n return (collection_of_messages)", "def process_jats_stream(\n fname: str,\n stream: bytes,\n temp_dir: str=BASE_TEMP_DIR\n):\n temp_input_dir = os.path.join(temp_dir, 'input')\n temp_input_file = os.path.join(temp_input_dir, fname)\n\n os.makedirs(temp_dir, exist_ok=True)\n os.makedirs(temp_input_dir, exist_ok=True)\n\n with open(temp_input_file, 'wb') as outf:\n outf.write(stream)\n\n output_file = process_jats_file(temp_input_file)\n\n if os.path.exists(output_file):\n with open(output_file, 'r') as f:\n contents = json.load(f)\n return contents\n else:\n return []", "def load_tickers() -> List[str]:\n with open(TICKERS_JSON, 'r') as f:\n return json.load(f)", "def list_messages(self):\r\n \r\n UI = self._input_ui.get()\r\n if self._lastselectedfriend == None:\r\n return \r\n friendname = self._lastselectedfriend\r\n participants = [UI, friendname]\r\n \r\n msg=['download chat history', participants]\r\n encoded = json.dumps(msg) \r\n self._client._s.send(encoded)\r\n\r\n encoded_chat = self._client._s.recv(4096)\r\n unencoded = json.loads(encoded_chat)\r\n if self._current_chat_history != unencoded:\r\n self._current_chat_history = unencoded\r\n self.show_chat()\r\n self._chatdisplay.see(tk.END)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format the SNS topic into an AWS ARN
def formatted_sns_topic_arn(cls, config): prefix = config['global']['account']['prefix'] topic = config['lambda']['rule_promotion_config'].get( 'digest_sns_topic', cls.DEFAULT_STATS_SNS_TOPIC_SUFFIX.format(prefix) ) return 'arn:aws:sns:{region}:{account_id}:{topic}'.format( region=config['global']['account']['region'], account_id=config['global']['account']['aws_account_id'], topic=topic )
[ "def get_sns_arn():\n try:\n response = sns.list_topics()\n while True:\n for res in response['Topics']:\n if \"QSTopicSNSEmail\" in res[\"TopicArn\"]:\n LOGGER.info('-- SNS Topic ARN: ' + res[\"TopicArn\"])\n return res[\"TopicArn\"]\n try:\n response = sns.list_topics(NextToken=response[\"NextToken\"])\n except KeyError:\n break\n except Exception as e:\n LOGGER.error(e)\n # send_notification(str(e))\n return None", "def _transcribe_topic(self, topic):\n topic = \"-\".join([str(id(self)), topic])\n return topic", "def sns_topic_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"sns_topic_name\")", "def hello_amazon():\n CONFIG = create_app().config\n db = PostgresWrapper(\n \"host='{PG_HOST}' port={PG_PORT} dbname={PG_DATABASE} \"\n \"user={PG_USERNAME} password={PG_PASSWORD} \".format(**CONFIG))\n r = Redis(db=1)\n amz = boto.sns.connect_to_region(\"us-west-2\",\n aws_access_key_id=CONFIG[\"AWS_ACCESS_KEY\"],\n aws_secret_access_key=CONFIG[\"AWS_SECRET_KEY\"])\n values = []\n\n # register the user's device ID with Amazon, and add to the associated notification topics\n for d in [\"ios\", \"ios-sbx\", \"android\"]:\n for x in r.hkeys('prkng:hello-amazon:'+d):\n try:\n # create SNS platform endpoint with saved user device ID, and save endpoint to DB\n device_id = r.hget('prkng:hello-amazon:'+d, x)\n arn = amz.create_platform_endpoint(CONFIG[\"AWS_SNS_APPS\"][d], device_id, x.encode('utf-8'))\n arn = arn['CreatePlatformEndpointResponse']['CreatePlatformEndpointResult']['EndpointArn']\n values.append(\"({},'{}')\".format(x, arn))\n r.hdel('prkng:hello-amazon:'+d, x)\n if not CONFIG[\"DEBUG\"]:\n # add the user to associated mass-push topics\n amz.subscribe(CONFIG[\"AWS_SNS_TOPICS\"][\"all_users\"], \"application\", arn)\n amz.subscribe(CONFIG[\"AWS_SNS_TOPICS\"][d+\"_users\"], \"application\", arn)\n except Exception, e:\n # if the token already exists, grab and save the existing one instead\n if \"already exists with the same Token\" in e.message:\n arn = re.search(\"Endpoint (arn:aws:sns\\S*)\\s.?\", e.message)\n if not arn:\n continue\n values.append(\"({},'{}')\".format(x, arn.group(1)))\n r.hdel('prkng:hello-amazon:'+d, x)\n\n # Update the local user records with their new Amazon SNS ARNs\n if values:\n db.query(\"\"\"\n UPDATE users u SET sns_id = d.arn\n FROM (VALUES {}) AS d(uid, arn)\n WHERE u.id = d.uid\n \"\"\".format(\",\".join(values)))", "def create_sns_topic(self, args):\n new_region_args = create_args_for_multi_region(args, ALL_REGIONS)\n return self.create_pool(self._create_sns_topic_from_meta,\n new_region_args)", "def publish_sns(sns_message):\n\n print(\"Publishing message to SNS topic...\")\n sns_client.publish(TargetArn=environ['SNSArn'], Message=sns_message)\n return", "def __init__(self, access_key_id, secret_access_key, region_name,\n targets=None, **kwargs):\n super().__init__(**kwargs)\n\n # Store our AWS API Access Key\n self.aws_access_key_id = validate_regex(access_key_id)\n if not self.aws_access_key_id:\n msg = 'An invalid AWS Access Key ID was specified.'\n self.logger.warning(msg)\n raise TypeError(msg)\n\n # Store our AWS API Secret Access key\n self.aws_secret_access_key = validate_regex(secret_access_key)\n if not self.aws_secret_access_key:\n msg = 'An invalid AWS Secret Access Key ' \\\n '({}) was specified.'.format(secret_access_key)\n self.logger.warning(msg)\n raise TypeError(msg)\n\n # Acquire our AWS Region Name:\n # eg. 
us-east-1, cn-north-1, us-west-2, ...\n self.aws_region_name = validate_regex(\n region_name, *self.template_tokens['region']['regex'])\n if not self.aws_region_name:\n msg = 'An invalid AWS Region ({}) was specified.'.format(\n region_name)\n self.logger.warning(msg)\n raise TypeError(msg)\n\n # Initialize topic list\n self.topics = list()\n\n # Initialize numbers list\n self.phone = list()\n\n # Set our notify_url based on our region\n self.notify_url = 'https://sns.{}.amazonaws.com/'\\\n .format(self.aws_region_name)\n\n # AWS Service Details\n self.aws_service_name = 'sns'\n self.aws_canonical_uri = '/'\n\n # AWS Authentication Details\n self.aws_auth_version = 'AWS4'\n self.aws_auth_algorithm = 'AWS4-HMAC-SHA256'\n self.aws_auth_request = 'aws4_request'\n\n # Validate targets and drop bad ones:\n for target in parse_list(targets):\n result = is_phone_no(target)\n if result:\n # store valid phone number in E.164 format\n self.phone.append('+{}'.format(result['full']))\n continue\n\n result = IS_TOPIC.match(target)\n if result:\n # store valid topic\n self.topics.append(result.group('name'))\n continue\n\n self.logger.warning(\n 'Dropped invalid phone/topic '\n '(%s) specified.' % target,\n )\n\n return", "def __topicString(self, aTopic):\n return '[%-26s %s]' % (aTopic[0].__name__, self.winString(aTopic[1]))", "def create_canonical_msg(content):\n message_type = content[\"Type\"]\n\n # Depending on the message type, canonical message format varies.\n # ref: https://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.verify.signature.html\n if message_type == \"SubscriptionConfirmation\" or message_type == \"UnsubscribeConfirmation\":\n msg_fields = SNS_SUB_UNSUB_NOTIFICATION_FIELDS\n elif message_type == \"Notification\":\n msg_fields = SNS_MSG_NOTIFICATION_FIELDS\n else:\n raise ValueError(\"Message Type (%s) is not recognized\" % message_type)\n\n msg = \"\"\n\n for field in msg_fields:\n try:\n msg += field + \"\\n\" + content[field] + \"\\n\"\n except KeyError:\n # Build with what you have\n pass\n\n return str(msg)", "def describe_topic(name, region=None, key=None, keyid=None, profile=None):\n topics = list_topics(region=region, key=key, keyid=keyid, profile=profile)\n ret = {}\n for topic, arn in topics.items():\n if name in (topic, arn):\n ret = {\"TopicArn\": arn}\n ret[\"Subscriptions\"] = list_subscriptions_by_topic(\n arn, region=region, key=key, keyid=keyid, profile=profile\n )\n ret[\"Attributes\"] = get_topic_attributes(\n arn, region=region, key=key, keyid=keyid, profile=profile\n )\n # Grab extended attributes for the above subscriptions\n for sub in ret[\"Subscriptions\"]:\n sub_arn = sub[\"SubscriptionArn\"]\n if not sub_arn.startswith(\"arn:aws:sns:\"):\n # Sometimes a sub is in e.g. 
PendingAccept or other\n # wierd states and doesn't have an ARN yet\n log.debug(\"Subscription with invalid ARN %s skipped...\", sub_arn)\n continue\n return ret", "def mogrify(topic, msg):\n return str(topic) + ' ' + json.dumps(msg)", "def create_sns_subscription_for_lambda(self, lambda_arn, topic_name,\n region):\n if region:\n if isinstance(region, str):\n if region == 'all':\n for each in ALL_REGIONS:\n self._subscribe_lambda_to_sns_topic(lambda_arn,\n topic_name,\n each)\n else:\n if check_region_available(region, ALL_REGIONS):\n self._subscribe_lambda_to_sns_topic(lambda_arn,\n topic_name,\n region)\n elif isinstance(region, list):\n for each in region:\n if check_region_available(each, ALL_REGIONS):\n self._subscribe_lambda_to_sns_topic(lambda_arn,\n topic_name,\n each)\n else:\n raise AssertionError('Invalid value for SNS region: %s.',\n region)\n else:\n self._subscribe_lambda_to_sns_topic(lambda_arn, topic_name,\n self.region)", "def full_topic( topic : str ):\n return defs.SYS_CONF_PREFIX+\"/\"+__MY_ID+\"/\"+topic", "def _get_topic(namespace):\n return '{}.{}'.format(DocManager._topic_prefix, namespace)", "def _normalize_name_in_topic_msg(topic, payload):\n info = topic.split(\"/\")\n payload_dict = {}\n\n # Shellies format\n try:\n if settings.KEEP_FULL_TOPIC: # options instead of hardcoded length\n topic = \"/\".join(info[:-1]).lower()\n else:\n topic = f\"{info[0]}/{info[1]}\".lower()\n\n payload_dict = {info[-1]: payload} # usually the last element is the type of sensor\n except IndexError:\n pass\n\n return topic, payload_dict", "def kms_key_arn(kms_client, alias):\n try:\n response = kms_client.describe_key(KeyId=alias)\n key_arn = response[\"KeyMetadata\"][\"Arn\"]\n except ClientError as error:\n raise RuntimeError(\"Failed to obtain key arn for alias {}, error: {}\".format(alias, error.response[\"Error\"][\"Message\"]))\n\n return key_arn", "def delete_topic(TopicArn, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n try:\n conn.delete_topic(TopicArn=TopicArn)\n log.info(\"SNS topic %s deleted\", TopicArn)\n return True\n except botocore.exceptions.ClientError as e:\n log.error(\"Failed to delete SNS topic %s: %s\", name, e)\n return False", "def _write_topic(self, topic):\n index_of = self._index\n startElement, endElement, newline = self._writer.startElement, self._writer.endElement, self._writer.newline\n startElement(u'topic', {u'number': index_of(topic)})\n newline()\n self._write_locators(u'subjectIdentifiers', topic.sids)\n self._write_locators(u'subjectLocators', topic.slos)\n self._write_iids(topic)\n write_name = self._write_name\n for pos, name in enum(self._names(topic)):\n write_name(name, pos)\n write_occurrence = self._write_occurrence\n for pos, occ in enum(self._occs(topic)):\n write_occurrence(occ, pos)\n emptyElement = self._writer.emptyElement\n for role in sorted(topic.roles_played, self._cmp_role):\n emptyElement(u'rolePlayed', {u'ref': u'association.%s.role.%s' % (index_of(role.parent), index_of(role))})\n newline()\n endElement(u'topic')\n newline()", "def get_topic_event_subscription_full_url_output(event_subscription_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n topic_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTopicEventSubscriptionFullUrlResult]:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute a query for all alerts for a rule so the user can be sent the results
def _query_alerts(self, stat): info_statement = stat.sql_info_statement LOGGER.debug('Querying alert info for rule \'%s\': %s', stat.rule_name, info_statement) response = self._athena_client.run_async_query(info_statement) return response['QueryExecutionId']
[ "def list_alerts(request):\n return request.db.query(Alert).all()", "def search_alerts():\n payload = {} # type: dict\n handle_time_filter(payload, {'type': 'relative', 'value': {'amount': 7, 'unit': 'day'}})\n handle_filters(payload)\n response = req('POST', 'alert', payload, {'detailed': 'true'})\n alerts = []\n context_path = 'Redlock.Alert(val.ID === obj.ID)'\n context = {context_path: []} # type: dict\n for alert in response:\n alerts.append(alert_to_readable(alert))\n context[context_path].append(alert_to_context(alert))\n context['Redlock.Metadata.CountOfAlerts'] = len(response)\n demisto.results({\n 'Type': entryTypes['note'],\n 'ContentsFormat': formats['json'],\n 'Contents': response,\n 'EntryContext': context,\n 'HumanReadable': tableToMarkdown('Alerts', alerts, [\n 'ID', 'Status', 'FirstSeen', 'LastSeen', 'AlertTime', 'PolicyName', 'PolicyType', 'PolicyDescription',\n 'PolicySeverity', 'PolicyRecommendation', 'PolicyDeleted', 'PolicyRemediable', 'RiskRating', 'ResourceName',\n 'ResourceAccount', 'ResourceType', 'ResourceCloudType'\n ])\n })", "def schedule_alerts():\n accounts = fetch_accounts()\n client = InfluxDBClient(**INFLUXDB_CONF)\n for account in accounts:\n\n sql = f\"\"\"\n SELECT\n mean(\"return_code\") AS \"mean_return_code\"\n FROM \"ping\".\"autogen\".\"{account}\"\n WHERE \n time > now() - 5m AND time < now()\n AND \"hostname\"=\"{hostname}\"\n GROUP BY time(10m), \"hostname\" FILL(null)\n \"\"\"\n resp = client.query(sql)\n for res in resp:\n for i in res:\n print(i)", "def alert_generator():\n mails = []\n sms_numbers = []\n rules = Alert_Rule.objects.all()\n\n for rule in rules:\n site = rule.site\n site_groups = get_groups_with_perms(site)\n\n # Get datapoint and real value\n data_point, real_value = get_alert_check_value(rule)\n\n\n if data_point is not None and real_value is not None:\n if check_alert(rule, real_value):\n alert_obj = alert_factory(site, rule, data_point)\n\n # if alert_obj is created\n if alert_obj is not None:\n content = get_alert_content(site, rule, data_point, real_value, alert_obj)\n mails, sms_numbers = get_recipients_for_site(site)\n\n # reporting\n logging.debug(\"Alert triggered sending alerts out %s\"%mails)\n alert_obj.emailSent = send_mail(\"Alert Mail\", mails, content)\n alert_obj.smsSent = send_sms(sms_numbers, content)\n slack_msg = get_slack_alert_msg(\"Alert Triggered\", alert_obj)\n alert_obj.slackSent = send_alert_slack(site_groups, slack_msg)\n\n alert_obj.save()", "def get_scheduled_query_rules_alert(name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetScheduledQueryRulesAlertResult:\n __args__ = dict()\n __args__['name'] = name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure:monitoring/getScheduledQueryRulesAlert:getScheduledQueryRulesAlert', __args__, opts=opts, typ=GetScheduledQueryRulesAlertResult).value\n\n return AwaitableGetScheduledQueryRulesAlertResult(\n actions=pulumi.get(__ret__, 'actions'),\n authorized_resource_ids=pulumi.get(__ret__, 'authorized_resource_ids'),\n data_source_id=pulumi.get(__ret__, 'data_source_id'),\n description=pulumi.get(__ret__, 'description'),\n enabled=pulumi.get(__ret__, 'enabled'),\n frequency=pulumi.get(__ret__, 'frequency'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n name=pulumi.get(__ret__, 'name'),\n query=pulumi.get(__ret__, 
'query'),\n query_type=pulumi.get(__ret__, 'query_type'),\n resource_group_name=pulumi.get(__ret__, 'resource_group_name'),\n severity=pulumi.get(__ret__, 'severity'),\n tags=pulumi.get(__ret__, 'tags'),\n throttling=pulumi.get(__ret__, 'throttling'),\n time_window=pulumi.get(__ret__, 'time_window'),\n triggers=pulumi.get(__ret__, 'triggers'))", "def main():\n slids = get_alert_ids()\n for slid in slids:\n alert = get_alert(slid)\n print alert\n if not requests.get(alert[\"url\"]).ok:\n trigger_alert(alert)", "def get(self, request, organization):\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n project_ids = self.get_requested_project_ids(request) or None\n alert_rules = AlertRule.objects.fetch_for_organization(organization, project_ids)\n if not features.has(\"organizations:performance-view\", organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n return self.paginate(\n request,\n queryset=alert_rules,\n order_by=\"-date_added\",\n paginator_cls=OffsetPaginator,\n on_results=lambda x: serialize(x, request.user),\n default_per_page=25,\n )", "def get_scheduled_query_rules_alert_output(name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetScheduledQueryRulesAlertResult]:\n ...", "def alerts(self):\n endpoint = '/'.join((self.endpoint, self.id, 'alerts'))\n return self.alertFactory.find(\n endpoint=endpoint,\n api_key=self.api_key,\n )", "async def hits_with_rules(host_info: dict):\n hits = []\n\n for data in host_info['rhv-log-collector-analyzer']:\n details = {}\n if \"WARNING\" in data['type'] or \"ERROR\" in data['type']:\n details.update({\n 'description': data['description'],\n 'kb': data['kb'],\n 'result': data['result']\n })\n logger.info(\"========== added the following entry ====\")\n logger.info(\"Description: {0}\".format(data['description']))\n logger.info(\"Knowledge Base: {0}\".format(data['kb']))\n logger.info(\"========== added the following entry ====\")\n\n # split to remove the module name\n ruleid = data['name'].split(\".\")[1]\n\n hits.append(\n {'rule_id': ruleid + \"|\" + ruleid.upper(), 'details': details}\n )\n\n return hits", "def do_action_on_all(self, action):\n if not action:\n return\n context = aq_inner(self.context)\n wft = getToolByName(context, 'portal_workflow')\n wft.doActionFor(context, action)\n context.reindexObject()\n #perform on all subevents\n for sub in context.getSubEvents():\n try:\n wft.doActionFor(sub, action)\n sub.reindexObject()\n except:\n pass\n self.request.response.redirect(context.absolute_url() + \"/view\")\n return ''", "def _query_alarms(self, filter, orderby, limit):\n return self.clients(\"ceilometer\").query_alarms.query(\n filter, orderby, limit)", "def get_triggered_alerts(cls):\n path = '/triggeredalerts/{}'.format(cls.token)\n\n return cls._request('GET', path=path)", "def test_alerts_rules_get_command_success_no_alerts(requests_mock):\n client = init_mock_client(requests_mock, on_cloud=False)\n expected_response = []\n expected_readable_output = \"No Alerts were found.\"\n requests_mock.get(\n f\"{BASE_URL}/api/v1/alerts\", json=expected_response, status_code=200\n )\n result = ExtraHop_v2.alerts_rules_get_command(client)\n\n assert result.outputs_prefix == \"ExtraHop.Alert\"\n assert result.readable_output == expected_readable_output", "def 
save_executed_alerts(self, alerts):\n for alert in alerts:\n timestamp = alert['timestamp'] / 1000\n alert_id = alert['definitionId']\n self.alerts_run_time[alert_id] = timestamp", "def get(self, request: Request, project, alert_rule) -> Response:\n data = serialize(alert_rule, request.user, AlertRuleSerializer())\n return Response(data)", "def run(self, instances_list):\n rule_results = []\n\n def get_instance(list_of_instances, content_type):\n for _instance in list_of_instances:\n if isinstance(_instance, content_type.model_class()):\n return _instance\n\n instance_identifier = None\n rules = Rule.objects.filter(ruleset=self, active=True).order_by(\n 'sequence')\n instance = None\n\n for rule in rules:\n instance = get_instance(instances_list, rule.process_model)\n result = rule.run_evaluate(instance)\n if not instance_identifier:\n instance_identifier = rule.instance_identifier\n if result:\n rule_results.append({\n \"identity\": getattr(instance, instance_identifier),\n \"result\": result\n })\n if rule.exit_on_match:\n return rule_results\n else:\n if rule.exit_on_fail:\n return rule_results\n\n if not rule_results:\n rule_results = [{'identity': getattr(\n instance, instance_identifier), 'result': None}]\n return rule_results", "def get(self):\n datastore_hooks.SetPrivilegedRequest()\n sheriffs_to_email_query = sheriff.Sheriff.query(\n sheriff.Sheriff.stoppage_alert_delay > 0)\n for sheriff_entity in sheriffs_to_email_query:\n _SendStoppageAlertEmail(sheriff_entity)", "def get_alerts():\r\n if flask.request.method == 'POST':\r\n result = flask.request.form\r\n alert_data = {}\r\n flask.session['VIEW_DATA'].clear()\r\n for key in result:\r\n alert_data[key] = result[key]\r\n flask.session['alertData'] = alert_data\r\n \r\n filteredAlerts = get_alerts_from_graph()\r\n if b'' in filteredAlerts:\r\n print(\"Please Sign-in using a on.microsoft.com account for demo data\")\r\n filteredAlerts = \"Incorrect Tenant Account\"\r\n elif 'error' in filteredAlerts:\r\n if filteredAlerts['error']['code'] == 'InvalidAuthenticationToken':\r\n\r\n return flask.redirect(flask.url_for('login'))\r\n\r\n flask.session['VIEW_DATA']['GetAlertResults'] = filteredAlerts\r\n\r\n MSGRAPH.base_url = config.RESOURCE + config.API_VERSION + '/'\r\n return flask.redirect(flask.url_for('homepage'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publish the alert statistics message to SNS
def _publish_message(self, stats): LOGGER.info('Sending daily message digest at %s', self._current_time) sns_client = boto3.resource('sns').Topic(self._topic_arn) subject = 'Alert statistics for {} staged rule(s) [{} UTC]'.format( len(stats), self._current_time ) sns_client.publish( Message=self._format_digest(stats), Subject=subject )
[ "def publish_sns(sns_message):\n\n print(\"Publishing message to SNS topic...\")\n sns_client.publish(TargetArn=environ['SNSArn'], Message=sns_message)\n return", "def send_statistics(self, payload):\n self.logger.debug('send Statistics {}'.format(payload))\n self.socket.send_pyobj((InternalEventHeaders.STATISTICS, self.address, payload))", "def publish_stats(stats):\n for key, value in stats.iteritems():\n cmd = ['/usr/bin/gmetric',\n '--name', 'scribetail_%s_%s' % (options.category, key),\n '--value', str(value),\n '--type int32',\n '--units count']\n run_command(' '.join(cmd))", "def notify(self):\n\n if self.send_to_sns:\n publish_to_sns('SO0111-SHARR_Topic', self.severity + ':' + self.message, AWS_REGION)\n\n self.applogger.add_message(\n self.severity + ': ' + self.message\n )\n if self.logdata:\n for line in self.logdata:\n self.applogger.add_message(\n line\n )\n self.applogger.flush()", "def send_notifications():\n CONFIG = create_app().config\n r = Redis(db=1)\n amz = boto.sns.connect_to_region(\"us-west-2\",\n aws_access_key_id=CONFIG[\"AWS_ACCESS_KEY\"],\n aws_secret_access_key=CONFIG[\"AWS_SECRET_KEY\"])\n\n keys = r.hkeys('prkng:push')\n if not keys:\n return\n\n # for each message to push...\n for pid in keys:\n message = r.hget('prkng:push', pid)\n r.hdel('prkng:push', pid)\n device_ids = r.lrange('prkng:push:'+pid, 0, -1)\n r.delete('prkng:push:'+pid)\n\n # if the message looks like a JSON, structure it accordingly\n message_structure = None\n if message.startswith(\"{\") and message.endswith(\"}\"):\n message_structure = \"json\"\n mg_title = \"message-group-{}\".format(datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n mg_arn = None\n\n if device_ids == [\"all\"]:\n # Automatically publish messages destined for \"all\" via our All Users notification topic\n amz.publish(message=message, message_structure=message_structure,\n target_arn=CONFIG[\"AWS_SNS_TOPICS\"][\"all_users\"])\n elif device_ids == [\"ios\"]:\n # Automatically publish messages destined for all iOS users\n amz.publish(message=message, message_structure=message_structure,\n target_arn=CONFIG[\"AWS_SNS_TOPICS\"][\"ios_users\"])\n elif device_ids == [\"android\"]:\n # Automatically publish messages destined for all Android users\n amz.publish(message=message, message_structure=message_structure,\n target_arn=CONFIG[\"AWS_SNS_TOPICS\"][\"android_users\"])\n elif device_ids == [\"en\"]:\n # Automatically publish messages destined for all English-language users\n amz.publish(message=message, message_structure=message_structure,\n target_arn=CONFIG[\"AWS_SNS_TOPICS\"][\"en_users\"])\n elif device_ids == [\"fr\"]:\n # Automatically publish messages destined for all French-language users\n amz.publish(message=message, message_structure=message_structure,\n target_arn=CONFIG[\"AWS_SNS_TOPICS\"][\"fr_users\"])\n\n if len(device_ids) >= 10:\n # If more than 10 real device IDs at once:\n for id in device_ids:\n if id.startswith(\"arn:aws:sns\") and \"endpoint\" in id:\n # this is a user device ID\n # Create a temporary topic for a manually specified list of users\n if not mg_arn:\n mg_arn = amz.create_topic(mg_title)\n mg_arn = mg_arn[\"CreateTopicResponse\"][\"CreateTopicResult\"][\"TopicArn\"]\n try:\n amz.subscribe(mg_arn, \"application\", id)\n except:\n continue\n elif id.startswith(\"arn:aws:sns\"):\n # this must be a topic ARN, send to it immediately\n amz.publish(message=message, message_structure=message_structure, target_arn=id)\n if mg_arn:\n # send to all user device IDs that we queued up in the prior 
loop\n amz.publish(message=message, message_structure=message_structure, target_arn=mg_arn)\n else:\n # Less than 10 device IDs or topic ARNs. Send to them immediately\n for id in [x for x in device_ids if x.startswith(\"arn:aws:sns\")]:\n try:\n amz.publish(message=message, message_structure=message_structure, target_arn=id)\n except BotoServerError:\n continue", "def _publish_stats(self, counter_prefix, stats, instance):\n for stat_name, stat_value in ceph.flatten_dictionary(\n stats,\n prefix=counter_prefix,\n ):\n self.publish_gauge(stat_name, stat_value, instance=instance)", "def __publish(topic, message, subject=None):\r\n try:\r\n SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)\r\n logger.info('Sent SNS notification to {0}'.format(topic))\r\n except BotoServerError as error:\r\n logger.error('Problem sending SNS notification: {0}'.format(\r\n error.message))\r\n\r\n return", "def publish(self, metrics):\n super().publish(metrics)\n\n url = config['cern_grafana_url']\n\n resp = self.send(url, [self.data])\n logger.debug('Response: %s' % resp)", "def bulk_publish(self, list_message, list_action):\n message = ''\n msg_list = []\n if self.sns_client:\n for i, one_message in enumerate(list_message):\n full_message = self._add_more_data(one_message, list_action[i])\n temp_message = json.dumps(full_message)\n # check max size of the message to publish under the limit\n if (len(message) + len(temp_message)) > 256000:\n self.sns_client.publish(message, self.PC_SNS_TOPIC)\n msg_list = [full_message]\n else:\n msg_list.append(one_message)\n message = json.dumps(msg_list)\n\n if message:\n self.sns_client.publish(message, self.PC_SNS_TOPIC)", "def publish_stats(stat_name, stat_unit, stat_value):\n CloudHost.SELF_STATS[stat_name] = stat_value\n if not CloudHost.ENABLED['cloudwatch']:\n return\n\n dims = {'InstanceID': CloudHost.instance_id()}\n CloudHost.log_info(\"CloudWatch \" + CloudHost.INSTALL_ID + \".\" + CloudHost.instance_id() + \".\" + stat_name\n + \"=\" + str(stat_value) + \"(\" + stat_unit + \")\")\n CloudHost.connect_cloudwatch().put_metric_data(namespace=CloudHost.INSTALL_ID, name=stat_name,\n unit=stat_unit, value=stat_value, dimensions=dims)", "def get_sns_alert_function(self):\n if self.sns_topic_arn is None:\n return None\n return lambda message, subject: \\\n SNSConnection().publish(self.sns_topic_arn, message, subject)", "def alert():\n data = request.get_json(force=True)\n\n try:\n validatesns.validate(data)\n except validatesns.ValidationError as err:\n logging.error(err)\n abort(403)\n\n client = nexmo.Client(key=app.config['NEXMO_KEY'], secret=app.config['NEXMO_SECRET'])\n if data['Type'] == 'Notification':\n client.send_message({\n 'from': app.config['NEXMO_FROM'],\n 'to': app.config['NEXMO_TO'],\n 'text': '\\n'.join([data['Subject'], data['Message']]),\n })\n\n if data['Type'] == 'SubscriptionConfirmation':\n urllib.request.urlopen(data['SubscribeURL']).read()\n client.send_message({\n 'from': app.config['NEXMO_FROM'],\n 'to': app.config['NEXMO_TO'],\n 'text': 'Subscribed to ' + data['TopicArn'],\n })\n\n return success_response()", "def publish_update_metric_response(self, metric_info, metric_status):\n topic = 'metric_response'\n msg_key = 'update_metric_response'\n response_msg = {\"schema_version\":1.0,\n \"schema_type\":\"metric_update_response\",\n \"correlation_id\":metric_info['correlation_id'],\n \"metric_update_response\":\n {\n \"metric_uuid\":0,\n \"resource_uuid\":metric_info['metric_create']['resource_uuid'],\n 
\"status\":metric_status\n }\n }\n self.logger.info(\"Publishing response:\\nTopic={}\\nKey={}\\nValue={}\"\\\n .format(topic, msg_key, response_msg))\n #Core producer\n self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)", "def publishJSON(self):\n\n\t\t# setup AWS and connect\n\t\tAWSClient = AWSIoTMQTTClient(\"basicPubSub\")\n\t\tAWSClient.configureEndpoint(myhost, 8883)\n\t\tAWSClient.configureCredentials(rootCA, privatePath, certPath)\n\n\t\tAWSClient.configureAutoReconnectBackoffTime(1,32,20)\n\t\tAWSClient.configureOfflinePublishQueueing(-1)\n\t\tAWSClient.configureDrainingFrequency(2)\n\t\tAWSClient.configureConnectDisconnectTimeout(10)\n\t\tAWSClient.configureMQTTOperationTimeout(10)\n\t\tAWSClient.connect()\n\n\t\t# create JSON object\n\t\tjsonData = {}\t\n\t\tjsonData['type'] = 'overhead'\n\t\tjsonData['mqtt_lat'] = self.mqtt_latency\n\t\tjsonData['mqtt_ovh'] = self.mqtt_size\n\t\tjsonData['ws_lat'] = self.ws_latency\n\t\tjsonData['ws_ovh'] = self.ws_size\n\t\tjsonData['coap_lat'] = self.coap_latency\n\t\tjsonData['coap_ovh'] = self.coap_size\n\t\tstrData= json.dumps(jsonData)\n\t\t\n\t\t# publish JSON\n\t\tAWSClient.publish(\"AccessControl/performance\", str(strData), 1)\n\t\tprint(\"Published data!\")\n\n\t\t# reset stats for new values\n\t\tself.clearVals()", "def publish(self, topic: str, message: dict):\n pass", "def publish(self, msg):\n if not type(msg) is list:\n msg = [msg]\n\n now = self.node.get_clock().now()\n da = DiagnosticArray()\n da.header.stamp = now.to_msg() # Add timestamp for ROS 0.10\n for stat in msg:\n stat.name = self.node.get_name() + ': ' + stat.name\n db = DiagnosticStatus()\n db.name = stat.name\n db.message = stat.message\n db.hardware_id = stat.hardware_id\n db.values = stat.values\n db.level = stat.level\n da.status.append(db)\n self.publisher.publish(da)", "def _publish_messages(\n self,\n number_of_messages: int,\n message_size: int) -> List[float]:\n publish_latencies = []\n failure_counter = 0\n\n # publishing 'number_of_messages' messages\n for _ in range(number_of_messages):\n message_payload = self._generate_random_message(message_size)\n start_time = GET_TIME_IN_MILLISECONDS()\n # Publishing a message and waiting for completion\n try:\n self._publish_message(message_payload)\n end_time = GET_TIME_IN_MILLISECONDS()\n publish_latencies.append(end_time - start_time)\n except:\n failure_counter += 1\n\n # getting metrics for publish, pull, and acknowledge latencies\n publish_metrics = self._get_summary_statistics('publish_latency',\n publish_latencies,\n number_of_messages,\n failure_counter)\n print(json.dumps(publish_metrics))\n return publish_metrics", "def send_to_pubsub_topic(self, stocks):\n pass", "def publish_create_metric_response(self, metric_info, metric_status):\n topic = 'metric_response'\n msg_key = 'create_metric_response'\n response_msg = {\"schema_version\":1.0,\n \"schema_type\":\"create_metric_response\",\n \"correlation_id\":metric_info['correlation_id'],\n \"metric_create_response\":\n {\n \"metric_uuid\":0,\n \"resource_uuid\":metric_info['metric_create']['resource_uuid'],\n \"status\":metric_status\n }\n }\n self.logger.info(\"Publishing response:\\nTopic={}\\nKey={}\\nValue={}\"\\\n .format(topic, msg_key, response_msg))\n #Core producer\n self.producer_metrics.publish(key=msg_key, value=json.dumps(response_msg), topic=topic)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the individual lines from the output. A response cannot necessarily know ahead of time if its output contents are individual string lines or binary data that might contain line endings. A user of a response is responsible for knowing the context of the response, and thus can decide to interpret the output as individual lines using this property, which will return the output contents as an array of lines with their line endings stripped.
def lines(self): return self.output.split(self._newLine)
[ "def _get_lines(self, output):\n\n return output.decode(self.encoding).split('\\n')", "def _get_log_lines(self):\n return [\n log_line\n for log_line in self.captured_output.getvalue().split(\"\\n\")\n if log_line\n ]", "def getLinesContent(self):\n return self.__content.getLines()", "def output(self):\n lines = []\n with open(self._temp_file_name) as file:\n line = file.readline()\n while line:\n lines.append(line)\n line = file.readline()\n return lines", "def stdout_to_list(self):\n return self.stdout.split('\\n')", "def getLines(self) -> [Line]:\n return list(self.pool.values())", "def GetOutputLines(cmd, show_stderr=True, strip_output=False):\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=None if show_stderr else subprocess.PIPE)\n stdout, _ = p.communicate()\n text = six.ensure_text(stdout)\n if strip_output:\n text = text.strip()\n lines = text.splitlines()\n return lines", "def _recv_line(self):\n msg_line = ''\n # Retrieve an complete line end with CRLF.\n while 1:\n line = self.buffer.readline()\n msg_line += line\n if line[-2:] == CRLF: break\n printd(msg_line)\n # Remove the ending CRLF.\n return msg_line[:-2].split(' ', 1)", "async def readlines(self, sizehint=None, keepends=True):\n data = await self.read()\n return data.splitlines(keepends)", "def GetLines(self):\n return self.bufcount()", "def _non_empty_lines(output):\n return [line for line in output.splitlines() if line.strip()]", "def get_data(self):\n try:\n return self.data_lines\n except AttributeError:\n if self.test_mode:\n data_lines = [x.strip() for x in self.test_data.split('\\n')]\n else:\n data_lines = []\n while True:\n try:\n inp = input()\n if inp == \"!EOF\":\n raise EOFError\n data_lines.append(inp)\n except EOFError:\n break\n return data_lines", "def parse_output(output):\n output_lines = output.splitlines()\n parsed_lines = [parse_output_line(line) for line in output_lines]\n return [line_details for line_details in parsed_lines if line_details is not None]", "def get_lines(stream):\n s = stream.read()\n return re.split(r'\\r\\n|\\n|\\r', s)", "def getArray(self):\n\n ret_array = arrays.empty_str_array(self.dimensions)\n lines_to_use = self.message_list[-self.dimensions[1]:]\n # lines_to_use.reverse()\n for i in range(len(lines_to_use)):\n arrays.print_str_to_end_of_line((0, i), lines_to_use[i], ret_array)\n\n return ret_array", "def readLine(self):\n if self.buf is None:\n self.buf = []\n\n # Buffer may already have a line if we've received unilateral\n # response(s) from the server\n if len(self.buf) == 1 and b\"\\n\" in self.buf[0]:\n (line, b) = self.buf[0].split(b\"\\n\", 1)\n self.buf = [b]\n return line\n\n while True:\n b = self.readBytes(4096)\n if b\"\\n\" in b:\n result = b\"\".join(self.buf)\n (line, b) = b.split(b\"\\n\", 1)\n self.buf = [b]\n return result + line\n self.buf.append(b)", "def parse_output_line(self, line):\n return self.output_protocol().read(line)", "def read_source(self):\n lines = [line.strip(' \\t\\n\\r')\n for line in self.lines]\n return lines", "def xreadlines(self):\n if self._bin_mode:\n return self._fo.xreadlines()\n return _XReadlines(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads and uncompresses a dataset from a URL; expects a tar.gz file
def download_dataset_and_uncompress(dataset_dir: str, url: str, filename: str=None): filename = filename or url.split('/')[-1] if not os.path.isfile(filename): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='download dataset') as pbar: urlretrieve( url, filename, pbar.hook) if not os.path.exists(dataset_dir): os.mkdir(dataset_dir) with tarfile.open(filename, 'r:gz') as tar: tar.extractall(dataset_dir) tar.close() statinfo = os.stat(filename) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
[ "def download_and_uncompress_tarball(tarball_url, dataset_dir):", "def download(url, dataset):\n print(\"Downloading data\")\n r = requests.get(url, allow_redirects=True)\n data_file_path = get_directory() + \"/data/raw/\"\n open(data_file_path + dataset, \"wb\").write(r.content)\n tar = tarfile.open(data_file_path + dataset)\n tar.extractall(path=data_file_path)\n tar.close()\n return True", "def download_and_unzip_data(\n url = \"https://storage.googleapis.com/simpeg/bookpurnong/bookpurnong_inversion.tar.gz\"\n):\n # download the data\n downloads = Utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory", "def download_and_unzip(url, dataset_name, data_folder):\n print(\"Downloading\", dataset_name, \"data set...\")\n data_zip = wget.download(url, out=data_folder)\n print(\"\\tunzipping...\")\n zip_ = zipfile.ZipFile(data_zip, \"r\")\n zip_.extractall(data_folder)\n zip_.close()\n print(\"\\tdone\")", "def untar_data(url: str, fname: PathOrStr = None, dest: PathOrStr = None, data=True):\n dest = Path(ifnone(dest, _url2path(url, data)))\n if not dest.exists():\n fname = download_data(url, fname=fname, is_data=data)\n mode = 'r:gz' if is_gzip(url) else 'r:bz2'\n tarfile.open(fname, mode).extractall(dest.parent)\n else:\n print('Data existed')\n return dest", "def maybe_download_and_extract_dataset(data_url, dest_directory):\n if not data_url:\n return\n print_info(\"Checking destination directory : \" + dest_directory)\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n else:\n print_info(\"SR dataset already exists!\")\n return\n filename = data_url.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n try:\n filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)\n except:\n tf.logging.error('Failed to download URL: %s to folder: %s', data_url,\n filepath)\n tf.logging.error('Please make sure you have enough free space and'\n ' an internet connection')\n raise\n print()\n statinfo = os.stat(filepath)\n tf.logging.info('Successfully downloaded %s (%d bytes)', filename,\n statinfo.st_size)\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def maybe_download_and_extract(url, download_dir):\n \n filename = url.split('/')[-1]\n file_path = os.path.join(download_dir,filename)\n \n if not os.path.exists(file_path):\n if not os.path.exists(download_dir):\n os.mkdir(download_dir) \n \"\"\"\n # for python2\n file_path, _ = urllib.urlretrieve(url=url, \n filename=file_path, \n reporthook=_print_download_progress)\n \"\"\"\n # for python3\n file_path, _ = urllib.request.urlretrieve(url=url, \n filename=file_path, \n reporthook=_print_download_progress)\n print(\"download finished.\") \n else:\n print(\"unpacking...\")\n tarfile.open(name=file_path, mode='r:gz').extractall(download_dir)\n print(\"Data has apparently already been download and unpacked!\")", "def DownloadAndUnpack(url, output_dir, path_prefixes=None, is_known_zip=False):\n with tempfile.TemporaryFile() as f:\n DownloadUrl(url, f)\n f.seek(0)\n EnsureDirExists(output_dir)\n if url.endswith('.zip') or is_known_zip:\n assert path_prefixes is None\n 
zipfile.ZipFile(f).extractall(path=output_dir)\n else:\n t = tarfile.open(mode='r:*', fileobj=f)\n members = None\n if path_prefixes is not None:\n members = [m for m in t.getmembers()\n if any(m.name.startswith(p) for p in path_prefixes)]\n t.extractall(path=output_dir, members=members)", "def _download( self ):\n self._system.download_file(\"https://github.com/mastbaum/avalanche/tarball/\" + self._tar_name)", "def unzip_gz(url, outpath):\n\n with gzip.open(url, 'rb') as f1:\n with open('file.txt', 'wb') as f2:\n shutil.copyfileobj(f1, f2)", "def fetch_archive_from_http(url: str, output_dir: str, proxies: Optional[dict] = None):\n # verify & prepare local directory\n path = Path(output_dir)\n if not path.exists():\n path.mkdir(parents=True)\n\n is_not_empty = len(list(Path(path).rglob(\"*\"))) > 0\n if is_not_empty:\n logger.info(\n f\"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data.\"\n )\n return False\n else:\n logger.info(f\"Fetching from {url} to `{output_dir}`\")\n\n # download & extract\n with tempfile.NamedTemporaryFile() as temp_file:\n http_get(url, temp_file, proxies=proxies)\n temp_file.flush()\n temp_file.seek(0) # making tempfile accessible\n # extract\n if url[-4:] == \".zip\":\n zip_archive = zipfile.ZipFile(temp_file.name)\n zip_archive.extractall(output_dir)\n elif url[-7:] == \".tar.gz\":\n tar_archive = tarfile.open(temp_file.name)\n tar_archive.extractall(output_dir)\n elif url[-3:] == \".gz\":\n filename = url.split(\"/\")[-1].replace(\".gz\", \"\")\n output_filename = Path(output_dir) / filename\n with gzip.open(temp_file.name) as f, open(output_filename, \"wb\") as output:\n for line in f:\n output.write(line)\n else:\n logger.warning('Skipped url {0} as file type is not supported here. 
'\n 'See haystack documentation for support of more file types'.format(url))\n # temp_file gets deleted here\n return True", "def download_and_unzip(url, target_path, token_file):\r\n if not os.path.exists(target_path):\r\n reproduce.utils.url_fetch_and_validate(url, target_path)\r\n if target_path.endswith('zip'):\r\n with zipfile.ZipFile(target_path, 'r') as zip_ref:\r\n zip_ref.extractall(os.path.dirname(target_path))", "def download_dataset(url=DATASET_URL):\n c = urllib3.PoolManager()\n with c.request('GET', url, preload_content=False) as res, open(LOCAL_FILE_NAME, 'wb') as out_file:\n shutil.copyfileobj(res, out_file)\n logging.info(\"Download completed.\")", "def download_from_dbox(url,out_file):\n\n response = urllib.request.urlopen(url)\n compressed_file = io.BytesIO(response.read())\n decompressed_file = gzip.GzipFile(fileobj=compressed_file)\n\n with open(out_file, 'wb') as outfile:\n outfile.write(decompressed_file.read())\n\n print(\"Download \" + out_file[:-4] + \" from dropbox succeed.\")", "def download_coord_data():\n \n link = ('https://workbench.qr1hi.arvadosapi.com/collections/'\n 'b6331bea18718d2e39c193ba449c055c+131/tileid_hg19_split_by_path.tar.gz'\n '?disposition=attachment&size=104970070')\n os.system('wget ' + link)\n os.system('tar -xzf tileid_hg19_split_by_path.tar.gz')", "def download_and_unzip_celeba():\n file_list = (\"images\", \"partitions\", \"attributes\")\n data_to_path = {}\n\n for url, file_item in zip(\n [_ALIGNED_IMGS_URL, _PARTITIONS_URL, _ATTRIBUTES_URL], file_list):\n filename = url.split('?')[0].split('/')[-1]\n filepath = os.path.join(FLAGS.dataset_dir, filename)\n\n print('Downloading file %s' % filename)\n print(filepath)\n\n if not tf.gfile.Exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %.1f%%' %\n (float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)\n if '.zip' in filename:\n print('Extracting..')\n with zipfile.ZipFile(filepath, 'r') as f:\n f.extractall(FLAGS.dataset_dir)\n\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded and extracted %s, size %s bytes.' %\n (filename, size))\n\n data_to_path[file_item] = filepath\n\n return data_to_path", "def download_data(path):\n import requests\n import zipfile\n import os\n\n # download file\n resp = requests.get('http://files.grouplens.org/datasets/movielens/ml-100k.zip', allow_redirects=True, stream=True)\n\n if resp.status_code == 200:\n print('Successfully downloaded the data')\n elif resp.status_code == 404:\n print('File Not Found. 
Could not download the dataset.')\n \n filename = 'ml-100k.zip'\n zfile = open(filename, 'wb')\n zfile.write(resp.content)\n zfile.close()\n\n zipf = zipfile.ZipFile(filename, 'r') \n zipf.extractall(path)\n zipf.close()\n\n os.remove(filename)", "def get_dataset(ds,dataDir,removecompressed=1):\n #Convert input ds to string incase it is put in via function\n ds = str(ds)\n #The final character of the dataset can be a letter\n lettersuffix=''\n if re.search('[A-Za-z]$',ds):\n lettersuffix = ds[-1]\n ds = ds[:-1]\n openfMRI_dataset_string = '{0:06d}'.format(int(ds)) + lettersuffix\n #Some datasets include\n try:\n os.mkdir(dataDir)\n except:\n pass\n\n datasetDir = os.path.join(dataDir, 'openfmri/')\n\n try:\n os.mkdir(datasetDir)\n except:\n pass\n\n openfMRI_url = 'https://openfmri.org/dataset/ds' + openfMRI_dataset_string + '/'\n r = urlopen(openfMRI_url).read()\n soup = BeautifulSoup(r,'lxml')\n\n #Isolate only the links from the latest revision. The text \"data associated with revision\". If the website changes its static text, this needs to be changed\n unformatted_soup=soup.prettify()\n firstOccurance=unformatted_soup.find('Data Associated with Revision')\n secondOccurancce=unformatted_soup[firstOccurance+1:].find('Data Associated with Revision')\n #If there is only one \"Data Associated...\" (i.e. only one revision) this returns -1. This should be kept. Otherwise add on the firstOccurance index\n if secondOccurancce != -1:\n secondOccurancce+=firstOccurance\n #The latest links are confined within this part of the text\n soup_latestversion = BeautifulSoup(unformatted_soup[firstOccurance:secondOccurancce],'lxml')\n\n # Loop through all links and dowload files\n filelist = []\n for a in soup_latestversion.find_all('a', href=True):\n #This assumes that all files include ds....\n if re.search('ds[A-Za-z_0-9.-]*$',a['href']):\n filename_start=re.search('ds[A-Za-z_0-9.-]*$',a['href']).start()\n filelist.append(a['href'][filename_start:])\n print('Downloading: ' + a['href'][filename_start:])\n urlretrieve(a['href'],datasetDir + a['href'][filename_start:])\n print('--- Download complete ---')\n for f in filelist:\n untar_or_unzip(datasetDir,f)\n print('--- Uncompressing complete ---')\n if removecompressed==1:\n for f in filelist:\n print('Clean up. Deleting: ' + f)\n os.remove(datasetDir+f)\n print('--- Clean up complete ---')\n print('NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly. \\n If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.')\n print('--- Script complete ---')", "def download_similarity_datasets():\n # this is the URL we are downloading\n url = \"http://www.socsci.uci.edu/~mdlee/all.zip\"\n\n # download the file and extract its contents.\n request = requests.get(url)\n dest = os.path.join(\"data\", \"similarity_data\")\n zipfile.ZipFile(BytesIO(request.content)).extractall(dest)\n\n return dest" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should return an Iterable of services initialized for the current instance. You should override this when subclassing IRCClient.
def services(self): return []
[ "def services(self):\n return self.__services", "def _QueryServices(self):\n init_prop_header = 'init.svc.'\n props = self._Props()\n return dict([(k[len(init_prop_header):], v) for k, v in props.iteritems()\n if k.startswith(init_prop_header)])", "def getStatefulServices(self):\n rv = []\n sf = self.sf\n services = sf.activeServices()\n for srv in services:\n try:\n prx = sf.getByName(srv)\n prx = omero.api.StatefulServiceInterfacePrx.checkedCast(prx)\n if prx is not None:\n rv.append(prx)\n except:\n self.__logger.warn(\"Error looking up proxy: %s\" % srv, exc_info=1)\n return rv", "def services(self):\n for service_id in self.service_ids():\n yield self._get_service_from_graph(service_id)", "def get_availables_services(self):\r\n self._service_locator.get_availables_services()", "async def get_services(self, **kwargs) -> BleakGATTServiceCollection:\n await self._services_resolved.wait()\n return self.services", "def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]", "def _enumerate_services_generator(self):\n size_needed = gdef.DWORD()\n nb_services = gdef.DWORD()\n counter = gdef.DWORD()\n try:\n windows.winproxy.EnumServicesStatusExW(self.handle, SC_ENUM_PROCESS_INFO, SERVICE_TYPE_ALL, SERVICE_STATE_ALL, None, 0, ctypes.byref(size_needed), ctypes.byref(nb_services), byref(counter), None)\n except WindowsError:\n pass\n\n while True:\n size = size_needed.value\n buffer = (BYTE * size)()\n try:\n windows.winproxy.EnumServicesStatusExW(self.handle, SC_ENUM_PROCESS_INFO, SERVICE_TYPE_ALL, SERVICE_STATE_ALL, buffer, size, ctypes.byref(size_needed), ctypes.byref(nb_services), byref(counter), None)\n except WindowsError as e:\n continue\n break\n services_array = (gdef.ENUM_SERVICE_STATUS_PROCESSW * nb_services.value).from_buffer(buffer)\n for service_info in services_array:\n shandle = self.open_service(service_info.lpServiceName)\n yield Service(handle=shandle, name=service_info.lpServiceName, description=service_info.lpDisplayName)\n return", "def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)", "def get_all_local_services(self):\n return self._services", "def parse_services(self):\n #Client\n for item in self.client_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.client_services_list.append(service) \n\n #Server\n for item in self.server_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.server_services_list.append(service)", "def services(self) -> List[Service]:\n if self._services:\n return self._services\n\n ito_ids = [w.id for w in self.warnings]\n self._services = list(Service.objects.filter(ito_id__in=ito_ids))\n return self._services", "def get_services(self):\n\n # try to get services\n try:\n\n # get services\n command = str('kubectl get services')\n subprocess.call(command.split())\n\n # handle exception\n except:\n\n # raise Exception\n raise Exception('I could not get the list of services')", "def get_clients_services_info(self):\n with self.clients_lock:\n return [c.get_services_info() for c in self.clients]", "def _start_all_services(self):\n print(f'Starting all services...')\n server_module = __import__('Server.services')\n all_service_files = server_module.__dict__['services'].__dict__['__all__']\n print(f'All service files: {all_service_files}')\n for service_file in all_service_files:\n service_module = __import__(f'Server.services.{service_file}')\n # All service objects must be named identically 
to the file that they are saved under\n service_module = service_module.__dict__['services'].__dict__[service_file]\n service_class = getattr(service_module, service_file)\n # All service classes must be initialize themselves with register callback\n # in order to map Message object names to Service object handlers\n self.services.append(service_class(self.register_service))\n [print(f'Added {_} to server services list') for _ in self.services]", "def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')", "def clients(self):\n\t\tfor client in self._clients:\n\t\t\tyield client", "def get_all_services():\n global tts_srv\n tts_srv = QI_SESSION.service(\"ALTextToSpeech\")\n\n global al_srv\n al_srv = QI_SESSION.service(\"ALAutonomousLife\")\n\n global ba_srv\n ba_srv = QI_SESSION.service(\"ALBasicAwareness\")\n\n global ab_srv\n ab_srv = QI_SESSION.service(\"ALAutonomousBlinking\")\n\n global motion_srv\n motion_srv = QI_SESSION.service(\"ALMotion\")\n\n global video_srv\n video_srv = QI_SESSION.service(\"ALVideoDevice\")\n\n global tablet_srv\n tablet_srv = QI_SESSION.service(\"ALTabletService\")\n\n global as_srv\n as_srv = QI_SESSION.service(\"ALAnimatedSpeech\")\n\n global ap_srv\n ap_srv = QI_SESSION.service(\"ALAnimationPlayer\")\n\n global posture_srv\n posture_srv = QI_SESSION.service(\"ALRobotPosture\")\n\n global ar_srv\n ar_srv = QI_SESSION.service(\"ALAudioRecorder\")\n\n global ad_srv\n ad_srv = QI_SESSION.service(\"ALAudioDevice\")\n\n global fd_srv\n fd_srv = QI_SESSION.service(\"ALFaceDetection\")\n\n global mem_srv\n mem_srv = QI_SESSION.service(\"ALMemory\")\n\n global lm_srv\n lm_srv = QI_SESSION.service(\"ALListeningMovement\")\n\n global sm_srv\n sm_srv = QI_SESSION.service(\"ALSpeakingMovement\")\n\n global audio_player\n audio_player = QI_SESSION.service(\"ALAudioPlayer\")\n\n global led_srv\n led_srv = QI_SESSION.service(\"ALLeds\")", "def discovery_services():\n return app.manager.admin_discovery_services_controller.process_discovery_services()", "def init_service_list(self):\n images = self._cli.images()\n for image in images:\n self._services[image['Id']] = image\n\n # liang: dump the images, need to move to cache abstraction in the future.\n if image['Id'] == u'3d3b49d80014e2df3434f282586b3bb2cff0f7b5f58a3e63d9229c48085a53a8':\n continue\n if not os.path.exists(conf['image_dir']+image['Id']+'.tar'):\n logger.info('dumping %s' % image['Id'])\n raw = self._cli.get_image(image['Id'])\n tar = open(conf['image_dir']+image['Id']+'.tar', 'w')\n tar.write(raw.data)\n tar.close()\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the given configuration parameter. The following order of precedence is used to return the parameter in order to deal with
def cfgget(self, name, default = NOPARAM): try: return self.params[name] except KeyError: pass if default != NOPARAM: return default try: return default_params[name] except KeyError: pass return None
[ "def fetchConfigParam(self):\r\n pass", "def _get_from_backend(self, parameter, section):\n value = None\n try:\n value = self.config_backend.get(section, parameter.id)\n except (NoOptionError, NoSectionError):\n # Ignore, we return None.\n pass\n return value", "def get_parameter(self, index):\n result = None\n if index < len(self.paramorder):\n key = self.paramorder[index]\n if key in self._parameters:\n result = self._parameters[key]\n\n return result", "def get(self, name: str, default=None):\n if name in self.__config:\n return self.__config[name]\n if '.' in name:\n names = name.split('.')\n cur = self.__config\n for name in names:\n if type(cur) is dict and name in cur:\n cur = cur[name]\n else:\n return default\n return cur\n return default", "def get(self, parameter, section=None):\n if not section:\n section = self._calculate_config_section(parameter)\n # Try to get the parameter value first if it has one.\n if parameter.value is not None:\n value = parameter.value\n else:\n value = self._get_from_cache(parameter, section)\n\n if value is None:\n value = self._get_from_backend(parameter, section)\n return value", "def get_param(backend_model, var, dims):\n try:\n return getattr(backend_model, var)[dims]\n except AttributeError: # i.e. parameter doesn't exist at all\n logger.debug(\n \"get_param: var {} and dims {} leading to default lookup\".format(var, dims)\n )\n return backend_model.__calliope_defaults[var]\n except KeyError: # try removing timestep\n try:\n if len(dims) > 2:\n return getattr(backend_model, var)[dims[:-1]]\n else:\n return getattr(backend_model, var)[dims[0]]\n except KeyError: # Static default value\n logger.debug(\n \"get_param: var {} and dims {} leading to default lookup\".format(\n var, dims\n )\n )\n return backend_model.__calliope_defaults[var]", "def get_parameter(self) -> str:\n return self.parameter", "def get(self, botconf, cat=None):\n setting = botconf.get(self.name)\n return setting if (setting is not None) else self.default", "def get(self,name,default=MANDATORY):\n if self.defaults is not None:\n default = self._getFromDict(self.defaults, name, default)\n val = self._getFromDict(self.dict, name, default)\n if val == MANDATORY:\n raise ValueError(\"config value '{}' is not specified in {} and {} in {}\".format(name,self.dict,self.defaults,self.name));\n return val", "def get_param(self, name):\n return(self._find_param(name).value)", "def get_parameter(self, element, parameter):\n try:\n element = self.parameters[element]\n except KeyError:\n raise KeyError('family `{}` does not contain the element `{}`'.format(self.label, element))\n\n try:\n return element[parameter]\n except KeyError:\n raise KeyError('parameter `{}` is not available for element `{}`'.format(parameter, element))", "def _get_url_param(self, category):\n res = get_nested(ATTR_CONFIG, category)\n\n if type(res) == dict and 'default' in res:\n return res[\"default\"] # for the root value\n else:\n return res", "def get_config_value(key,base=u'default',default=None):\n if base in LOCAL and key in LOCAL[base]:\n return LOCAL[base][key]\n if base in GLOBAL and key in GLOBAL[base]:\n return GLOBAL[base][key]\n else:\n return default\n return None", "def getarg(self, parname):\n # list of strings that should parse to boolean true\n # we need to handle booleans separately, because bool(\"False\")\n # evaluates to True\n booltrue = ['yes','true','1','t']\n\n parname = parname.lower() # so we don't have to worry about case\n\n # Get paramlist index and check if parameter is valid\n 
try:\n ind = [par[0].lower() for par in self.paramlist].index(parname)\n except ValueError:\n msg = 'GetArg: There is no parameter named %s' % parname\n self.log.error(msg)\n raise KeyError(msg)\n parnameraw = self.paramlist[ind][0] # ParName in original Case\n default = self.paramlist[ind][1]\n # get from arguments if possible\n if self.arglist.has_key(parname):\n # assumes that: if value is not default, then set on command line\n # by the user.\n if self.arglist[parname] != self.parser.get_default(parnameraw):\n ret = self.arglist[parnameraw]\n self.log.debug('GetArg: from command line, done (%s=%s)'\n % (parnameraw, repr(ret)) )\n return ret\n # make temporary config entry with lowercase key names\n conftmp = {}\n if self.config.has_key(self.name): # skip if no step entry in config\n for keyname in self.config[self.name].keys():\n conftmp[keyname.lower()] = self.config[self.name][keyname]\n # get from config if possible\n if conftmp.has_key(parname):\n value = conftmp[parname]\n # If default is a sequence:\n if isinstance(default,(tuple,list)):\n # Get type for list elements\n # (if default is empty, convert to string)\n if len(default) > 0:\n outtype = type(default[0])\n else:\n outtype = str\n ret = []\n # Convert elements in list\n # Note: if the keyword only has one item in the list and there\n # is no trailing comma, configobj will read it as a string\n # instead of a 1-element list. We force to list here.\n if isinstance(value,str):\n value = [value]\n for i in xrange(len(value)):\n # Check if it's boolean\n if outtype == bool:\n if value[i].lower() in booltrue:\n ret.append(True)\n else: # default to False\n ret.append(False)\n # Not boolean - just convert to type\n else:\n ret.append(outtype(value[i]))\n # convert to tuple\n self.log.debug('GetArg: from config file, done (%s=%s)' % (parname,repr(type(default)(ret))))\n return type(default)(ret)\n # Default is not a sequence\n else:\n # Check if it's boolean\n if isinstance(default,bool) and not isinstance(value,bool):\n if value.lower() in booltrue:\n self.log.debug('GetArg: from config file, done (%s=True)' % parname)\n return True\n else:\n self.log.debug('GetArg: from config file, done (%s=False)' % parname)\n return False\n # Not boolean - just convert to type\n else:\n self.log.debug('GetArg: from config file, done (%s=%s)' % (parname,repr(type(default)(value))))\n return type(default)(value)\n # get default from parameter list\n ret = self.paramlist[ind][1]\n # return parameter\n self.log.debug('GetArg: from param list, done (%s=%s)' % (parname,repr(ret)))\n return ret", "def get_parameter(self, a_name):\n return self.parameters.get(a_name, None)", "def get_option(cfg, base, opt):\n if cfg.has_option(base, opt):\n return cfg.get(base, opt)\n else:\n return None", "def config(default=None, environment=environment(), **values):\n if environment in values:\n return values[environment]\n else:\n return default", "def config(self, param: str, /) -> Any:", "def getparam(self, parname):\n self.log.warn('GetParam is Decrecated - use GetArg')\n return self.getarg(parname)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends several messages at once. Because the bot is heavily threaded and threads are evil, it's probably best to call sendmany() instead of calling send multiple times when sending series of messages, so unrelated series don't occur at the same time. There's no danger of two messages sending inside of each other midstream, mind, as Transport.write() and self.send() are both highly threadsafe.
def sendmany(self, messages): with self.lock_many: for i in messages: self.send(i)
[ "def send_multiple_emails(cls, *messages):\n pass", "def send_messages(self, messages):\r\n if not messages:\r\n return\r\n self._lock.acquire()\r\n try:\r\n # The try-except is nested to allow for\r\n # Python 2.4 support (Refs #12147)\r\n try:\r\n stream_created = self.open()\r\n for message in messages:\r\n self.stream.write(render_message(message))\r\n self.stream.write('\\n')\r\n self.stream.write('-'*79)\r\n self.stream.write('\\n')\r\n self.stream.flush() # flush after each message\r\n if stream_created:\r\n self.close()\r\n except:\r\n if not self.fail_silently:\r\n raise\r\n finally:\r\n self._lock.release()\r\n return len(messages)", "def send_messages(self, messages):\r\n count = 0\r\n for message in messages:\r\n message_body = unicodedata.normalize('NFKD', unicode(message.body)).encode('ascii', 'ignore')\r\n for tel_number in message.to:\r\n try:\r\n self.client.send(tel_number, message_body, getattr(settings, 'SMS_SLUZBA_API_USE_POST', True))\r\n except Exception:\r\n if self.fail_silently:\r\n log.exception('Error while sending sms via sms.sluzba.cz backend API.')\r\n else:\r\n raise\r\n else:\r\n count += 1\r\n\r\n return count", "def send_messages(self, message: str):\n users = self.bd.get_user_ids()\n for user in users:\n self._send_msg(message,\n user)", "async def _send(\n self, msg: Message, timeout: float, retry: int = 0\n ) -> Message:\n if timeout is None:\n timeout = self.timeout\n\n for i in range(retry):\n try:\n return await self.engine.send(msg, timeout)\n except NoResponseError as error:\n # Log only if logger already exist\n if self._log is not None:\n self.log.warning(\n \"%s (retry %d/%d): %s\",\n type(error).__name__,\n i + 1,\n retry,\n str(error),\n )\n return await self.engine.send(msg, timeout)", "def _send(self, msg: Message, timeout: float, retry: int = 0) -> Message:\n if timeout is None:\n timeout = self.timeout\n for i in range(retry):\n try:\n return self.engine.send(msg, timeout)\n except NoResponseError as error:\n # Log only if logger already exist\n if self._log is not None:\n self.log.warning(\n \"%s (retry %d/%d): %s\",\n type(error).__name__,\n i + 1,\n retry,\n str(error),\n )\n return self.engine.send(msg, timeout)", "def message_all(self, message):\n self.log.debug('Sending message to {} clients'.format(len(self._clients)))\n for address, handler in self._clients.items():\n handler.message(message)", "def to_send_all(self):\n while self.has_to_send():\n yield self.to_send()", "def s_send_strings(socket: zmq.Socket, messages):\n flags = zmq.SNDMORE\n for i, message in enumerate(messages):\n if i == len(messages) - 1:\n flags = 0\n socket.send(message, flags)", "def send_messages():\n print 'Sending messages'\n client = create_client()\n result = client.send_messages([\"+61412345671\"], \"Hello from messagemedia-python!\")\n\n print \"Sent %d messages, scheduled %d messages, %d messages failed\" % (\n result._sent, result._scheduled, result._failed)", "async def send_message_to_multiparty(self):\n async with Slacker(self.token) as slack:\n if self.bot_id is None:\n self.bot_id = (await self.get_users_dict())[self.bot_name]\n\n response = await slack.mpim.open(self.users + [self.bot_id])\n self.channel_id = response.body['group']['id']\n print(f\"Message was send to multiparty chat {self.channel_id} to users:\")\n print(', '.join(self.users))\n await self.send_message_to_each([self.channel_id])", "async def send_message_to_each(self, users: list = None):\n if users is None:\n users = self.users\n async with Slacker(self.token) as 
slack:\n for user in users:\n await slack.chat.post_message(channel=user,\n text=self.text,\n attachments=json.dumps(self.attachments))", "def enqueue_all(self, requests=None, replies=None, routed=None):\n fast = []\n medium = []\n slow = []\n\n if requests is not None:\n if SPEED_FAST in requests:\n fast = requests[SPEED_FAST]\n if SPEED_MEDIUM in requests:\n medium = requests[SPEED_MEDIUM]\n if SPEED_SLOW in requests:\n slow = requests[SPEED_SLOW]\n\n if replies is not None:\n if SPEED_FAST in replies:\n fast = fast + replies[SPEED_FAST]\n if SPEED_MEDIUM in replies:\n medium = medium + replies[SPEED_MEDIUM]\n if SPEED_SLOW in replies:\n slow = slow + replies[SPEED_SLOW]\n\n if routed is not None:\n fast = fast + routed\n\n total = len(fast) + len(medium) + len(slow)\n\n if len(fast) > 0:\n assert Message.validate_messages_for_send(fast, self.app)\n self.fast_queue.enqueue(fast)\n if len(medium) > 0:\n assert Message.validate_messages_for_send(medium, self.app)\n self.medium_queue.enqueue(medium)\n if len(slow) > 0:\n assert Message.validate_messages_for_send(slow, self.app)\n self.slow_queue.enqueue(slow)\n\n if total > 0:\n self.sleep.set()", "def send_messages(message_list):\n sent_messages = []\n # prints each message\n show_messages(message_list)\n # Moves all messages\n move_messages(message_list, sent_messages)\n return sent_messages", "def send_message(message):\n for chatid in connected_chat_ids:\n tb.send_message(chatid, message)\n return connected_chat_ids", "def send_message_to_all_users(self, users, sender_id, data):\n for user in users:\n if user != sender_id:\n users[user].send(data)", "def send_smses():\n smses = Sms.objects.filter(sent=False)\n fail = 0\n\n for sms in smses:\n if fail < 3:\n try:\n message = unicode(sms.message, \"utf-8\")\n send_sms(sms.harambee.candidate_id, message)\n except (ValueError, httplib2.ServerNotFoundError):\n fail += 1\n continue\n\n sms.sent = True\n sms.time_sent = timezone.now()\n try:\n sms.save()\n except IntegrityError:\n fail += 1", "def chunk_send(msg):\n msg = msg.encode(util.UTF8)\n while len(msg) > 0:\n sent = client_sock.send(msg)\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n msg = msg[sent:]", "def multiple_send_command(self, job):\n obj = job[1]\n command_list = job[3]\n if obj.device == \" \":\n device = 0\n else:\n device = obj.device\n if obj.system == \" \":\n system = 0\n else:\n system = obj.system\n \n self.set_status(obj, \"Connecting\")\n self.notify_send_command_window(obj)\n try:\n telnet_session = self.establish_telnet(obj.ip_address)\n telnet_session.read_until('>', int(job[2]))\n total = len(command_list)\n count = 0\n error = 0\n for command in command_list:\n count += 1\n output = (\"send_command \" + \n str(device) + \n \":\" + \n str(command[1]) + \n \":\" + \n str(system) + \n \", \" + \n \"\\\"\\'\" + \n str(command[0]) + \n \"\\'\\\"\") \n telnet_session.write(str(output + \" \\r\"))\n result_raw = telnet_session.read_until('>', int(job[2]))\n if result_raw.split()[0] != 'command:':\n dispatcher.send(\n signal=\"send_command result\", \n sender=((True, 'Sending ' + str(result_raw)[:-1])))\n self.set_status(\n obj, ('Sent ' + str(count) + ' of ' + str(total)))\n self.notify_send_command_window(obj) \n else:\n error += 1\n dispatcher.send(signal=\"send_command result\",\n sender=((False, 'Failed to send command')))\n\n telnet_session.close()\n if not error: \n self.set_status(obj, 'Success')\n self.notify_send_command_window(obj)\n else:\n self.set_status(obj, 'Failed')\n 
self.notify_send_command_window(obj) \n except Exception as error:\n self.error_processing(obj, error)\n self.notify_send_command_window(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for the switch to be released (waits for a rising edge). YES. This is important with magnetic switches. And wait_released is already blocking so leave me alone with the time.sleep!
def wait_released(self): while not self.is_released(): time.sleep(0.01)
[ "def wait_released(self):\n GPIO.wait_for_edge(self.pin, GPIO.RISING)\n return", "def wait_pressed(self):\n GPIO.wait_for_edge(self.pin, GPIO.FALLING)\n return", "def _hw_wait(self):\n while self.read_status()[0] == Drivable.Status.BUSY:\n sleep(0.3)", "def wait():\n t = random.triangular(config.WAIT_MIN, config.WAIT_MAX)\n time.sleep(t)", "def wait(self):\n self._lock.acquire()", "def waitUntilDown(self):\n while not self.isDown():\n self.waitForEvent()", "def waitUntilUnlocked(self):\n while not self.isUnlocked():\n self.waitForEvent()", "def wait(self, t=1):\n self.flag.clear()\n self.flag.wait(t)", "def wait(self, press=True, timeout=None):\n return self._wait(0 if press else 1, timeout=timeout)", "def wait_STP(switch):\r\n status = switch.dpctl('show')\r\n while 'STP_FORWARD' not in status or 'STP_LEARN' in status:\r\n status = switch.dpctl('show')\r\n sleep(0.5)", "def wait_for_time():\n while rospy.Time().now().to_sec() == 1:\n pass", "def wait(self, handle):\n return", "def wait_for_scan(self):\n while self.any_scans(bin(int(self.get_cond()))):\n time.sleep(1)", "def waiton(self, timeout=90):\n status = None\n message = \"Waiting until switch %s(%s) is up.\" % (self.name, self.ipaddr)\n self.class_logger.info(message)\n stop_flag = False\n end_time = time.time() + timeout\n while not stop_flag:\n if loggers.LOG_STREAM:\n sys.stdout.write(\".\")\n sys.stdout.flush()\n if time.time() < end_time:\n # While time isn't elapsed continue probing switch.\n try:\n status = self.probe()\n except KeyboardInterrupt:\n message = \"KeyboardInterrupt while checking switch %s(%s)...\" % (self.name, self.ipaddr)\n self.class_logger.info(message)\n self.sanitize()\n pytest.exit(message)\n if status[\"isup\"] and status[\"type\"] == \"switchpp\":\n stop_flag = True\n if status['prop'] == {}:\n # If type == switchpp but prop == empty dict then Platform table return incorrect data.\n message = \"Found running switchpp on %s but Platform data is corrupted.\" % (self.ipaddr, )\n self.class_logger.warning(message)\n raise SwitchException(message)\n self.class_logger.info(\"Switch instance on %s(%s) is OK.\" % (self.name, self.ipaddr))\n else:\n # Time is elapsed.\n if status[\"isup\"] and status[\"type\"] != \"switchpp\":\n message = (\"Port %s on host %s is opened but doesn't response queries.\" +\n \" %s Check your environment!\") % (self.port, self.ipaddr, self.waiton_err_message)\n else:\n port = self._get_port_for_probe()\n message = \"Timeout exceeded. 
IP address %s port %s doesn't respond\" % (self.ipaddr, port)\n self.class_logger.warning(message)\n raise SwitchException(message)\n if not stop_flag:\n time.sleep(0.75)\n return status", "def wait_for_switch(self, switch: Switch, state: int = 1, only_on_change=True, ms=0):\n return self.wait_for_any_switch([switch], state, only_on_change, ms)", "def wait(self, cond, timeout=None):\n if timeout != None:\n timeout = time.clock() + timeout / 1000.0\n while True:\n time.sleep(0.01)\n if cond(str(self.wheel.state())):\n return True\n if timeout != None and time.clock() >= timeout:\n return False", "def activate_bed():\n while (True):\n pin_on(BED_PIN)\n sleep(3)\n pin_off(BED_PIN)\n sleep(2)", "def wait_for_release(self, timeout = 0):\n target = 2 ** 31 if timeout == 0 else time.time() + timeout\n while (self.shm.touch_p > 1) and (time.time() < target): \n time.sleep(0.01)\n self._check_host_ts()\n return (self.shm.touch_p < 1)", "def test_switchinggpio():\n try:\n active_high = SwitchingGPIO.SwitchingGPIO(1)\n run_test(active_high)\n active_low = SwitchingGPIO.SwitchingGPIO(1, False)\n run_test(active_low)\n\n except KeyboardInterrupt:\n pass\n finally:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the cross section of a single MRMSGrib object's data from point1 to point2 using cubic interpolation
def get_cross_cubic(grb, point1, point2): lons = grb.grid_lons lats = grb.grid_lats x, y = np.meshgrid(lons, lats) z = grb.data # [(x1, y1), (x2, y2)] line = [(point1[0], point1[1]), (point2[0], point2[1])] # cubic interpolation y_world, x_world = np.array(list(zip(*line))) col = z.shape[1] * (x_world - x.min()) / x.ptp() row = z.shape[0] * (y.max() - y_world ) / y.ptp() num = 100 row, col = [np.linspace(item[0], item[1], num) for item in [row, col]] valid_date = grb.validity_date valid_time = grb.validity_time # Extract the values along the line, using cubic interpolation zi = scipy.ndimage.map_coordinates(z, np.vstack((row, col)), order=1, mode='nearest') return zi
[ "def __curve_splicing(self):", "def plot_cross_cubic_single(grb, point1, point2, first=False):\n lons = grb.grid_lons\n lats = grb.grid_lats\n\n x, y = np.meshgrid(lons, lats)\n z = grb.data\n\n # [(x1, y1), (x2, y2)]\n line = [(point1[0], point1[1]), (point2[0], point2[1])]\n\n # cubic interpolation\n y_world, x_world = np.array(list(zip(*line)))\n col = z.shape[1] * (x_world - x.min()) / x.ptp()\n row = z.shape[0] * (y.max() - y_world ) / y.ptp()\n\n num = 1000\n row, col = [np.linspace(item[0], item[1], num) for item in [row, col]]\n\n valid_date = grb.validity_date\n valid_time = grb.validity_time\n\n # Extract the values along the line, using cubic interpolation\n zi = scipy.ndimage.map_coordinates(z, np.vstack((row, col)), order=1, mode='nearest')\n\n # Plot...\n fig, axes = plt.subplots(nrows=2)\n axes[0].pcolormesh(x, y, z)\n axes[0].plot(x_world, y_world, 'ro-')\n axes[0].axis('image')\n\n axes[1].plot(zi)\n\n plt.show()", "def getCrossings(x1,y1, x2,y2):\r\n x, y = _intersections(x1,y1, x2,y2)\r\n return x, y", "def _segment_approx_value_cubic(self, r, i1, i2):\n\n Dr = self._r[i2] - self._r[i1]\n a0 = self._y[i1]\n a1 = self._dy[i1]*Dr\n a2 = -(2.*self._dy[i1] + self._dy[i2])*Dr + 3*(self._y[i2] - self._y[i1])\n a3 = (self._dy[i1] + self._dy[i2])*Dr - 2*(self._y[i2] - self._y[i1])\n \n X = (r - self._r[i1])/Dr\n \n return a0 + a1*X + a2*X**2 + a3*X**3", "def get_gradient_value_at_point(self, point):\n x, y, z = self.get_object_position_on_grid(point)\n #get the indices\n ix = int(x)\n iy = int(y)\n iz = int(z)\n #perform a trilinear interpolation from wikipedia.org\n #x\n if(x - ix > .5):\n x0 = ix\n x1 = ix + 1\n else:\n x0 = ix - 1\n x1 = ix\n #y\n if(y - iy > .5):\n y0 = iy\n y1 = iy + 1\n else:\n y0 = iy - 1\n y1 = iy\n #z\n if(z - iz > .5):\n z0 = iz\n z1 = iz + 1\n else:\n z0 = iz - 1\n z1 = iz \n \n #solve for xd, yd, zd\n xd = (abs(x - (ix + .5))) / (x1 - x0)\n yd = (abs(y - (iy + .5))) / (y1 - y0)\n zd = (abs(z - (iz + .5))) / (z1 - z0)\n\n #now the first set of linear interp\n c00 = self.C[x0, y0, z0]*(1 - xd) + self.C[x1, y0, z0]*xd\n c10 = self.C[x0, y1, z0]*(1 - xd) + self.C[x1, y1, z0]*xd\n c01 = self.C[x0, y0, z1]*(1 - xd) + self.C[x1, y0, z1]*xd\n c11 = self.C[x0, y1, z1]*(1 - xd) + self.C[x1, y1, z1]*xd\n\n #now the second set\n c0 = c00*(1-yd) + c10*yd\n c1 = c01*(1-yd) + c11*yd\n #finally the last set\n c = c0*(1-zd) + c1*zd\n #return the predicted value\n return c", "def line_sample2d(x,y,z,x1,y1):\n from scipy.interpolate import RectBivariateSpline as rbs\n # Extract the values along the line, using cubic interpolation\n f = rbs(x,y,z.T)\n return f.ev(x1,y1)\n #return scipy.ndimage.map_coordinates(z, np.vstack((y,x)))", "def test_cubic():\n\txi = -10 +np.random.sample(4)*20\n\tyi = -10 +np.random.sample(4)*20\n\tplot_cubic(xi,yi)\n\tprint \"ok!\"", "def cubic_spline_coefficients(self):\r\n\r\n delx = np.diff(self.xi); delf = np.diff(self.fi)\r\n # form matrices to solve for spline coefficients\r\n vx = np.zeros_like(self.xi)\r\n # form rhs vector using python's array slicing \r\n vx[1:-1:] = 3.*(delf[1::]/delx[1::] - delf[:-1:]/delx[:-1:])\r\n # construct 3 diagonals\r\n nx = np.size(self.xi)\r\n diags = np.zeros((3, nx))\r\n diags[1,0] = 1.; diags[1,-1] = 1.\r\n diags[1,1:-1:] = 2. 
* (delx[1::] + delx[:-1:])\r\n diags[0,1:] = delx[:]\r\n diags[2,1:-1] = delx[1:]\r\n # solve for coefficients c using Thomas algorithm for tri-diagonal matrices\r\n # see https://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)\r\n ac, bc, cc, dc = map(np.array, (diags[0,:], diags[1,:], diags[2,:], vx)) # copy arrays\r\n\r\n '''Note: This loop can be improved with NumPy'''\r\n for k in range(1, nx):\r\n mk = ac[k] / bc[k-1]\r\n bc[k] = bc[k] - mk * cc[k-1] \r\n dc[k] = dc[k] - mk * dc[k-1]\r\n \r\n c = np.zeros_like(bc)\r\n c[-1] = dc[-1] / bc[-1]\r\n\r\n '''Note: This loop can be improved with NumPy'''\r\n for k in range(nx-2, -1, -1):\r\n c[k] = (dc[k]-cc[k]*c[k+1])/bc[k]\r\n\r\n # now get the rest of the coefficients\r\n b = delf[::]/delx[::] - (c[1::] + 2.*c[:-1:])*delx[::]/3.\r\n d = (c[1::] - c[:-1:])/(3.*delx[::]) \r\n a = self.fi\r\n return a, b, c, d", "def interpolate_points(pointA, pointB, factor):\n #interpolated = pointA + factor*(pointB - pointA) \n interpolated = np.add(pointA, np.multiply(factor, np.subtract(pointB, pointA)))\n return interpolated", "def cross(o, a, b):\r\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])", "def cross(self, v2):\n if len(self.coordinates) == len(v2.coordinates):\n if len(v2.coordinates) == 3:\n v3 = list((0, 0, 0))\n v3[0] = (\n self.coordinates[1] * v2.coordinates[2]\n - self.coordinates[2] * v2.coordinates[1]\n )\n v3[1] = (\n self.coordinates[2] * v2.coordinates[0]\n - self.coordinates[0] * v2.coordinates[2]\n )\n v3[2] = (\n self.coordinates[0] * v2.coordinates[1]\n - self.coordinates[1] * v2.coordinates[0]\n )\n return Vector(v3)\n else:\n raise ValueError(\"Cross Product implemented for R^3\")\n else:\n raise ValueError(\"Arrays are not of the same dimension\")", "def catmull_rom(p_x: List[float], p_y: List[float], res: int = 2) -> Tuple[List[float], List[float]]:\n # create arrays for spline points\n x_intpol: np.ndarray = np.empty(res * (len(p_x) - 1) + 1)\n y_intpol: np.ndarray = np.empty(res * (len(p_x) - 1) + 1)\n\n # set the last x- and y-coord, the others will be set in the loop\n x_intpol[-1] = p_x[-1]\n y_intpol[-1] = p_y[-1]\n\n # loop over segments (we have n-1 segments for n points)\n for i in range(len(p_x) - 1):\n # set x-coords\n x_intpol[i * res:(i + 1) * res] = np.linspace(\n p_x[i], p_x[i + 1], res, endpoint=False)\n if i == 0:\n # need to estimate an additional support point before the first\n y_intpol[:res] = np.array([\n catmull_rom_one_point(\n x,\n p_y[0] - (p_y[1] - p_y[0]), # estimated start point,\n p_y[0],\n p_y[1],\n p_y[2])\n for x in np.linspace(0., 1., res, endpoint=False)])\n elif i == len(p_x) - 2:\n # need to estimate an additional support point after the last\n y_intpol[i * res:-1] = np.array([\n catmull_rom_one_point(\n x,\n p_y[i - 1],\n p_y[i],\n p_y[i + 1],\n p_y[i + 1] + (p_y[i + 1] - p_y[i]) # estimated end point\n ) for x in np.linspace(0., 1., res, endpoint=False)])\n else:\n y_intpol[i * res:(i + 1) * res] = np.array([\n catmull_rom_one_point(\n x,\n p_y[i - 1],\n p_y[i],\n p_y[i + 1],\n p_y[i + 2]) for x in np.linspace(0., 1., res, endpoint=False)])\n return x_intpol.tolist(), y_intpol.tolist()", "def correlate_xy(self, data_pol0, data_pol1, header, indpol0, indpol1):\n\n seq0 = header[indpol0, -1]\n seq1 = header[indpol1, -1]\n\n XYreal = []\n XYimag = []\n \n seq_xy = []\n\n data_rp0 = data_pol0.real\n data_ip0 = data_pol0.imag\n\n data_rp1 = data_pol1.real\n data_ip1 = data_pol1.imag\n\n for t0, tt in enumerate(seq0):\n\n t1 = 
np.where(seq1 == tt)[0]\n\n if len(t1) < 1:\n continue\n\n seq_xy.append(tt)\n\n xyreal = data_rp0[t0] * data_rp1[t1] + data_ip0[t0] * data_ip1[t1]\n xyimag = data_ip0[t0] * data_rp1[t1] - data_rp0[t0] * data_ip1[t1]\n\n XYreal.append(xyreal)\n XYimag.append(xyimag)\n\n return XYreal, XYimag", "def cross_product(point1, point2): # never tested\n return Point(coords=np.cross(point1.as_array(), point2.as_array()))", "def vectorCross(v1, v2):\r\n return (v1[0] * v2[1] - v1[1] * v2[0])", "def interp_cubic(p0, p1, t_abs):\n T = (p1.time_from_start - p0.time_from_start).to_sec()\n t = t_abs - p0.time_from_start.to_sec()\n q = [0] * 7\n qdot = [0] * 7\n qddot = [0] * 7\n for i in range(len(p0.positions)):\n a = p0.positions[i]\n b = p0.velocities[i]\n c = (-3 * p0.positions[i] + 3 * p1.positions[i] - 2 * T * p0.velocities[i] - T * p1.velocities[i]) / T**2\n d = (2 * p0.positions[i] - 2 * p1.positions[i] + T * p0.velocities[i] + T * p1.velocities[i]) / T**3\n\n q[i] = a + b * t + c * t**2 + d * t**3\n qdot[i] = b + 2 * c * t + 3 * d * t**2\n qddot[i] = 2 * c + 6 * d * t\n return JointTrajectoryPoint(positions=q, velocities=qdot, accelerations=qddot, time_from_start=rospy.Duration(t_abs))", "def get_cross_section(self):\n\t\treturn PI * self.b * self.c", "def cubic_interp(xi,yi):\t\n\terror_message = \"xi, yi need to be type numpy.ndarray\"\n\tassert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message\n\terror_message = \"You need 4 points!\"\n\tassert (len(xi)==4) and (len(yi)==4), error_message\n\terror_message = \"The xi points have to be unique!\"\n\tassert (len(xi) == len(np.unique(xi))), error_message\n\n\tA = np.vstack([np.ones(4), xi, xi**2,xi**3]).T\n\tc= np.linalg.solve(A,yi)\n\treturn c", "def separableCrossCorrelate(data, vx, vy):\n \n mode = 'reflect'\n out0 = filt.correlate1d(data, vx, mode=mode)\n out = filt.correlate1d(out0, vy, mode=mode, axis=0)\n return out", "def crosses(line1, line2):\n (x1,y1), (x2,y2) = line1\n (u1,v1), (u2,v2) = line2\n (a,b), (c,d) = (x2-x1, u1-u2), (y2-y1, v1-v2)\n e, f = u1-x1, v1-y1\n denom = float(a*d - b*c)\n if near(denom, 0):\n # parallel\n return False\n else:\n t = (e*d - b*f)/denom\n s = (a*f - e*c)/denom\n # When 0<=t<=1 and 0<=s<=1 the point of intersection occurs within the\n # line segments\n return 0<=t<=1 and 0<=s<=1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the cross section of a single MRMSGrib object's data from point1 to point2 using nearest-neighbor interpolation
def get_cross_neighbor(grb, point1, point2): lons = grb.grid_lons lats = grb.grid_lats x, y = np.meshgrid(lons, lats) # Read the MRMS reflectivity data from the grib object's memory-mapped array z = np.memmap(grb.get_data_path(), dtype='float32', mode='r', shape=grb.shape) # Calculate the coordinates of a line defined by point1 & point2 to sample line = [(point1[0], point1[1]), (point2[0], point2[1])] y_world, x_world = np.array(list(zip(*line))) col = z.shape[1] * (x_world - x.min()) / x.ptp() row = z.shape[0] * (y.max() - y_world ) / y.ptp() num = 1000 row, col = [np.linspace(item[0], item[1], num) for item in [row, col]] valid_date = grb.validity_date valid_time = grb.validity_time d_lats, d_lons = calc_coords(point1, point2, num) # Sample the points along the line in order to get the reflectivity values zi = z[row.astype(int), col.astype(int)] return (zi, d_lats, d_lons)
[ "def getCrossings(x1,y1, x2,y2):\r\n x, y = _intersections(x1,y1, x2,y2)\r\n return x, y", "def corresponding_roi(rpc1, rpc2, x, y, w, h):\n m, M = altitude_range(rpc1, x, y, w, h, 0, 0)\n\n # build an array with vertices of the 3D ROI, obtained as {2D ROI} x [m, M]\n a = np.array([x, x, x, x, x+w, x+w, x+w, x+w])\n b = np.array([y, y, y+h, y+h, y, y, y+h, y+h])\n c = np.array([m, M, m, M, m, M, m, M])\n\n # corresponding points in im2\n xx, yy = find_corresponding_point(rpc1, rpc2, a, b, c)[0:2]\n\n # return coordinates of the bounding box in im2\n return common.bounding_box2D(np.vstack([xx, yy]).T)", "def centroid_linkage(c1, c2):", "def find_rectangles(crossing_points, line_map):\n rectangles = []\n\n # Sort crossing points from top to bottom, left to right.\n raw_points = np.array(sorted(crossing_points, key=lambda pt: (pt[1], pt[0])))\n\n for i, point in enumerate(raw_points):\n # Remove the current leftmost point from the set\n points = np.delete(raw_points, i, axis=0)\n\n # Get points that are on the same x and y coordinates\n x_points = points[points[:, 0] == point[0]]\n y_points = points[points[:, 1] == point[1]]\n\n for x_point in x_points:\n\n # Get pixels between points and replace actual pixel values with binary\n h_edge = line_map[point[1]:x_point[1], point[0]]\n h_zero_edge = h_edge != 0\n\n # Check if there is any empty pixels in the line\n if h_zero_edge.all():\n\n for y_point in y_points:\n w_edge = line_map[point[1], point[0]:y_point[0]]\n w_zero_edge = w_edge != 0\n\n if w_zero_edge.all():\n btm_right_point = (y_point[0], x_point[1])\n\n # Check if there is continuous line from each corner to the bottom right point\n btm_edge_x = line_map[x_point[1], y_point[0]: x_point[0]] != 0\n btm_edge_y = line_map[x_point[1]:y_point[1], y_point[0]] != 0\n\n if btm_right_point in points and btm_edge_x.all() and btm_edge_y.all():\n rectangles.append((tuple(point), tuple(btm_right_point), tuple(x_point), tuple(y_point)))\n\n return rectangles", "def crossWith(self, other):\r\n # select randomly the 4-point of te crossover\r\n genes = [random.randint(0, len(self.points) - 1) for _ in range(4)]\r\n genes.sort() # sort them for the use\r\n\r\n points_from_self = self.points[genes[0]:genes[1]] # first part of self's points\r\n points_from_self += self.points[genes[2]:genes[3]] # second part of self's points\r\n # looking for the missing points\r\n points_from_other = [point for _, point in enumerate(other.points) if point not in points_from_self]\r\n\r\n # add the parent's point to create the child's list of point\r\n child_points = points_from_self + points_from_other\r\n return Individual(child_points)", "def plot_cross_neighbor_single(grb, point1, point2, first=False):\n lons = grb.grid_lons\n lats = grb.grid_lats\n\n x, y = np.meshgrid(lons, lats)\n z = grb.data\n\n line = [(point1[0], point1[1]), (point2[0], point2[1])]\n y_world, x_world = np.array(list(zip(*line)))\n col = z.shape[1] * (x_world - x.min()) / x.ptp()\n row = z.shape[0] * (y.max() - y_world ) / y.ptp()\n\n num = 1000\n row, col = [np.linspace(item[0], item[1], num) for item in [row, col]]\n\n valid_date = grb.validity_date\n valid_time = grb.validity_time\n\n zi = z[row.astype(int), col.astype(int)] #(10000,)\n\n fig, axes = plt.subplots(nrows=2)\n axes[0].pcolormesh(x, y, z)\n axes[0].plot(x_world, y_world, 'ro-')\n axes[0].axis('image')\n\n axes[1].plot(zi)\n\n plt.show()", "def GenCrossHist(X1,X2):\r\n r1= []\r\n ND1, ND2 = 0,0 # num points in box\r\n for i in range(len(X1)):\r\n if -5<X1[i][0] <5 and 
-5<X1[i][1]<5: # if point is in box\r\n ND1+=1 # increment counter\r\n for j in range(len(X2)): # loop over all points in X2\r\n r1.append(math.sqrt( (X2[j][0]-X1[i][0])**2.0 + (X2[j][1]-X1[i][1])**2.0)) # append pairwise distance\r\n \r\n \r\n for i in range(len(X2)):\r\n if -5<X2[i][0] <5 and -5<X2[i][1]<5: # if point in x2 is in box\r\n ND2+=1 # increment counter\r\n for j in range(len(X1)): # loop over points in x1, but avoid any points in box\r\n if (X1[j][0]>5 or X1[j][0]<-5) and (5<X1[j][1] or X1[j][1]<-5): # if outside box then count\r\n r1.append(math.sqrt((X2[i][0]-X1[j][0])**2.0 + (X2[i][1]-X1[j][1])**2.0))\r\n # now bin these\r\n \r\n DDhist1 = np.histogram(r1, bins = bins)\r\n DDcount1, DDr1 = DDhist1[0][:], DDhist1[1][1:] # using outer boundaries\r\n \r\n return ND1,ND2,DDcount1,DDr1", "def combine_data_neighbours(model_data, old_data_point):\n data_point = old_data_point.tolist()\n distances = calculate_neighbours(model_data, data_point)\n data_point.append(distances)\n return data_point", "def spline2D(\n boundary_points=((0, 5), (1, 2), (-4, -1)),\n decision_points=((1, 1), (2, 2))\n):\n raise NotImplementedError(\"It should be a good idea but I don't have time to implement.\")", "def select_regression_points(area, n_considered_points, method=\"max\"):\n if method == \"max\":\n nz_indices = np.nonzero(area)\n joint = np.array(list(zip(*nz_indices)))\n\n joint = sorted(joint, key=lambda idx: area[idx[0], idx[1]])\n\n y_coords, x_coords = zip(*joint)\n\n y_coords = y_coords[:n_considered_points]\n x_coords = x_coords[:n_considered_points]\n elif method == \"mode_y\" or method == \"mode_x\":\n nz_indices = np.nonzero(area)\n joint = np.transpose(nz_indices)\n joint = np.array(sorted(joint, reverse=True, key=lambda idx: area[idx[0], idx[1]])[:n_considered_points])\n\n if method == \"mode_y\":\n m = mode(joint[:, 0])[0][0]\n joint = np.array([v for v in joint if v[0] == m])\n else:\n m = mode(joint[:, 1])[0][0]\n joint = np.array([v for v in joint if v[1] == m])\n\n y_coords, x_coords = zip(*joint)\n \"\"\"elif method in (\"topmost\", \"bottommost\", \"leftmost\", \"rightmost\"):\n nz_indices = np.nonzero(area)\n\n joint = np.array(list(zip(*nz_indices)))\n\n joint = sorted(joint, reverse=True, key=lambda idx: area[idx[0], idx[1]])[:3 * n_considered_points]\n\n if OUTLIER_ELIMINATION:\n print(\"Len joint before\", len(joint))\n\n is_inlier = LocalOutlierFactor(20).fit_predict(joint)\n\n joint = np.array([v for v, inlier in zip(joint, is_inlier) if inlier == 1])\n\n print(\"Len joint after\", len(joint))\n\n if method == \"topmost\":\n ranked = np.array(sorted(joint, key=lambda v: v[0]))\n elif method == \"bottommost\":\n ranked = np.array(sorted(joint, key=lambda v: v[0], reverse=True))\n elif method == \"leftmost\":\n ranked = np.array(sorted(joint, key=lambda v: v[1]))\n elif method == \"rightmost\":\n ranked = np.array(sorted(joint, key=lambda v: v[1], reverse=True))\n\n y_coords, x_coords = zip(*ranked)\n\n y_coords = y_coords[:n_considered_points]\n x_coords = x_coords[:n_considered_points]\"\"\"\n else:\n raise ValueError(\"Invalid method: \" + method)\n\n return y_coords, x_coords", "def path_any_point(self, point_1, point_2, go_edge=False):\n road_graph = self.graph\n \n if go_edge:\n\t\t\tpath_seg = road_graph.pixel_path(point_1, point_2)\n\t\t\treturn path_seg\n\t\t\t\n temp_map = self.global_map.copy()\n\n # this to a degree can represent the expansion of walls and obstacles\n kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n temp_map = 
cv2.erode(temp_map, kernel1, iterations=5)\n\n # plt.matshow(temp_map)\n # plt.show()\n # plt.pause(10)\n\n if temp_map[point_2[0]][point_2[1]] == 0:\n print(\"invalid starting or ending position! You are colliding the wall there!\")\n\n min_1 = 999999\n min_2 = 999999\n index_1 = [0, 0]\n index_2 = [0, 0]\n for i in range(road_graph.edge_list.__len__()):\n edge_i = road_graph.edge_list[i]\n for j in range(edge_i.__len__()):\n # find the point closest to point_1 and point_2\n point_j = edge_i[j]\n dist_1 = diagonal_distance(point_j, point_1)\n dist_2 = diagonal_distance(point_j, point_2)\n # try to find the j_th point in i_th edge\n if dist_1 < min_1:\n min_1 = dist_1\n index_1 = [i, j]\n if dist_2 < min_2:\n min_2 = dist_2\n index_2 = [i, j]\n\n # following process is toooooo clumsy\n # get the node number for the edge that point_1 is closest to\n node_11 = road_graph.connection[index_1[0]][0]\n node_12 = road_graph.connection[index_1[0]][1]\n # for point_2\n node_21 = road_graph.connection[index_2[0]][0]\n node_22 = road_graph.connection[index_2[0]][1]\n\n # print node_11, node_12, node_21, node_22\n\n i_1 = index_1[0]\n j_1 = index_1[1]\n i_2 = index_2[0]\n j_2 = index_2[1]\n\n if (node_11 == node_21 and node_12 == node_22) or (node_11 == node_22 and node_12 == node_21):\n path_seg = road_graph.pixel_path(node_12, node_11, ps=road_graph.edge_list[i_1][j_1],\n pe=road_graph.edge_list[i_2][j_2])\n return path_seg, [(node_12, node_11)]\n\n # can be optimized, not all path needs to be stored and transferred\n ind1 = road_graph.a_star_path(node_11, node_21)\n ind2 = road_graph.a_star_path(node_11, node_22)\n ind3 = road_graph.a_star_path(node_12, node_21)\n ind4 = road_graph.a_star_path(node_12, node_22)\n\n if isinstance(ind1, tuple):\n ind1 = [ind1]\n if isinstance(ind2, tuple):\n ind2 = [ind2]\n if isinstance(ind3, tuple):\n ind3 = [ind3]\n if isinstance(ind4, tuple):\n ind4 = [ind4]\n\n l1 = abs(index_1[1] - road_graph.edge_list[index_1[0]].index(list(road_graph.nodes[node_11]))) + \\\n abs(index_2[1] - road_graph.edge_list[index_2[0]].index(list(road_graph.nodes[node_21])))\n l2 = abs(index_1[1] - road_graph.edge_list[index_1[0]].index(list(road_graph.nodes[node_11]))) + \\\n abs(index_2[1] - road_graph.edge_list[index_2[0]].index(list(road_graph.nodes[node_22])))\n l3 = abs(index_1[1] - road_graph.edge_list[index_1[0]].index(list(road_graph.nodes[node_12]))) + \\\n abs(index_2[1] - road_graph.edge_list[index_2[0]].index(list(road_graph.nodes[node_21])))\n l4 = abs(index_1[1] - road_graph.edge_list[index_1[0]].index(list(road_graph.nodes[node_12]))) + \\\n abs(index_2[1] - road_graph.edge_list[index_2[0]].index(list(road_graph.nodes[node_22])))\n\n #print i_1, j_1, i_2, j_2\n #print ind1, ind2, ind3, ind4\n #print l1, l2, l3, l4\n\n for it in ind1:\n if not it == (0, 0):\n l1 = l1 + road_graph.get_length(it)\n print road_graph.get_length(it)\n for it in ind2:\n if not it == (0, 0):\n l2 = l2 + road_graph.get_length(it)\n print road_graph.get_length(it)\n for it in ind3:\n if not it == (0, 0):\n l3 = l3 + road_graph.get_length(it)\n print road_graph.get_length(it)\n for it in ind4:\n if not it == (0, 0):\n l4 = l4 + road_graph.get_length(it)\n print road_graph.get_length(it)\n\n if l1 < l2 and l1 < l3 and l1 < l4:\n path_seg = road_graph.pixel_path(node_12, node_11, ps=road_graph.edge_list[i_1][j_1])\n for it in ind1:\n if not it == (0, 0):\n path_seg += road_graph.pixel_path(it[0], it[1])\n path_seg += road_graph.pixel_path(node_21, node_22, pe=road_graph.edge_list[i_2][j_2])\n return 
path_seg, ind1\n elif l2 < l1 and l2 < l3 and l2 < l4:\n path_seg = road_graph.pixel_path(node_12, node_11, ps=road_graph.edge_list[i_1][j_1])\n for it in ind2:\n if not it == (0, 0):\n path_seg += road_graph.pixel_path(it[0], it[1])\n path_seg += road_graph.pixel_path(node_22, node_21, pe=road_graph.edge_list[i_2][j_2])\n return path_seg, ind2\n elif l3 < l1 and l3 < l2 and l3 < l4:\n path_seg = road_graph.pixel_path(node_11, node_12, ps=road_graph.edge_list[i_1][j_1])\n for it in ind3:\n if not it == (0, 0):\n path_seg += road_graph.pixel_path(it[0], it[1])\n path_seg += road_graph.pixel_path(node_21, node_22, pe=road_graph.edge_list[i_2][j_2])\n return path_seg, ind3\n else:\n path_seg = road_graph.pixel_path(node_11, node_12, ps=road_graph.edge_list[i_1][j_1])\n for it in ind4:\n if not it == (0, 0):\n path_seg += road_graph.pixel_path(it[0], it[1])\n path_seg += road_graph.pixel_path(node_22, node_21, pe=road_graph.edge_list[i_2][j_2])\n return path_seg, ind4", "def extrapolate_nearest(src, dst):\n\n return extrapolate(src, dst, BorderType.NearestNeighbour)", "def line_sample2d(x,y,z,x1,y1):\n from scipy.interpolate import RectBivariateSpline as rbs\n # Extract the values along the line, using cubic interpolation\n f = rbs(x,y,z.T)\n return f.ev(x1,y1)\n #return scipy.ndimage.map_coordinates(z, np.vstack((y,x)))", "def cutout_cross(self, x, y, radius):\n n = radius\n wd, ht = self.get_size()\n x0, x1 = max(0, x - n), min(wd - 1, x + n)\n y0, y1 = max(0, y - n), min(ht - 1, y + n)\n\n xview = np.s_[y, x0:x1 + 1]\n yview = np.s_[y0:y1 + 1, x]\n\n xarr = self._slice(xview)\n yarr = self._slice(yview)\n\n return (x0, y0, xarr, yarr)", "def assign_closest_pairs(clouda, cloudb):\n clouda = np.transpose(clouda)\n cloudb = np.transpose(cloudb)\n nbrs = NearestNeighbors(n_neighbors=1).fit(cloudb)\n distances, indices = nbrs.kneighbors(clouda)\n indices = indices.flatten()\n newcloudb = cloudb[indices]\n newcloudb = np.transpose(newcloudb)\n return newcloudb", "def geodesic(self, point_a, point_b, **kwargs):", "def calculate_offset_pos_two_side_one_point_locked(b_struct, v_key, pt_1, pt_2, v1, v2, d_o_1, d_o_2):\n\n pt_1_new = add_vectors(pt_1, scale_vector(v1, -1.*d_o_1))\n pt_2_new = add_vectors(pt_2, scale_vector(v2, -1.*d_o_2))\n\n vec_x_new = normalize_vector(vector_from_points(pt_1_new, pt_2_new))\n x_ax = b_struct.vertex[v_key][\"gripping_plane\"][1]\n\n if not angle_vectors(x_ax, vec_x_new, deg=True) < 90:\n vec_x_new = scale_vector(vec_x_new, -1.)\n\n # transform gripping plane\n pt_o = b_struct.vertex[v_key][\"gripping_plane\"][0]\n y_ax = b_struct.vertex[v_key][\"gripping_plane\"][2]\n vec_z = cross_vectors(vec_x_new, y_ax)\n l_n = (pt_1_new, pt_2_new)\n pt_o_new = closest_point_on_line(pt_o, l_n)\n\n return pt_o_new, vec_x_new, y_ax, vec_z", "def points_intersect(points1, points2):", "def get_gradient_value_at_point(self, point):\n x, y, z = self.get_object_position_on_grid(point)\n #get the indices\n ix = int(x)\n iy = int(y)\n iz = int(z)\n #perform a trilinear interpolation from wikipedia.org\n #x\n if(x - ix > .5):\n x0 = ix\n x1 = ix + 1\n else:\n x0 = ix - 1\n x1 = ix\n #y\n if(y - iy > .5):\n y0 = iy\n y1 = iy + 1\n else:\n y0 = iy - 1\n y1 = iy\n #z\n if(z - iz > .5):\n z0 = iz\n z1 = iz + 1\n else:\n z0 = iz - 1\n z1 = iz \n \n #solve for xd, yd, zd\n xd = (abs(x - (ix + .5))) / (x1 - x0)\n yd = (abs(y - (iy + .5))) / (y1 - y0)\n zd = (abs(z - (iz + .5))) / (z1 - z0)\n\n #now the first set of linear interp\n c00 = self.C[x0, y0, z0]*(1 - xd) + self.C[x1, y0, z0]*xd\n c10 = 
self.C[x0, y1, z0]*(1 - xd) + self.C[x1, y1, z0]*xd\n c01 = self.C[x0, y0, z1]*(1 - xd) + self.C[x1, y0, z1]*xd\n c11 = self.C[x0, y1, z1]*(1 - xd) + self.C[x1, y1, z1]*xd\n\n #now the second set\n c0 = c00*(1-yd) + c10*yd\n c1 = c01*(1-yd) + c11*yd\n #finally the last set\n c = c0*(1-zd) + c1*zd\n #return the predicted value\n return c", "def find_intersection(self, train_data):\n data = sorted(train_data)\n left = data[0]\n right = data[-1]\n step = (right-left)/100.\n dis = []\n if right - left == 0:\n return np.array([0.])\n for i in np.arange(left, right, step):\n positive_pro = self.Gaussian(i,self.mu_positive,self.sigma)\n negetive_pro = self.Gaussian(i,self.mu_negetive,self.sigma)\n dis.append(positive_pro - negetive_pro)\n point = (left+right)/2.0\n h = []\n for i in range(len(dis)-1):\n if dis[i]*dis[i+1] < 0:\n point = (dis[i]+dis[i+1])/2.0\n h.append(point)\n return np.array([point])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes a vertical cross section slice of MRMS reflectivity data along the line defined by point1 & point2
def process_slice(base_path, slice_time, point1, point2): cross_sections = np.array([]) scans = fetch_scans(base_path, slice_time) # z = 33 grbs = get_grib_objs(scans, base_path, point1, point2) cross_sections, lats, lons = np.asarray(get_cross_neighbor(grbs[0], point1, point2)) for grb in grbs[1:]: x_sect, _, _ = get_cross_neighbor(grb, point1, point2) cross_sections = np.vstack((cross_sections, x_sect)) return (cross_sections, lats, lons)
[ "def run_mrms_xsect2(base_path, slice_time, point1, point2, wtlma_obj, wtlma_coords,\n show=False, save=False, outpath=None):\n print('Warning: Depricated')\n cross_data, lats, lons = process_slice(base_path, slice_time, point1, point2)\n plot_mrms_cross_section2(data=cross_data, lons=lons, lats=lats, wtlma_obj=wtlma_obj,\n wtlma_coords=wtlma_coords, show=show, save=save, outpath=outpath)", "def run_mrms_xsect(base_path, slice_time, point1, point2):\n #f_out = '/media/mnichol3/pmeyers1/MattNicholson/mrms/x_sect'\n\n cross_data, lats, lons = process_slice(base_path, slice_time, point1, point2)\n plot_mrms_cross_section(data=cross_data, lons=lons, lats=lats)", "def planeCrossingPoints(streamline, point, normal, verbose = 0):\n #calculate two orthonormal vectors in the plane where we are looking for\n #the intersection \n x1=perpZ(normal) #calculate a vector perpendicular to the normal\n x2=np.cross(normal, x1) # and another vector perpendicular to both\n x2/=l(x2)\n if verbose: print x1, x2\n crossingsX1X2 = []\n crossingIndices=getCrossings(streamline, point, normal)\n for i in crossingIndices:\n crossCoord= pointOnPlane(streamline.tracers[i], streamline.tracers[i+1], point, normal)\n crossingsX1X2.append([np.dot(x1,crossCoord), np.dot(x2,crossCoord)])\n\n\n return np.array(crossingsX1X2)", "def getCrossings(streamline, point = [0., 0., 0.], normal=[1.,0.,0.]):\n sides = np.dot( (streamline.tracers-point), normal)>0 #calculate the side of the plane each point falls on by projecting on the normal vector\n return (sides[:-1]^sides[1:]).nonzero()[0] #calculate the xor of the shifted sidetables (which is only true if one is shifted) and return the sum", "def _walk_line(p0, p1):\n # unpack the point tuples\n x0, y0 = p0\n x1, y1 = p1\n\n dx, dy = x1 - x0, y1 - y0\n yi = 1\n if dy < 0:\n yi = -1\n dy = -dy\n\n D = 2 * dy - dx\n x = np.arange(x0, x1 + 1, dtype=int).T\n y = np.zeros((len(x),), dtype=int)\n\n yy = y0\n for i in np.arange(len(x)):\n y[i] = yy\n if D > 0:\n yy = yy + yi\n D = D - 2 * dx\n\n D = D + 2 * dy\n\n # sort by major axis, and index the cells\n xI = np.argsort(x)\n x = x[xI]\n y = y[xI]\n\n return x, y", "def _reflect_points(points, p1 = (0,0), p2 = (1,0)):\n # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line\n points = np.array(points); p1 = np.array(p1); p2 = np.array(p2);\n if np.asarray(points).ndim == 1:\n return 2*(p1 + (p2-p1)*np.dot((p2-p1),(points-p1))/norm(p2-p1)**2) - points\n if np.asarray(points).ndim == 2:\n return np.array([2*(p1 + (p2-p1)*np.dot((p2-p1),(p-p1))/norm(p2-p1)**2) - p for p in points])", "def _line_from_two_points(pt1: np.array, pt2: np.array) -> np.array:\n numLine = pt1.shape[0]\n lines = np.zeros((numLine, 6))\n n = np.cross(pt1, pt2)\n n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)\n lines[:, 0:3] = n\n\n areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))\n areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))\n areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))\n planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1\n lines[:, 3] = planeIDs\n\n for i in range(numLine):\n uv = _xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])\n umax = uv[:, 0].max() + np.pi\n umin = uv[:, 0].min() + np.pi\n if umax - umin > np.pi:\n lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi\n else:\n lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi\n\n return lines", "def 
line_cross_direction(self, p0, p1, p2) :\n # print( 'line_cross_direction' )\n \n u = np.array(p0) - np.array(p1)\n v = np.array(p1) - np.array(p2)\n \n v = v[::-1]\n v[0] *= -1\n \n uv_mag_sqr = np.sum(u*v)\n \n return np.sign( uv_mag_sqr )", "def get_pairwise_intersections(lines: np.ndarray) -> np.ndarray:\n num_lines = lines.shape[0]\n intersections = np.empty(shape=[num_lines**2, 3], dtype=lines.dtype)\n \"\"\" YOUR CODE HERE \"\"\"\n intersections[:, :] = 0\n count = 0\n for i in range(num_lines):\n for k in range(num_lines):\n intersection = np.cross(lines[i], lines[k])\n if (intersection[2] != 0):\n intersections[count] = intersection/intersection[2]\n count += 1\n \"\"\" END YOUR CODE HERE \"\"\"\n return intersections", "def slice(self, x1, y1, x2, y2):\n\n slice = TwoDArray(x2 - x1, y2 - y1)\n for ix, x in enumerate(range(x1, x2)):\n for iy, y in enumerate(range(y1, y2)):\n slice.set(ix, iy, self.get(x, y))\n return slice", "def crosses(line1, line2):\n (x1,y1), (x2,y2) = line1\n (u1,v1), (u2,v2) = line2\n (a,b), (c,d) = (x2-x1, u1-u2), (y2-y1, v1-v2)\n e, f = u1-x1, v1-y1\n denom = float(a*d - b*c)\n if near(denom, 0):\n # parallel\n return False\n else:\n t = (e*d - b*f)/denom\n s = (a*f - e*c)/denom\n # When 0<=t<=1 and 0<=s<=1 the point of intersection occurs within the\n # line segments\n return 0<=t<=1 and 0<=s<=1", "def crosses(line1, line2):\n (x1,y1), (x2,y2) = line1\n (u1,v1), (u2,v2) = line2\n (a,b), (c,d) = (x2-x1, u1-u2), (y2-y1, v1-v2)\n e, f = u1-x1, v1-y1\n denom = float(a*d - b*c)\n if MathHelper.near(denom, 0):\n # parallel\n return False\n else:\n t = (e*d - b*f)/denom\n s = (a*f - e*c)/denom\n # When 0<=t<=1 and 0<=s<=1 the point of intersection occurs within the\n # line segments\n return 0<=t<=1 and 0<=s<=1", "def pv_slice_series_overlay():\n # Load PV info\n # path_filename_short = \"catalogs/m16_pv_vectors_2.reg\"; n_steps = 60\n path_filename_short = \"catalogs/m16_pv_vectors_3.reg\"; n_steps = 85\n path_info = pvdiagrams.linear_series_from_ds9(catalog.utils.search_for_file(path_filename_short), n_steps=n_steps)\n path_stub = os.path.split(path_filename_short)[-1].replace('.reg', '')\n pv_vel_lims = (8*kms, 35*kms)\n pv_vel_intervals = np.arange(16, 33, 2)\n\n # Load cubes\n img_stub = 'ciiAPEX'\n img_cube_obj = cube_utils.CubeData(get_map_filename(img_stub)).convert_to_kms()\n\n contour_stub = '12co32'\n contour_cube_obj = cube_utils.CubeData(get_map_filename(contour_stub)).convert_to_kms()\n\n # Reference image\n ref_vel_lims = (10*kms, 35*kms)\n ref_mom0 = img_cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_img = ref_mom0.to_value()\n ref_contour_mom0 = contour_cube_obj.data.spectral_slab(*ref_vel_lims).moment0()\n ref_contour = reproject_interp((ref_contour_mom0.to_value(), ref_contour_mom0.wcs), ref_mom0.wcs, ref_mom0.shape, return_footprint=False)\n\n # Colors\n ref_img_cmap = 'Greys_r'\n ref_contour_cmap = 'magma_r'\n pv_img_cmap = 'plasma'\n pv_img_contours_color = 'k'\n pv_contour_cmap = 'cool'\n reg_color = 'LimeGreen'\n\n \"\"\"\n go thru and look at run_plot_and_save_series and plot_path in pvdiagrams.py\n will need to iterate somewhat manually using cues from these two functions\n \"\"\"\n\n # Colorscale limits\n pv_vmaxes = {'ciiAPEX': 20, '12co32': 30, '13co32': 15}\n pv_levels = {'ciiAPEX': (3, 37, 4), '12co32': (5, 41, 5), '13co32': (1, 27, 2.5)}\n def _get_levels(line_stub):\n \"\"\"\n Get levels from the above dictionary. 
Return None if not present.\n \"\"\"\n if line_stub in pv_levels:\n return np.arange(*pv_levels[line_stub])\n else:\n return None\n\n\n img_cube = img_cube_obj.data.spectral_slab(*pv_vel_lims)\n contour_cube = contour_cube_obj.data.spectral_slab(*pv_vel_lims)\n\n # path_info is: center_coord, length_scale, path_generator\n path_generator = path_info[2]\n for i, p in enumerate(path_generator):\n\n # if i%3 != 0 and i < 44:\n # if i != 14:\n\n if os.path.isfile(f\"/home/ramsey/Pictures/2023-06-13/m16_pv_{path_stub}_{i:03d}.png\"):\n continue\n\n sl_img = pvextractor.extract_pv_slice(img_cube, p)\n sl_contour_raw = pvextractor.extract_pv_slice(contour_cube, p)\n sl_contour_raw.header['RESTFRQ'] = sl_img.header['RESTFRQ']\n sl_wcs = WCS(sl_img.header)\n sl_contour = reproject_interp((sl_contour_raw.data, sl_contour_raw.header), sl_wcs, shape_out=sl_img.data.shape, return_footprint=False)\n\n fig = plt.figure(figsize=(10, 9))\n gs = fig.add_gridspec(2, 1, height_ratios=[1, 1])\n\n # Reference image\n ax_ref = fig.add_subplot(gs[0,0], projection=ref_mom0.wcs)\n cbar_ax = ax_ref.inset_axes([1, 0, 0.05, 1])\n cbar_ax2 = ax_ref.inset_axes([0, 1, 1, 0.05])\n\n im = ax_ref.imshow(ref_img, origin='lower', cmap=ref_img_cmap, vmin=0)\n cbar = fig.colorbar(im, cax=cbar_ax, label=f\"{get_data_name(img_stub)} ({ref_mom0.unit.to_string('latex_inline')})\")\n ax_ref.text(0.05, 0.93, make_vel_stub(ref_vel_lims), color='k', ha='left', va='bottom', transform=ax_ref.transAxes)\n\n cs = ax_ref.contour(ref_contour, cmap=ref_contour_cmap, linewidths=0.5, alpha=0.6)\n cbar = fig.colorbar(cs, cax=cbar_ax2, location='top', spacing='proportional', label=f\"{get_data_name(contour_stub)} ({ref_contour_mom0.unit.to_string('latex_inline')})\")\n\n ax_ref.plot([c.ra.deg for c in p._coords], [c.dec.deg for c in p._coords], color=reg_color, linestyle='-', lw=1, transform=ax_ref.get_transform('world'))\n ax_ref.text(p._coords[0].ra.deg, p._coords[0].dec.deg + 4*u.arcsec.to(u.deg), 'Offset = 0\\\"', color=reg_color, fontsize=10, va='center', ha='right', transform=ax_ref.get_transform('world'))\n\n # Plot the footprint of the overlay if it would be visible at all\n overlay_nan_map = np.isnan(ref_contour)\n if np.any(overlay_nan_map):\n ax_ref.contour(overlay_nan_map.astype(float), levels=[0.5], colors='SlateGray', linestyles=':', linewidths=1)\n del overlay_nan_map\n\n # Beams\n beam_patch_kwargs = dict(alpha=0.9, hatch='////')\n beam_x, beam_y = 0.93, 0.1\n beam_ecs = [['white', 'grey'], [cs.cmap(cs.norm(cs.levels[j])) for j in [0, 2]]]\n for j, cube in enumerate((img_cube, contour_cube)):\n # Beam is known, plot it\n patch = cube.beam.ellipse_to_plot(*(ax_ref.transAxes + ax_ref.transData.inverted()).transform([beam_x, beam_y]), misc_utils.get_pixel_scale(ref_mom0.wcs))\n patch.set(**beam_patch_kwargs, facecolor=beam_ecs[j][0], edgecolor=beam_ecs[j][1])\n ax_ref.add_artist(patch)\n beam_x -= 0.03\n\n\n # PV diagram\n ax_pv = fig.add_subplot(gs[1,0], projection=sl_wcs)\n cbar_ax = ax_pv.inset_axes([1, 0, 0.05, 1])\n # Image\n im = ax_pv.imshow(sl_img.data, origin='lower', cmap=pv_img_cmap, vmin=0, vmax=pv_vmaxes.get(img_stub, None), aspect=(sl_img.data.shape[1]/(2.5*sl_img.data.shape[0])))\n cbar = fig.colorbar(im, cax=cbar_ax, label=img_cube.unit.to_string('latex_inline'))\n # Contours\n cs = ax_pv.contour(sl_img.data, colors=pv_img_contours_color, linewidths=1, linestyles=':', levels=_get_levels(img_stub))\n for l in cs.levels:\n cbar.ax.axhline(l, color=pv_img_contours_color)\n cs = ax_pv.contour(sl_contour, 
cmap=pv_contour_cmap, linewidths=1.5, levels=_get_levels(contour_stub), vmax=pv_vmaxes.get(contour_stub, None))\n for l in cs.levels:\n cbar.ax.axhline(l, color=cs.cmap(cs.norm(l)))\n\n # Plot horizontal gridlines\n xlim = ax_pv.get_xlim() # save existing xlim to reintroduce them later\n x_length = p._coords[0].separation(p._coords[1]).deg\n for v in pv_vel_intervals: # these mess up the xlim\n ax_pv.plot([0, x_length], [v*1e3]*2, color='grey', alpha=0.7, linestyle='--', transform=ax_pv.get_transform('world'))\n # Label observation names\n ax_pv.text(0.05, 0.95, \"Image: \" + cube_utils.cubenames[img_stub], fontsize=13, color=marcs_colors[1], va='top', ha='left', transform=ax_pv.transAxes)\n ax_pv.text(0.05, 0.90, \"Contour: \" + cube_utils.cubenames[contour_stub], fontsize=13, color='w', va='top', ha='left', transform=ax_pv.transAxes)\n # Put xlim back in\n ax_pv.set_xlim(xlim)\n\n\n ax_pv.coords[1].set_format_unit(u.km/u.s)\n ax_pv.coords[1].set_major_formatter('x.xx')\n ax_pv.coords[0].set_format_unit(u.arcsec)\n ax_pv.coords[0].set_major_formatter('x.xx')\n\n plt.tight_layout()\n\n # 2023-06-12,13\n savename = f\"/home/ramsey/Pictures/2023-06-13/m16_pv_{path_stub}_{i:03d}.png\"\n fig.savefig(savename, metadata=catalog.utils.create_png_metadata(title='pv movie',\n file=__file__, func='pv_slice_series_overlay'))\n\n plt.close(fig)", "def getLine(data_outVTK, orig, n1, n2):\n\n # function display \n print '---- DAEPy::getLine ----'\n\n # stop execution if data not consistent with the method\n if data_outVTK.GetCell(0).GetNumberOfPoints() < 4:\n raise ValueError(\"Error: cells in data from VTK output object are not 3D cells, be sure the data used here are 3D.\")\n\n # Double slicing\n print '--> 1st slicing...'\n dataSlice1 = getSlice(data_outVTK, orig, n1)\n print '--> 2nd slicing...'\n dataSlice2 = getSlice(dataSlice1, orig, n2)\n \n print ''\n return dataSlice2", "def getCrossings(x1,y1, x2,y2):\r\n x, y = _intersections(x1,y1, x2,y2)\r\n return x, y", "def line_sample2d(x,y,z,x1,y1):\n from scipy.interpolate import RectBivariateSpline as rbs\n # Extract the values along the line, using cubic interpolation\n f = rbs(x,y,z.T)\n return f.ev(x1,y1)\n #return scipy.ndimage.map_coordinates(z, np.vstack((y,x)))", "def render_visible(V):\n\n # make V into list sorted by slope: O(nlogn)\n V = sorted(V, key=lambda l: l.m)\n X = visible_intersections(V)\n\n # add point beyond left end point to have a support point for the line\n # with smallest slope\n X = [X[0]-5] + X\n\n # Calculate the corresponding Y values:\n Y = [ l.y(x) for l,x in zip(V,X)]\n\n # and now a support point for the lines with greatest slope:\n X.append( X[-1]+5 )\n Y.append( V[-1].y(X[-1]+5) )\n return X,Y", "def calculate_vertical_components(self, points, prev_horizontal, prev_vertical, px_size):\n\n if prev_horizontal is None:\n # It's a new species, so it starts at the bottom\n points[1] = 0\n points[3] = 0\n else:\n points[1] = prev_horizontal['quad'].points[7]\n points[3] = prev_horizontal['quad'].points[5]\n\n if prev_vertical is None:\n # It's the first on generation, so it starts at the bottom\n points[5] = 0\n points[7] = px_size\n else:\n points[5] = prev_vertical['quad'].points[7]\n points[7] = prev_vertical['quad'].points[7] + px_size", "def Campello_VXYZ(leaf2clusters_1, leaf2clusters_2, \\\n membership_calc = lambda common_levels: common_levels, \\\n cotau = max, tau2 = min):\n leaf2clusters_1, leaf2clusters_2, leaf2ix = trees_prefiltering(leaf2clusters_1, leaf2clusters_2)\n \n M_dict = 
M_dictionary_l2c(leaf2clusters_1, membership_calc)\n M1 = M_dictionary2matrix(M_dict, leaf2ix) \n M_dict = M_dictionary_l2c(leaf2clusters_2, membership_calc)\n M2 = M_dictionary2matrix(M_dict, leaf2ix)\n\n V,X,Y,Z = Campello_membership_VXYZ(M1, M2, cotau, tau2)\n\n #import numpy\n #print \"[Campello_VXYZ] M1:\\n\",numpy.array(M1)\n #print \"[Campello_VXYZ] M2:\\n\",numpy.array(M2)\n #print \"[Campello_VXYZ] V:\\n\",numpy.array(V)\n #print \"[Campello_VXYZ] X:\\n\",numpy.array(X)\n #print \"[Campello_VXYZ] Y:\\n\",numpy.array(Y)\n #print \"[Campello_VXYZ] Z:\\n\",numpy.array(Z)\n\n return (V,X,Y,Z), leaf2ix", "def line_slice(\n start_pt: Point,\n stop_pt: Point,\n line: LineString,\n) -> LineString:\n\n if not line or get_type(line) != \"LineString\":\n raise Exception(\"line must be a LineString\")\n\n coords = get_coords(line)\n start_vertex = nearest_point_on_line(line, start_pt)\n stop_vertex = nearest_point_on_line(line, stop_pt)\n\n if start_vertex[\"properties\"][\"index\"] <= stop_vertex[\"properties\"][\"index\"]:\n ends = [start_vertex, stop_vertex]\n else:\n ends = [stop_vertex, start_vertex]\n\n clip_coords = [get_coord(ends[0])]\n clip_coords.extend(\n coords[ends[0][\"properties\"][\"index\"] + 1 : ends[1][\"properties\"][\"index\"] + 1]\n )\n clip_coords.append(get_coord(ends[1]))\n\n return Feature(geometry=LineString(clip_coords), properties=line[\"properties\"].copy())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
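fetch_scans, get_grib_objs and get_cross_neighbor are project helpers that are not included in this row, so process_slice cannot be run as shown. The sketch below only illustrates the accumulation pattern it relies on (stacking one cross-section row per scan into a 2-D array with np.vstack) using made-up data; every name in it is hypothetical. Note, in passing, that the initial cross_sections = np.array([]) assignment above is overwritten before it is ever used.

import numpy as np

# Hypothetical stand-in for the per-scan cross sections that get_cross_neighbor
# would return; each entry is one 1-D row of values along the slice.
rows = [np.arange(5, dtype=float) * k for k in range(1, 4)]

stacked = np.asarray(rows[0])
for row in rows[1:]:
    stacked = np.vstack((stacked, row))

print(stacked.shape)  # (3, 5): one row per scan, one column per point on the slice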
Filters the WTLMA dataframe to only include events that are within a certain distance of the line that defines the MRMS cross-section
def filter_by_dist(lma_df, dist, start_point, end_point, num_pts):
    if (not isinstance(dist, int)):
        raise TypeError('dist must be of type int')

    s_lat = start_point[0]
    s_lon = start_point[1]
    e_lat = end_point[0]
    e_lon = end_point[1]

    idxs = []
    coords = []
    alts = lma_df['alt'].tolist()
    xsect_az = int(calc_bearing(start_point, end_point))

    for pt1 in calc_geod_pts(start_point, end_point, num_pts=num_pts):
        for idx, pt2 in enumerate(list(zip(lma_df['lat'].tolist(), lma_df['lon'].tolist()))):
            # reverse the order of pt1 since the function returns the coordinates
            # as (lon, lat) and calc_dist wants (lat, lon)
            curr_az = int(calc_bearing((pt1[1], pt1[0]), pt2))
            if ((calc_dist((pt1[1], pt1[0]), pt2, units='m') <= dist) and (idx not in idxs) and (alts[idx] < 19000)):
                idxs.append(idx)
                coords.append([pt1[1], pt1[0]])

    # Remove repeat indexes from list
    # MUCH faster to use a set than another conditional inside the nested loops
    #idxs = list(set(idxs))

    subs_df = lma_df.iloc[idxs]

    return subs_df, coords
[ "def low_pass_filter_anomaly_detection(df,\n column_name,\n number_of_stdevs_away_from_mean):\n #60-day rolling average\n df[column_name+'_Rolling_Average']=df[column_name].rolling(window=60, center=True).mean()\n #60-day standard deviation\n df[column_name+'_Rolling_StDev']=df[column_name].rolling(window=60, center=True).std()\n #Detect anomalies by determining how far away from the mean (in terms of standard deviation)\n #each data point is\n df[column_name+'_Low_Pass_Filter_Anomaly']=(abs(df[column_name]-df[\n column_name+'_Rolling_Average'])>(\n number_of_stdevs_away_from_mean*df[\n column_name+'_Rolling_StDev']))\n return df", "def mass_conservation_filter(data, qc_thresh=1e-6):\n if \"mass_diff\" not in data.columns:\n mass_columns(data)\n mass_filter = ((data[\"mass_diff\"] == 0) &\n (data[\"NC_TAU_out_v2\"] > 0) &\n (data[\"NR_TAU_out_v2\"] > 0) &\n (data[\"QC_TAU_out_v2\"] > 0) &\n (data[\"QR_TAU_out_v2\"] > 0) &\n (data[\"QC_TAU_in_v2\"] >= qc_thresh))\n return data.loc[mass_filter].reset_index()", "def __filter_rows_from_time_speed_distance(self, rows, stopping_time, speed_range, distance_range):\n\n find_it = False\n start_time = 0\n end_time = 0\n canditate_row = None\n seconds_in_minute = 60\n filter_rows = []\n\n min_speed, max_speed = speed_range\n\n max_speed += 1 #Increment of 1 because range doesn't consider last value\n\n last_row = len(rows)\n counter = 1\n\n for row in rows:\n\n latitude = row[0]\n longitude = row[1]\n time = row[2]\n date = row[3]\n hour = row[4]\n speed = row[6]\n \n format_date = datetime.strptime(time, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n if speed in range(min_speed, max_speed) and find_it == False:\n find_it = True\n elif speed not in range(min_speed, max_speed) and find_it == True:\n find_it = False\n\n if find_it == True:\n if start_time == 0:\n start_time = format_date.timestamp() #Convert time in unix epoch\n canditate_row = [latitude, longitude, date, hour] #fields correspond to begin of stopover\n\n end_time = format_date.timestamp()\n seconds = end_time - start_time\n\n if seconds >= stopping_time:\n minutes = seconds / seconds_in_minute\n seconds = seconds - (minutes * seconds_in_minute)\n stopping_duration = f\"{minutes}:{seconds}\"\n del canditate_row[4:] #remove all element affter the first four \n canditate_row.extend([latitude, longitude, date, hour, round(minutes)]) #fields correspond to the end of stopover\n else:\n start_time = 0\n end_time = 0\n\n if len(canditate_row) > 4:\n filter_rows.append(canditate_row.copy())\n canditate_row.clear()\n\n counter += 1\n\n if last_row == counter:\n if len(canditate_row) > 4:\n filter_rows.append(canditate_row.copy())\n canditate_row.clear()\n\n\n if distance_range is not None:\n pass #TO-DO Define how to implement the distance filter\n\n return filter_rows", "def set_road_within_distance(feature, distance):\n geo = ee.Geometry.Point([feature.get('longitude'), feature.get('latitude')])\n disk = geo.buffer(distance)\n close_roads = roads.filterBounds(disk)\n is_close_road = close_roads.size().gt(0)\n return feature.set({f'road_within_{distance}m': is_close_road})", "def remove_atlas_lines_range(self, wavelength, tolerance=10):\n\n for atlas_line in self.atlas_lines:\n if abs(atlas_line.wavelength - wavelength) < tolerance:\n self.atlas_lines.remove(atlas_line)", "def mask_line(wl, wl_ref, mask_width):\n wl_min = wl_ref - mask_width / 2.0\n wl_max = wl_ref + mask_width / 2.0\n mask = (wl < wl_min) | (wl > wl_max)\n\n return mask", "def calc_DM_time_thresh(self):\n peak_DM = self.peak_DM\n # print 
\"peakDM:\", self.peak_DM\n # print \"peak_SNR\", self.peak_SNR\n # SNR limit for regular (non-clipped) SPEGs\n SNR_limit = log(self.peak_SNR) / log(2) * 0.4 + 4.5\n\n # SNR = 5 is the base line\n lower_SNR_limit = SNR_limit - 5\n upper_SNR_limit = SNR_limit - 5\n\n cur_cluster_DF = spe_DF_clean.loc[(spe_DF_clean['DM'] >= self.min_DM) & (spe_DF_clean['DM'] <= self.max_DM) &\n (spe_DF_clean['time'] >= self.min_time) &\n (spe_DF_clean['time'] <= self.max_time), ]\n\n # if there are more than one single-pulse event within the same DM channel, use the brightest one only\n cur_cluster_DF = cur_cluster_DF.groupby('DM', group_keys=False).apply(lambda x: x.loc[x.SNR.idxmax()])\n\n cur_peak_left = cur_cluster_DF.loc[cur_cluster_DF['DM'] < self.peak_DM, ]\n cur_peak_right = cur_cluster_DF.loc[cur_cluster_DF['DM'] > self.peak_DM, ]\n # print curPeakLeft.shape, curPeakRight.shape\n\n # SNR limit for clipped SPEGs, the expected span should be shifted further towards the clipped side,\n # and less on the other side\n if cur_peak_left.shape[0] == 0:\n lower_SNR_limit = lower_SNR_limit - log(self.peak_SNR) / log(2) * 0.05\n upper_SNR_limit = upper_SNR_limit + log(self.peak_SNR) / log(2) * 0.1\n\n elif cur_peak_right.shape[0] == 0:\n lower_SNR_limit = lower_SNR_limit + log(self.peak_SNR) / log(2) * 0.1\n upper_SNR_limit = upper_SNR_limit - log(self.peak_SNR) / log(2) * 0.05\n\n # move 5 times of the DM spacing at the peak to save computation time\n DM_spacing = self.peak_DM_spacing\n\n # sampling time = time / sample\n sampling_time = self.peak_time / self.peak_sampling * 1.0 # of the center\n\n # width (in milliseconds) of the peak single-pulse event (width = sampling time * downfact)\n peak_width = sampling_time * 1000 * self.peak_downfact # (to milliseconds)\n\n peak_time = self.peak_time\n peak_SNR = self.peak_SNR - 5\n\n # get the DM (upper) bound and time (lower) bound of current SPEG\n upper_idx = 0\n\n while True:\n # check every 5 DM channels\n delta_DM = 5 * DM_spacing * (upper_idx + 1)\n cur_DM = peak_DM + delta_DM\n # calculate expected SNR\n exp_SNR = peak_SNR * constant * ((0.00691 * delta_DM * dnu / (peak_width * nu ** 3)) ** (-1)) * \\\n erf(0.00691 * delta_DM * dnu / (peak_width * nu ** 3))\n upper_idx += 1 # the minimum value is 1\n if exp_SNR < upper_SNR_limit or cur_DM > DMs[-1]:\n break\n\n upper_DM_bound = cur_DM\n dt_minus = sampling_time * upper_idx * 5\n\n # get the DM (lower) bound and time (upper) bound of current SPEG\n lower_idx = 0 # use this index to calculate time\n while True:\n delta_DM = 5 * DM_spacing * (lower_idx + 1)\n cur_DM = peak_DM - delta_DM\n exp_SNR = peak_SNR * constant * ((0.00691 * delta_DM * dnu / (peak_width * nu ** 3)) ** (-1)) * \\\n erf(0.00691 * delta_DM * dnu / (peak_width * nu ** 3))\n lower_idx += 1\n if exp_SNR < lower_SNR_limit or cur_DM < 0:\n break\n lower_DM_bound = cur_DM\n dt_plus = sampling_time * lower_idx * 5\n\n upper_time = peak_time + max(peak_width / 2000, dt_plus)\n lower_time = peak_time - max(peak_width / 2000, dt_minus)\n\n DM_time_span = [lower_DM_bound, upper_DM_bound, lower_time, upper_time]\n\n return DM_time_span", "def filter_df_on_case_length(df, case_id_glue=\"case:concept:name\", min_trace_length=3, max_trace_length=50):\n df = df.groupby(case_id_glue).filter(lambda x: (len(x)>= min_trace_length and len(x)<=max_trace_length))\n return df", "def _filter_temporal_dataframe(self, dataframe: GeoDataFrame) -> GeoDataFrame:\n unique_timestamps = dataframe[self.config.timestamp_column].unique()\n filtered_timestamps = 
np.sort(unique_timestamps)[self.times]\n filtered_rows = dataframe[self.config.timestamp_column].isin(filtered_timestamps)\n return dataframe[filtered_rows]", "def robust_remove_travel_time_outliers(data):\n # add km/h and seconds/meter as columns\n data[\"km_h\"] = data[\"osrm_distance\"] / data[\"inzet_rijtijd\"] * 3.6\n data[\"s_m\"] = data[\"inzet_rijtijd\"] / data[\"osrm_distance\"]\n\n # calculate tresholds\n speed_treshold = data[[\"km_h\", \"s_m\"]].describe() \\\n .apply(lambda x: x[\"75%\"] + 1.5*(x[\"75%\"]-x[\"25%\"]))\n max_speed = speed_treshold.loc[\"km_h\"]\n min_speed = 1 / speed_treshold.loc[\"s_m\"] * 3.6\n\n # filter data and return\n df_filtered = data[(data[\"km_h\"] > min_speed) & (data[\"km_h\"] < max_speed)].copy()\n df_filtered.drop([\"km_h\", \"s_m\"], axis=1, inplace=True)\n return df_filtered, min_speed, max_speed", "def test_filter_by_distance(self):\n\n threshold = 1\n points = random.uniform(-1,1,size=(100,6))\n points = mathtools.filter_by_distance(points, threshold)\n \n for point in points:\n dif = points[:,0:3]-point[0:3]\n euclidean_distance = sum(dif*dif,1)\n euclidean_distance = euclidean_distance[euclidean_distance>=1e-6] # excluding the evaluated point from list\n nearer_point = argmin(euclidean_distance)\n self.assertTrue(min(euclidean_distance)>=threshold**2,\n msg = \"The points: \"+str(point)+\" and \"+str(points[nearer_point])+\" are too close\"\n )", "def filter_coords(df):\n lon_l, lon_r = -74.1, -73.7\n lat_l, lat_r = 40.65, 40.85\n\n for c in filter(lambda c: c.endswith('_Lon'), df.columns):\n df = df[(df[c] <= lon_r) & (df[c] >= lon_l)]\n\n for c in filter(lambda c: c.endswith('_Lat'), df.columns):\n df = df[(df[c] <= lat_r) & (df[c] >= lat_l)]\n\n return df", "def trim_to_track_timespan(ari_data, track):\n start_time = track[:,GPX_COL_TIME].min()\n end_time = track[:,GPX_COL_TIME].max()\n keep_rows = np.logical_and(ari_data[:,1] > start_time, ari_data[:,1] < end_time)\n new_data = ari_data[keep_rows, :].copy()\n return new_data", "def distance_filter(coord1,coord2,max_distance):\n\n if haversine(coord1,coord2,miles = True) > max_distance:\n return False\n else:\n return True", "def gdf_clip(gdf,clip_geom):\n return gdf.loc[gdf['geometry'].apply(lambda x: x.within(clip_geom))].reset_index(drop=True)", "def anomaly_filter(self, df):\n # calculate forward means\n df['30_PERIOD_FWD_MEAN'] = df[self.endog].rolling(30, min_periods=0).mean().tolist()\n df['30_PERIOD_FWD_MEAN'].fillna(inplace=True, method='bfill')\n df['30_PERIOD_FWD_MEAN'][1:] = df['30_PERIOD_FWD_MEAN'][:-1]\n\n # calculate reverse means\n reverse_mean = df[self.endog].sort_index(ascending=False).rolling(30, min_periods=0).mean().tolist()\n reverse_mean.reverse()\n df['30_PERIOD_BWD_MEAN'] = reverse_mean\n df['30_PERIOD_BWD_MEAN'].fillna(inplace=True, method='ffill')\n df['30_PERIOD_BWD_MEAN'][:-1] = df['30_PERIOD_BWD_MEAN'][1:]\n\n\n df['FWD_STD'] = (df[self.endog] - df['30_PERIOD_FWD_MEAN'])**2\n df['FWD_STD'] = np.sqrt(df['FWD_STD'].rolling(30, min_periods=0).mean())\n df['FWD_STD'].fillna(inplace=True, method='bfill')\n df['FWD_STD'][1:] = df['FWD_STD'][:-1]\n\n df['BWD_STD'] = (df[self.endog] - df['30_PERIOD_BWD_MEAN'])**2\n bkwd_std = np.sqrt(df['BWD_STD'].sort_index(ascending=False).rolling(30, min_periods=0).mean()).tolist()\n bkwd_std.reverse()\n df['BWD_STD'] = bkwd_std\n df['BWD_STD'].fillna(inplace=True, method='bfill')\n df['BWD_STD'][1:] = df['BWD_STD'][:-1]\n\n df['FILTER_VARIANCE'] = np.where(df['FWD_STD'] < df['BWD_STD'], df['BWD_STD'], df['FWD_STD'])\n\n 
df['HIGH_FILTER'] = df['30_PERIOD_FWD_MEAN']+df['FILTER_VARIANCE']*3\n df['LOW_FILTER'] = df['30_PERIOD_FWD_MEAN']-df['FILTER_VARIANCE']*3\n\n df[self.endog] = np.where(df[self.endog] > df['HIGH_FILTER'], df['HIGH_FILTER'], df[self.endog])\n df[self.endog] = np.where(df[self.endog] < df['LOW_FILTER'], df['LOW_FILTER'], df[self.endog])\n\n cleaned_timeseries = df[[self.date_header, self.endog]]\n\n return cleaned_timeseries", "def calculate_trajectory_cutoff(trajectories, window):\n ma = np.mean(rolling_window(trajectories, window), -1)\n ma_mean = np.mean(ma, axis=1)\n ma_std = np.std(ma, axis=1)\n cutoff = ma_mean + ma_std\n\n return cutoff.reshape(-1, 1)", "def extract_range(self, wrange):\n wavelengths = self[WAVELENGTH_COLUMN].quantity\n\n wmin = wrange[0]\n wmax = wrange[1]\n\n # convert wavelenghts in line list to whatever\n # units the wavelength range is expressed in.\n new_wavelengths = wavelengths.to(wmin.unit)\n\n # 'indices' points to rows with wavelength values\n # that lie outside the wavelength range.\n indices_to_remove = np.where((new_wavelengths.value < wmin.value) |\n (new_wavelengths.value > wmax.value))\n\n return self._remove_lines(indices_to_remove)", "def _selecting_incoming_lines(rab_multipolygons, edges, angle_threshold=0):\n # selecting the lines that are touching but not covered by\n if GPD_10:\n touching = gpd.sjoin(edges, rab_multipolygons, predicate=\"touches\")\n edges_idx, rabs_idx = rab_multipolygons.sindex.query_bulk(\n edges.geometry, predicate=\"covered_by\"\n )\n else:\n touching = gpd.sjoin(edges, rab_multipolygons, op=\"touches\")\n edges_idx, rabs_idx = rab_multipolygons.sindex.query_bulk(\n edges.geometry, op=\"covered_by\"\n )\n idx_drop = edges.index.take(edges_idx)\n touching_idx = touching.index\n ls = list(set(touching_idx) - set(idx_drop))\n\n incoming = touching.loc[ls]\n\n # figuring out which ends of incoming edges need to be connected to the center_pt\n incoming[\"first_pt\"] = incoming.geometry.apply(lambda x: Point(x.coords[0]))\n incoming[\"dist_first_pt\"] = incoming.center_pt.distance(incoming.first_pt)\n incoming[\"last_pt\"] = incoming.geometry.apply(lambda x: Point(x.coords[-1]))\n incoming[\"dist_last_pt\"] = incoming.center_pt.distance(incoming.last_pt)\n lines = []\n for _i, row in incoming.iterrows():\n if row.dist_first_pt < row.dist_last_pt:\n lines.append(LineString([row.first_pt, row.center_pt]))\n else:\n lines.append(LineString([row.last_pt, row.center_pt]))\n incoming[\"line\"] = gpd.GeoSeries(lines, index=incoming.index, crs=edges.crs)\n\n # checking if there are more than one incoming lines arriving to the same point\n # which would create several new lines\n incoming[\"line_wkt\"] = incoming.line.to_wkt()\n grouped_lines = incoming.groupby([\"line_wkt\"])[\"line_wkt\"]\n count_s = grouped_lines.count()\n\n # separating the incoming roads that come on their own to those that come in groups\n filter_count_one = pd.DataFrame(count_s[count_s == 1])\n filter_count_many = pd.DataFrame(count_s[count_s > 1])\n incoming_ones = pd.merge(\n incoming, filter_count_one, left_on=\"line_wkt\", right_index=True, how=\"inner\"\n )\n incoming_many = pd.merge(\n incoming, filter_count_many, left_on=\"line_wkt\", right_index=True, how=\"inner\"\n )\n incoming_many_reduced = _coins_filtering_many_incoming(\n incoming_many, angle_threshold=angle_threshold\n )\n\n incoming_all = gpd.GeoDataFrame(\n pd.concat([incoming_ones, incoming_many_reduced]), crs=edges.crs\n )\n\n return incoming_all, idx_drop", "def slice_trim_traces(self):\n t1 = 
self.stream[0].stats.starttime\n t2 = t1 + self.analysis_interval # ppc\n st = self.stream.slice(t1, t2)\n t3 = t2 - self.analysis_overlap\n self.stream.trim(starttime=t3)\n return st" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
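filter_by_dist leans on project helpers (calc_geod_pts, calc_dist, calc_bearing) that are not part of this row, so it is not runnable on its own. Below is a rough, self-contained sketch of the same idea, namely keeping every LMA event that falls within dist metres of at least one sample point along the cross-section line, with a plain haversine distance and straight lat/lon interpolation standing in for those helpers; it is an illustration, not the project's implementation.

import math

import numpy as np
import pandas as pd

def _haversine_m(p1, p2):
    # p1, p2 are (lat, lon) in degrees; returns great-circle distance in metres
    lat1, lon1, lat2, lon2 = map(math.radians, (p1[0], p1[1], p2[0], p2[1]))
    a = math.sin((lat2 - lat1) / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371000.0 * math.asin(math.sqrt(a))

def filter_events_near_line(df, dist_m, start_point, end_point, num_pts=50):
    # Sample points along a straight lat/lon interpolation of the line and keep
    # every event that lies within dist_m of at least one sample point.
    lats = np.linspace(start_point[0], end_point[0], num_pts)
    lons = np.linspace(start_point[1], end_point[1], num_pts)
    keep = []
    for idx, ev in df[['lat', 'lon']].iterrows():
        if any(_haversine_m((la, lo), (ev['lat'], ev['lon'])) <= dist_m
               for la, lo in zip(lats, lons)):
            keep.append(idx)
    return df.loc[keep]

events = pd.DataFrame({'lat': [35.01, 36.0], 'lon': [-101.49, -100.0], 'alt': [5000, 8000]})
near = filter_events_near_line(events, 3000, (35.0, -101.5), (35.5, -100.9))
# keeps only the first event; the second lies tens of kilometres from the line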
Calculates the bearing between two points
def calc_bearing(point1, point2):
    lat1 = math.radians(point1[0])
    lat2 = math.radians(point2[0])

    diffLong = math.radians(point2[1] - point1[1])

    x = math.sin(diffLong) * math.cos(lat2)
    y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(diffLong))

    initial_bearing = math.atan2(x, y)

    # Now we have the initial bearing but math.atan2 return values
    # from -180° to + 180° which is not what we want for a compass bearing
    # The solution is to normalize the initial bearing as shown below
    initial_bearing = math.degrees(initial_bearing)
    bearing = (initial_bearing + 360) % 360

    return bearing
[ "def get_bearing(aLocation1, aLocation2): \n off_x = aLocation2.lon - aLocation1.lon\n off_y = aLocation2.lat - aLocation1.lat\n bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795\n if bearing < 0:\n bearing += 360.00\n return bearing", "def get_bearing(self, aLocation1, aLocation2):\n off_x = aLocation2.lon - aLocation1.lon\n off_y = aLocation2.lat - aLocation1.lat\n bearing = 90.00 + math.atan2(-off_y, off_x) * 57.2957795\n if bearing < 0:\n bearing += 360.00\n return bearing;", "def get_bearing(p1, p2):\r\n lat1, long1 = p1.lat, p1.long\r\n lat2, long2 = p2.lat, p2.long\r\n\r\n brng = Geodesic.WGS84.Inverse(lat1, long1, lat2, long2)['azi1']\r\n return brng", "def _calculate_bearing(self, lat_sat, long_sat, lat_drone, long_drone):\n\n lat_sat = math.radians(lat_sat)\n lat_drone = math.radians(lat_drone)\n long_sat = math.radians(long_sat)\n long_drone = math.radians(long_drone)\n delta_long = long_drone - long_sat\n delta_lat = lat_drone - lat_sat\n y = math.sin(delta_long) * math.cos(lat_drone)\n x = math.cos(lat_sat) * math.sin(lat_drone) - \\\n math.sin(lat_sat) * math.cos(lat_drone) * math.cos(delta_long)\n bearing_initial = math.atan2(y, x)\n\n return bearing_initial", "def calculateRangeBearingFromPosition(easting1, northing1, easting2, northing2):\n\n dx = easting2-easting1\n dy = northing2-northing1\n\n bearing = 90 - (180/math.pi)*math.atan2(northing2-northing1, easting2-easting1)\n return (math.sqrt((dx*dx)+(dy*dy)), bearing)", "def getBearingTo(self, p, o):\n relPoint = Point2D(p.x - self.x, p.y - self.y)\n absDir = relPoint.getDirection()\n return normalizeAngle(absDir - o)", "def simplebearing(self, mvalues):\n [mx, my] = mvalues\n bearing = (180.0/pi) * atan2(my, mx)\n return bearing", "def angle(point1, point2):\n ax = ux(point1)\n ay = uy(point1)\n bx = ux(point2)\n by = uy(point2)\n return 180.0 * math.atan2(by-ay, bx-ax) / math.pi", "def angle(self, other) -> float:\n return acos(self.angle(other))", "def bearing_rads(self):\n return (np.pi / 180.0 ) * (90.0 - self.bearing)", "def get_angle_between_azimuths(azimuth1, azimuth2) -> float:\n tmp = abs(azimuth1 - azimuth2) % 360\n return 360 - tmp if tmp > 180 else tmp", "def bures_angle(A, B):\n if A.isket or A.isbra:\n A = A.proj()\n if B.isket or B.isbra:\n B = B.proj()\n if A.dims != B.dims:\n raise TypeError('A and B do not have same dimensions.')\n return np.arccos(fidelity(A, B))", "def angle(v1, v2):\n\n import numpy as np\n\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n\n if np.linalg.norm(v1_u) == 0. 
or np.linalg.norm(v2_u) == 0.:\n return 0.0\n else:\n return np.real(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))", "def getAngle(r1, r2):\n\tray1=np.array(r1)\n\tray2=np.array(r2)\n\tinters,p=col.linesIntersect(ray1,ray2, getPoint=True)\n\tif inters:\n\t\tpts=[r1[0],r1[1], r2[0], r2[1]]\n\t\tif not tuple(p) in pts: raise Exception('lines are intersecting and not incident, angle not defined')\n\tp=np.array(p)\n\tpoints=[]\n\tfor ray in ray1,ray2:\n\t\tfurthestDist=-1\n\t\tfor point in ray:\n\t\t\tdist=getDistance(p,point)\n\t\t\tif dist>furthestDist:\n\t\t\t\tfurthest=point\n\t\t\t\tfurthestDist=dist\n\t\tpoints.append(point)\n\tp1=np.array(points[0])-p\n\tp2=np.array(points[1])-p\n\tth=acos(np.dot(p1,p2)/(getDistance(p,p1)*getDistance(p,p2)))\n\tif th>pi:\n\t\tif th>2*pi: raise Exception('something is wrong with getAngle')\n\t\tth=pi-th\n\treturn th", "def calc_ang1(v1,v2):\n ang = np.arccos( np.abs(np.dot(v1,v2))/(np.linalg.norm(v1)*np.linalg.norm(v2)) )\n return ang * 180./np.pi", "def dist_between_two_balls(ball_1, ball_2):\r\n # create numpy array with keypoint positions\r\n arr = np.array([ball_1.pt, ball_2.pt])\r\n # scale array to mm\r\n arr = arr * 40 / 1280\r\n # return distance, calculated by pythagoras\r\n return np.sqrt(np.sum((arr[0] - arr[1]) ** 2))", "def angleBetween(self, coord1, coord2):\n # Call A = coord1, B = coord2, C = self\n # Then we are looking for the angle ACB.\n # If we treat each coord as a (x,y,z) vector, then we can use the following spherical\n # trig identities:\n #\n # (A x C) . B = sina sinb sinC\n # (A x C) . (B x C) = sina sinb cosC\n #\n # Then we can just use atan2 to find C, and atan2 automatically gets the sign right.\n # And we only need 1 trig call, assuming that x,y,z are already set up, which is often\n # the case.\n\n self._set_aux()\n coord1._set_aux()\n coord2._set_aux()\n\n AxC = ( coord1._y * self._z - coord1._z * self._y ,\n coord1._z * self._x - coord1._x * self._z ,\n coord1._x * self._y - coord1._y * self._x )\n BxC = ( coord2._y * self._z - coord2._z * self._y ,\n coord2._z * self._x - coord2._x * self._z ,\n coord2._x * self._y - coord2._y * self._x )\n sinC = AxC[0] * coord2._x + AxC[1] * coord2._y + AxC[2] * coord2._z\n cosC = AxC[0] * BxC[0] + AxC[1] * BxC[1] + AxC[2] * BxC[2]\n import math\n C = math.atan2(sinC, cosC)\n return C * galsim.radians", "def compute_angle(v1,v2):\n length_product = norm(v1) * norm(v2)\n cosine = dot(v1,v2) / length_product\n angle = degrees( acos( cosine ) )\n return angle", "def ang_sep(l1,b1,l2,b2):\n sin_theta = np.sqrt((np.cos(b2 * _d2r) * np.sin((l1 - l2) * _d2r)) ** 2 +\n (np.cos(b1 * _d2r) * np.sin(b2 * _d2r) - \n np.sin(b1 * _d2r) * np.cos(b2 * _d2r) * np.cos((l1 - l2) * _d2r)) ** 2)\n cos_theta = (np.cos(b1 * _d2r) * np.cos(b2 * _d2r) *\n np.cos((l1 - l2) * _d2r) +\n np.sin(b1 * _d2r) * np.sin(b2 * _d2r))\n tan_theta = sin_theta/cos_theta\n return np.arctan2(sin_theta,cos_theta) / _d2r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
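A quick sanity check of calc_bearing, assuming only that math is imported and the function above is in scope: a move due north should give 0 degrees and due east 90 degrees, and the result is always normalized to [0, 360).

import math  # required by calc_bearing itself

print(calc_bearing((0.0, 0.0), (1.0, 0.0)))  # 0.0  -> due north
print(calc_bearing((0.0, 0.0), (0.0, 1.0)))  # 90.0 -> due east

# A north-eastward move lands somewhere between 0 and 90 degrees
print(calc_bearing((35.0, -101.5), (35.5, -100.9)))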
Calculates the coordinates of num evenly spaced points along the line defined by point1 and point2
def calc_coords(point1, point2, num):
    xs = [point1[1], point2[1]]
    ys = [point1[0], point2[0]]

    lons = np.linspace(min(xs), max(xs), num)
    lats = np.linspace(min(ys), max(ys), num)

    return (lats, lons)
[ "def pixel_points(self,y1, y2, line):\r\n if line is None:\r\n return None\r\n slope, intercept = line\r\n x1 = int((y1 - intercept)/slope)\r\n x2 = int((y2 - intercept)/slope)\r\n y1 = int(y1)\r\n y2 = int(y2)\r\n return ((x1, y1), (x2, y2))", "def pixel_points(self, y1, y2, line):\n if line is None:\n return None\n slope, intercept = line\n x1 = int((y1 - intercept)/slope)\n x2 = int((y2 - intercept)/slope)\n y1 = int(y1)\n y2 = int(y2)\n return ((x1, y1), (x2, y2))", "def _line_from_two_points(pt1: np.array, pt2: np.array) -> np.array:\n numLine = pt1.shape[0]\n lines = np.zeros((numLine, 6))\n n = np.cross(pt1, pt2)\n n = n / (matlib.repmat(np.sqrt(np.sum(n ** 2, 1, keepdims=True)), 1, 3) + 1e-9)\n lines[:, 0:3] = n\n\n areaXY = np.abs(np.sum(n * matlib.repmat([0, 0, 1], numLine, 1), 1, keepdims=True))\n areaYZ = np.abs(np.sum(n * matlib.repmat([1, 0, 0], numLine, 1), 1, keepdims=True))\n areaZX = np.abs(np.sum(n * matlib.repmat([0, 1, 0], numLine, 1), 1, keepdims=True))\n planeIDs = np.argmax(np.hstack([areaXY, areaYZ, areaZX]), axis=1) + 1\n lines[:, 3] = planeIDs\n\n for i in range(numLine):\n uv = _xyz2uvN(np.vstack([pt1[i, :], pt2[i, :]]), lines[i, 3])\n umax = uv[:, 0].max() + np.pi\n umin = uv[:, 0].min() + np.pi\n if umax - umin > np.pi:\n lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi\n else:\n lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi\n\n return lines", "def dist_point_line(nx,ny,px,py,xg,yg,xw,yw): \n det = ny * px - nx * py\n dist = py * (xg - xw) + px * (yw - yg)\n dist = dist / det # min distance between point and line\n x_int = -dist * nx + xw # nearest point in the line to the point x_coor\n y_int = -dist * ny + yw # nearest point in the line to the point y_coor\n return [abs( dist ), x_int, y_int]", "def _walk_line(p0, p1):\n # unpack the point tuples\n x0, y0 = p0\n x1, y1 = p1\n\n dx, dy = x1 - x0, y1 - y0\n yi = 1\n if dy < 0:\n yi = -1\n dy = -dy\n\n D = 2 * dy - dx\n x = np.arange(x0, x1 + 1, dtype=int).T\n y = np.zeros((len(x),), dtype=int)\n\n yy = y0\n for i in np.arange(len(x)):\n y[i] = yy\n if D > 0:\n yy = yy + yi\n D = D - 2 * dx\n\n D = D + 2 * dy\n\n # sort by major axis, and index the cells\n xI = np.argsort(x)\n x = x[xI]\n y = y[xI]\n\n return x, y", "def coefficients_of_line_from_points(p1, p2):\n\n points = [p1, p2]\n x_coords, y_coords = zip(*points)\n coord_array = np.vstack([x_coords, np.ones(len(x_coords))]).T\n m, c = np.linalg.lstsq(coord_array, y_coords, rcond=None)[0]\n\n return m, c", "def coefficients_of_line_from_points(point_a, point_b):\n\n points = [point_a, point_b]\n x_coords, y_coords = zip(*points)\n coord_array = np.vstack([x_coords, np.ones(len(x_coords))]).T\n m, c = np.linalg.lstsq(coord_array, y_coords, rcond=None)[0]\n return m, c", "def train_linear_two_points(point_1, point_2):\n\n points = [point_1, point_2]\n x_coords, y_coords = zip(*points)\n A = vstack([x_coords, ones(len(x_coords))]).T\n m, c = lstsq(A, y_coords)[0]\n\n output_dict = {\"slope\": m, \"intercept\": c}\n\n return output_dict", "def point_line_distance(point: Tuple[float, float],\n start_point: Tuple[float, float],\n end_point: Tuple[float, float]) -> float:\n if start_point == end_point:\n return distance(point, start_point)\n else:\n n = abs(\n (end_point[0] - start_point[0]) * (start_point[1] - point[1]) - (start_point[0] - point[0]) * (end_point[1] - start_point[1])\n )\n d = sqrt(\n (end_point[0] - start_point[0]) ** 2 + (end_point[1] - start_point[1]) ** 2\n )\n return n / d", "def make_model(self, points: List[Point2D]) -> None:\n if 
len(points) != 2:\n raise ValueError(f'Need 2 points to make line, not {len(points)}')\n\n try:\n self._slope = (points[0].y - points[1].y) / (points[0].x -\n points[1].x)\n except ZeroDivisionError:\n self._slope = math.nan\n self._y_int = math.nan\n self._x_int = points[0].x\n return\n\n self._y_int = points[0].y - self._slope * points[0].x\n\n try:\n self._x_int = -1 * self._y_int / self._slope\n except ZeroDivisionError:\n self._x_int = math.nan", "def distance_line_point(line_start, line_end, point):\n # The original end point:\n true_line_end = line_end\n\n # \"Move\" the line, so it \"starts\" on (0, 0)\n line_end = line_end[0] - line_start[0], line_end[1] - line_start[1]\n point = point[0] - line_start[0], point[1] - line_start[1]\n\n line_len_sqr = line_end[0] * line_end[0] + line_end[1] * line_end[1]\n\n # Both points are very near each other.\n if line_len_sqr < 0.0001:\n return distance_point_point(point), line_start\n\n projlen = (line_end[0] * point[0] + line_end[1] * point[1]) / line_len_sqr\n\n if projlen < 0.0:\n # Closest point is the start of the line.\n return distance_point_point(point), line_start\n elif projlen > 1.0:\n # Point has a projection after the line_end.\n return distance_point_point(point, line_end), true_line_end\n else:\n # Projection is on the line. multiply the line_end with the projlen\n # factor to obtain the point on the line.\n proj = line_end[0] * projlen, line_end[1] * projlen\n return distance_point_point((proj[0] - point[0], proj[1] - point[1])),\\\n (line_start[0] + proj[0], line_start[1] + proj[1])", "def distPointToLine(point, line):\n\n [xp, yp] = point\n [a, c] = line\n b = -1\n\n return abs((a*xp + b*yp + c) / np.linalg.norm([a, b]))", "def xy_distance(x1, y1, x2, y2):\r\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** (1 / 2)", "def _reflect_points(points, p1 = (0,0), p2 = (1,0)):\n # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line\n points = np.array(points); p1 = np.array(p1); p2 = np.array(p2);\n if np.asarray(points).ndim == 1:\n return 2*(p1 + (p2-p1)*np.dot((p2-p1),(points-p1))/norm(p2-p1)**2) - points\n if np.asarray(points).ndim == 2:\n return np.array([2*(p1 + (p2-p1)*np.dot((p2-p1),(p-p1))/norm(p2-p1)**2) - p for p in points])", "def points_on_line(r0, r1, spacing):\n dim = len(r0)\n v = np.array(r1) - np.array(r0)\n length = np.linalg.norm(v)\n steps = math.ceil(1.0 * length / spacing) + 1\n points = np.zeros((steps, dim))\n for i in xrange(dim):\n points[:, i] = np.linspace(r0[i], r1[i], steps)\n return points", "def intersect_line_line(line1_start, line1_end, line2_start, line2_end):\n x1, y1 = line1_start\n x2, y2 = line1_end\n u1, v1 = line2_start\n u2, v2 = line2_end\n\n try:\n b1 = (y2 - y1) / float(x2 - x1)\n except ZeroDivisionError:\n # line 1 is vertical, we'll approach that with a very big number\n b1 = 1E199\n\n try: \n b2 = (v2 - v1) / float(u2 - u1)\n except ZeroDivisionError:\n # line 2 is vertical\n b2 = 1E199\n \n a1 = y1 - b1 * x1\n a2 = v1 - b2 * u1\n\n try: \n xi = - (a1 - a2) / (b1 - b2)\n except ZeroDivisionError:\n # two lines are parallel\n return None\n \n yi = a1 + b1 * xi\n if (x1 - xi) * (xi - x2) >= 0 and (u1 - xi) * (xi - u2) >= 0 \\\n and (y1 - yi) * (yi - y2) >= 0 and (v1 - yi) * (yi - v2) >= 0:\n return xi, yi", "def xy_2_lineID(dc_survey):\n\n # Compute unit vector between two points\n nstn = dc_survey.nSrc\n\n # Pre-allocate space\n lineID = np.zeros(nstn)\n\n linenum = 0\n indx = 0\n\n for ii in range(nstn):\n\n if ii == 0:\n\n A = 
dc_survey.source_list[ii].location[0]\n B = dc_survey.source_list[ii].location[1]\n\n xout = np.mean([A[0:2], B[0:2]], axis=0)\n\n xy0 = A[:2]\n xym = xout\n\n # Deal with replicate pole location\n if np.all(xy0 == xym):\n\n xym[0] = xym[0] + 1e-3\n\n continue\n\n A = dc_survey.source_list[ii].location[0]\n B = dc_survey.source_list[ii].location[1]\n\n xin = np.mean([A[0:2], B[0:2]], axis=0)\n\n vec1, r1 = r_unit(xout, xin) # Compute vector between neighbours\n vec2, r2 = r_unit(xym, xin) # Compute vector between current stn and mid-point\n vec3, r3 = r_unit(xy0, xin) # Compute vector between current stn and start line\n vec4, r4 = r_unit(xym, xy0) # Compute vector between mid-point and start line\n\n # Compute dot product\n ang1 = np.abs(vec1.dot(vec2))\n ang2 = np.abs(vec3.dot(vec4))\n\n # If the angles are smaller then 45d, than next point is on a new line\n if ((ang1 < np.cos(np.pi / 4.0)) | (ang2 < np.cos(np.pi / 4.0))) & (\n np.all(np.r_[r1, r2, r3, r4] > 0)\n ):\n\n # Re-initiate start and mid-point location\n xy0 = A[:2]\n xym = xin\n\n # Deal with replicate pole location\n if np.all(xy0 == xym):\n\n xym[0] = xym[0] + 1e-3\n\n linenum += 1\n indx = ii\n\n else:\n xym = np.mean([xy0, xin], axis=0)\n\n lineID[ii] = linenum\n xout = xin\n\n return lineID", "def crossing_num(pnts, poly, line=True):\n def _in_ex_(pnts, ext):\n \"\"\"Return the points within an extent or on the line of the extent.\"\"\"\n LB, RT = ext\n comp = np.logical_and(LB <= pnts, pnts <= RT) # using <= and <=\n idx = np.logical_and(comp[..., 0], comp[..., 1])\n return idx, pnts[idx]\n\n pnts = np.atleast_2d(pnts)\n xs = poly[:, 0]\n ys = poly[:, 1]\n N = len(poly)\n xy_diff = np.diff(poly, axis=0)\n dx = xy_diff[:, 0] # np.diff(xs)\n dy = xy_diff[:, 1] # np.diff(ys)\n ext = np.array([poly.min(axis=0), poly.max(axis=0)])\n idx, inside = _in_ex_(pnts, ext)\n is_in = []\n for pnt in inside:\n cn = 0 # the crossing number counter\n x, y = pnt\n for i in range(N - 1):\n if line is True:\n c0 = (ys[i] < y <= ys[i + 1]) # changed to <= <=\n c1 = (ys[i] > y >= ys[i + 1]) # and >= >=\n else:\n c0 = (ys[i] < y < ys[i + 1])\n c1 = (ys[i] > y > ys[i + 1])\n if (c0 or c1): # or y in (ys[i], ys[i+1]):\n vt = (y - ys[i]) / dy[i] # compute x-coordinate\n if line is True:\n if (x == xs[i]) or (x < (xs[i] + vt * dx[i])): # include\n cn += 1\n else:\n if x < (xs[i] + vt * dx[i]): # exclude pnts on line\n cn += 1\n is_in.append(cn % 2) # either even or odd (0, 1)\n return inside[np.nonzero(is_in)]", "def getLineIndices(self, img_shape, x1, y1, x2, y2):\n length = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n t = np.linspace(0, 1, int(round(2*length)))\n x = np.round(t * x1 + (1-t)*x2).astype(np.uint32)\n y = np.round(t * y1 + (1-t)*y2).astype(np.uint32)\n x = x[np.logical_and(x >= 0, x <= img_shape[0])]\n y = y[np.logical_and(y >= 0, y <= img_shape[1])]\n return (x, y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
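A worked example of calc_coords, assuming numpy is imported as np. Because the function takes min()/max() of each coordinate pair before calling np.linspace, both returned arrays are always ascending; the order of point1 and point2 does not affect the output.

lats, lons = calc_coords((33.0, -102.0), (34.0, -101.0), 5)
# lats -> array([33.  , 33.25, 33.5 , 33.75, 34.  ])
# lons -> array([-102.  , -101.75, -101.5 , -101.25, -101.  ])

# Swapping the endpoints returns exactly the same (ascending) arrays:
assert (calc_coords((34.0, -101.0), (33.0, -102.0), 5)[0] == lats).all()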
Gets NWS WWA polygons for a specified date & time
def get_wwa_polys(abs_path, date, time, wwa_type=['SV', 'TO']):
    polys = {}
    target_dt = _format_wwa_time(date, time)

    wwa_reader = shpreader.Reader(abs_path)

    if ('SV' in wwa_type):
        filtered_wwa_sv = [rec.geometry for rec in wwa_reader.records()
                           if (rec.attributes['GTYPE'] == 'P')
                           and (_valid_wwa_time(rec.attributes['ISSUED'], rec.attributes['EXPIRED'], target_dt))
                           and (rec.attributes['PHENOM'] == 'SV')]
        polys['SV'] = filtered_wwa_sv

    if ('TO' in wwa_type):
        filtered_wwa_to = [rec.geometry for rec in wwa_reader.records()
                           if (rec.attributes['GTYPE'] == 'P')
                           and (_valid_wwa_time(rec.attributes['ISSUED'], rec.attributes['EXPIRED'], target_dt))
                           and (rec.attributes['PHENOM'] == 'TO')]
        polys['TO'] = filtered_wwa_to

    return polys
[ "def generate_wind():\n# Taken by converting UTM Zone 11 coordinates on\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# These values specific to files called yosemite_landscape_12-03-2019_0900_120m\n west_lon = -120.006255\n east_lon = -119.4736\n south_lat = 37.464649\n north_lat = 37.822073\n\n# Open .shp and .dbf files with rb\n myshp = open(\"SHAPEFILES/HOUR1/yosemite_landscape_12-03-2019_0900_120m.shp\", \"rb\")\n mydbf = open(\"SHAPEFILES/HOUR1/yosemite_landscape_12-03-2019_0900_120m.dbf\", \"rb\")\n wind = Wind(myshp, mydbf, west_lon, east_lon, south_lat, north_lat)\n\n# Regrid the base data onto a 30mx30m grid and bounded at the coordinates described\n# Our model focuses on the area between -120W to -119.5W, and 37.5N to 37.8N\n new_wind = wind.regrid(30, -120, -119.5, 37.5, 37.8)\n return new_wind", "def getSnowSurfaceWetness():\n uq = urlquery.SnowSurfaceQuery()\n \n uq.add_filter(\"DtObsTime ge datetime'2012-09-01T00:00:00'\")\n uq.add_filter(\"LangKey eq 1\")\n \n res = uq.get_json_data()\n \n print uq.UQ # can be copied to the browser's address field to view the full result of the query\n \n shTS = timeseries.SnowSurfaceTS()\n\n for item in res:\n \"\"\"\n Just exchange \"SH Overflaterim/hulromsrim\" by:\n \"IF Is og skare\"\n \"MFcr Gjenfroset smeltelag\"\n \"MF Smelteomdannede korn\"\n to retrieve information about crusts.\n \"\"\"\n wetness_l= [u'Fuktig',\n u'Våt',\n u'Meget våt',\n u'Sørpe']\n \n if item['SurfaceWaterContentName'] in wetness_l: \n shTS.values.append(item['SnowSurfaceName'])\n shTS.dates.append(item['DtObsTime'])\n shTS.snowdepth.append(item['SnowDepth'])\n shTS.lwq.append(item['SurfaceWaterContentName'])\n shTS.UTMZone.append(item['UTMZone'])\n shTS.UTMEast.append(item['UTMEast'])\n shTS.UTMNorth.append(item['UTMNorth'])\n shTS.set_regid(item['RegID'])\n \n shTS.json_date_as_datetime()\n print shTS", "def infer_polygons(way_data):\n polygon = list()\n if len(way_data['nodes']) > 2:\n for node in way_data['nodes']:\n polygon.append((node['lon'], node['lat']))\n polygon_obj = Polygon(polygon)\n return polygon_obj", "def get_shops(self, polygon, count=1000):\n query = 'INTERSECTS(geom,POLYGON(({})))'\n poly_trans = [p.transform(self.geoserver_epsg) for p in polygon]\n str_poly = ', '.join(('{} {}'.format(pnt[1], pnt[0])\n for pnt in poly_trans))\n srsname = 'EPSG:{}'.format(self.epsg)\n params = dict(CQL_FILTER=query.format(str_poly),\n srsname=srsname,\n count=str(count))\n params.update(self.wfs_params)\n new_params = []\n for (k, v) in params.items():\n param = '='.join([urllib.quote(k), urllib.quote(v)])\n new_params.append(param)\n param_str = '&'.join(new_params)\n r = requests.get(self.url, params=param_str)\n try:\n json = r.json()\n except ValueError:\n arcpy.AddMessage('Fehler bei der Anfrage des Geoservers.')\n return []\n return self._decode_json(json)", "def generate_polygon():\n with open('sweden.json') as f:\n data = json.load(f)\n\n arr = data['geometry']['coordinates']\n dt = []\n res = []\n for x in arr:\n for poly in x:\n for p in poly:\n dt.append(p)\n print(getPixel(p[0], p[1], 512))\n res.append(dt)\n dt = []\n \n\n\n for i,p in enumerate(res):\n res[i] = np.array([[ xtile(x), ytile(y)] for x,y in p])\n\n return res", "def get_stations_in_area(db: Session, polygon: Polygon) -> list:\n logger.debug('Get hydrometric stations in polygon', polygon.wkt)\n\n # Search for point Lat: 49.250285 Lng: -122.953816\n stn_q = db.query(\n StreamStationDB,\n ).filter(\n func.ST_Intersects(\n 
func.ST_GeographyFromText(polygon.wkt),\n func.Geography(StreamStationDB.geom)\n )\n )\n\n rs_stations = stn_q.all()\n\n stations = [\n StreamStationDB.get_as_feature(x, StreamStationDB.get_geom_column(db))\n for x in rs_stations\n ]\n\n return stations", "def extract_ncep_winds(dbeg='19900101', dend='20190228', verbose=False):\n\n ybeg=int(dbeg[0:4])\n yend=int(dend[0:4])\n \n if verbose: print ('Extracting U10M for NCEP')\n uwnd = xr.concat([extract_one_ncep(f, 'uwnd') for f in ncep_filelist('U10M', ybeg=ybeg, yend=yend)], dim='time')\n uwnd = uwnd.drop(['lat','lon'])\n uwnd = uwnd.sel(time=slice(dbeg,dend))\n \n if verbose: print ('Extracting V10M for NCEP')\n vwnd = xr.concat([extract_one_ncep(f, 'vwnd') for f in ncep_filelist('V10M', ybeg=ybeg, yend=yend)], dim='time')\n vwnd = vwnd.drop(['lat','lon'])\n vwnd = vwnd.sel(time=slice(dbeg,dend))\n \n return uwnd, vwnd", "def extract_polygons(data):\n polygons = []\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n \n north_coord_min = north - d_north\n north_coord_max = north + d_north\n east_coord_min = east - d_east\n east_coord_max = east + d_east\n \n \n corners = [(np.int(north_coord_min), np.int(east_coord_min)),\n (np.int(north_coord_max), np.int(east_coord_min)),\n (np.int(north_coord_max), np.int(east_coord_max)),\n (np.int(north_coord_min), np.int(east_coord_max))]\n \n height = alt+d_alt\n\n p = Polygon(corners)\n \n polygons.append([p, height])\n \n return polygons", "def build_geojson(polygons, trips, flow_key_start):\n geojson = {\"type\": \"FeatureCollection\", \"features\": []}\n\n for tract in trips:\n tract_id = tract.get(flow_key_start)\n feature = polygons.get(tract_id)\n\n count = int(tract.get(\"trip_count\"))\n\n count_as_height = (\n count / 5\n ) # each 5 trips will equate to 1 meter of height on the map\n\n if feature:\n feature[\"properties\"][\"trips\"] = count\n feature[\"properties\"][\"count_as_height\"] = count_as_height\n feature[\"properties\"][\"tract_id\"] = int(tract_id)\n feature[\"properties\"][\"trips\"] = count\n geojson[\"features\"].append(feature)\n\n return geojson", "def wpolygon(self, *args, **kwargs):\n return _regionmanager.regionmanager_wpolygon(self, *args, **kwargs)", "def make_polygons(geojs):\n polygons = {}\n for block in range(len(geojs[\"features\"])):\n geoid = geojs[\"features\"][block][\"properties\"][\"geoid\"]\n polygon = shape(geojs[\"features\"][block][\"geometry\"])\n polygons[geoid] = polygon\n return polygons", "def get_stations_polygon(\n self,\n stations: list,\n crossover_radius=200000,\n ):\n if len(stations) == 0:\n return []\n\n result = None\n sql = \"\"\"\n select st_astext('{}')\n \"\"\".format(self._get_translated_stations_buffer(\n stations,\n crossover_radius=crossover_radius,\n ))\n result = DataSet._fetchall_query(sql, True)[0]\n\n return result", "def filter_polygons(state, header):\n filtered_state = []\n\n for source in state:\n if 'Polygon' in source[header.index('geometry type')]:\n filtered_state.append(source)\n\n return filtered_state", "def lv03_to_wgs84(east, north, height=None):\n return __to_wgs84(e_aux=(east - 600000) / 1000000, n_aux=(north - 200000) / 1000000, height=height)", "def get_polygon(geohash):\n bounds = get_bounds(geohash)\n polygon_vertices = [[bounds['sw']['lon'], bounds['sw']['lat']],\n [bounds['sw']['lon'], bounds['ne']['lat']],\n [bounds['ne']['lon'], bounds['ne']['lat']],\n [bounds['ne']['lon'], bounds['sw']['lat']],\n [bounds['sw']['lon'], bounds['sw']['lat']]]\n\n return 
polygon_vertices", "def getExtentCounty(province, prefecture, county, extent, ansidate, coverage):\n \n extent = [117.04640962322863,33.00404358318741,117.59765626636589,33.50222015793983] # left, bottom, right, top\n d = 150842\n endpoint='http://192.168.1.104:8080/rasdaman/ows'\n field={}\n field['SERVICE']='WCS'\n field['VERSION']='2.0.1'\n field['REQUEST']='GetCoverage'\n field['COVERAGEID']=coverage#'trmm_3b42_coverage_1'\n field['SUBSET']=['ansi('+str(d)+')',\n 'Lat('+str(extent[1])+','+str(extent[3])+')',\n 'Long('+str(extent[0])+','+str(extent[2])+')']\n field['FORMAT']='image/tiff'\n url_values = urllib.urlencode(field,doseq=True)\n full_url = endpoint + '?' + url_values\n print full_url\n wcsCoverage_filename='coverage'+str(d)+'.tif'\n f,h = urllib.urlretrieve(full_url,wcsCoverage_filename)\n print h \n \n #path_base = \"/home/rasdaman/Downloads\"\n #CHN_adm_gpkg = os.path.join(path_base, \"CHN_adm.gpkg\") \n \n #wcsCoverage_filename_clip = 'coverage'+str(d)+'clip.tif' \n\n #command = [\"/usr/bin/gdalwarp\", \"-cutline\", CHN_adm_gpkg, \"-csql\", \"SELECT NAME_3 FROM CHN_adm3 WHERE NAME_1 = \"+province+\" and NAME_2 = \"+prefecture+\" and NAME_3 = \"+county+\"\",\n # \"-crop_to_cutline\", \"-of\", \"GTiff\", \"-dstnodata\",\"-9999\",wcsCoverage_filename, wcsCoverage_filename_clip, \"-overwrite\"] # \n\n #print (sp.list2cmdline(command))\n\n #norm = sp.Popen(sp.list2cmdline(command), shell=True) \n #norm.communicate() \n \n return wcsCoverage_filename #wcsCoverage_filename_clip", "def output_shp(self):\n ofn = \"{}_{}_tracks\".format(\n self.year,\n \"ATL\" if list(self.tc.keys())[0][:2] == \"AL\" else \"PAC\"\n )\n with shapefile.Writer(ofn,shapeType=3) as gis:\n gis.field(\"ATCFID\",\"C\",\"8\")\n gis.field(\"NAME\",\"C\",\"10\")\n gis.field(\"START\",\"C\",\"16\")\n gis.field(\"END\",\"C\",\"16\")\n gis.field(\"MAXWIND\",\"N\",\"3\")\n gis.field(\"MINMSLP\",\"N\",\"4\")\n gis.field(\"ACE (x10^4)\",\"N\",\"12\",3)\n gis.field(\"HDP (x10^4)\",\"N\",\"12\",3)\n gis.field(\"MHDP (x10^4)\",\"N\",\"12\",3)\n gis.field(\"TRK_DIST_NMI\",\"N\",\"22\",1)\n gis.field(\"TRK_DIST_TC_NMI\",\"N\",\"22\",1)\n gis.field(\"TRK_DIST_TS_NMI\",\"N\",\"22\",1)\n gis.field(\"TRK_DIST_HU_NMI\",\"N\",\"22\",1)\n gis.field(\"TRK_DIST_MHU_NMI\",\"N\",\"22\",1)\n for trop in self.tc:\n gis.record(\n self.tc[trop].atcfid,\n self.tc[trop].name,\n self.tc[trop].entry[0].entrytime.isoformat(),\n self.tc[trop].entry[-1].entrytime.isoformat(),\n self.tc[trop].maxwind,\n self.tc[trop].minmslp if self.tc[trop].minmslp != None else 9999,\n self.tc[trop].ACE * math.pow(10,-4),\n self.tc[trop].HDP * math.pow(10,-4),\n self.tc[trop].MHDP * math.pow(10,-4),\n self.tc[trop].track_distance,\n self.tc[trop].track_distance_TC,\n self.tc[trop].track_distance_TS,\n self.tc[trop].track_distance_HU,\n self.tc[trop].track_distance_MHU\n )\n entiretrack = [self.tc[trop].entry[trk].location_reversed for trk in range(len(self.tc[trop].entry))]\n gis.line([entiretrack])", "def getPoly(pt, w, h):\n x, y = pt\n ll = (x - (w * 0.5), y - (h * 0.5))\n ul = (x - (w * 0.5), y + (h * 0.5))\n ur = (x + (w * 0.5), y + (h * 0.5))\n lr = (x + (w * 0.5), y - (h * 0.5))\n return arcpy.Polygon(arcpy.Array([arcpy.Point(*coords) for coords in [ll,ul,ur,lr,ll]]))", "def _make_wcs(cont_table):\n\n output = dict()\n data = cont_table[1].data\n for ary in ['SLW', 'SSW']:\n ind_ary = np.where(data['array'] == ary)[0]\n data_ = data[ind_ary]\n ra, dec = data_['ra'][0], data_['dec'][0]\n row, col = data_['row'][0], data_['column'][0]\n #ra_lim = 
[np.max(data_['ra']), np.min(data_['ra'])]\n dec_lim = [np.min(data_['dec']), np.max(data_['dec'])]\n row_lim = [np.min(data_['row']), np.max(data_['row'])]\n col_lim = [np.min(data_['column']), np.max(data_['column'])]\n row_diff, col_diff = np.diff(row_lim), np.diff(col_lim)\n cdelt = np.diff(dec_lim)[0]/row_diff[0]\n\n w = WCS(naxis=2)\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n w.wcs.crpix = [col+1, row+1]\n w.wcs.crval = [ra, dec]\n w.wcs.cdelt = [-cdelt, cdelt]\n header = w.to_header()\n header['NAXIS1'] = int(col_diff+1)\n header['NAXIS2'] = int(row_diff+1)\n\n output[ary] = WCS(header)\n\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
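A hedged usage sketch for get_wwa_polys: shpreader above is assumed to be cartopy.io.shapereader, the shapefile path and the date/time strings below are invented, and _format_wwa_time/_valid_wwa_time are project helpers whose expected input formats are not shown here.

import cartopy.io.shapereader as shpreader  # assumed binding for `shpreader` above

# Hypothetical path to an NWS watch/warning/advisory polygon shapefile;
# the date/time strings must match whatever _format_wwa_time expects.
polys = get_wwa_polys('/data/wwa/wwa_201905.shp', '05-23-2019', '21:00', wwa_type=['TO'])

for geom in polys['TO']:
    # Each geometry is a shapely polygon; bounds = (min lon, min lat, max lon, max lat)
    print(geom.bounds)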
BRIEF Get the average word count for all rows & versions
def AnalyticsQuery(table, full_col_name):
    total = 0
    count = 0.0

    for row in table.fetch_all_rows():
        total += len(Regex.WORD.findall(row[full_col_name]))
        count += 1.0

    print("(Analytics) AverageWordCount({0}) = {1}".format(full_col_name, total / count))
    print(' ')
    sys.stdout.flush()
[ "def get_average_word_length(self):\n\n if self.word_count_list is None:\n self.tokenize_documents()\n\n return self.word_count_list.apply(lambda x: np.average([len(w) for w in x]))", "def avg_word_vectors(wordlist,size): \n sumvec=np.zeros(shape=(1,size))\n wordcnt=0\n for w in wordlist:\n if w in model_w2v:\n sumvec += model_w2v[w]\n wordcnt +=1\n \n if wordcnt ==0:\n return sumvec\n else:\n return sumvec / wordcnt", "def avg_occ(word, all_texts, text_lens):\n n_occs = []\n for text in all_texts:\n n_occs.append(np.mean([i+1 for i in\n range(len(text)) if word in ' '.join(text[i])]))\n return np.array(n_occs)/text_lens", "def words_avg_embedding(words: list, glove):\n\n word_embeddings = map(partial(get_word_vec, glove=glove), words)\n sum_words_embedding = reduce(np.add, word_embeddings)\n return sum_words_embedding / len(words)", "def average_words(statuses):\n total_words = sum([len(s.split()) for s in statuses])\n return 1.0 * total_words / len(statuses)", "def avg_words_song(band):\n count = {}\n file = pickle.load(open(band + '_count.pickle', 'rb'))\n for artist, value in file.iteritems():\n count[artist] = sum(value)/float(len(value))\n return count", "def word_averaging(wv, words):\n all_words, mean = set(), []\n \n for word in words:\n if isinstance(word, np.ndarray):\n mean.append(word)\n elif word in wv.vocab:\n mean.append(wv.syn0norm[wv.vocab[word].index])\n all_words.add(wv.vocab[word].index)\n\n if not mean:\n logging.warning(\"cannot compute similarity with no input %s\", words)\n # FIXME: remove these examples in pre-processing\n return np.zeros(wv.layer1_size,)\n\n mean = gensim.matutils.unitvec(np.array(mean).mean(axis=0)).astype(np.float32)\n return mean", "def average_words(word_count, sentence_count):\n average_words = word_count / sentence_count\n if average_words >= 25:\n run_on_sentence = average_words / 25\n sentence_count += (math.ceil(run_on_sentence))\n average_words = word_count / sentence_count\n\n return average_words", "def test_get_average_occurence_count_method():\n keywordsChief1 = KeywordsChief(\"test_data/keywords.yaml\")\n assert keywordsChief1.get_average_occurrence_count() == 1.0\n\n keywordsChief2 = KeywordsChief(\"test_data/keywords_ngram2.yaml\")\n assert keywordsChief2.get_average_occurrence_count() == 1.0\n\n keywordsChief3 = KeywordsChief(\"test_data/keywords_ngram3.yaml\")\n assert keywordsChief3.get_average_occurrence_count() == 1.0", "def sentence_to_avg(word_list, word_to_vec_map):\n \n # Initialize the average word vector, should have the same shape as your word vectors.\n shape = np.shape(50,)\n \n avg = np.zeros(shape)\n\n \n total = 0\n unknown_counter = 0\n for w in word_list:\n try:\n total += word_to_vec_map[w]\n except:\n unknown_counter += 1\n \n avg = total / len(word_list) - unknown_counter\n \n \n return avg", "def average_word_length(sentence_in):\n\tsum = 0.0\n\tcount = 0\n\tfor word in sentence_in.split(sep=\" \"):\n\t\tsum += len(word)\n\t\tcount += 1\n\treturn (sum / count)", "def get_mean_word_length(self):\n if not self.words:\n return False\n redacted_words = purge_punctuation_etc(self.words)\n redacted_words = redacted_words.replace(\" \", \"\")\n total_letters = len(redacted_words)\n result = total_letters/self.word_count\n return result", "def get_avg_word_length(lyrics):\n\n\tlyrics = lyrics.translate(str.maketrans('','',string.punctuation))\n\treturn round(sum([len(word) for word in lyrics.split()]) / len(lyrics.split()),2)", "def fraction_adverbs(row):\n text = row['text']\n text_splited = text.split(' ')\n 
text_splited = [''.join(c for c in s if c not in string.punctuation) for s in text_splited]\n text_splited = [s for s in text_splited if s]\n word_count = text_splited.__len__()\n pos_list = nltk.pos_tag(text_splited)\n verbs_count = len([w for w in pos_list if w[1] in ('RB','RBR','RBS')])\n return (verbs_count/word_count)", "def average_length_of_documents(self):\n if PROXIMITY.useCache:\n return PROXIMITY.average_doc_length\n else:\n summ = 0\n for key, value in self.documents.items():\n for k, v in value.items():\n summ += v\n return summ / float(self.no_of_documents)", "def get_avg_wrdlen(tokens):\n if len(tokens) < 2:\n return -1\n num = len(tokens)\n count = 0\n for word in tokens:\n count += len(word)\n avg_wrdlen = float(count)/float(num)\n avg_wrdlen = avg_wrdlen\n if avg_wrdlen < 0: avg_wrdlen = 0\n return avg_wrdlen", "def getAvgHits():\n\n\t\t\t# if 'GBTSTATUS' in fileHandle[1].read_header()['EXTNAME']:\n\t\t\t# \tfor i = range(len()", "def _calculate_average_field_lengths(self):\n accumulator = defaultdict(int)\n documents_with_field = defaultdict(int)\n\n for field_ref, length in self.field_lengths.items():\n _field_ref = FieldRef.from_string(field_ref)\n field = _field_ref.field_name\n\n documents_with_field[field] += 1\n accumulator[field] += length\n\n for field_name in self._fields:\n accumulator[field_name] /= documents_with_field[field_name]\n\n self.average_field_length = accumulator", "def get_avg_embedding(word_ids, words_i2w, w2v_model, w2v_dim):\n\n word_sum = np.zeros(w2v_dim, dtype=np.float32)\n if w2v_model == None:\n return word_sum\n word_count = 0\n for wid in word_ids:\n word = words_i2w[wid]\n if word != UNKNOWN_WORD and word in w2v_model.vocab:\n word_sum += w2v_model.wv[word]\n word_count += 1\n if word_count > 0:\n return word_sum / word_count\n return word_sum" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that root resource works correctly with API format.
def testRootAsAPIView(self): response = self.client.get(self.url(), data={'format': 'api'}) self.assertEqual(response.status_code, 200) self.assertIn("Resource Instance", response.content.decode('utf-8'))
[ "def testRootAPITrailingSlash(self):\n response = self.client.get(self.url().rstrip('/'),\n data={'format': 'api'})\n self.assertEqual(response.status_code, 301)\n self.assertEqual(response.url.replace('http://testserver', ''), self.url())", "def test_default_api_is_v1(self):\n with uaac_set(app):\n with app.test_client() as c:\n rv = c.get('/api/')\n\n assert rv.status_code == 302\n assert rv.location == 'http://localhost/api/v1/'", "def test_API1_missing(self):\n response = self.app.put(\n constants.API1_URL,\n data = json.dumps(dict()),\n mimetype = 'application/json')\n\n self.assertEqual(response.status_code, 400)", "def test_API1_valid(self):\n responses.add(\n responses.POST, \n constants.API_URL + constants.API9_URL,\n json = {'query_result': []},\n status = 200)\n\n responses.add(\n responses.POST, \n constants.API_URL + constants.API8_URL,\n json = {},\n status = 200)\n\n response = self.app.put(\n constants.API1_URL,\n data = json.dumps(dict(\n username = username_3,\n password = password_3)),\n mimetype = 'application/json')\n\n self.assertEqual(response.status_code, 201)", "def api_root(request, format=None):\n return Response({\n # 'users': reverse('user-list', request=request, format=format),\n 'polities': reverse('polity-list', request=request, format=format)\n })", "def api_root(request, format=None):\r\n return Response({\r\n 'appointment creation': reverse('create-appointment', request=request, format=format),\r\n 'appointment list': reverse('appointments-list', request=request, format=format)\r\n })", "def test_api_authorization(self):\n response = self.get('/api/v1/run/')\n self.assertEqual(200, response.status_code)\n\n json_data = json.loads(response.content)\n self.assertEqual(1, len(json_data['objects']))\n self.assertEqual(1, json_data['objects'][0]['id'])", "def test_part_api(self):\n url = reverse('api-part-list')\n\n # Check JSON response\n response = self.client.get(url, HTTP_ACCEPT='application/json')\n self.assertEqual(response.status_code, 200)", "def test_baseRoute(self):\n response = self.client.get('/')\n assert response.status_code == 200\n assert type(response.data) == bytes\n assert response.data != \"\"", "def test_json(self):\n print \"\\nAccessing /api/\"\n response = self.client.get('/api/?format=json')\n print \"Page response: %s\" % response.content\n verify_response = verify_json(response.content)\n self.assertEqual(verify_response, True)", "def test_service_doc(self):\n response = self.app.get(\"api.html\", follow_redirects=True)\n self.assertEqual(200, response.status_code)", "def test_get_api_version(self):\n\n factory = APIRequestFactory()\n request = factory.get(reverse('api-version'))\n view = rest.ApiVersionView.as_view()\n response = view(request)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue('date' in response.data)\n self.assertTrue('version' in response.data)\n self.assertTrue('build_url' in response.data)\n self.assertTrue('build' in response.data)\n self.assertTrue('githash' in response.data)\n self.assertEqual(response.data['github_url'], 'https://github.com/1001genomes/aragwas/commit')", "def test_service_desc(self):\n service_desc = self._request_valid(\"api\")\n self.assertIn(\"openapi\", service_desc.keys())\n self.assertIn(\"eodag\", service_desc[\"info\"][\"title\"].lower())\n self.assertGreater(len(service_desc[\"paths\"].keys()), 0)\n # test a 2nd call (ending slash must be ignored)\n self._request_valid(\"api/\")", "def test_default_api_url(self):\n self.assertEqual(self.client.api_url, 
rest_client.DEFAULT_API_URL)", "def test_api_authorization(self):\n response = self.get('/api/v1/project/')\n self.assertEqual(200, response.status_code)\n\n json_data = json.loads(response.content)\n self.assertEqual(1, len(json_data['objects']))\n self.assertEqual('Test project 1', json_data['objects'][0]['title'])", "def test_version(self):\n response = self.client.get('/internal/version')\n self.assert200(response)\n self.assertIn('version', response.json)", "def test_20_api_can_get_a_document(self):\n res = self.client.get(\n '/documents/1',\n format='json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n assert json.loads(res.content)['id'] == 1", "def test_raw(self, apiobj):\n response = apiobj._request(\n path=apiobj._router.fields,\n method=\"get\",\n raw=True,\n is_json=True,\n error_status=True,\n )\n assert isinstance(response, requests.Response)", "def _initialize_with_recipe_api(self, root_api):\n paths_found = {}\n def add_found(path):\n if path is not None:\n paths_found[str(path)] = path\n\n search_set = [root_api]\n found_api_id_set = {id(root_api)}\n while search_set:\n api = search_set.pop()\n\n add_found(api.resource())\n add_found(api.package_repo_resource())\n\n for name in dir(api.m):\n sub_api = getattr(api.m, name)\n if not isinstance(sub_api, RecipeApiPlain):\n continue\n if id(sub_api) not in found_api_id_set:\n found_api_id_set.add(id(api))\n search_set.append(sub_api)\n\n # transpose\n # [(path_string, path), ...]\n # into\n # ([path_string, ...], [path, ...])\n self.path_strings, self.paths = zip(*sorted(paths_found.items()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks redirect to root resource with trailing slash.
def testRootAPITrailingSlash(self): response = self.client.get(self.url().rstrip('/'), data={'format': 'api'}) self.assertEqual(response.status_code, 301) self.assertEqual(response.url.replace('http://testserver', ''), self.url())
[ "def _HandleRoot(self, request):\n \n raise HttpRedirect, WebRequest(\"/static/index.html\")", "def warn_trailing_slash(self, dest, uri):\n if uri == '%s/' % self.get_uri(dest):\n self.log.warning(\n 'It seems that the url given do not need the trailing slash (%s). '\n 'You would have better not to keep trailing slash in your urls '\n 'if you don\\'t have to.' % uri)\n return True\n return False", "def test_redirect_suppression(self):\n rev = RedirectRevisionFactory()\n redirect = rev.document\n response = self.client.get(redirect.get_absolute_url() + \"?redirect=no\", follow=True)\n self.assertContains(response, \"REDIRECT \")", "def addslash(method):\r\n @functools.wraps(method)\r\n def wrapper(self, *args, **kwargs):\r\n if not self.request.path.endswith(\"/\"):\r\n if self.request.method in (\"GET\", \"HEAD\"):\r\n uri = self.request.path + \"/\"\r\n if self.request.query:\r\n uri += \"?\" + self.request.query\r\n self.redirect(uri, permanent=True)\r\n return\r\n raise HTTPError(404)\r\n return method(self, *args, **kwargs)\r\n return wrapper", "def test_redirect_suppression(self):\n redirect, _ = doc_rev('REDIRECT <a class=\"redirect\" href=\"http://smoo/\">smoo</a>')\n response = self.client.get(\n redirect.get_absolute_url() + '?redirect=no',\n follow=True)\n self.assertContains(response, 'REDIRECT ')", "def root_index_redirect():\n return redirect('/LightUpPi/', code=302)", "def test_certificate_redirect(self):\n without_slash = \"https://localhost:%s/certificate/\" % settings.HTTPS_FRONTEND_PORT\n response = requests.get(without_slash, verify=False, allow_redirects=False)\n self.assertEqual(response.status_code, 301)", "def test_library_route_not_logged_in_gets_302(self):\n response = self.client.get(reverse_lazy('library'))\n self.assertEqual(response.status_code, 302)", "def test_subdomain_redirect(self):\n self.init('/', 'japan.personfinder.appspot.com')\n legacy_redirect.redirect(self.handler)\n self.assertEquals(301, self.handler.response.status_int)\n self.assertEquals('http://google.org/personfinder/japan/',\n self.handler.response.headers['Location'])", "def updateRedirector(self, redirector):\n\n if not redirector.startswith(\"root://\"):\n redirector = \"root://\" + redirector\n tolog(\"Updated redirector for missing protocol: %s\" % (redirector))\n if not redirector.endswith(\"/\"):\n redirector = redirector + \"/\"\n tolog(\"Updated redirector for missing trailing /: %s\" % (redirector))\n\n # Protect against triple slashes\n redirector = redirector.replace('///','//')\n\n return redirector", "def _http_check_url_rec_handle_redir(r, redirects):\n\n # If Location is in the headers\n if \"Location\" in r.headers:\n url_redir = r.headers[\"Location\"]\n redirects.append(url_redir)\n\n # Loop back in the recursion\n return FME_utils._http_check_url_rec(url_redir, redirects)\n\n return False", "def _check_redirect(self):\r\n try:\r\n request = requests.get(\r\n 'http://localhost:8050/render.json',\r\n params={\r\n 'url': self._url,\r\n 'html': 1,\r\n 'wait': 2,\r\n 'timeout': 90,\r\n 'png': 1,\r\n 'render_all': 1,\r\n 'history': 1,\r\n }\r\n )\r\n response = request.json()\r\n\r\n if len(response['history']) != 1:\r\n self._real_url = response['url']\r\n self._is_redirect = True\r\n for redirect in response['history'][:-1]: # Except last url\r\n self._redirects.append(redirect['response']['url'])\r\n\r\n elif self._url != response['url']:\r\n self._real_url = response['url']\r\n self._is_redirect = True\r\n\r\n self._html = response['html']\r\n\r\n except:\r\n response = 
requests.get(self._url)\r\n if len(response.history) > 0:\r\n self._real_url = response.url\r\n self._is_redirect = True\r\n for redirect in response.history:\r\n self._redirects.append(redirect.url)\r\n\r\n self._html = response.text", "def safe_redirect(target, endpoint=\"home.index\"):\r\n if not target or not is_safe_url(target):\r\n target = url_for(endpoint)\r\n return redirect(target)", "def slashend(request, slug=''):\n return HttpResponsePermanentRedirect('/' + slug + '/')", "def test_trailing_slash(self):\n path = utils.safe_join(\"base_url/\", \"path/to/somewhere/\")\n self.assertEqual(path, \"base_url/path/to/somewhere/\")", "def test_not_connected_then_redirected(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 302)", "def test_fall_back_to_root(self):\n root = RootRouteFactory.create()\n\n with self.assertNumQueries(1):\n route = Route.objects.best_match_for_path('/absent-branch/')\n\n self.assertEqual(route, root)", "def _is_absolute_uri(self, uri):\n return uri.startswith(\"/\")", "def test_root_path(self, copier, cwp):\n assert cwp.root_path == C_ROOT_PATH\n assert copier.root_path == \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that is_collection flag can't be changed by PATCH request.
def testChangeResourceTypeForbidden(self): response = self.client.patch(self.url(self.dir.path), data={ "is_collection": False }) self.assertEqual(response.status_code, 400) error = 'Resource type cannot be changed after creation' self.assertDictEqual(response.data, {'is_collection': [error]})
[ "def is_collection(path):\n return not is_root(path) and not is_doc(path)", "def test_disallow_patch_many(self):\r\n response = self.app.patch('/api/person', data=dumps(dict(name='foo')))\r\n assert response.status_code == 405", "def isFromCollection(self) -> bool:\n return self._is_from_collection", "def test_nonUpdatableAttribute(self):\n self.headers.setRawHeaders(\"Content-Type\", [\"application/json\"])\n self.body = disallowedUpdateState\n return self._testUpdate(http.FORBIDDEN)", "def can_write_collection(view):\n \n @wraps(view)\n def inner_decorator(request, collection, *args, **kwargs):\n \n out = createBaseResponseObject()\n database = kwargs.get(database, settings.MONGO_SERVER_DEFAULT_DB)\n\n try:\n #check user and collection\n collectionInstance = SketchCollection.objects.get(name=collection, database=database)\n wa = collectionInstance.hasWriteAccess(request.user)\n if wa:\n return view(request, collection, database=database, *args, **kwargs)\n \n except SketchCollection.DoesNotExist:\n #TODO: we could limit the number of collections here\n return view(request, collection, database=database, *args, **kwargs)\n \n \n except Exception, e:\n out['status'] = 0\n out['errors'] = [str(e)]\n return HttpResponse(json.dumps(out))\n \n out['status'] = 0\n out['errors'] = ['You must own collection %s or have the right to write to it.' % collection]\n return HttpResponse(json.dumps(out))\n\n return inner_decorator", "def hasRenderSettingsCollectionInstance(self):\n \n pass", "def has_changed (self):\n if (self.collection != None) and self.collection.item_changed:\n self.collection.item_changed (self)", "def _status_setter_checks(self):\n if self.check_status((\"public\", )):\n raise QiitaDBStatusError(\"Illegal operation on public collection!\")", "def assert_collection_not_exists(self, database, collection):\n db = self.client[database]\n self.assertNotIn(collection, db.list_collection_names())", "def is_collection(cls, name):\n return name in cls.collection_classes", "def is_document_collection(name):\n return CollectionMetaclass.is_document_collection(name)", "def test_non_staff(self):\n self._verify_non_staff_cannot_access(\n course_discussions_settings_handler, \"GET\", [str(self.course.id)]\n )\n self._verify_non_staff_cannot_access(\n course_discussions_settings_handler, \"PATCH\", [str(self.course.id)]\n )", "def is_document_collection(cls, name):\n try:\n Collection = cls.get_collection_class(name)\n return issubclass(collection, Collection)\n except KeyError:\n return False", "def test_is_cataloging_admin_for(user, collection, superuser):\n other_collection = CollectionFactory()\n not_admin_permission = Permission(user=user, collection=collection,\n cataloging_admin=False).save_as(superuser)\n admin_permission = Permission(user=user, collection=other_collection,\n cataloging_admin=True).save_as(superuser)\n\n assert user.is_cataloging_admin_for(not_admin_permission.collection) is False\n assert user.is_cataloging_admin_for(admin_permission.collection) is True\n\n assert user.is_cataloging_admin_for(admin_permission.collection,\n not_admin_permission.collection) is False\n not_admin_permission.cataloging_admin = True\n now_also_admin_permission = not_admin_permission.save()\n assert user.is_cataloging_admin_for(admin_permission.collection,\n now_also_admin_permission.collection) is True", "def validate_patch(self, patch):\n for change in patch:\n if change['path'] == \"/document_pid\":\n self.ensure_document_exists(change['value'])", "def am_check_site_updated(coll):\n if 
layout.FIELD_TYPEID in coll._children(RecordType, altscope=\"all\"):\n return am_errors.AM_SUCCESS\n print(\"Perform 'annalist-manager updatesitedata' before collection data migration.\")\n print(\"Collection data not migrated.\")\n return am_errors.AM_MIGRATECOLLFAIL", "def __init__(self, collection, inner_exception=None):\n message = \"Collection {0} must be {1} but it is not\".format(\n collection.role.key, \"compact\" if collection.role.compact else \"full\"\n )\n\n super(CollectionWrongFormatError, self).__init__(message, inner_exception)\n\n self._collection = collection", "def test_update_with_enabled_False(self):\n self._assert_update_raises_bad_request('False')", "def test_update_partial_with_forbidden_fields(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'is_active': False,\n 'is_present': True,\n }\n\n response = self.client.patch(\n reverse(\n 'retirement:reservation-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'non_field_errors': [\n \"Only is_present and retirement can be updated. To change \"\n \"other fields, delete this reservation and create a new one.\"\n ]\n }\n\n self.assertEqual(response_data, content)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the batch norm module in target_model that immediately follows a given convolution node in the model's NNCFGraph representation.
def get_bn_for_conv_node_by_name(target_model: NNCFNetwork, conv_node_name: NNCFNodeName) -> Optional[torch.nn.Module]: graph = target_model.nncf.get_original_graph() conv_node = graph.get_node_by_name(conv_node_name) bn_node = get_bn_node_for_conv(graph, conv_node) if bn_node is None: return None bn_module = target_model.nncf.get_containing_module(bn_node.node_name) return bn_module
[ "def forward_pass_on_convolutions(self, x):\n conv_output = None\n for module_pos, module in self.model.encoder.encoder._modules.items():\n \n if int(module_pos) == self.target_block:\n# if int(module_pos) == self.target_layer:\n for sub_module_pos, sub_module in module._modules.items():\n# print(sub_module)\n x = sub_module(x)\n if int(sub_module_pos) == self.target_layer: \n x.register_hook(self.save_gradient)\n conv_output = x # Save the convolution output on that layer\n break\n x = module(x) # Forward\n return conv_output, x", "def conv_batchnorm_relu_forward(x, w, b, beta, gamma, bn_param, conv_param):\n out_cf, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out_bf, batch_cache = spatial_batchnorm_forward(out_cf, gamma, beta, bn_param)\n out, relu_cache = relu_forward(out_bf)\n cache = (conv_cache, batch_cache, relu_cache)\n\n return out, cache", "def forward_pass_on_convolutions(self, x):\n conv_output = None\n for module_name, module in self.model._modules.items():\n print(module_name)\n if module_name == 'fc':\n return conv_output, x\n x = module(x) # Forward\n #print(module_name, module)\n #resnet\n if module_name == self.target_layer:\n print('True')\n x.register_hook(self.save_gradient)\n conv_output = x # Save the convolution output on that layer\n #VGG\n elif module_name == 'features':\n print('vgg cam hook register to be done')\n x.register_hook(self.save_gradient)\n conv_output = x\n return conv_output, x", "def make_ncp_graph(model_config):\n tf.reset_default_graph()\n\n interceptor = ed_transforms.ncp\n\n def model_ncp(*params):\n with ed.interception(interceptor):\n return model_config.model(*params)\n\n if model_config.bijectors_fn is not None:\n model_ncp = ed_transforms.transform_with_bijectors(\n model_ncp, model_config.bijectors_fn)\n\n log_joint_noncentered = ed.make_log_joint_fn(model_ncp)\n\n with ed.tape() as model_tape:\n _ = model_ncp(*model_config.model_args)\n\n target_ncp_kwargs = {}\n for param in model_tape.keys():\n if param in model_config.observed_data.keys():\n target_ncp_kwargs[param] = model_config.observed_data[param]\n\n def target_ncp(*param_args):\n i = 0\n for param in model_tape.keys():\n if param not in model_config.observed_data.keys():\n target_ncp_kwargs[param] = param_args[i]\n i = i + 1\n\n return log_joint_noncentered(*model_config.model_args, **target_ncp_kwargs)\n\n elbo, variational_parameters = util.get_mean_field_elbo(\n model_config.model,\n target_ncp,\n num_mc_samples=FLAGS.num_mc_samples,\n model_args=model_config.model_args,\n model_obs_kwargs=model_config.observed_data,\n vi_kwargs=None)\n\n return target_ncp, model_ncp, elbo, variational_parameters, None", "def add_conv_block(\n Conv,\n BatchNorm,\n in_channels=1,\n out_channels=1,\n kernel_size=3,\n dilation=1,\n last=False,\n):\n padding = dilation if not last else 0\n conv_layer = Conv(\n in_channels, out_channels, kernel_size, padding=padding, dilation=dilation\n )\n bn_layer = BatchNorm(out_channels)\n\n return [conv_layer, bn_layer]", "def cyclic_conv1d_alt(input_node, filter_):\n c = int(input_node.shape[2])\n kernel_node = filter_.coeffs\n\n N = int(input_node.shape[1])\n\n start = N - filter_.num_neg()\n end = filter_.num_pos() - 1\n\n # Perodically extend input signal\n input_new = tf.concat(\n (input_node[:, start:, :], input_node, input_node[:, 0:end, :]),\n axis=1\n )\n\n # Convolve with periodic extension\n result = tf.nn.conv1d(input_new, kernel_node[::-1], stride=1, padding=\"VALID\")\n\n return result", "def flops_convnd(module: _ConvNd, input: 
Tensor, output: Tensor) -> int:\n\n # For each position, # mult = kernel size, # adds = kernel size - 1\n window_flops_per_chan = 2 * reduce(mul, module.kernel_size) - 1\n # Connections to input channels is controlled by the group parameter\n effective_in_chan = (input.shape[1] // module.groups)\n # N * flops + (N - 1) additions\n window_flops = effective_in_chan * window_flops_per_chan + (effective_in_chan - 1)\n conv_flops = output.numel() * window_flops\n\n # Each output element gets a bias addition\n bias_flops = output.numel() if module.bias is not None else 0\n\n return conv_flops + bias_flops", "def get_conv_net(self):\n layers = []\n in_channels = self.channels\n\n for layer in configs[self.config]:\n if layer == 'M':\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n else:\n layer = layer.split('-')\n kernel_size = int(layer[0])\n out_channels = int(layer[1])\n layers.append(nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=1))\n\n if self.batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n\n layers.append(nn.ReLU(inplace=True))\n in_channels = out_channels\n\n return nn.Sequential(*layers)", "def make_cp_graph(model_config):\n\n tf.reset_default_graph()\n\n model = model_config.model\n if model_config.bijectors_fn is not None:\n model = ed_transforms.transform_with_bijectors(\n model, model_config.bijectors_fn)\n\n log_joint_centered = ed.make_log_joint_fn(model)\n\n with ed.tape() as model_tape:\n _ = model(*model_config.model_args)\n\n target_cp_kwargs = {}\n for param in model_tape.keys():\n if param in model_config.observed_data.keys():\n target_cp_kwargs[param] = model_config.observed_data[param]\n\n def target_cp(*param_args):\n i = 0\n for param in model_tape.keys():\n if param not in model_config.observed_data.keys():\n target_cp_kwargs[param] = param_args[i]\n i = i + 1\n\n return log_joint_centered(*model_config.model_args, **target_cp_kwargs)\n\n elbo, variational_parameters = util.get_mean_field_elbo(\n model,\n target_cp,\n num_mc_samples=FLAGS.num_mc_samples,\n model_args=model_config.model_args,\n model_obs_kwargs=model_config.observed_data,\n vi_kwargs=None)\n\n return target_cp, model_config.model, elbo, variational_parameters, None", "def convolution(prev_layer, n_filters, hype_space, force_ksize=None):\n if force_ksize is not None:\n k = force_ksize\n else:\n k = int(round(hype_space['conv_kernel_size']))\n return tensorflow.keras.layers.Conv2D(\n filters=n_filters, kernel_size=(k, k), strides=(1, 1),\n padding='same', activation=hype_space['activation'],\n kernel_regularizer=tensorflow.keras.regularizers.l2(\n STARTING_L2_REG * hype_space['l2_weight_reg_mult'])\n )(prev_layer)", "def conv_deep_n_backward_search(node:torch._C.Node,deep = 1,is_head = True):\n assert ((is_head and \"convolution\" in node.kind()) or not is_head)\n logging.debug(\"current deep:{}\".format(deep))\n def recursive(node: torch._C.Node,re_deep, expect_next_node_kind: list = None):\n inputs_Value = list(node.inputs())\n result_list = []\n recursive_flag = False\n for value in inputs_Value:\n if not list(value.uses()):\n continue\n if expect_next_node_kind:\n for exp in expect_next_node_kind:\n if exp in value.node().kind():\n recursive_flag = True\n logging.debug(\"next_node: {} => {}\".format(scope2name( node.scopeName()),\n scope2name( value.node().scopeName())))\n result = conv_deep_n_backward_search(value.node(),re_deep,False)\n if result:\n result_list.extend(result)\n if not recursive_flag:\n logging.warning(\"the 
next structure of node {} is not config, which might be a fault.\".format(\n scope2name(node.scopeName())))\n\n else:\n result = conv_deep_n_backward_search(value.node(), re_deep, False)\n if result:\n result_list.extend(result)\n return result_list\n\n def new_structure_error(node_kind):\n raise RuntimeError(\n \"New structure in search path which is not config in shortcut forward search: {} \".format(node_kind))\n\n # first call by add_ node\n # if from_node is None:\n if \"convolution\" in node.kind():\n if deep == 0:\n logging.info(\"stop by conv when deep is {}\".format(deep))\n return [node]\n else:\n # find convolution linked by shortcut add, return it.\n result = recursive(node, deep-1,[\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n if not is_head:\n result.append(node)\n return result\n\n elif \"relu\" in node.kind():\n result = recursive(node,deep, [\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n return result\n\n\n elif \"batch_norm\" in node.kind():\n result = recursive(node,deep, [\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n result.append(node)\n return result\n\n elif \"add\" in node.kind():\n logging.info(\"stop by add when deep is {}\".format(deep))\n return []\n\n\n elif \"max_pool2d\" in node.kind():\n result = recursive(node,deep, [\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n return result\n\n else:\n new_structure_error(node.kind())", "def graph_conv_net(batch_size, prior, num_task):\n tg = TensorGraph(use_queue=False)\n if prior == True:\n add_on = num_task\n else:\n add_on = 0\n atom_features = Feature(shape=(None, 75 + 2*add_on))\n circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)\n\n degree_slice = Feature(shape=(None, 2), dtype=tf.int32)\n membership = Feature(shape=(None,), dtype=tf.int32)\n deg_adjs = []\n for i in range(0, 10 + 1):\n deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)\n deg_adjs.append(deg_adj)\n\n gc1 = GraphConv(\n 64 + add_on,\n activation_fn=tf.nn.elu,\n in_layers=[atom_features, degree_slice, membership] + deg_adjs)\n batch_norm1 = BatchNorm(in_layers=[gc1])\n gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)\n\n\n gc2 = GraphConv(\n 64 + add_on,\n activation_fn=tf.nn.elu,\n in_layers=[gc1, degree_slice, membership] + deg_adjs)\n batch_norm2 = BatchNorm(in_layers=[gc2])\n gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)\n\n add = Concat(in_layers = [gp1, gp2])\n add = Dropout(0.5, in_layers =[add])\n dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])\n batch_norm3 = BatchNorm(in_layers=[dense])\n readout = GraphGather(\n batch_size=batch_size,\n activation_fn= tf.nn.tanh,\n in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)\n batch_norm4 = BatchNorm(in_layers=[readout])\n\n dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features])\n dense1 = BatchNorm(in_layers=[dense1])\n dense1 = Dropout(0.5, in_layers =[dense1])\n dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features])\n dense1 = BatchNorm(in_layers=[dense1])\n dense1 = Dropout(0.5, in_layers =[dense1])\n merge_feat = Concat(in_layers = [dense1, batch_norm4])\n merge = Dense(out_channels=256, activation_fn=tf.nn.elu, in_layers=[merge_feat])\n costs = []\n labels = []\n for task in range(num_task):\n classification = Dense(\n out_channels=2, activation_fn=None,in_layers=[merge])\n softmax = SoftMax(in_layers=[classification])\n 
tg.add_output(softmax)\n label = Label(shape=(None, 2))\n labels.append(label)\n cost = SoftMaxCrossEntropy(in_layers=[label, classification])\n costs.append(cost)\n all_cost = Stack(in_layers=costs, axis=1)\n weights = Weights(shape=(None, num_task))\n loss = WeightedError(in_layers=[all_cost, weights])\n tg.set_loss(loss)\n #if prior == True:\n # return tg, atom_features,circular_features, degree_slice, membership, deg_adjs, labels, weights#, prior_layer\n return tg, atom_features, circular_features ,degree_slice, membership, deg_adjs, labels, weights", "def conv_deep_n_forward_search(node:torch._C.Node,deep = 1,is_head = True):\n assert ((is_head and \"convolution\" in node.kind()) or not is_head)\n logging.debug(\"current deep:{}\".format(deep))\n def recursive(node: torch._C.Node,deep, expect_next_node_kind: list = None):\n recursive_flag = False\n outputs_Value = list(node.outputs())\n result_list = []\n for value in outputs_Value:\n if not list(value.uses()):\n continue\n if expect_next_node_kind:\n for exp in expect_next_node_kind:\n for next_node in [u.user for u in value.uses()]:\n if exp in next_node.kind():\n recursive_flag = True\n logging.debug(\"next_node: {} => {}\".format(scope2name(node.scopeName()),scope2name(next_node.scopeName())))\n result = conv_deep_n_forward_search(next_node,deep,False)\n if result:\n result_list.extend(result)\n if not recursive_flag:\n logging.warning(\"the next structure of node {} is not config, which might be a fault.\".format(scope2name(node.scopeName())))\n else:\n for next_node in [u.user for u in value.uses()]:\n result_list.extend(conv_deep_n_forward_search(next_node,deep, False))\n\n return result_list\n\n def new_structure_error(node_kind):\n raise RuntimeError(\n \"New structure in search path which is not config in shortcut forward search: {} \".format(node_kind))\n\n # first call by add_ node\n # if from_node is None:\n if \"convolution\" in node.kind():\n if deep == 0:\n logging.info(\"stop by conv when deep is {}\".format(deep))\n return [node]\n else:\n # find convolution linked by shortcut add, return it.\n result = recursive(node, deep-1,[\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n if not is_head:\n result.append(node)\n return result\n\n elif \"relu\" in node.kind():\n result = recursive(node,deep, [\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n return result\n\n\n elif \"batch_norm\" in node.kind():\n result = recursive(node,deep, [\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n result.append(node)\n return result\n\n elif \"add\" in node.kind():\n logging.info(\"stop by add when deep is {}\".format(deep))\n return []\n\n\n elif \"max_pool2d\" in node.kind():\n result = recursive(node,deep, [\"batch_norm\", \"relu\",\"max_pool2d\",\"add\",\"convolution\"])\n return result\n\n\n else:\n new_structure_error(node.kind())", "def forward(self, t):\r\n\r\n t = F.relu(self.conv1(t))\r\n t = self.pool(t)\r\n t = F.relu(self.conv2(t))\r\n #t = self.pool(t)\r\n t = F.relu(self.conv3(t))\r\n #t = F.relu(self.conv4(t))\r\n t = t.flatten(start_dim = 1)\r\n t = F.relu(self.fc(t))\r\n t = self.out(t)\r\n return t", "def patch_base_cnn_model_fn(features, labels, mode): \n\n\tinput_layer = tf.reshape(features[\"x\"], [-1, 101, 101, 3])\n\n# ------------------ Layer1 -------------------------\n\tconv1 = tf.layers.conv2d(\n\t\tinputs=input_layer,\n\t\tfilters=80,\n\t\tkernel_size=[6,6],\n\t\tstrides=[1,1],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\tlrn1 = 
tf.nn.local_respose_normalsation(\t\t\t\t\t\t#TODO\n\t\tinputs=conv1,\n\t\tdepth_radius=5,\n\t\tbias=1,\n\t\talpha=1,\n\t\tbeta=0.5,\n\t\tname=None)\n\n\tpool1 = tf.layers.max_pooling2d(\n\t\tinputs=lrn1,\n\t\tpool_size=[2,2],\n\t\tstrides=2)\n\n# ------------------ Layer2 -------------------------\n\tconv2 = tf.layers.conv2d(\n\t\tinputs=pool1,\n\t\tfilters=120,\n\t\tkernel_size=[5,5],\n\t\tstrides=[1,1],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\tlrn2 = tf.nn.local_respose_normalsation(\t\t\t\t\t\t#TODO\n\t\tinputs=conv2,\n\t\tdepth_radius=5,\n\t\tbias=1,\n\t\talpha=1,\n\t\tbeta=0.5,\n\t\tname=None)\n\n\tpool2 = tf.layers.max_pooling2d(\n\t\tinputs=lrn2,\n\t\tpool_size=[2,2],\n\t\tstrides=2)\n\n# ------------------ Layer3 -------------------------\n\tconv3 = tf.layers.conv2d(\n\t\tinputs=pool2,\n\t\tfilters=160,\n\t\tkernel_size=[3,3],\n\t\tstrides=(1,1),\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n# ------------------ Layer4 -------------------------\n\tconv4 = tf.layers.conv2d(\n\t\tinputs=conv3,\n\t\tfilters=200,\n\t\tkernel_size=[3,3],\n\t\tstrides=(1,1),\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\tpool4 = tf.layers.max_pooling2d(\n\t\tinputs=conv4,\n\t\tpool_size=[3,3],\n\t\tstrides=2)\n\n# ------------------ Dense Layer 1-------------------------\n\tpool4_flat = tf.reshape(pool4, [-1, 9*9*200])\n\tdense_layer1 = tf.layers.dense(\n\t\tinputs=pool4_flat,\n\t\tunits=320,\n\t\tactivation=tf.nn.relu)\n\n\tdropout1 = tf.layers.dropout(\n\t\tinputs=dense_layer1,\n\t\trate=0.5,\t\t\t\t\t\t\t\t\t\t\t\t\t\t#FIXME\n\t\ttraining=mode==tf.estimator.ModeKeys.TRAIN)\n\n# ------------------ Dense Layer 2-------------------------\n\tdense_layer2 = tf.layers.dense(\n\t\tinputs=dropout1,\n\t\tunits=320,\n\t\tactivation=tf.nn.relu)\t\n\n\tdropout2 = tf.layers.dropout(\n\t\tinputs=dense_layer2,\n\t\trate=0.5,\t\t\t\t\t\t\t\t\t\t\t\t\t\t#FIXME\n\t\ttraining=mode==tf.estimator.ModeKeys.TRAIN)\n\n# ------------------ Logits Layer -------------------------\n\tlogits = tf.layers.dense(\n\t\tinputs=dropout2,\n\t\tunits=2,\n\t\tactivation=None)\n\n\n#--------------- mode = PRED -----------------#\n\tpredictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n \t}\n\n \tif mode == tf.estimator.ModeKeys.PREDICT:\n\t return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\n#--------------- mode = TRAIN and EVAL -----------------#\n\tonehot_labels = tf.one_hot(\n\t\t\t\t\tindices=tf.cast(labels, tf.int32), \n\t\t\t\t\tdepth=10)\t\t\t\t\t\t\t\t\t# Number of classes\t\t\t\t\t\t\n\t\n\tloss = tf.losses.softmax_cross_entropy(\n\t\t\t\t\tonehot_labels=onehot_labels,\n\t\t\t\t\tlogits=logits)\t\t\t\t\t\t\t\t# Logits are taken as input not their softmax probabilities \n\n\n\t# Training Mode\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n\t\ttrain_op = optimizer.minimize(\n\t\t\t loss=loss,\n\t\t\t\t\tglobal_step=tf.train.get_global_step())\n\t return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n\n\t# Eval mode\n\tif mode == tf.estimator.ModeKeys.EVAL:\n\t\teval_metric_ops = {\n\t\t\t\t\"accuracy\": tf.metrics.accuracy(labels, predictions=predictions[\"classes\"])\n\t\t\t}\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def load_conv_layer(input_x, w, b, name='conv_layer'):\n with tf.name_scope(name):\n conv = tf.nn.conv2d(input_x, w, strides=[1, 1, 1, 1], padding=\"SAME\")\n act = tf.nn.relu(conv + b)\n \n return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")", "def conv_batchnorm_relu_pool_forward(x, w, b, beta, gamma, bn_param, conv_param, pool_param):\n out_cff, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out_sbf, batch_cache = spatial_batchnorm_forward(out_cff, gamma, beta, bn_param)\n out_rf, relu_cache = relu_forward(out_sbf)\n out, pool_cache = max_pool_forward_fast(out_rf, pool_param)\n cache = (conv_cache, batch_cache, relu_cache, pool_cache)\n return out, cache", "def ReluKernelFeatures(inducing_batch):\n return nn.Sequential(FeaturesToKernel(inducing_batch), ReluKernelGram())", "def create_nncf_graph(model: ov.Model) -> NNCFGraph:\n nncf_graph = NNCFGraph()\n visited = set()\n read_value_nodes = [op for op in model.get_ops() if op.get_type_name() == \"ReadValue\"]\n inference_nodes = model.get_parameters() + read_value_nodes\n\n while inference_nodes:\n node = inference_nodes[0]\n inference_nodes = inference_nodes[1:]\n if node.get_friendly_name() not in visited:\n GraphConverter._add_nncf_node(node, nncf_graph)\n visited.add(node.get_friendly_name())\n for out in node.outputs():\n for inp in sorted(out.get_target_inputs(), key=lambda inp: inp.get_node().get_friendly_name()):\n inference_nodes.append(inp.get_node())\n\n for node in model.get_ops():\n metatype = GraphConverter._get_node_metatype(node)\n # Add nodes from constant subgraphs\n node_name = node.get_friendly_name()\n if node_name not in visited:\n GraphConverter._add_nncf_node(node, nncf_graph)\n # Set const port id\n elif metatype in METATYPES_WITH_CONST_PORT_ID:\n const_attrs, act_attrs = {}, {}\n for inp in GraphConverter._filter_weight_input_ports(node.inputs(), metatype):\n inp_name = inp.get_source_output().get_node().get_friendly_name()\n if inp_name in visited:\n continue\n\n const_port_id = inp.get_index()\n const_node = get_operation_const_op(node, const_port_id)\n ov_dtype = const_node.get_element_type().get_type_name()\n if GraphConverter.convert_to_nncf_dtype(ov_dtype) == Dtype.INTEGER:\n continue\n\n const_attrs[const_port_id] = {\n \"name\": 
const_node.get_friendly_name(),\n \"shape\": tuple(const_node.get_output_shape(0)),\n }\n\n if metatype == OVMatMulMetatype:\n node_inputs = node.inputs()\n attribute_names = [\"transpose_a\", \"transpose_b\"]\n node_attributes = node.get_attributes()\n const_transpose_name = attribute_names[const_port_id]\n const_attrs[const_port_id][\"transpose\"] = node_attributes[const_transpose_name]\n\n act_port_id = abs(const_port_id - 1)\n act_attrs[\"transpose\"] = node_attributes[attribute_names[act_port_id]]\n partial_shape = node_inputs[act_port_id].get_partial_shape()\n act_attrs[\"shape\"] = tuple(partial_shape.get_max_shape())\n\n if const_attrs or act_attrs:\n nncf_node = nncf_graph.get_node_by_name(node_name)\n nncf_node.layer_attributes = OVConstantLayerAttributes(const_attrs, act_attrs)\n\n GraphConverter._add_edges_to_nncf_graph(model, nncf_graph)\n return nncf_graph" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes masks in the graph for the mask propagation algorithm.
def init_output_masks_in_graph(graph: NNCFGraph, nodes: List): for node in graph.get_all_nodes(): node.attributes.pop("output_mask", None) for minfo in nodes: mask = minfo.operand.binary_filter_pruning_mask nncf_node = graph.get_node_by_id(minfo.nncf_node_id) nncf_node.attributes["output_mask"] = PTNNCFTensor(mask)
[ "def _initialize_mask(self):\n if 'locally_connected' in self.mask_type:\n assert self.neighbour_matrix is not None\n L = self.neighbour_matrix.T\n assert L.shape == (self.in_joints, self.in_joints)\n if 'learnable' not in self.mask_type:\n self.mask = tf.constant(L)\n else:\n if self.init_type == 'same':\n initializer = L\n elif self.init_type == 'ones':\n initializer = tf.initializers.ones\n elif self.init_type == 'random':\n initializer = tf.random.uniform\n var_mask = tf.Variable(\n name='mask', shape=[self.in_joints, self.out_joints] if self.init_type != 'same' else None,\n dtype=tf.float32, initial_value=initializer\n )\n var_mask = tf.nn.softmax(var_mask, axis=0)\n self.mask = var_mask * tf.constant(L != 0, dtype=tf.float32)", "def __set_masks__(self, g: dgl.DGLGraph):\n num_feat = g.ndata[self.nfeat].shape[1:]\n self.feature_mask = nn.Parameter(torch.randn(num_feat) * 0.1)\n\n std = nn.init.calculate_gain('relu') * sqrt(2.0 / (2 * g.num_nodes()))\n g.edata[ExplainerTags.EDGE_MASK] = nn.Parameter(torch.randn(g.num_edges()) * std)", "def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, num_classes]\n x = PyramidROIAlign(\n [pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name=\"mrcnn_mask_bn2\")(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name=\"mrcnn_mask_bn3\")(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name=\"mrcnn_mask_bn4\")(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2,\n activation='relu'),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1,\n activation='sigmoid'),\n name=\"mrcnn_mask\")(x)\n return x", "def edge_mask(self):", "def _applyMask(self):\n self.pcontainer._params[:] = self.mask*self.maskableParams", "def restore_init_state(self):\n self.curr_mask_index = 0\n self.mask_count = 0", "def apply_mask_to_datasets(self):\n if self.mask_active:\n # Load mask data\n if len(self.mask_file_path) > 0:\n ext = os.path.splitext(self.mask_file_path)[-1].lower()\n msg = \"\"\n try:\n if \".npy\" == ext:\n self.mask_data = np.load(self.mask_file_path)\n elif \".txt\" == ext:\n self.mask_data = np.loadtxt(self.mask_file_path)\n else:\n self.mask_data = np.array(Image.open(self.mask_file_path))\n\n for k in self.data_sets.keys():\n self.data_sets[k].set_mask(mask=self.mask_data, mask_active=self.mask_active)\n\n # TODO: remove the following code if not needed\n # I see no reason of adding the mask image to every processed dataset\n # for k in self.img_dict.keys():\n # if 'fit' in k:\n # self.img_dict[k][\"mask\"] = self.mask_data\n\n except IOError as ex:\n msg = f\"Mask file '{self.mask_file_path}' cannot be loaded: {str(ex)}.\"\n except Exception 
as ex:\n msg = f\"Mask from file '{self.mask_file_path}' cannot be set: {str(ex)}.\"\n if msg:\n logger.error(msg)\n self.mask_data = None\n self.mask_active = False # Deactivate the mask\n # Now raise the exception so that proper error processing can be performed\n raise RuntimeError(msg)\n\n logger.debug(f\"Mask was successfully loaded from file '{self.mask_file_path}'\")\n else:\n # We keep the file name, but there is no need to keep the data, which is loaded from\n # file each time the mask is loaded. Mask is relatively small and the file can change\n # between the function calls, so it's better to load new data each time.\n self.mask_data = None\n # Now clear the mask in each dataset\n for k in self.data_sets.keys():\n self.data_sets[k].set_mask(mask=self.mask_data, mask_active=self.mask_active)\n\n # TODO: remove the following code if not needed\n # There is also no reason to remove the mask image if it was not added\n # for k in self.img_dict.keys():\n # if 'fit' in k:\n # self.img_dict[k][\"mask\"] = self.mask_data\n\n logger.debug(\"Setting spatial ROI ...\")\n logger.debug(f\" ROI selection is active: {self.roi_selection_active}\")\n logger.debug(f\" Starting position: ({self.roi_row_start}, {self.roi_col_start})\")\n logger.debug(f\" Ending position (not included): ({self.roi_row_end}, {self.roi_col_end})\")\n\n try:\n for k in self.data_sets.keys():\n self.data_sets[k].set_selection(\n pt_start=(self.roi_row_start, self.roi_col_start),\n pt_end=(self.roi_row_end, self.roi_col_end),\n selection_active=self.roi_selection_active,\n )\n except Exception as ex:\n msg = f\"Spatial ROI selection can not be set: {str(ex)}\\n\"\n logger.error(msg)\n raise RuntimeError(ex)\n\n # TODO: it may be more logical to pass the data somewhere else. We leave it here for now.\n # Select raw data to for single pixel fitting.\n self.data_all = self.data_sets[self.selected_file_name].raw_data\n\n # Create Dask client to speed up processing of multiple datasets\n client = dask_client_create()\n # Run computations with the new selection and mask\n # ... for the dataset selected for processing\n self.data, self.data_total_count = self.data_sets[self.selected_file_name].get_total_spectrum_and_count(\n client=client\n )\n # ... 
for all datasets selected for preview except the one selected for processing.\n for key in self.data_sets.keys():\n if (key != self.selected_file_name) and self.data_sets[key].selected_for_preview:\n self.data_sets[key].update_buffers(client=client)\n client.close()", "def _update_masks(self):\n masks = {}\n for layer_name, parameters in self._model.named_parameters():\n # if the layer shouldn't be pruned -> skip\n if not self._is_prune_layer(layer_name):\n continue\n # calculate how many parameters to prune form the layer according to s_t (st is a percentage - 0-100)\n st = int(parameters.view(-1).shape[0] * self._st / 100)\n # calculate masking bar according to the the weight's magnitude\n\n bar = parameters.abs().view(-1).topk(parameters.view(-1).shape[0] - st)[0].min()\n # set mask\n mask_positive_indices = torch.where(parameters.abs() >= bar)\n mask = torch.zeros(parameters.shape).to(self._model.device)\n mask[mask_positive_indices] = 1\n masks[layer_name] = mask\n self._masks = masks", "def set_initial_conditions(self, mask):\n self.Ci = (mask == 0)*self._c_out", "def sparse_mask(self, input, mask): # real signature unknown; restored from __doc__\n pass", "def mrcnn_masks(self):\n masks = []\n klasses = []\n for b in self.buildings:\n if b.color() == 5:\n continue\n img = np.zeros(S.MASKSHAPE[:2], dtype=np.uint8)\n coords = b.coords()\n if len(coords) > 0:\n cv2.fillPoly(img, np.array([coords]), 1)\n masks.append(img)\n klasses.append(b.color())\n if len(masks) == 0:\n return np.array(masks, copy=False), np.ones([0], dtype=np.int32)\n #masks.append(np.zeros(MASKSHAPE[:2], dtype=np.uint8))\n return np.dstack(masks).astype(bool), np.array(klasses)#np.ones([len(masks)], dtype=np.int32)", "def convert_masks(self, idxs):\n masks_list = [self.masks[i] for i in idxs]\n\n masks = torch.ones((idxs.shape[0], self.dim))\n for i, m in enumerate(masks_list):\n for j in m:\n masks[i, j] = 0\n\n return masks", "def apply_mask(self, stack):\n if self.global_flags != 0:\n stack.apply_global_mask(self.global_flags, self.mask_num_images)\n return stack", "def update_masks(self, adjmodule_getter: AdjModuleGetter) -> None:\n # Note: model must have nn.Flatten to get last conv shape info\n last_conv_shape = adjmodule_getter.last_conv_shape\n\n for channelrepr, conv, bn in self.channelrepr_conv_bn:\n # Copy channel weight_mask to bias_mask\n ch_buffers = {name: buf for name, buf in channelrepr.named_buffers()}\n ch_mask = ch_buffers[\"weight_mask\"].detach().clone()\n if \"bias_mask\" in ch_buffers:\n ch_buffers[\"bias_mask\"].set_(ch_mask) # type: ignore\n\n # Copy channel weight_mask to bn weight_mask, bias_mask\n bn_buffers = {name: buf for name, buf in bn.named_buffers()}\n bn_buffers[\"weight_mask\"].set_(ch_mask) # type: ignore\n bn_buffers[\"bias_mask\"].set_(ch_mask) # type: ignore\n\n conv_buffers = {name: buf for name, buf in conv.named_buffers()}\n if \"bias_mask\" in conv_buffers:\n conv_buffers[\"bias_mask\"].set_(ch_mask) # type: ignore\n\n # conv2d - batchnorm - activation (CBA)\n # bn_mask: [out], conv: [out, in, h, w]\n [o, i, h, w] = conv_buffers[\"weight_mask\"].shape\n\n # check shape -> if its not shaped as CBA\n if ch_mask.shape[0] != o:\n continue\n # ch_mask: [out, 1, 1]\n ch_mask = ch_mask.view(o, 1, 1)\n # ch_mask: [out, h, w]\n ch_mask = ch_mask.repeat(1, h, w)\n # ch_mask: [out, 1, h, w]\n ch_mask = ch_mask.unsqueeze(1)\n # ch_mask: [out, in, h, w]\n ch_mask = ch_mask.repeat(1, i, 1, 1)\n\n conv_buffers[\"weight_mask\"].set_(ch_mask) # type: ignore\n\n # Update fc layer mask\n 
fc_modules: Dict[str, nn.Linear] = dict()\n bn_modules: Dict[str, nn.BatchNorm2d] = dict()\n for k, v in self.model.named_modules():\n if type(v) is nn.Linear:\n fc_modules.update({k: v})\n elif type(v) is nn.BatchNorm2d:\n bn_modules.update({k: v})\n\n for fc in fc_modules.values():\n bns = adjmodule_getter.find_modules_ahead_of(fc, nn.BatchNorm2d)\n bn_connections = [bn.weight_mask for bn in bns]\n\n if not bn_connections:\n continue\n\n fc_mask = torch.cat(bn_connections)\n fc_mask = torch.flatten(\n fc_mask.view(-1, 1, 1).repeat(1, last_conv_shape, last_conv_shape)\n )\n\n o, i = fc.weight_mask.size() # type: ignore\n fc_mask = fc_mask.repeat(o).reshape(o, i)\n fc.weight_mask.data = fc_mask", "def get_masks(self):\n x = self.getxrvar('mask')\n ds = xr.Dataset()\n\n for cv_set in ['train', 'valid', 'test', 'all']:\n indices = self.space_indices[cv_set]\n mask = np.zeros((len(x.lat), len(x.lon)), dtype=int)\n for (lat, lon) in indices:\n mask[lat, lon] = 1\n ds[cv_set] = xr.DataArray(mask, coords=[x.lat, x.lon])\n\n return ds", "def labeled_mask_to_inst_masks(self, labeled_mask):\n nr_true = labeled_mask.max()\n masks = []\n for i in range(1, nr_true + 1):\n msk = labeled_mask.copy()\n msk[msk != i] = 0.\n msk[msk == i] = 255.\n masks.append(msk)\n if not masks:\n return np.asarray([labeled_mask])\n else:\n return np.asarray(masks)", "def _load_mask(self, gt_data):\n img_coco = self.refexp_dataset.loadImgs(ids=gt_data['image_id'])[0]\n mask = Image.new('L', (img_coco['width'], img_coco['height']), 0)\n for seg in gt_data['segmentation']:\n ImageDraw.Draw(mask).polygon(seg, outline='white', fill='white')\n return numpy.asarray(mask)", "def create_masks(self):\n height, width = self.sem_seg_height_width\n\n pts = np.array([[width//2, 0], [width, 0], [width, height], [width//2, height]]) # mask for right side\n cv2.drawContours(self.mask_right_side, [pts], -1, 1, -1, cv2.LINE_AA)\n\n pts = np.array([[75, 300], [175, 0], [200, 0], [325, 300]]) # mask to detected american style traffic lights\n cv2.drawContours(self.mask_front_triangle, [pts], -1, 1, -1, cv2.LINE_AA)", "def add_mask(self):\n self.variables=np.append(self.variables, 'MASK')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the output shape of a convolution layer from its input edge.
def _calculate_output_shape(graph: NNCFGraph, node: NNCFNode) -> Tuple[int, ...]: in_edge = graph.get_input_edges(node)[0] shape = list(in_edge.tensor_shape) attrs = node.layer_attributes if isinstance(attrs, ConvolutionLayerAttributes): shape = shape[2:] for i, _ in enumerate(shape): if attrs.transpose: shape[i] = (shape[i] - 1) * attrs.stride[i] - 2 * attrs.padding_values[i] + attrs.kernel_size[i] else: shape[i] = (shape[i] + 2 * attrs.padding_values[i] - attrs.kernel_size[i]) // attrs.stride[i] + 1 elif isinstance(attrs, LinearLayerAttributes): shape = shape[:-1] + [attrs.out_features] else: raise RuntimeError(f"Unexpected node type {node.node_type} is fed to _calculate_output_shape") return tuple(shape)
[ "def _conv_output_shape(cls, h_w: Union[tuple, int],\n kernel_size: Union[tuple, int],\n stride: Union[tuple, int],\n pad: Union[tuple, int] = 0,\n dilation=1):\n # source https://discuss.pytorch.org/t/utility-function-for-calculating-the-shape-of-a-conv-output/11173/6\n\n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(stride) is not tuple:\n stride = (stride, stride)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) // stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) // stride[1] + 1\n\n return h, w", "def get_conv_output_shape(image_shape, kernel_shape,\n border_mode, subsample,\n filter_dilation=None):\n # ====== convert tensorflow shape to theano shape ====== #\n image_shape = (image_shape[0], image_shape[-1]) + tuple(image_shape[1:-1])\n kernel_shape = (kernel_shape[-1], kernel_shape[-2]) + tuple(kernel_shape[:-2])\n # ====== infer shape ====== #\n bsize, imshp = image_shape[0], image_shape[2:]\n nkern, kshp = kernel_shape[0], kernel_shape[2:]\n if filter_dilation is None:\n filter_dilation = np.ones(len(subsample), dtype='int')\n if isinstance(border_mode, tuple):\n out_shp = tuple(__get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode[i],\n subsample[i], filter_dilation[i]) for i in range(len(subsample)))\n else:\n out_shp = tuple(__get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode,\n subsample[i], filter_dilation[i]) for i in range(len(subsample)))\n # ====== convert theano to tensorflow shape ====== #\n return (bsize, ) + out_shp + (nkern,)", "def calculate_out_shape(in_shape, kernel_size, stride, padding):\n in_shape = np.atleast_1d(in_shape)\n out_shape = ((in_shape - kernel_size + padding + padding) // stride) + 1\n out_shape = tuple(int(s) for s in out_shape)\n\n return tuple(out_shape) if len(out_shape) > 1 else out_shape[0]", "def compute_output_shape(self, input_shape):\n return (\n input_shape[0],\n self.paddings[1][0] + input_shape[1] + self.paddings[1][1],\n self.paddings[2][0] + input_shape[2] + self.paddings[2][1],\n input_shape[3]\n )", "def calc_conv_layer_shape(prev_layer_shape, num_filters, stride):\n\n return np.array([num_filters] + calc_image_dimensions(\n prev_layer_shape[1:], stride), dtype=np.uint64)", "def output_shape(self, l_in):\r\n out_channel, l_out = self.in_channel, l_in\r\n for conv1d_unit in self.conv_layers:\r\n out_channel, l_out = conv1d_unit.output_shape(l_out)\r\n return l_out, out_channel", "def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n\n # Encoder\n x = OutputShapeFor(self.convbnrelu1.cbr_unit)(x)\n x = OutputShapeFor(self.maxpool)(x)\n\n e1 = OutputShapeFor(self.encoder1)(x)\n e2 = OutputShapeFor(self.encoder2)(e1)\n e3 = OutputShapeFor(self.encoder3)(e2)\n e4 = OutputShapeFor(self.encoder4)(e3)\n\n # Decoder with Skip Connections\n d4 = OutputShapeFor(self.decoder4)(e4)\n # d4 += e3\n d3 = OutputShapeFor(self.decoder3)(d4)\n # d3 += e2\n d2 = OutputShapeFor(self.decoder2)(d3)\n # d2 += e1\n d1 = OutputShapeFor(self.decoder1)(d2)\n\n # Final Classification\n f1 = OutputShapeFor(self.finaldeconvbnrelu1)(d1)\n f2 = OutputShapeFor(self.finalconvbnrelu2)(f1)\n f3 = OutputShapeFor(self.finalconv3)(f2)\n return f3", "def calc_edge_length(edge, layout):\n\n Ax, Ay, Bx, By = edge_to_cartesian(edge,layout)\n\n edge_length = math.sqrt( (Bx - Ax)*(Bx - 
Ax) + (By - Ay)*(By - Ay) )\n\n #print edge, Ax, Ay, Bx, By\n\n return edge_length", "def output_shape(self):\n return None", "def out_len_conv(self, in_len, conv_layer):\n out_len = (in_len-conv_layer.kernel_size[0]+2*conv_layer.padding[0])/conv_layer.stride[0]+1\n return out_len", "def get_conv1d_output_size(input_size, kernel_size, stride):\n # TODO: implement the formula in the writeup. One-liner; don't overthink\n return ((input_size - kernel_size) // stride) + 1 # See python doc for full formula\n \n #raise NotImplementedError(\"TODO: Complete functional.get_conv1d_output_size()!\")", "def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n return self.output_shape", "def get_output_size(self, input_size):\n # conv layers\n current_output = input_size + 2*np.array(self._modules['conv_module'].padding) - (self._modules['conv_module'].kernel_size - 1) - 1\n pre_pooling = np.floor(current_output/np.array(self._modules['conv_module'].stride) + 1)\n # pooling layers\n post_pooling = None\n if not self._modules.hasattr('pool_module') is None:\n post_pooling = pre_pooling - (self._modules['pool_module'].kernel_size-1) - 1\n post_pooling = np.floor(post_pooling/np.array(self._modules['pool_module'].kernel_size) + 1)\n return pre_pooling, post_pooling", "def output_shape(self, l_in):\r\n return self.out_channels, int((l_in - self.kernel_size) / self.stride) + 1", "def out_edge_count(self):", "def maxpool2d_out_dim(in_dim, kernel_size, padding=1, stride=1, dilation=1):\n out_dim = ((in_dim + 2*padding - dilation*(kernel_size-1) - 1)/stride) + 1\n return out_dim\n\n #TODO make a util function to calculate the output size of a layer given a input dim\n #ie get the input size of a linear layer by giving input h or w", "def output_shape(self):\n if context.in_eager_mode():\n raise RuntimeError('Layer.output_shape not supported in Eager mode.')\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n output_shapes = self._inbound_nodes[0].output_shapes\n if len(output_shapes) == 1:\n return tuple(tensor_shape.TensorShape(output_shapes[0]).as_list())\n else:\n return [\n tuple(tensor_shape.TensorShape(shape).as_list())\n for shape in output_shapes\n ]\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)", "def get_convolution_backward_filter_workspace_size(\n self, src_desc, diff_desc, conv_desc, grad_desc, algo):\n size = ffi.new(\"size_t *\")\n err = self._lib.cudnnGetConvolutionBackwardFilterWorkspaceSize(\n self.handle, src_desc, diff_desc, conv_desc, grad_desc, algo, size)\n if err:\n raise CU.error(\"cudnnGetConvolutionBackwardFilterWorkspaceSize\",\n err)\n return int(size[0])", "def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n output_shape = None\n for p in self.processings:\n new_output_shape = p.infer_image_input_shape()\n if new_output_shape is not None:\n output_shape = new_output_shape\n\n return output_shape" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects output dimension shapes for convolutions and fully connected layers from the connected edges in the NNCFGraph.
def collect_output_shapes(graph: NNCFGraph) -> Dict[NNCFNodeName, List[int]]:
    modules_out_shapes = {}
    output_shape_collecting_info = [
        (NNCF_GENERAL_CONV_MODULES_DICT, slice(2, None)),
        (NNCF_LINEAR_MODULES_DICT, slice(None)),
    ]
    for nncf_module_type, shape_slice in output_shape_collecting_info:
        for node in graph.get_nodes_by_types([v.op_func_name for v in nncf_module_type]):
            output_edges = graph.get_output_edges(node)
            if output_edges:
                out_edge = output_edges[0]
                out_shape = out_edge.tensor_shape[shape_slice]
            else:
                # For disconnected NNCFGraph when node have no output edge
                out_shape = _calculate_output_shape(graph, node)
                nncf_logger.debug(f"Node {node.node_name} has no output edge in NNCFGraph")
            modules_out_shapes[node.node_name] = out_shape
    return modules_out_shapes
[ "def output_shape(self, l_in):\r\n out_channel, l_out = self.in_channel, l_in\r\n for conv1d_unit in self.conv_layers:\r\n out_channel, l_out = conv1d_unit.output_shape(l_out)\r\n return l_out, out_channel", "def output_shapes(self, l_in):\r\n shapes = [(self.in_channel, l_in)]\r\n for conv1d_unit in self.conv_layers:\r\n shapes.append(conv1d_unit.output_shape(shapes[-1][1]))\r\n return shapes", "def _calculate_output_shape(graph: NNCFGraph, node: NNCFNode) -> Tuple[int, ...]:\n in_edge = graph.get_input_edges(node)[0]\n shape = list(in_edge.tensor_shape)\n attrs = node.layer_attributes\n\n if isinstance(attrs, ConvolutionLayerAttributes):\n shape = shape[2:]\n for i, _ in enumerate(shape):\n if attrs.transpose:\n shape[i] = (shape[i] - 1) * attrs.stride[i] - 2 * attrs.padding_values[i] + attrs.kernel_size[i]\n else:\n shape[i] = (shape[i] + 2 * attrs.padding_values[i] - attrs.kernel_size[i]) // attrs.stride[i] + 1\n elif isinstance(attrs, LinearLayerAttributes):\n shape = shape[:-1] + [attrs.out_features]\n else:\n raise RuntimeError(f\"Unexpected node type {node.node_type} is fed to _calculate_output_shape\")\n return tuple(shape)", "def compute_output_shape(observation_space, layers):\n # [None] adds a batch dimension to the random observation\n torch_obs = torch.tensor(observation_space.sample()[None])\n with torch.no_grad():\n sample = preprocess_obs(torch_obs, observation_space, normalize_images=True)\n for layer in layers:\n # forward prop to compute the right size\n sample = layer(sample)\n\n # make sure batch axis still matches\n assert sample.shape[0] == torch_obs.shape[0]\n\n # return everything else\n return sample.shape[1:]", "def layer_shapes(image_shape, model):\n shape = {model.layers[0].name: (None,) + image_shape,}\n\n for layer in model.layers[1:]:\n nodes = layer._inbound_nodes\n for node in nodes:\n inputs = [shape[lr.name] for lr in node.inbound_layers]\n if not inputs:\n continue\n shape[layer.name] = layer.compute_output_shape(inputs[0] if len(inputs) == 1 else inputs)\n\n return shape", "def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n\n # Encoder\n x = OutputShapeFor(self.convbnrelu1.cbr_unit)(x)\n x = OutputShapeFor(self.maxpool)(x)\n\n e1 = OutputShapeFor(self.encoder1)(x)\n e2 = OutputShapeFor(self.encoder2)(e1)\n e3 = OutputShapeFor(self.encoder3)(e2)\n e4 = OutputShapeFor(self.encoder4)(e3)\n\n # Decoder with Skip Connections\n d4 = OutputShapeFor(self.decoder4)(e4)\n # d4 += e3\n d3 = OutputShapeFor(self.decoder3)(d4)\n # d3 += e2\n d2 = OutputShapeFor(self.decoder2)(d3)\n # d2 += e1\n d1 = OutputShapeFor(self.decoder1)(d2)\n\n # Final Classification\n f1 = OutputShapeFor(self.finaldeconvbnrelu1)(d1)\n f2 = OutputShapeFor(self.finalconvbnrelu2)(f1)\n f3 = OutputShapeFor(self.finalconv3)(f2)\n return f3", "def _internal_weight_shapes(self):\n coeff = 4 if self._use_lstm else 1\n shapes = []\n\n # Initial fully-connected layers.\n prev_dim = self._n_in\n for n_fc in self._fc_layers_pre:\n shapes.append([n_fc, prev_dim])\n if self._use_bias:\n shapes.append([n_fc])\n\n prev_dim = n_fc\n\n # Recurrent layers.\n for n_rec in self._rnn_layers:\n # Input-to-hidden\n shapes.append([n_rec*coeff, prev_dim])\n if self._use_bias:\n shapes.append([n_rec*coeff])\n\n # Hidden-to-hidden\n shapes.append([n_rec*coeff, n_rec])\n if self._use_bias:\n shapes.append([n_rec*coeff])\n\n if not self._use_lstm:\n # Hidden-to-output\n shapes.append([n_rec, n_rec])\n if self._use_bias:\n 
shapes.append([n_rec])\n\n prev_dim = n_rec\n\n # Fully-connected layers.\n for n_fc in self._fc_layers:\n shapes.append([n_fc, prev_dim])\n if self._use_bias:\n shapes.append([n_fc])\n\n prev_dim = n_fc\n\n return shapes", "def output_shape(self):\n if context.in_eager_mode():\n raise RuntimeError('Layer.output_shape not supported in Eager mode.')\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n output_shapes = self._inbound_nodes[0].output_shapes\n if len(output_shapes) == 1:\n return tuple(tensor_shape.TensorShape(output_shapes[0]).as_list())\n else:\n return [\n tuple(tensor_shape.TensorShape(shape).as_list())\n for shape in output_shapes\n ]\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)", "def network_input_shapes(self):\n assert self._ie_network is not None\n input_shapes = {}\n for k, v in self._ie_network.input_info.items():\n shape_with_batch = v.input_data.shape.copy()\n input_shapes[k] = shape_with_batch[1:]\n return input_shapes", "def output_shape(self):\n return None", "def out_dim_list(self):\n if self.merge_type == 'add':\n out_dim_list = [self.out_channels] * self.child_num\n elif self.merge_type == 'concat':\n out_dim_list = self.get_split_list(self.out_channels, self.child_num)\n else:\n assert self.child_num == 1\n out_dim_list = [self.out_channels]\n return out_dim_list", "def calc_conv_layer_shape(prev_layer_shape, num_filters, stride):\n\n return np.array([num_filters] + calc_image_dimensions(\n prev_layer_shape[1:], stride), dtype=np.uint64)", "def _update_output_dim(self):\n self._output_dim = 0\n for m in self.measurements:\n # attempt to infer the output dimension\n if isinstance(m, ProbabilityMP):\n # TODO: what if we had a CV device here? Having the base as\n # 2 would have to be swapped to the cutoff value\n self._output_dim += 2 ** len(m.wires)\n elif not isinstance(m, StateMP):\n self._output_dim += 1\n if self.batch_size:\n self._output_dim *= self.batch_size", "def get_output_size(self, input_size):\n # conv layers\n current_output = input_size + 2*np.array(self._modules['conv_module'].padding) - (self._modules['conv_module'].kernel_size - 1) - 1\n pre_pooling = np.floor(current_output/np.array(self._modules['conv_module'].stride) + 1)\n # pooling layers\n post_pooling = None\n if not self._modules.hasattr('pool_module') is None:\n post_pooling = pre_pooling - (self._modules['pool_module'].kernel_size-1) - 1\n post_pooling = np.floor(post_pooling/np.array(self._modules['pool_module'].kernel_size) + 1)\n return pre_pooling, post_pooling", "def inference(data, conv_settings, full_settings, n_labels, dropout_pl):\n assert len(conv_settings) > 0 and len(full_settings) > 0\n\n tf.image_summary('input', data, max_images=3, collections=None, name=None)\n\n # 2D convolution, with 'SAME' padding (i.e. the output feature map has\n # the same size as the input). 
Note that {strides} is a 4D array whose\n # shape matches the data layout: [image index, y, x, depth].\n\n # Add first convl layer\n with tf.variable_scope('conv1') as scope:\n initializer = tf.truncated_normal_initializer(stddev=0.1,\n seed=SEED,\n dtype=tf.float32)\n kernel = tf.get_variable('weights',\n [5, 5, N_CHANNELS, conv_settings[0]],\n initializer=initializer)\n conv = tf.nn.conv2d(data,\n kernel,\n strides=[1, 1, 1, 1],\n padding='SAME')\n initializer = tf.zeros_initializer([conv_settings[0]], dtype=data_type())\n biases = tf.get_variable('biases', initializer=initializer)\n bias = tf.nn.bias_add(conv, biases)\n relu = tf.nn.relu(bias, name=scope.name)\n\n pool = tf.nn.max_pool(relu,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool1')\n\n # tensor = tf.split(3, conv_settings[0], pool, name='split')\n # for i in xrange(len(tensor)):\n # tf.image_summary('conv1_kernel-' + str(i),\n # tensor[i],\n # max_images=3,\n # collections=None,\n # name=None)\n\n # Add second convl layer\n if len(conv_settings) > 1:\n with tf.variable_scope('conv2') as scope:\n initializer = tf.truncated_normal_initializer(stddev=0.1,\n seed=SEED,\n dtype=data_type())\n kernel = tf.get_variable('weights',\n [5, 5, conv_settings[0], conv_settings[1]],\n initializer=initializer)\n conv = tf.nn.conv2d(pool,\n kernel,\n strides=[1, 1, 1, 1],\n padding='SAME')\n initializer = tf.constant_initializer(0.1, dtype=data_type())\n biases = tf.get_variable('biases',\n shape=[conv_settings[1]],\n initializer=initializer)\n bias = tf.nn.bias_add(conv, biases)\n relu = tf.nn.relu(bias, name=scope.name)\n\n pool = tf.nn.max_pool(relu,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool2')\n\n # Add first dense layer\n with tf.variable_scope('local1') as scope:\n # Reshape the feature map cuboid into a 2D matrix to feed it to the\n # fully connected layers.\n pool_shape = pool.get_shape().as_list()\n reshape = tf.reshape(\n pool,\n [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])\n # Fully connected layer. Note that the '+' operation automatically\n # broadcasts the biases.\n initializer = tf.truncated_normal_initializer(stddev=0.1,\n seed=SEED,\n dtype=data_type())\n # img height/width after pooling, note each convl layer is followed by a\n # single pool layer\n img_height = (IMAGE_SIZE // (2 * len(conv_settings)))\n img_width = (IMAGE_SIZE // (2 * len(conv_settings)))\n img_size = img_width * img_height\n # convl_sizes[-1] images are produced by the last convl layer, each pixel\n # in those images is connected with each node in the dense layer\n fc_size = conv_settings[-1] * img_size\n weights = tf.get_variable('weights',\n [fc_size, full_settings[0]],\n initializer=initializer)\n initializer = tf.constant_initializer(0.1, dtype=data_type())\n biases = tf.get_variable('biases',\n shape=[full_settings[0]],\n initializer=initializer)\n local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n # Add a 50% dropout during training only. 
Dropout also scales\n # activations such that no rescaling is needed at evaluation time.\n\n with tf.name_scope('dropout'):\n local1 = tf.nn.dropout(local1, dropout_pl, seed=SEED)\n\n # Add final softmax layer\n with tf.variable_scope('softmax_linear') as scope:\n initializer = tf.truncated_normal_initializer(stddev=0.1,\n seed=SEED,\n dtype=data_type())\n weights = tf.get_variable('weights',\n shape=[full_settings[0], n_labels],\n initializer=initializer)\n initializer = tf.constant_initializer(0.1, dtype=data_type())\n biases = tf.get_variable('biases',\n shape=[n_labels],\n initializer=initializer)\n softmax_linear = tf.add(tf.matmul(local1, weights),\n biases,\n name=scope.name)\n\n return softmax_linear", "def CNN_paper(input_shape):\n\n inputs = Input(shape=input_shape)\n \n x = Conv2D(96, kernel_size=(3,3), padding='same', strides=1, kernel_initializer='he_normal')(inputs)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(96, kernel_size=(3,3), padding='same', strides=1, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(96, kernel_size=(3,3), padding='same', strides=2, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n \n for i in range(4):\n if i==2:\n x = Conv2D(192, kernel_size=(3,3), padding='same', strides=2, kernel_initializer='he_normal')(x)\n else:\n x = Conv2D(192, kernel_size=(3,3), padding='same', strides=1, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(192, kernel_size=(1,1), padding='same', strides=1, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Conv2D(10, kernel_size=(1,1), padding='same', strides=1, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n\n x = Flatten()(x)\n x = Dense(1000, activation='relu')(x)\n x = Dense(1000, activation='relu')(x)\n outputs = Dense(1)(x)\n\n return Model(inputs=inputs, outputs=outputs)", "def output_dim(self):\n return self._output_dim", "def getLayersShapes(\n self, netInputShapes\n ) -> Tuple[layersIds, inLayersShapes, outLayersShapes]:\n ...", "def _add_edges_to_nncf_graph(model: ov.Model, graph: NNCFGraph) -> None:\n for op in model.get_ops():\n in_node_id = graph.get_node_by_name(op.get_friendly_name()).node_id\n for output_port_id, out in enumerate(op.outputs()):\n for inp in out.get_target_inputs():\n out_node = inp.get_node()\n tensor_shape = list(out.partial_shape.get_max_shape())\n output_node_id = graph.get_node_by_name(out_node.get_friendly_name()).node_id\n ov_dtype = out.get_element_type().get_type_name()\n nncf_dtype = GraphConverter.convert_to_nncf_dtype(ov_dtype)\n graph.add_edge_between_nncf_nodes(\n from_node_id=in_node_id,\n to_node_id=output_node_id,\n tensor_shape=tensor_shape,\n input_port_id=inp.get_index(),\n output_port_id=output_port_id,\n dtype=Dtype(nncf_dtype),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Each function in list_func_per_ax takes an ax as input and draws something on it.
def ax_func_to_plot(
    list_func_per_ax,
    n_per_row=3,
    title=None,
    title_font_size=10,
    width=15,
    height_row=10,
    saving_path=None,
    rec_padding=(0, 0, 0, 0),
    x_labels=None,
    y_labels=None,
    outer_axis_labels_only=False,
    show=True,
):
    n_rows = int(np.ceil(len(list_func_per_ax) / n_per_row))
    fig, axes = plt.subplots(
        nrows=n_rows, ncols=n_per_row, figsize=(width, height_row * n_rows)
    )
    for ax, func in zip(axes.flatten(), list_func_per_ax):
        func(ax)
    # fig.legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
    fig.suptitle(title, fontsize=title_font_size)
    for ax in axes.flat:
        ax.set(xlabel=x_labels, ylabel=y_labels)
    if outer_axis_labels_only:
        for ax in axes.flat:
            ax.label_outer()
    handles, labels = ax.get_legend_handles_labels()
    fig.legend(handles, labels, loc=1)
    if rec_padding:
        fig.tight_layout(rect=rec_padding)
    if saving_path:
        fig.savefig(saving_path)
    if show:
        plt.show()
[ "def subax_call(self, method, args, kwargs):\n result = []\n for ax in self.axs:\n if ax.xaxis.get_scale() == \"log\":\n ax.xaxis.set_major_locator(ticker.LogLocator())\n else:\n ax.xaxis.set_major_locator(ticker.AutoLocator())\n if ax.yaxis.get_scale() == \"log\":\n ax.yaxis.set_major_locator(ticker.LogLocator())\n else:\n ax.yaxis.set_major_locator(ticker.AutoLocator())\n result.append(getattr(ax, method)(*args, **kwargs))\n\n self.standardize_ticks()\n self.set_spines()\n self.fix_exponent()\n\n return result", "def drawPoints(self, drawingFunc):\n for point, kind in self.points:\n drawingFunc(point, kind=kind, color=self.color)", "def plot_func(graph, f, x0, x1, num_of_segments=SEGMENTS, c='black'):\n\n seg_width = (abs(x0) + abs(x1)) / num_of_segments\n for seg in range(num_of_segments):\n\n x_first = x0 + seg * seg_width\n x_second = x_first + seg_width\n p1 = (x_first, f()(x_first))\n p2 = (x_second, f()(x_second))\n\n graph.plot_line(p1, p2, c)", "def plot_func(graph, f, x0, x1, num_of_segments=SEGMENTS, c='black'):\n delta = (x1 - x0) / num_of_segments\n start_point = (x0, f(x0))\n for i in range(num_of_segments):\n end_point = (start_point[0] + delta, f(start_point[0] + delta))\n graph.plot_line(start_point, end_point, c)\n start_point = end_point", "def plot_active_cells(self, ax=None):\n if ax is None:\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n ax.plot(self.y_ref[0], self.y_ref[1], 's')\n for l, u, _ in self.active_cells:\n self.plot_cell(l, u, ax, alpha=0.5)\n return ax.get_figure(), ax", "def plot_results(my_func, my_points, min_x=-30, max_x=30, min_y=-30, max_y=30, nbx=100, nby=100, title=\"Title\"):\n X = np.linspace(min_x, max_x, num=nbx)\n Y = np.linspace(min_y, max_y, num=nby)\n Z=np.zeros((nbx,nby),dtype=np.double)\n for i in range(nbx):\n for j in range(nby):\n Z[i][j]=my_func([X[i],Y[j]])\n fig,ax=plt.subplots(figsize=(5,5))\n ax.set_xlim(min_x,max_x)\n ax.set_ylim(min_y,max_y)\n ax.imshow(Z, cmap='hot', interpolation='nearest',extent=(min_x,max_x,min_y,max_y))\n \n #print(\"Points: \"+str(my_points))\n x=[]\n y=[]\n for p in my_points:\n x.append(p[0])\n y.append(p[1])\n\n ax.plot(x,y,\".\")\n ax.set_title(title)\n plt.show()", "def passed_or_new_ax(func):\n @wraps(func)\n def inner(*args, **kwargs):\n if 'ax' in kwargs:\n return func(*args, **kwargs)\n else:\n fig, ax = plt.subplots()\n kwargs.update({'ax':ax})\n return func(*args, **kwargs)\n return inner", "def graph_sub(axs, i, cs_func, x_input, y_input, y_label, title):\n axs[i].set_title(title)\n axs[i].plot(x_input, cs_func(y_input), label='Cubic Spline')\n axs[i].set_ylabel(y_label)\n plt.xlabel(\"years\")\n axs[i].legend(loc='best')", "def mapMatplotlibData(f, *pointsOnAxis):\n return zip(*map(f, zip(*pointsOnAxis)))", "def _plot(self, datum, ax):\n raise NotImplementedError", "def create_plots(begin, end, stride):\r\n \r\n # generate x coordinates for plot points\r\n x_coords = []\r\n current_x = begin\r\n while current_x < end:\r\n x_coords.append(current_x)\r\n current_x += stride\r\n \r\n # compute list of (x, y) coordinates for each function\r\n double_plot = [(x_val, double(x_val)) for x_val in x_coords]\r\n square_plot = [(x_val, square(x_val)) for x_val in x_coords]\r\n exp_plot = [(x_val, exp(x_val)) for x_val in x_coords]\r\n \r\n # plot the list of points\r\n simpleplot.plot_lines(\"Plots of three functions\", 600, 400, \"x\", \"f(x)\",\r\n [double_plot, square_plot, exp_plot], \r\n True, [\"double\", \"square\", \"exp\"])", "def plot(self, data_container=None):\n for plt_fct 
in self._plot_fcts:\n plt_fct(self._data_container,self._config)", "def draw_function_graph(self, eval_function, x_start, x_end, nb_x, * function_params, curve_type = \"polyline\"):\n\n x_step = (x_end - x_start) / (nb_x - 1)\n point_list = [ [ x, eval_function(x, * function_params) ] for x in [ x_start + xi * x_step for xi in range(nb_x) ] ]\n if(curve_type == \"polyline\"):\n self.draw_polyline(point_list)\n elif(curve_type == \"autosmooth\"):\n self.draw_smoothly_interpolated_open_curve(point_list)\n else:\n raise Exception(\"curve_type not in ('polyline', 'autosmooth')\")\n return", "def draw_plot():\r\n\r\n if function_definition():\r\n functions = function_definition()\r\n else:\r\n return False\r\n \r\n if len(functions) == 0:\r\n message_label.config(text = \"Proszę zdefiniować funkcję poprawnie.\")\r\n return False\r\n\r\n try:\r\n float(x_from_entry.get())\r\n float(x_to_entry.get())\r\n except:\r\n message_label.config(text = \"Proszę podać prawidłowy zakres na osi x.\")\r\n x_axis_label.config(bg = '#a7bbc7')\r\n return False\r\n \r\n if x_from_entry.get() >= x_to_entry.get():\r\n message_label.config(text = \"Wartość 'od' zakresu na osi x powinna być mniejsza niż wartość 'do'.\")\r\n return False\r\n\r\n x_axis_label.config(bg = '#e1e5ea')\r\n x = arange(float(x_from_entry.get()), float(x_to_entry.get()), 0.001)\r\n figure = Figure(figsize=(3.7, 4), dpi=100)\r\n plot = figure.add_subplot(1, 1, 1)\r\n\r\n sus_functions = ['tan', 'cot', 'log', 'arcsin', 'arccos']\r\n sus_signs = ['**', '/']\r\n \r\n for function in functions:\r\n try:\r\n y = eval(function)\r\n except:\r\n message_label.config(text = \"Proszę zdefiniować funkcję poprawnie.\")\r\n return False\r\n for f in sus_functions:\r\n if f in function:\r\n message_label.config(text = \"Proszę zwrócić uwagę, czy funkcja '%s' jest określona na zakresie.\" % f)\r\n for sign in sus_signs:\r\n if sign in function:\r\n message_label.config(text = \"Proszę zwrócić uwagę, czy operator '%s' będzie działał prawidłowo.\" % sign)\r\n plot.plot(x,y)\r\n\r\n if is_legend.get() == 1: \r\n plot.legend(function_definition(True))\r\n \r\n plot.set(title = title_entry.get(), xlabel = x_label_entry.get(), ylabel = y_label_entry.get())\r\n canvas = FigureCanvasTkAgg(figure, window)\r\n canvas.get_tk_widget().grid(row = 0, column = 0, columnspan = 16, rowspan = 16)", "def plot_all(self, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n ax = self.plot_principal_radii_of_gyration(ax=ax)\n ax = self.plot_principal_inertia_ellipsoids(ax=ax)\n ax = self.plot_geometry(ax=ax)\n ax = self.plot_mass_centers(ax=ax)\n\n return ax", "def allAntennaMultiPlot(xData, yData, antennaList, xMin = None, xMax = None,\n yMin = None, yMax = None, \\\n orientation = 'portrait', plotTitle = \"\", xLabel = \"x-axis\", \\\n yLabel = \"y-axis\", infoTextLeft = \"\", infoTextRight = \"\",\n showGrid = True, showPlot = True, axisStyle = 'lin', \\\n lineStyles = ('bo-', 'rx--', 'gv-.'), legends = None, \\\n plotFileName = \"\", makePdf = False, makePng = False) :\n#\n error = False\n plt.ioff()\n\n# Number of line styles provided\n numStyles = len(lineStyles)\n\n# Various page and geometry dimensions in inches\n topMargin = 0.1\n bottomMargin = 0.1\n leftMargin = 0.2\n rightMargin = 0.2\n\n if orientation == 'landscape' :\n pageWidth = 11.0\n pageHeight = 8.5\n else :\n pageWidth = 8.5\n pageHeight = 11.0\n\n# Plot panel geometry\n numPlotCols = 3\n numPlotRows = maxAntennas / numPlotCols\n\n plotLeftDist = 0.75\n plotRightDist = 0.5\n plotTopDist = 1.0\n 
plotBotDist = 1.0\n\n plotHeight = (pageHeight - plotTopDist - plotBotDist) / numPlotRows\n plotWidth = (pageWidth - plotLeftDist - plotRightDist) / numPlotCols\n\n# Some handy font definitions\n tickFont = {'family' : 'sans-serif',\n 'weight' : 'normal',\n 'size' : 8,\n }\n generalFont = {'family' : 'sans-serif',\n 'weight' : 'normal',\n 'size' : 11,\n }\n plt.rc('font', **generalFont) # pass in the font dict as kwargs\n\n titleFontSize = 14\n labelFontSize = 11\n tickFontSize = 8\n infoFontSize = 8\n legendFontSize = 8\n\n# Start a new figure\n try:\n figure = plt.figure(figsize = (pageWidth, pageHeight))\n except :\n printError(\"allAntennaMultiPlot: Not an Xterm? Cannot plot\")\n plt.rc({'backend' : 'Agg'})\n error = True\n return error\n\n# Title for the plots\n titleOffset = 0.05\n x = (0.5 * (pageWidth + plotLeftDist - plotRightDist)) / pageWidth\n y = 1.0 - (plotTopDist - titleOffset) / pageHeight\n plt.figtext(x, y, plotTitle, fontsize = titleFontSize, \\\n va = 'bottom', ha = 'center', variant = 'small-caps')\n\n# Left info box\n left = leftMargin / pageWidth\n top = 1.0 - topMargin / pageHeight\n plt.figtext(left, top, infoTextLeft, fontsize = infoFontSize, va = 'top')\n\n# Right info box\n right = 1.0 - rightMargin / pageWidth\n top = 1.0 - topMargin / pageHeight\n\n plt.figtext(right, top, infoTextRight, fontsize = infoFontSize, va = 'top', \\\n ha = 'right')\n\n# Array of plot panels. Start at top left and work left to right\n# The array (list of lists) of y values is assumed to be a multiple of the number\n# of antennas, with the values for each antenna adjacent\n plotsPerAntenna = len(yData) / maxAntennas\n bot = (pageHeight - plotTopDist - plotHeight) / pageHeight\n ant = 1\n ny = 0\n\n for row in range(numPlotRows) :\n left = plotLeftDist / pageWidth\n for col in range(numPlotCols) :\n ax = plt.axes([left, bot, plotWidth / pageWidth, plotHeight / pageHeight])\n if showGrid :\n ax.grid(True, color = 'gray')\n plt.figtext(left + plotWidth / pageWidth - 0.01, bot + 0.01, \\\n \"C%d\" % ant, fontsize = 10, ha = 'right')\n if isinstance(xData[0], list) :\n xd = xData[ant - 1]\n else :\n xd = xData\n if ant in antennaList :\n for nplt in range(plotsPerAntenna) :\n if axisStyle == 'logx' :\n plt.semilogx(xd, yData[ny], lineStyles[nplt % numStyles])\n elif axisStyle == 'logy' :\n plt.semilogy(xd, yData[ny], lineStyles[nplt % numStyles])\n elif axisStyle == 'loglog' :\n plt.loglog(xd, yData[ny], lineStyles[nplt % numStyles])\n else :\n plt.plot(xd, yData[ny], lineStyles[nplt % numStyles])\n ny += 1\n else :\n plt.figtext(left + 0.5 * plotWidth / pageWidth, \\\n bot + 0.5 * plotHeight / pageHeight, \"NOT PRESENT\", \\\n va = 'center', ha = 'center', color = 'gray', fontsize = 8)\n ny += plotsPerAntenna\n\n # Insert legend if required\n if (col == 0) and (row == numPlotRows -1) and legends :\n x = -(plotLeftDist - leftMargin) / plotWidth\n y = -(plotBotDist - bottomMargin) / plotHeight\n plt.legend(legends, loc = (x, y), \\\n prop = FontProperties(size = legendFontSize), labelspacing = 0.0)\n\n # Set up x-axis\n plt.xlim(xMin, xMax)\n if row < numPlotRows - 1 :\n for tick in ax.xaxis.get_major_ticks() :\n tick.label1On = False\n else :\n if (col < numPlotCols - 1) :\n ticks = ax.xaxis.get_major_ticks()\n ticks[len(ticks) - 1].label1On = False\n plt.xticks(**tickFont)\n\n # Set up y-axis\n plt.ylim(yMin, yMax)\n if col > 0 :\n for tick in ax.yaxis.get_major_ticks() :\n tick.label1On = False\n else :\n if (row > 0) :\n ticks = ax.yaxis.get_major_ticks()\n ticks[len(ticks) - 
1].label1On = False\n plt.yticks(**tickFont)\n\n if (col == numPlotCols - 1) and (row == numPlotRows - 1) :\n plt.xlabel(xLabel)\n if (col == 0) and (row == 0) :\n plt.ylabel(yLabel)\n left += plotWidth / pageWidth\n ant += 1\n bot -= plotHeight / pageHeight\n\n # Where plot output is to be directed\n if plotFileName :\n if makePdf :\n try :\n plt.savefig(plotFileName + \".pdf\")\n except :\n error = True\n printError(\"Cannot make PDF file\")\n if makePng :\n try :\n plt.savefig(plotFileName + \".png\")\n except :\n error = True\n printError(\"Cannot make PNG file\")\n if showPlot :\n plt.ioff()\n plt.show()\n\n return error", "def function_graph(self):\n x = value_range()\n y = self.a * x ** 2 + self.b * x + self.c\n plt.plot(x, y)\n plt.show()", "def meshPlot(func):\n _simple_plotter_plot_methods.append(func.__name__)\n\n def decorated(*args, **kwargs):\n period, x, y, z, xlabel, ylabel, zlabel, time_prefix = func(*args, **kwargs)\n if period is not None:\n print_interpol = False\n if kwargs.get(\"use_index\", False):\n idx = period\n else:\n if period not in y:\n lisa_print(\"Interpolating for usable period (using nearest): \",\n end=\"\", debug=False)\n print_interpol = True\n if period < 0:\n period = np.max(y) + period\n idx = np.argmin(np.abs(np.array(y) - period * 1 / time_prefix[1]))\n args[0]._last_interpol_idx = idx\n # if period not in y and not kwargs.get(\"use_index\", False):\n if print_interpol:\n lisa_print(y[idx], debug=False)\n\n @SimplePlotter.plot\n def dummy(x, z, xlabel, zlabel, *args, **kwargs):\n if hasattr(z, 'unit_function'):\n z = z.unit_function(idx)\n else:\n z = z[idx]\n return(x, z, xlabel, zlabel)\n return dummy(x, z, xlabel, zlabel, **kwargs)\n\n if kwargs.get(\"mean_range\", None) is not None:\n range = kwargs.get(\"mean_range\")\n z_mean = np.mean(z[range[0]:range[1]], axis=0)\n zlabel += \" (mean over range {})\".format(range)\n\n @SimplePlotter.plot\n def dummy(x, z, xlabel, zlabel, *args, **kwargs):\n return(x, z, xlabel, zlabel)\n return dummy(x, z_mean, xlabel, zlabel, **kwargs)\n\n if isinstance(kwargs.get(\"fig\", None), plt.Figure):\n fig = kwargs[\"fig\"]\n if isinstance(kwargs.get(\"ax\", None), plt.Axes):\n ax = kwargs.get(\"ax\")\n else:\n ax = fig.add_subplot(111)\n else:\n if isinstance(kwargs.get(\"ax\", None), plt.Axes):\n ax = kwargs.get(\"ax\")\n fig = ax.figure\n else:\n fig = plt.figure(tight_layout=True)\n ax = fig.add_subplot(111)\n\n if 'norm' in kwargs:\n if isinstance(kwargs['norm'], matplotlib.colors.Normalize):\n norm = kwargs['norm']\n else:\n if kwargs['norm'].lower() == \"linear\":\n norm = matplotlib.colors.Normalize()\n elif kwargs['norm'].lower() == \"log\":\n norm = matplotlib.colors.LogNorm()\n else:\n print(\"Unknown norm specification, using linear\")\n norm = matplotlib.colors.Normalize()\n else:\n norm = matplotlib.colors.Normalize()\n # warn if values in kwargs and plt_args\n if 'norm' in kwargs.get(\"plt_args\", {}):\n warn(\"'norm' is already in arguments, duplicate in plt_args, \"\n \"will not use norm in plt_args\")\n del kwargs.get(\"plt_args\")['norm']\n if 'cmap' in kwargs.get(\"plt_args\", {}):\n warn(\"'cmap' will be set by this method. 
use colormap argument \"\n \"instead of cmap in plt_args.\\n\" + \"will ignore cmap in plt_args.\")\n del kwargs.get(\"plt_args\")['cmap']\n if kwargs.get(\"transpose\", False):\n x, y = y, x\n xlabel, ylabel = ylabel, xlabel\n z = z.T\n pm = ax.pcolormesh(x, y, z, norm=norm, cmap=kwargs.get(\"colormap\", \"PuBu\"),\n **kwargs.get(\"plt_args\", {}))\n ax.set_xlabel(xlabel) # TODO: What?\n ax.set_ylabel(ylabel)\n if kwargs.get(\"force_bad_to_min\", False):\n pm.get_cmap().set_bad((pm.get_cmap()(pm.get_clim()[0])))\n\n if kwargs.get(\"force_exponential_x\", False):\n ax.get_xaxis().get_major_formatter().set_powerlimits((0, 0))\n if kwargs.get(\"force_exponential_y\", False):\n ax.get_yaxis().get_major_formatter().set_powerlimits((0, 0))\n colorbar = fig.colorbar(pm)\n colorbar.set_label(zlabel)\n fig.cbar = colorbar\n s = Style()\n s.apply_to_fig(fig)\n return fig\n\n decorated.__name__ = func.__name__\n decorated.__doc__ = \"This method is decorated. See SimplePlotter.plot \" \\\n \"for additional parameters\"\n if func.__doc__ is not None:\n decorated.__doc__ += \"\\nSpecial Options for this plot:\" + textwrap.dedent(func.__doc__)\n decorated.mesh = True\n return decorated", "def fplot(self, function, xlimits, **kwargs):\n\t\tfig, ax = plt.subplots() # Create axes and figure objects at once.\n\t\tself.fig = fig\n\t\tself.ax = ax\n\t\tkwargs = SciencePlotting._set_kwargs_defaults(**kwargs)\n\t\tkwargs.setdefault('mat_folder', self.mat_folder)\n\t\tself.fig_title = kwargs['fig_title']\n\t\tself.open_folders = kwargs.get('open_folders')\n\t\tself.save_fig = kwargs.get('save_fig')\n\n\t\t# Do previous checks.\n\t\tif not hasattr(function, '__call__') and not isinstance(function, (_collections_abc.Sequence, np.ndarray, str)):\n\t\t\tplt.close(self.fig)\n\t\t\traise TypeError(\n\t\t\t\tf'The functions input must be a callable or an iterable containing a callable, not a {type(function)}')\n\t\tif not isinstance(xlimits, (_collections_abc.Sequence, np.ndarray)):\n\t\t\tif isinstance(xlimits, str):\n\t\t\t\tplt.close(self.fig)\n\t\t\t\traise TypeError('The xlimits input must be an iterable (and not a STRING).')\n\t\t\telse:\n\t\t\t\tplt.close(self.fig)\n\t\t\t\traise TypeError(f'{type(xlimits)} is not a valid input for xlimits. It must be an iterable object.')\n\n\t\t# Write the title of the plot if necessary\n\t\tself._write_title(**kwargs)\n\n\t\t# Set the labels for the axis.\n\t\tif kwargs.get('xlabel') is not None:\n\t\t\tself.ax.set(xlabel=kwargs.get('xlabel'))\n\t\telse:\n\t\t\tself.ax.set(xlabel='x')\n\t\tself.ax.set(ylabel=kwargs.get('ylabel'))\n\n\t\t# Set scales for the axis.\n\t\tself.ax.set(xscale=kwargs.get('xscale'))\n\t\tself.ax.set(yscale=kwargs.get('yscale'))\n\n\t\tself.ax.autoscale(tight=True, axis='x')\n\n\t\t# Call the plotting class.\n\t\tFPlot(self.ax, function, xlimits, **kwargs)\n\n\t\tself._write_title(**kwargs)\n\n\t\t# Save the figure if required.\n\t\tif self.save_fig:\n\t\t\tself.image_format = '.' + kwargs.get('image_format').split('.')[0]\n\t\t\tself._save_figure()\n\n\t\tif self.save_mat:\n\t\t\tif self.open_folders:\n\t\t\t\tself._open_directory(self.mat_folder)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the dimension of the entity this DOF is associated with.
def entity_dim(self) -> int:
    return self.entity[0]
[ "def dimension(self):\n return self.field(Field.POSITION).shape[1]", "def getDimension():\n ierr = c_int()\n api__result__ = lib.gmshModelGetDimension(\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGetDimension returned non-zero error code: \",\n ierr.value)\n return api__result__", "def dim(self):\n return self._dim", "def cell_dimension(self):\n return self.ufl_cell().topological_dimension()", "def getDimension(self, unit: 'int const'=0) -> \"int32_t\":\n return _coin.SoMultiTextureCoordinateElement_getDimension(self, unit)", "def get_dim(self, key):\n return self.dim.get(key, None)", "def get_input_dimension(self):\n\n return self._parameters._dimension", "def num_dimensions(self):\n return self.numDim.value", "def facet_dimension(self):\n # Facets have co-dimension 1\n return self.ufl_cell().topological_dimension() - 1", "def facet_dimension(self):\n return (self._base_mesh.facet_dimension(), 1)", "def dimensionality(self):\n return base_pb2.Nature.Name(self._message.dimensionality).lower()", "def qudit_dimension(self) -> int:\n return self._qudit_dimension", "def dimensions(self) -> DimensionGraph:\n base = self.universe.empty\n if len(self) == 0:\n return base\n return base.union(*[datasetType.dimensions for datasetType in self.keys()])", "def observation_dim(self):\n return int(np.prod(self.observation_space.high.shape))", "def getNumDimensions(self):\n return self.dimensions.size()", "def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TemplateQuotaDimensionArgs']]]]:\n return pulumi.get(self, \"dimensions\")", "def cell_dimension(self):\n return (self._base_mesh.cell_dimension(), 1)", "def size(self):\n assert self.exists, f\"Dimension {self.name} does not exist for tensor {self.tensor.shape}\"\n return self.tensor.shape.get_size(self.name)", "def getDimensions(self):\n return (self.x_dim, self.y_dim, self.z_dim)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of the entity this DOF is associated with.
def entity_number(self) -> int:
    return self.entity[1]
[ "def entity_dim(self) -> int:\n return self.entity[0]", "def num_entities(self) -> int:\n # TODO: Need to add functions in pymilvus-distributed\n return 0\n # raise NotImplementedError", "def num_entities(self) -> int:\n conn = self._get_connection()\n status = conn.get_collection_stats(db_name=\"\", collection_name=self._name)\n return status[\"row_count\"]", "def _get_count(self) -> \"size_t\" :\n return _core.DocumentReferences__get_count(self)", "def get_count(self):\n return self.hand.compute_bj_count()", "def get_counter(self):\n return self.node_counter", "def obj_count(self):\n with self._lock:\n return self._obj_count", "def getNum(self) -> \"int32_t\":\n return _coin.SoCoordinateElement_getNum(self)", "def get_entity_id(self):\n\n\t\treturn self.__entity_id", "def _get_count(self) -> \"size_t\" :\n return _core.NamedValues__get_count(self)", "def ngens(self):\n return self.base_field().ngens() + 1", "def get_numero(self):\r\n return self.__numero", "def get_number_of_entries(self):\n return self.mongo_db_service.entries", "def nindex(self):\n return self.discretization.nindex", "def getNum(self) -> \"int\":\n return _coin.SoTextureCoordinateCache_getNum(self)", "def number_of_individuals(self):\n return self._number_of_individuals", "def getNum(self) -> \"int32_t\":\n return _coin.SoShininessElement_getNum(self)", "def getNumData(self) -> \"int\":\n return _coin.ScXMLDataModelElt_getNumData(self)", "def get_entry_count(self, stage):\n raise NotImplementedError('Please implement me')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the entity the DOF is associated with in TeX format.
def entity_tex(self) -> str:
    if self.entity[0] == self.reference.tdim:
        return "R"
    else:
        return f"{'vefc'[self.entity[0]]}_{{{self.entity[1]}}}"
[ "def _get_entity(self) -> \"adsk::core::Ptr< adsk::core::Base >\" :\n return _core.Selection__get_entity(self)", "def Entity(self) -> _n_0_t_1:", "def _get_entity_element(e, t, key, cols=[], ins=None):\n if cols:\n output = '<Entity>\\n'\n output += _static_columns(e, t, key, ins)\n output += \"\\n\".join(cols)\n output += '</Entity>\\n'\n return output\n else:\n return \"\"", "def get_tex(self) -> typing.Tuple[str, typing.List[str]]:\n desc = \"\\\\boldsymbol{v}\\\\mapsto\"\n desc += \"\\\\nablaa\\\\cdot\\\\boldsymbol{v}\"\n desc += \"(\" + \",\".join([_to_tex(i, True) for i in self.dof_point()]) + \")\"\n return desc, []", "def get_entitytype_renderer():\n return RenderFieldValue(\"entitytype\",\n view_renderer=Select_view_renderer(view_entitytype),\n edit_renderer=Select_edit_renderer(edit_entitytype),\n )", "def doc_entity(self) -> str:\n return '{}.{}'.format(self.doc_section, self.name.lower)", "def entity_doc(self, name):\n return self._config.get_entity_def(name).doc", "def _repr_latex_(self):\n return self._latex", "def GetEntity(self, name):\n return self.generalEntities.get(name, None)", "def get_entities(request):\n logging.info('views.get_entities')\n return get_data_json('demo__definition', 'label,numeric', 'category=\\'EntityType\\'', None)", "def get_entity_by_handle(self, handle: str) -> 'DXFEntity':\n return self._dxffactory.wrap_handle(handle)", "def typologie(self):\n return self.__typologie", "def _obtain_entity_def_for_dnode(self, dnode):\n\n if isinstance(dnode, ast.EntityNode):\n return self._flow_state.get_entity_def(dnode.to_descriptor())\n\n elif isinstance(dnode, ast.TupleNode):\n # TODO Since we're using this to describe things something that's not an\n # entity, we should rename `EntityDefinition` to something more general.\n return EntityDefinition(\n name=dnode.to_descriptor(),\n protocol=TupleProtocol(len(dnode.children)),\n doc=f\"A Python tuple with {len(dnode.children)} values.\",\n optional_should_memoize=True,\n optional_should_persist=False,\n )\n\n else:\n raise AssertionError(\n f\"Unexpected dnode type {type(dnode)!r} for dnode {dnode!r}\"\n )", "def getTonalEntity(self, plainEntity, tone, readingN, **options):\n readingOp = self._getReadingOperatorInstance(readingN, **options)\n if not hasattr(readingOp, 'getTonalEntity'):\n raise UnsupportedError(\"method 'getTonalEntity' not supported\")\n return readingOp.getTonalEntity(plainEntity, tone)", "def GetTex(filename):\n \n print(\"Reading {}...\".format(filename))\n with open(filename, 'r') as file:\n return file.read()", "def _read_entity_from_offset(self, offset):\n self.entities_mm.seek(offset)\n l = self.entities_mm.readline()\n return self._string_to_entity(l)", "def transformer(self):\n return self.transformers[self.transformer_tabs.currentIndex()].toPlainText()", "def extract_entities(text):\n\n text = nlp_de(text)\n entities_nr = len(text.ents)\n # print(entities_nr, \"Entities in diesem Text.\")\n entities_labels = Counter([x.label_ for x in text.ents])\n entities_top3 = Counter([x.text for x in text.ents]).most_common(3)\n entities_list = [(X.text, X.label_) for X in text.ents]\n\n return (\n entities_nr,\n \"Entities in diesem Text:\",\n entities_labels,\n \"Die 3 häufigsten Entities:\\n\",\n entities_top3,\n \"Identifizierte Entities:\\n\",\n entities_list,\n )", "def process_entity(self, entity):\n return entity" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a representation of the functional as TeX, and list of terms involved.
def get_tex(self) -> typing.Tuple[str, typing.List[str]]:
    assert isinstance(self.point, VectorFunction)
    if len(self.point) == 1:
        desc = "v\\mapsto "
        desc += f"v'({','.join([_to_tex(i, True) for i in self.point])})"
        return desc, []
    desc = "v\\mapsto"
    desc += "\\frac{\\partial"
    if sum(self.derivative) > 1:
        desc += f"^{{{sum(self.derivative)}}}"
    desc += "}{"
    for v, i in zip("xyz", self.derivative):
        if i > 0:
            desc += f"\\partial {v}"
            if i > 1:
                desc += f"^{{{i}}}"
    desc += "}"
    desc += f"v({','.join([_to_tex(i, True) for i in self.point])})"
    return desc, []
[ "def get_tex(self) -> typing.Tuple[str, typing.List[str]]:\n desc = \"\\\\boldsymbol{v}\\\\mapsto\"\n desc += \"\\\\nablaa\\\\cdot\\\\boldsymbol{v}\"\n desc += \"(\" + \",\".join([_to_tex(i, True) for i in self.dof_point()]) + \")\"\n return desc, []", "def as_tex(self) -> str:\n out = sympy.latex(sympy.simplify(sympy.expand(self._f)))\n out = out.replace(\"\\\\left[\", \"\\\\left(\")\n out = out.replace(\"\\\\right]\", \"\\\\right)\")\n return out", "def _repr_latex_(self):\n return self._latex", "def terms_string(*args, **kwargs):\n return sym.latex(sym.Add(*args, evaluate=False), order='none', **kwargs)", "def display(self, basis=None, format_spec=None):\n from sage.misc.latex import latex\n from sage.tensor.modules.format_utilities import is_atomic, \\\n FormattedExpansion\n if basis is None:\n basis = self._fmodule._def_basis\n cobasis = basis.dual_basis()\n comp = self.comp(basis)\n terms_txt = []\n terms_latex = []\n for ind in comp.non_redundant_index_generator():\n ind_arg = ind + (format_spec,)\n coef = comp[ind_arg]\n if coef != 0:\n bases_txt = []\n bases_latex = []\n for k in range(self._tensor_rank):\n bases_txt.append(cobasis[ind[k]]._name)\n bases_latex.append(latex(cobasis[ind[k]]))\n basis_term_txt = \"/\\\\\".join(bases_txt)\n basis_term_latex = r\"\\wedge \".join(bases_latex)\n coef_txt = repr(coef)\n if coef_txt == \"1\":\n terms_txt.append(basis_term_txt)\n terms_latex.append(basis_term_latex)\n elif coef_txt == \"-1\":\n terms_txt.append(\"-\" + basis_term_txt)\n terms_latex.append(\"-\" + basis_term_latex)\n else:\n coef_latex = latex(coef)\n if is_atomic(coef_txt):\n terms_txt.append(coef_txt + \" \" + basis_term_txt)\n else:\n terms_txt.append(\"(\" + coef_txt + \") \" +\n basis_term_txt)\n if is_atomic(coef_latex):\n terms_latex.append(coef_latex + basis_term_latex)\n else:\n terms_latex.append(r\"\\left(\" + coef_latex + \\\n r\"\\right)\" + basis_term_latex)\n if not terms_txt:\n expansion_txt = \"0\"\n else:\n expansion_txt = terms_txt[0]\n for term in terms_txt[1:]:\n if term[0] == \"-\":\n expansion_txt += \" - \" + term[1:]\n else:\n expansion_txt += \" + \" + term\n if not terms_latex:\n expansion_latex = \"0\"\n else:\n expansion_latex = terms_latex[0]\n for term in terms_latex[1:]:\n if term[0] == \"-\":\n expansion_latex += term\n else:\n expansion_latex += \"+\" + term\n if self._name is None:\n resu_txt = expansion_txt\n else:\n resu_txt = self._name + \" = \" + expansion_txt\n if self._latex_name is None:\n resu_latex = expansion_latex\n else:\n resu_latex = latex(self) + \" = \" + expansion_latex\n return FormattedExpansion(resu_txt, resu_latex)", "def _repr_latex_(self):\n if not self.poly:\n return '$0$'\n res = ['$']\n first = True\n for exponent, coef in enumerate(self.poly):\n if coef == 0:\n continue\n monomial = latex_monomial(exponent, coef, self.var)\n if first:\n first = False\n res.append(monomial)\n continue\n oper = '+'\n if monomial[0] == '-':\n oper = '-'\n monomial = monomial[1:]\n res.append(oper)\n res.append(monomial)\n res.append('$')\n return ' '.join(res)", "def _reprlatex(self):\n pass # pragma: no cover", "def compile_term(self):\n\n self.out.write('<term>\\n')\n\n unary_ops = ['-', '~']\n\n #############################################\n ### constant, name, expression or unaryOp ###\n #############################################\n\n # '(' expression ')'\n if self.tokenizer.token == '(':\n # '('\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n # expression\n self.advance()\n 
self.compile_expression()\n\n # ')'\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n self.advance()\n \n # unaryOp term\n elif self.tokenizer.token in unary_ops:\n # unaryOp\n unary_op_line = self.format_line()\n self.out.write(unary_op_line)\n\n # term\n self.advance()\n self.compile_term()\n\n # integerConstant | stringConstant | keywordConstant |\n # varName | varName '[' expression ']' | subroutineCall\n else:\n # constant or name\n constant_or_name = self.format_line('used')\n self.out.write(constant_or_name)\n\n # varName '[' expression ']' | subroutineCall or end of compile_term function\n # Check if expression: '[', subroutineCall: '(' with parameter skip_subroutine_name = True,\n # otherwise end of compile_term function\n self.advance()\n # '[' expression ']'\n if self.tokenizer.token == '[':\n # '['\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n # expression\n self.advance()\n self.compile_expression()\n\n # ']'\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n self.advance()\n \n # subroutineCall with skip_subroutine_name=True\n elif self.tokenizer.token in ['(', '.']:\n self.compile_subroutine_call(skip_subroutine_name=True)\n\n self.out.write('</term>\\n')", "def latex_rep(self):\n\n from CommonFiles.symbolics import LatexVisitor\n import ast\n\n class ModelLatexVisitor(LatexVisitor):\n \"\"\" class to convert strings to latex strings \"\"\"\n # def __init__(self, states, params):\n # super(ModelLatexVisitor, self).__init__()\n # self.model_states = states\n # self.model_params = params\n\n def visit_Name(self, n):\n if n.id in self.model_states.listrep():\n return r'\\mathrm{\\bf ' + n.id + r'}'\n\n elif n.id in self.model_params.listrep():\n baseindex = n.id.find('_')\n base = n.id[:baseindex]\n # Name or index if no name\n tempname = n.id[baseindex+1:]\n if '_' in tempname:\n # name and index\n ind = tempname.find('_')\n name = tempname[:ind]\n pindex = tempname[ind+1:]\n else:\n name = tempname\n pindex = None\n\n if pindex: return r'\\mathrm{\\bf ' + base + r'_' + r'{'\\\n + name + r',' + pindex + r'}' + r'}'\n else: return r'\\mathrm{\\bf ' + base + r'_' + r'{'\\\n + name + r'}' + r'}'\n\n else: return n.id\n\n visitor = ModelLatexVisitor()\n visitor.model_states = self.states\n visitor.model_params = self.params\n\n strlist = []\n for i, ode in enumerate(self.odes):\n pt = ast.parse(str(ode))\n lhs = (r'\\frac{d\\mathrm{\\bf ' +\n self.states.listrep()[i] + r'}}{dt} &= ')\n\n strlist += [lhs + visitor.visit(pt.body[0].value) + r' \\\\']\n\n strlist[-1] = strlist[-1][:-2]\n\n return strlist", "def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])", "def get_polyterms_w_xform(self):\n if self.polytermx_cache:\n return self.polytermx_cache\n greens = self.decompose_greens()\n self.polytermx_cache = []\n for (pp,hs,xi) in [self.poly_term_w_xi(t) for t in greens]:\n self.polytermx_cache += [(pp.full_simplify(), hs, xi)]\n\n return self.polytermx_cache", "def tautology(formula):\n return onallvaluations(formula)", "def termlist(self):\n stableTxt = ''\n testingTxt = ''\n unstableTxt = ''\n archaicTxt = ''\n \n queries = ''\n c_ids, p_ids, i_ids = self.vocab.azlist()\n tl = \"\"\"<div class=\"termlist\">\"\"\"\n \n # look, whether individuals are available\n if (len(self.vocab.individuals) > 0):\n tl = \"\"\"%s<h3>Classes, Properties and Individuals (full detail)</h3>\\n<div class='termdetails'><br />\\n\\n\"\"\" % tl\n self.concepttypes = \"Classes, Properties 
and Individuals\"\n self.concepttypes2 = \"class (categories or types), by property and by individual\"\n self.concepttypes3 = \"classes, properties and individuals\"\n else:\n tl = \"\"\"%s<h3>Classes and Properties (full detail)</h3>\\n<div class='termdetails'><br />\\n\\n\"\"\" % tl\n self.concepttypes = \"Classes and Properties\"\n self.concepttypes2 = \"class (categories or types) and by property\"\n self.concepttypes3 = \"classes and properties\"\n\n \n # danbri hack 20100101 removed: href=\"http://www.w3.org/2003/06/sw-vocab-status/ns#%s\" pending discussion w/ libby and leigh re URIs\n \n # first classes, then properties\n eg = \"\"\"<div class=\"specterm\" id=\"%s\" about=\"%s\" typeof=\"%s\">\n <h3>%s: %s</h3> \n <em property=\"rdfs:label\" >%s</em> - <span property=\"rdfs:comment\" >%s</span> <br /><table style=\"th { float: top; }\">\n <tr><th>Status:</th>\n <td><span property=\"vs:status\" >%s</span></td></tr>\n %s\n %s\n </table>\n %s\n <p style=\"float: right; font-size: small;\">[<a href=\"#%s\">#</a>] <!-- %s --> [<a href=\"#glance\">back to top</a>]</p>\n <br/>\n </div>\"\"\"\n \n # for individuals\n ig = \"\"\"<div class=\"specterm\" id=\"%s\" about=\"%s\" typeof=\"%s\">\n <h3>%s: %s</h3> \n <em property=\"dc:title\" >%s</em> - <span property=\"dc:description\" >%s</span> <br /><table style=\"th { float: top; }\">\n <tr><th>Status:</th>\n <td><span property=\"vs:status\" >%s</span></td></tr>\n %s\n </table>\n %s\n <p style=\"float: right; font-size: small;\">[<a href=\"#%s\">#</a>] <!-- %s --> [<a href=\"#glance\">back to top</a>]</p>\n <br/>\n </div>\"\"\"\n \n # replace this if you want validation queries: xxx danbri\n # <p style=\"float: right; font-size: small;\">[<a href=\"#term_%s\">permalink</a>] [<a href=\"#queries_%s\">validation queries</a>] [<a href=\"#glance\">back to top</a>]</p>\n # todo, push this into an api call (c_ids currently setup by az above)\n # classes\n for term in self.vocab.classes:\n # strings to use later\n domainsOfClass = ''\n rangesOfClass = ''\n \n #class in domain of -> only for classes included in this ontology specification\n g = self.vocab.graph\n \n q = 'SELECT ?d ?l WHERE {?d rdfs:domain <%s> . ?d rdfs:label ?l } ' % (term.uri)\n \n relations = g.query(q)\n startStr = '<tr><th>Properties include:</th>\\n'\n \n contentStr = ''\n for (domain, label) in relations:\n dom = Term(domain)\n # danbri hack 20100101\n # termStr = \"\"\"<a href=\"#term_%s\">%s</a>\\n\"\"\" % (dom.id, label)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (dom.id, dom.id)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n domainsOfClass = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n # class in range of -> only for classes included in this ontology specification\n q2 = 'SELECT ?d ?l WHERE {?d rdfs:range <%s> . ?d rdfs:label ?l } ' % (term.uri)\n relations2 = g.query(q2)\n startStr = '<tr><th>Used with:</th>\\n'\n \n contentStr = ''\n for (range, label) in relations2:\n ran = Term(range)\n # termStr = \"\"\"<a href=\"#term_%s\">%s</a>\\n\"\"\" % (ran.id, label)\n # danbri hack 20100101 better to use exact IDs here\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (ran.id, ran.id)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n rangesOfClass = \"%s <td> %s</td></tr> \" % (startStr, contentStr)\n \n # class sub class of -> handles only \"real\" super classes\n subClassOf = ''\n restriction = ''\n \n q = 'SELECT ?sc ?l WHERE {<%s> rdfs:subClassOf ?sc . 
?sc rdfs:label ?l } ' % (term.uri)\n \n relations = g.query(q)\n startStr = '<tr><th>Sub class of</th>\\n'\n \n contentStr = ''\n contentStr2 = ''\n for (subclass, label) in relations:\n sub = Term(subclass)\n termStr = \"\"\"<span rel=\"rdfs:subClassOf\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (subclass, sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n subClassOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n # else:\n q1 = 'SELECT ?sc WHERE {<%s> rdfs:subClassOf ?sc } ' % (term.uri)\n \n relations = g.query(q1)\n ordone = False\n for (row) in relations:\n subclass = row[0]\n subclassnice = self.vocab.niceName(subclass)\n # print(\"subclass \",subclass)\n # print(\"subclassnice \",subclassnice)\n # check niceName result\n # TODO: handle other sub class types (...) currently owl:Restriction only\n colon = subclassnice.find(':')\n #print(\"ns uri \", str(self.vocab._get_uri()))\n if(subclass.find(str(self.vocab._get_uri())) < 0):\n if (colon > 0):\n termStr = \"\"\"<span rel=\"rdfs:subClassOf\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (subclass, subclass, subclassnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n #print(\"must be super class from another ns: \", subclassnice)\n elif (ordone == False):\n # with that query I get all restrictions of a concept :\\\n # TODO: enable a query with bnodes (_:bnode currently doesn't work :( )\n # that's why the following code isn't really nice\n q2 = 'SELECT ?orsc ?or ?orv WHERE { <%s> rdfs:subClassOf ?orsc . ?orsc rdf:type <http://www.w3.org/2002/07/owl#Restriction> . ?orsc ?or ?orv }' % (term.uri)\n \n print(\"try to fetch owl:Restrictions with query \", q2)\n orrelations = g.query(q2)\n startStr2 = '<tr><th class=\"restrictions\">Restriction(s):</th>\\n'\n orpcounter = 0\n orsubclass = ''\n contentStr3 = ''\n termStr1 = ''\n termStr2 = ''\n prop = ''\n oronproperty = ''\n orproperty = ''\n orpropertyvalue = ''\n orscope = ''\n for (orsc, orp, orpv) in orrelations:\n orproperty2 = ''\n orpropertyvalue2 = ''\n orscope2 = ''\n if (orsubclass == \"\"):\n print(\"initialize orsubclass with \", orsc)\n orsubclass = orsc\n if(orsubclass != orsc):\n termStr1 = \"\"\"<span about=\"%s\" rel=\"rdfs:subClassOf\" resource=\"[_:%s]\"></span>\\n\"\"\" % (term.uri, orsubclass)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"owl:Restriction\"></span>The property \n <span about=\"[_:%s]\" rel=\"owl:onProperty\" href=\"%s\"><a href=\"#%s\">%s</a></span> must be set <em>%s</em> \n <span about=\"[_:%s]\" property=\"%s\" datatype=\"xsd:nonNegativeInteger\" >%s</span> time(s)\"\"\" % (orsubclass, orsubclass, oronproperty, prop.id, prop.type, orscope, orsubclass, orproperty, orpropertyvalue)\n \n contentStr2 = \"%s %s %s %s<br/>\" % (contentStr2, termStr1, termStr2, contentStr3)\n print(\"change orsubclass to\", orsc)\n orsubclass = orsc\n contentStr3 = ''\n orpcounter = 0\n termStr1 = ''\n termStr2 = ''\n prop = ''\n oronproperty = ''\n orproperty = ''\n orpropertyvalue = ''\n orscope = ''\n \n print(\"orp \", orp)\n print(\"orpv\", orpv)\n if (str(orp) == \"http://www.w3.org/2002/07/owl#onProperty\"):\n oronproperty = orpv\n prop = Term(orpv)\n prop.type = self.vocab.niceName(orpv)\n print(\"found new owl:Restriction\")\n print(\"write onproperty property\")\n elif ((str(orp) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\") & (str(orp) != \"http://www.w3.org/2002/07/owl#onProperty\")):\n if (orpcounter == 0):\n orproperty = self.vocab.niceName(orp)\n # <- that must be a 
specific cardinality restriction\n orpropertyvalue = orpv\n if (str(orp) == \"http://www.w3.org/2002/07/owl#cardinality\"):\n orscope = \"exactly\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#minCardinality\"):\n orscope = \"at least\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#maxCardinality\"):\n orscope = \"at most\"\n print(\"write 1st cardinality of restriction\")\n else:\n orproperty2 = self.vocab.niceName(orp)\n # <- that must be another specific cardinality restriction\n orpropertyvalue2 = orpv\n if (str(orp) == \"http://www.w3.org/2002/07/owl#cardinality\"):\n orscope2 = \"exactly\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#minCardinality\"):\n orscope2 = \"at least\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#maxCardinality\"):\n orscope2 = \"at most\"\n print(\"write another cardinality of restriction\")\n orpcounter = orpcounter + 1\n else:\n print(\"here I am with \", orp)\n \n if (str(orproperty2) != \"\"):\n termStr3 = \"\"\" and <em>%s</em> \n <span about=\"[_:%s]\" property=\"%s\" >%s</span> time(s)\"\"\" % (orscope2, orsubclass, orproperty2, orpropertyvalue2)\n contentStr3 = \"%s %s\" % (contentStr3, termStr3)\n \n # write also last/one restriction\n termStr1 = \"\"\"<span about =\"%s\" rel=\"rdfs:subClassOf\" resource=\"[_:%s]\"></span>\\n\"\"\" % (term.uri, orsubclass)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"owl:Restriction\"></span>The property \n <span about=\"[_:%s]\" rel=\"owl:onProperty\" href=\"%s\"><a href=\"#%s\">%s</a></span> must be set <em>%s</em> \n <span about=\"[_:%s]\" property=\"%s\" datatype=\"xsd:nonNegativeInteger\" >%s</span> time(s)\"\"\" % (orsubclass, orsubclass, oronproperty, prop.id, prop.type, orscope, orsubclass, orproperty, orpropertyvalue)\n \n contentStr2 = \"%s %s %s %s\\n\" % (contentStr2, termStr1, termStr2, contentStr3)\n \n ordone = True\n print(\"owl restriction modelling done for\", term.uri)\n \n if contentStr != \"\":\n subClassOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n if contentStr2 != \"\":\n restriction = \"%s <td> %s </td></tr>\" % (startStr2, contentStr2)\n \n # class has sub class -> handles only \"real\" super classes\n hasSubClass = ''\n \n q = 'SELECT ?sc ?l WHERE {?sc rdfs:subClassOf <%s>. 
?sc rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Has sub class</th>\\n'\n \n contentStr = ''\n for (subclass, label) in relations:\n sub = Term(subclass)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasSubClass = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n q = 'SELECT ?sc WHERE {?sc rdfs:subClassOf <%s> } ' % (term.uri)\n \n relations = g.query(q)\n for (row) in relations:\n subclass = row[0]\n subclassnice = self.vocab.niceName(subclass)\n #print(\"has subclass \", subclass)\n #print(\"has subclassnice \", subclassnice)\n # check niceName result\n colon = subclassnice.find(':')\n if(subclass[0].find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<a href=\"%s\">%s</a>\\n\"\"\" % (subclass, subclassnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasSubClass = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n \n # is defined by\n classIsDefinedBy = ''\n \n q = 'SELECT ?idb WHERE { <%s> rdfs:isDefinedBy ?idb } ' % (term.uri)\n relations = g.query(q)\n startStr\t = '\\n'\n \n contentStr = ''\n for (isdefinedby) in relations:\n termStr = \"\"\"<span rel=\"rdfs:isDefinedBy\" href=\"%s\"></span>\\n\"\"\" % (isdefinedby)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n classIsDefinedBy = \"%s <tr><td> %s </td></tr>\" % (startStr, contentStr)\n \n \n # disjoint with\n isDisjointWith = ''\n \n q = 'SELECT ?dj ?l WHERE { <%s> <http://www.w3.org/2002/07/owl#disjointWith> ?dj . ?dj rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Disjoint With:</th>\\n'\n \n contentStr = ''\n for (disjointWith, label) in relations:\n termStr = \"\"\"<span rel=\"owl:disjointWith\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (disjointWith, label, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n isDisjointWith = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # owl class\n oc = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#Class> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">OWL Class</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:Class\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#Class\"></span>\"\"\"\n oc = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # rdfs class\n rc = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2000/01/rdf-schema#Class> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">RDFS Class</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"rdfs:Class\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2000/01/rdf-schema#Class\"></span>\"\"\"\n rc = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # dcterms agent class\n dctac = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://purl.org/dc/terms/AgentClass> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">DCTerms Agent Class</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"dcterms:AgentClass\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"ttp://purl.org/dc/terms/AgentClass\"></span>\"\"\"\n dctac = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n # end\n\n dn = os.path.join(self.basedir, \"doc\")\n filename = os.path.join(dn, term.id + 
\".en\")\n s = ''\n try:\n f = open (filename, \"r\")\n s = f.read()\n except:\n s = ''\n\n # if we want validation queries this is where it looks for them.\n filename = os.path.join(dn, term.id + \".sparql\")\n fileStr = ''\n try:\n f = open (filename, \"r\")\n fileStr = f.read()\n fileStr = \"<h4><a name=\\\"queries_\" + term.id + \"\\\"></a>\" + term.id + \" Validation Query</h4><pre>\" + cgi.escape(ss) + \"</pre>\"\n except:\n fileStr = ''\n\n queries = queries + \"\\n\" + fileStr\n sn = self.vocab.niceName(term.uri)\n s = termlink(s)\n\n # danbri added another term.id 20010101 and removed term.status\n # ATTENTION: writing all class descriptions into template here\n zz = eg % (term.id, term.uri, term.type, \"Class\", sn, term.label, term.comment, term.status, domainsOfClass, rangesOfClass + subClassOf + restriction + hasSubClass + classIsDefinedBy + isDisjointWith + oc + rc + dctac, s, term.id, term.id)\n\n ## we add to the relevant string - stable, unstable, testing or archaic\n if(term.status == \"stable\"):\n stableTxt = stableTxt + zz\n if(term.status == \"testing\"):\n testingTxt = testingTxt + zz\n if(term.status == \"unstable\"):\n unstableTxt = unstableTxt + zz\n if(term.status == \"archaic\"):\n archaicTxt = archaicTxt + zz\n if((term.status == None) or (term.status == \"\") or (term.status == \"unknown\")):\n archaicTxt = archaicTxt + zz\n\n ## then add the whole thing to the main tl string\n tl = tl + \"<h2>Classes</h2>\\n\"\n tl = \"%s %s\" % (tl, stableTxt + \"\\n\" + testingTxt + \"\\n\" + unstableTxt + \"\\n\" + archaicTxt)\n tl = tl + \"<h2>Properties</h2>\\n\"\n\n # properties\n stableTxt = ''\n testingTxt = ''\n unstableTxt = ''\n archaicTxt = ''\n\n for term in self.vocab.properties:\n domainsOfProperty = ''\n rangesOfProperty = ''\n\n # domain of properties\n g = self.vocab.graph\n q = 'SELECT ?d ?l WHERE {<%s> rdfs:domain ?d . ?d rdfs:label ?l } ' % (term.uri)\n # print(\"term.uri before \", term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Domain:</th>\\n'\n\n contentStr = ''\n contentStr3 = ''\n for (domain, label) in relations:\n dom = Term(domain)\n termStr = \"\"\"<span rel=\"rdfs:domain\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (domain, dom.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n q = 'SELECT ?d WHERE {<%s> rdfs:domain ?d } ' % (term.uri)\n\n relations = g.query(q)\n for (row) in relations:\n domain = row[0]\n domainnice = self.vocab.niceName(domain)\n # print(\"domain \",domain)\n # print(\"domainnice \",domainnice)\n # check niceName result\n # TODO: handle other domain types\n colon = domainnice.find(':')\n if(domain.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<span rel=\"rdfs:domain\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (domain, domain, domainnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n else:\n # that will be a huge hack now\n # 1st: pick out the union domain and its list bnode\n q2 = 'SELECT ?d ?url ?urli ?urlipt WHERE {<%s> rdfs:domain ?d . ?d <http://www.w3.org/2002/07/owl#unionOf> ?url . 
?url ?urlipt ?urli } ' % (term.uri)\n print(\"try to fetch union domain with \", q2)\n relations2 = g.query(q2)\n \n contentStr2 = ''\n termStr2 = ''\n listbnode = ''\n urfirstlistitem = ''\n urnextlistbnode = ''\n urfirstlistitemnice = ''\n domainbnode = ''\n for (domain, list, listitem, listitempropertytype) in relations2:\n # print(\"list \", list , \" :: listitem \" , listitem , \" :: listitempropertytype \" , listitempropertytype)\n listbnode = list\n domainbnode = domain\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n print(\"listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n print(\"urnextlistbnode \", urnextlistbnode)\n \n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\n <span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (listbnode, listbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice, listbnode, urnextlistbnode)\n contentStr2 = \"%s %s\" % (contentStr2, termStr2)\n \n # 2nd: go down the list and collect all list items\n if(urnextlistbnode != \"\"):\n oldlistbnode = ''\n termstr3 = ''\n while (str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n q3 = 'SELECT ?urnlbn ?urlipt ?urli WHERE {?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <%s> . ?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> ?urnlbn . ?urnlbn ?urlipt ?urli } ' % (urfirstlistitem)\n print(\"try to fetch more lists with \" , q3)\n relations3 = g.query(q3)\n \n oldlistbnode = urnextlistbnode\n for (urnlbn, listitempropertytype, listitem) in relations3:\n print(\"what to do next with urnlbn \" , urnlbn , \" :: listitempropertytype \" , listitempropertytype , \" :: listitem \" , listitem , \" :: urnextlistbnode \" , urnextlistbnode)\n # to check the bnode of the list in the union domain\n if(str(urnlbn) == str(oldlistbnode)):\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\"\"\" % (oldlistbnode, oldlistbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice)\n print(\"new listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n if(str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n else:\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" href=\"%s\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n print(\"new urnextlistbnode \", urnextlistbnode) \n contentStr2 = \"%s or %s %s\" % (contentStr2, termStr2, termStr3) \n print(\"here I am\")\n termStr = \"\"\"<span rel=\"rdfs:domain\" resource=\"[_:%s]\"></span>\n <span about=\"[_:%s]\" typeof=\"owl:Class\"></span>\n <span about=\"[_:%s]\" rel=\"owl:unionOf\" resource=\"[_:%s]\"></span>\"\"\" % (domainbnode, domainbnode, domainbnode, listbnode)\n contentStr3 = \"%s %s %s\" % (contentStr3, termStr, contentStr2)\n\n # 
merge together the results of both queries\n if contentStr3 != \"\":\n contentStr = \"%s %s\" % (contentStr, contentStr3)\n if contentStr != \"\":\n domainsOfProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # range of properties\n q2 = 'SELECT ?r ?l WHERE {<%s> rdfs:range ?r . ?r rdfs:label ?l } ' % (term.uri)\n relations2 = g.query(q2)\n startStr = '<tr><th>Range:</th>\\n'\n contentStr = ''\n contentStr3 = ''\n for (range, label) in relations2:\n ran = Term(range)\n termStr = \"\"\"<span rel=\"rdfs:range\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (range, ran.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n q = 'SELECT ?r WHERE {<%s> rdfs:range ?r } ' % (term.uri)\n\n relations = g.query(q)\n for (row) in relations:\n range = row[0]\n rangenice = self.vocab.niceName(range)\n # print(\"range \",range)\n # print(\"rangenice \",rangenice)\n # check niceName result\n # TODO: handle other range types\n colon = rangenice.find(':')\n if(range.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<span rel=\"rdfs:range\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (range, range, rangenice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n else:\n # that will be a huge hack now\n # 1st: pick out the union range and its list bnode\n q2 = 'SELECT ?r ?url ?urli ?urlipt WHERE {<%s> rdfs:range ?r . ?r <http://www.w3.org/2002/07/owl#unionOf> ?url . ?url ?urlipt ?urli } ' % (term.uri)\n print(\"try to fetch union range with \", q2)\n relations2 = g.query(q2)\n \n contentStr2 = ''\n termStr2 = ''\n listbnode = ''\n urfirstlistitem = ''\n urnextlistbnode = ''\n urfirstlistitemnice = ''\n rangebnode = ''\n for (range, list, listitem, listitempropertytype) in relations2:\n # print(\"list \", list , \" :: listitem \" , listitem , \" :: listitempropertytype \" , listitempropertytype)\n listbnode = list\n rangebnode = range\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n print(\"listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n print(\"urnextlistbnode \", urnextlistbnode)\n \n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\n <span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (listbnode, listbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice, listbnode, urnextlistbnode)\n contentStr2 = \"%s %s\" % (contentStr2, termStr2)\n \n # 2nd: go down the list and collect all list items\n if(urnextlistbnode != \"\"):\n oldlistbnode = ''\n termstr3 = ''\n while (str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n q3 = 'SELECT ?urnlbn ?urlipt ?urli WHERE {?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <%s> . ?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> ?urnlbn . 
?urnlbn ?urlipt ?urli } ' % (urfirstlistitem)\n print(\"try to fetch more lists with \" , q3)\n relations3 = g.query(q3)\n \n oldlistbnode = urnextlistbnode\n for (urnlbn, listitempropertytype, listitem) in relations3:\n print(\"what to do next with urnlbn \" , urnlbn , \" :: listitempropertytype \" , listitempropertytype , \" :: listitem \" , listitem , \" :: urnextlistbnode \" , urnextlistbnode)\n # to check the bnode of the list in the union range\n if(str(urnlbn) == str(oldlistbnode)):\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\"\"\" % (oldlistbnode, oldlistbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice)\n print(\"new listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n if(str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n else:\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" href=\"%s\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n print(\"new urnextlistbnode \", urnextlistbnode)\n \n contentStr2 = \"%s or %s %s\" % (contentStr2, termStr2, termStr3)\n \n print(\"here I am\")\n \n termStr = \"\"\"<span rel=\"rdfs:range\" resource=\"[_:%s]\"></span>\n <span about=\"[_:%s]\" typeof=\"owl:Class\"></span>\n <span about=\"[_:%s]\" rel=\"owl:unionOf\" resource=\"[_:%s]\"></span>\"\"\" % (rangebnode, rangebnode, rangebnode, listbnode)\n contentStr3 = \"%s %s %s\" % (contentStr3, termStr, contentStr2)\n\n # merge together the results of both queries\n if contentStr3 != \"\":\n contentStr = \"%s %s\" % (contentStr, contentStr3)\n if contentStr != \"\":\n rangesOfProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n # property sub property of -> only for property included in this ontology specification\n subPropertyOf = ''\n\n q = 'SELECT ?sp ?l WHERE {<%s> rdfs:subPropertyOf ?sp . 
?sp rdfs:label ?l } ' % (term.uri)\n # print(\"term.uri \", term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Sub property of</th>\\n'\n\n contentStr = ''\n for (subproperty, label) in relations:\n sub = Term(subproperty)\n termStr = \"\"\"<span rel=\"rdfs:subPropertyOf\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (subproperty, sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n subPropertyOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n q1 = 'SELECT ?sp WHERE {<%s> rdfs:subPropertyOf ?sp } ' % (term.uri)\n \n relations = g.query(q1)\n for (row) in relations:\n subproperty = row[0]\n subpropertynice = self.vocab.niceName(subproperty)\n # check niceName result\n colon = subpropertynice.find(':')\n if(subproperty.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<span rel=\"rdfs:subPropertyOf\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (subproperty, subproperty, subpropertynice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n print(\"must be super property from another ns\")\n \n if contentStr != \"\":\n subPropertyOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n # property has sub property -> only for property included in this ontology specification\n hasSubProperty = ''\n\n q = 'SELECT ?sp ?l WHERE {?sp rdfs:subPropertyOf <%s>. ?sp rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Has sub property</th>\\n'\n\n contentStr = ''\n for (subproperty, label) in relations:\n sub = Term(subproperty)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n hasSubProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # property inverse property of -> only for property included in this ontology specification\n inverseOf = ''\n\n q = 'SELECT ?ip ?l WHERE {<%s> <http://www.w3.org/2002/07/owl#inverseOf> ?ip . ?ip rdfs:label ?l } ' % (term.uri)\n # print(\"term.uri \", term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Inverse property of</th>\\n'\n\n contentStr = ''\n for (inverseproperty, label) in relations:\n ipnice = self.vocab.niceName(inverseproperty)\n colon = ipnice.find(':')\n # check wether explicite defined inverse property or anonymous defined inverse property\n if colon > 0:\n inverse = Term(inverseproperty)\n termStr = \"\"\"<span rel=\"owl:inverseOf\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (inverseproperty, inverse.id, label)\n #print(\"inverse property must be explicitly defined\")\n else:\n q2 = 'SELECT ?ipt WHERE {<%s> <http://www.w3.org/2002/07/owl#inverseOf> ?ip . ?ip rdfs:label ?l . 
?ip rdf:type ?ipt } ' % (term.uri)\n relations2 = g.query(q2)\n\n contentStr2 = ''\n iptcounter = 0\n termStr2 = ''\n for (row) in relations2:\n inversepropertytype = row[0]\n print(\"inversepropertytype \", inversepropertytype)\n iptype = ''\n termStr3 = ''\n iptypenice = self.vocab.niceName(inversepropertytype)\n if (str(inversepropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#Property\"):\n iptype = \"RDF Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#ObjectProperty\"):\n iptype = \"Object Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#DatatypeProperty\"):\n iptype = \"Datatype Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#InverseFunctionalProperty\"):\n iptype = \"Inverse Functional Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#FunctionalProperty\"):\n iptype = \"Functional Property\"\n if (iptype != \"\"):\n termStr3 = \"\"\"<span about=\"[_:%s]\" typeof=\"%s\"><strong>%s</strong></span>\"\"\" % (inverseproperty, iptypenice, iptype)\n if (iptcounter > 0):\n termStr2 = \"%s, %s\" % (termStr2, termStr3)\n else:\n termStr2 = termStr3\n iptcounter = iptcounter + 1\n if (termStr2 != \"\"):\n contentStr2 = \"(%s)\" % (termStr2)\n termStr = \"\"\"<span rel=\"owl:inverseOf\" resource=\"[_:%s]\">the anonymous defined property with the label\n \\'<em about=\"[_:%s]\" property=\"rdfs:label\">%s</em>\\'</span>\\n\"\"\" % (inverseproperty, inverseproperty, label)\n termStr = \"%s %s\" % (termStr, contentStr2)\n print(\"inverse property must be anonymous defined\")\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n inverseOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n # print(\"write inverse property\")\n\n # property has inverse property -> only for property included in this ontology specification\n hasInverseProperty = ''\n\n q = 'SELECT ?ip ?l WHERE {?ip <http://www.w3.org/2002/07/owl#inverseOf> <%s>. 
?ip rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Has inverse property</th>\\n'\n\n contentStr = ''\n for (inverseproperty, label) in relations:\n inverse = Term(inverseproperty)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (inverse.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n hasInverseProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n # print(\"write has inverse property\")\n\n\n # is defined by\n propertyIsDefinedBy = ''\n\n q = 'SELECT ?idb WHERE { <%s> rdfs:isDefinedBy ?idb } ' % (term.uri)\n relations = g.query(q)\n startStr = '\\n'\n\n contentStr = ''\n for (row) in relations:\n isdefinedby = row[0]\n termStr = \"\"\"<span rel=\"rdfs:isDefinedBy\" href=\"%s\"></span>\\n\"\"\" % (isdefinedby)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n propertyIsDefinedBy = \"%s <tr><td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # equivalent property\n equivalentProperty = ''\n\n q = 'SELECT ?ep WHERE { <%s> <http://www.w3.org/2002/07/owl#equivalentProperty> ?ep } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Equivalent Property</th>'\n\n contentStr = ''\n for (row) in relations:\n equiprop = row[0]\n equipropnice = self.vocab.niceName(equiprop)\n termStr = \"\"\"<span rel=\"owl:equivalentProperty\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (equiprop, equiprop, equipropnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n equivalentProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n # rdf property\n rp = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/1999/02/22-rdf-syntax-ns#Property> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">RDF Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"rdf:Property\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#Property\"></span>\"\"\"\n rp = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # object property\n op = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#ObjectProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Object Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:ObjectProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#ObjectProperty\"></span>\"\"\"\n op = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # datatype property\n dp = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#DatatypeProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Datatype Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:DatatypeProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#DatatypeProperty\"></span>\"\"\"\n dp = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # inverse functional property\n ifp = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#InverseFunctionalProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Inverse Functional Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:InverseFunctionalProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#InverseFunctionalProperty\"></span>\"\"\"\n ifp = \"%s <td> %s 
</td></tr>\" % (startStr, termStr)\n\n\n # functonal property\n fp = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#FunctionalProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Functional Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:FunctionalProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#FunctionalProperty\"></span>\"\"\"\n fp = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n # end\n\n dn = os.path.join(self.basedir, \"doc\")\n filename = os.path.join(dn, term.id + \".en\")\n\n s = ''\n try:\n f = open (filename, \"r\")\n s = f.read()\n except:\n s = ''\n\n sn = self.vocab.niceName(term.uri)\n s = termlink(s)\n\n # danbri added another term.id 20010101\n # ATTENTION: writing all property descriptions into template here\n zz = eg % (term.id, term.uri, term.type, \"Property\", sn, term.label, term.comment, term.status, domainsOfProperty, rangesOfProperty + subPropertyOf + hasSubProperty + inverseOf + hasInverseProperty + propertyIsDefinedBy + equivalentProperty + rp + op + dp + ifp + fp, s, term.id, term.id)\n\n ## we add to the relevant string - stable, unstable, testing or archaic\n if(term.status == \"stable\"):\n stableTxt = stableTxt + zz\n if(term.status == \"testing\"):\n testingTxt = testingTxt + zz\n if(term.status == \"unstable\"):\n unstableTxt = unstableTxt + zz\n if(term.status == \"archaic\"):\n archaicTxt = archaicTxt + zz\n if((term.status == None) or (term.status == \"\") or (term.status == \"unknown\")):\n archaicTxt = archaicTxt + zz\n\n ## then add the whole thing to the main tl string\n tl = \"%s %s\" % (tl, stableTxt + \"\\n\" + testingTxt + \"\\n\" + unstableTxt + \"\\n\" + archaicTxt)\n ## tl = \"%s %s\" % (tl, zz)\n\n\n # ATTENTION: let's begin with the individual stuff here\n # do this only, when individuals are available\n if (len(self.vocab.individuals) > 0):\n tl = tl + \"<h2>Individuals</h2>\\n\"\n\n # individuals\n stableTxt = ''\n testingTxt = ''\n unstableTxt = ''\n archaicTxt = ''\n\n for term in self.vocab.individuals:\n # individual has type\n hasType = ''\n \n q = 'SELECT ?t ?l WHERE {<%s> rdf:type ?t. 
?t rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Type:</th>\\n'\n \n contentStr = ''\n for (type, label) in relations:\n t = Term(type)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (type.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasType = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n q = 'SELECT ?t WHERE {<%s> rdf:type ?t } ' % (term.uri)\n \n relations = g.query(q)\n for (type) in relations:\n typenice = self.vocab.niceName(type)\n print(\"has type \", type)\n print(\"has typenice \", typenice)\n # check niceName result\n colon = typenice.find(':')\n if(type.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<a href=\"%s\">%s</a>\\n\"\"\" % (type, typenice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasType = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # is defined by\n individualIsDefinedBy = ''\n\n q = 'SELECT ?idb WHERE { <%s> rdfs:isDefinedBy ?idb } ' % (term.uri)\n relations = g.query(q)\n startStr = '\\n'\n\n contentStr = ''\n for (isdefinedby) in relations:\n termStr = \"\"\"<span rel=\"rdfs:isDefinedBy\" href=\"%s\"></span>\\n\"\"\" % (isdefinedby)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n individualIsDefinedBy = \"%s <tr><td> %s </td></tr>\" % (startStr, contentStr)\n\n # end\n\n dn = os.path.join(self.basedir, \"doc\")\n filename = os.path.join(dn, term.id + \".en\")\n\n s = ''\n try:\n f = open (filename, \"r\")\n s = f.read()\n except:\n s = ''\n\n sn = self.vocab.niceName(term.uri)\n s = termlink(s)\n\n # ATTENTION: writing all individual descriptions into template here\n zz = ig % (term.id, term.uri, term.type, \"Individual\", sn, term.label, term.comment, term.status, hasType + individualIsDefinedBy, s, term.id, term.id)\n\n ## we add to the relevant string - stable, unstable, testing or archaic\n if(term.status == \"stable\"):\n stableTxt = stableTxt + zz\n if(term.status == \"testing\"):\n testingTxt = testingTxt + zz\n if(term.status == \"unstable\"):\n unstableTxt = unstableTxt + zz\n if(term.status == \"archaic\"):\n archaicTxt = archaicTxt + zz\n if((term.status == None) or (term.status == \"\") or (term.status == \"unknown\")):\n archaicTxt = archaicTxt + zz\n \n ## then add the whole thing to the main tl string\n tl = \"%s %s\" % (tl, stableTxt + \"\\n\" + testingTxt + \"\\n\" + unstableTxt + \"\\n\" + archaicTxt)\n\n ## ensure termlist tag is closed\n return(tl + \"\\n\" + queries + \"</div>\\n</div>\")", "def printTerm4(self):\n vprint=[]\n counter=0\n for x in self.pl:\n if self.pl[x] != ['I']:\n counter=counter+1\n vprint += '\\sigma_'\n cosa=self.pl[x][0]\n vprint += self.pl[x]\n vprint += '^'\n vprint += str(x)\n vprint=''.join(vprint)\n return self.c,vprint,counter", "def _repr_latex_(self):\n return unit_format.Latex.to_string(self)", "def evaluate(self, env):\n if self.ident in env.functions:\n arg_vals = [expr.evaluate(env) for expr in self.args]\n try:\n out = env.functions[self.ident](*arg_vals)\n except Exception, exc:\n # Function raised exception! Maybe inlining the name of\n # the exception will help debug.\n return u'<%s>' % unicode(exc)\n return unicode(out)\n else:\n return self.original", "def _repr_latex_(self):\n return f\"${self._reprlatex}$\"", "def __exp_tree(self):\n print(style.YELLOW(\"[ ¬ : ! 
][ ∨ : | ][ ∧ : & ][ → : > ][ ↔ : ~ ][ ⊤ : 1 ][ ⊥ : 0 ]\") + style.RESET(\"\"))\n expr = input(style.BLUE(\"Insert your expression(with the help of the table above: \") + style.RESET(\"\"))\n expr = self.__convert_str(expr)\n print(expr)\n form = WFPropositionalFormula(expr)\n form.is_WFF()\n form.store_as_exp_tree()\n form.print_exp_tree()", "def test_algebra_print():\n\n # The language of constructors we are allowed to use with this context. \n lang = frozenset([Var, Add, Sub, Mul, Val])\n\n @term_algebra(lang)\n class PPrint():\n \"\"\"\n For every term in our language we write a function that can add some\n info to its key value store.\n\n Because we traverse in order, this will flesh out those pieces of\n information in a nice bottom-up manner. \n \"\"\"\n\n def _init_algebra(self, ctxt):\n pass\n def _init_pass(self, ctxt):\n pass\n def _end_pass(self, ctxt):\n return False\n def _end_algebra(self, ctxt):\n return None\n\n def run_add(self, ident : 'ID[Add]', val : 'Add[ID]'):\n ident[\"pp\"] = \"(\" + val.exp_a[\"pp\"] + \" + \" + val.exp_b[\"pp\"] + \")\"\n\n def run_sub(self, ident : 'ID[Sub]', val : 'Sub[ID]') -> None:\n ident[\"pp\"] = \"(\" + val.exp_a[\"pp\"] + \" - \" + val.exp_b[\"pp\"] + \")\"\n \n def run_mul(self, ident : 'ID[Mul]', val : 'Mul[ID]') -> None:\n ident[\"pp\"] = \"(\" + val.exp_a[\"pp\"] + \" * \" + val.exp_b[\"pp\"] + \")\"\n\n def run_var(self, ident : 'ID[Var]', val : 'Var[ID]') -> None:\n ident[\"pp\"] = val.name \n\n def run_val(self, ident : 'ID[Val]', val : 'Val[ID]') -> None:\n ident[\"pp\"] = repr(val.val) \n\n\n ctxt = Context(\"Foo\", _term_language=lang)\n\n x = ctxt.insert_fresh(Var('X'))\n y = ctxt.insert_fresh(Var('Y'))\n\n c = ctxt.insert(Add(x,Mul(y,Val(5))))\n\n printer = PPrint()\n\n ctxt.run_algebra(printer)\n\n assert x[\"pp\"] == \"X\"\n assert c[\"pp\"] == \"(X + (Y * 5))\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
"Get a representation of the functional as TeX, and list of terms involved."
"def get_tex(self) -> typing.Tuple[str, typing.List[str]]:\n    desc = \"\\boldsymbol{v}\\mapsto\"\n    desc += \"\\nabla\\cdot\\boldsymbol{v}\"\n    desc += \"(\" + \",\".join([_to_tex(i, True) for i in self.dof_point()]) + \")\"\n    return desc, []"
[ "def get_tex(self) -> typing.Tuple[str, typing.List[str]]:\n assert isinstance(self.point, VectorFunction)\n if len(self.point) == 1:\n desc = \"v\\\\mapsto \"\n desc += f\"v'({','.join([_to_tex(i, True) for i in self.point])})\"\n return desc, []\n desc = \"v\\\\mapsto\"\n desc += \"\\\\frac{\\\\partial\"\n if sum(self.derivative) > 1:\n desc += f\"^{{{sum(self.derivative)}}}\"\n desc += \"}{\"\n for v, i in zip(\"xyz\", self.derivative):\n if i > 0:\n desc += f\"\\\\partial {v}\"\n if i > 1:\n desc += f\"^{{{i}}}\"\n desc += \"}\"\n desc += f\"v({','.join([_to_tex(i, True) for i in self.point])})\"\n return desc, []", "def as_tex(self) -> str:\n out = sympy.latex(sympy.simplify(sympy.expand(self._f)))\n out = out.replace(\"\\\\left[\", \"\\\\left(\")\n out = out.replace(\"\\\\right]\", \"\\\\right)\")\n return out", "def _repr_latex_(self):\n return self._latex", "def terms_string(*args, **kwargs):\n return sym.latex(sym.Add(*args, evaluate=False), order='none', **kwargs)", "def display(self, basis=None, format_spec=None):\n from sage.misc.latex import latex\n from sage.tensor.modules.format_utilities import is_atomic, \\\n FormattedExpansion\n if basis is None:\n basis = self._fmodule._def_basis\n cobasis = basis.dual_basis()\n comp = self.comp(basis)\n terms_txt = []\n terms_latex = []\n for ind in comp.non_redundant_index_generator():\n ind_arg = ind + (format_spec,)\n coef = comp[ind_arg]\n if coef != 0:\n bases_txt = []\n bases_latex = []\n for k in range(self._tensor_rank):\n bases_txt.append(cobasis[ind[k]]._name)\n bases_latex.append(latex(cobasis[ind[k]]))\n basis_term_txt = \"/\\\\\".join(bases_txt)\n basis_term_latex = r\"\\wedge \".join(bases_latex)\n coef_txt = repr(coef)\n if coef_txt == \"1\":\n terms_txt.append(basis_term_txt)\n terms_latex.append(basis_term_latex)\n elif coef_txt == \"-1\":\n terms_txt.append(\"-\" + basis_term_txt)\n terms_latex.append(\"-\" + basis_term_latex)\n else:\n coef_latex = latex(coef)\n if is_atomic(coef_txt):\n terms_txt.append(coef_txt + \" \" + basis_term_txt)\n else:\n terms_txt.append(\"(\" + coef_txt + \") \" +\n basis_term_txt)\n if is_atomic(coef_latex):\n terms_latex.append(coef_latex + basis_term_latex)\n else:\n terms_latex.append(r\"\\left(\" + coef_latex + \\\n r\"\\right)\" + basis_term_latex)\n if not terms_txt:\n expansion_txt = \"0\"\n else:\n expansion_txt = terms_txt[0]\n for term in terms_txt[1:]:\n if term[0] == \"-\":\n expansion_txt += \" - \" + term[1:]\n else:\n expansion_txt += \" + \" + term\n if not terms_latex:\n expansion_latex = \"0\"\n else:\n expansion_latex = terms_latex[0]\n for term in terms_latex[1:]:\n if term[0] == \"-\":\n expansion_latex += term\n else:\n expansion_latex += \"+\" + term\n if self._name is None:\n resu_txt = expansion_txt\n else:\n resu_txt = self._name + \" = \" + expansion_txt\n if self._latex_name is None:\n resu_latex = expansion_latex\n else:\n resu_latex = latex(self) + \" = \" + expansion_latex\n return FormattedExpansion(resu_txt, resu_latex)", "def _repr_latex_(self):\n if not self.poly:\n return '$0$'\n res = ['$']\n first = True\n for exponent, coef in enumerate(self.poly):\n if coef == 0:\n continue\n monomial = latex_monomial(exponent, coef, self.var)\n if first:\n first = False\n res.append(monomial)\n continue\n oper = '+'\n if monomial[0] == '-':\n oper = '-'\n monomial = monomial[1:]\n res.append(oper)\n res.append(monomial)\n res.append('$')\n return ' '.join(res)", "def _reprlatex(self):\n pass # pragma: no cover", "def compile_term(self):\n\n 
self.out.write('<term>\\n')\n\n unary_ops = ['-', '~']\n\n #############################################\n ### constant, name, expression or unaryOp ###\n #############################################\n\n # '(' expression ')'\n if self.tokenizer.token == '(':\n # '('\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n # expression\n self.advance()\n self.compile_expression()\n\n # ')'\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n self.advance()\n \n # unaryOp term\n elif self.tokenizer.token in unary_ops:\n # unaryOp\n unary_op_line = self.format_line()\n self.out.write(unary_op_line)\n\n # term\n self.advance()\n self.compile_term()\n\n # integerConstant | stringConstant | keywordConstant |\n # varName | varName '[' expression ']' | subroutineCall\n else:\n # constant or name\n constant_or_name = self.format_line('used')\n self.out.write(constant_or_name)\n\n # varName '[' expression ']' | subroutineCall or end of compile_term function\n # Check if expression: '[', subroutineCall: '(' with parameter skip_subroutine_name = True,\n # otherwise end of compile_term function\n self.advance()\n # '[' expression ']'\n if self.tokenizer.token == '[':\n # '['\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n # expression\n self.advance()\n self.compile_expression()\n\n # ']'\n symbol_line = self.format_line()\n self.out.write(symbol_line)\n\n self.advance()\n \n # subroutineCall with skip_subroutine_name=True\n elif self.tokenizer.token in ['(', '.']:\n self.compile_subroutine_call(skip_subroutine_name=True)\n\n self.out.write('</term>\\n')", "def latex_rep(self):\n\n from CommonFiles.symbolics import LatexVisitor\n import ast\n\n class ModelLatexVisitor(LatexVisitor):\n \"\"\" class to convert strings to latex strings \"\"\"\n # def __init__(self, states, params):\n # super(ModelLatexVisitor, self).__init__()\n # self.model_states = states\n # self.model_params = params\n\n def visit_Name(self, n):\n if n.id in self.model_states.listrep():\n return r'\\mathrm{\\bf ' + n.id + r'}'\n\n elif n.id in self.model_params.listrep():\n baseindex = n.id.find('_')\n base = n.id[:baseindex]\n # Name or index if no name\n tempname = n.id[baseindex+1:]\n if '_' in tempname:\n # name and index\n ind = tempname.find('_')\n name = tempname[:ind]\n pindex = tempname[ind+1:]\n else:\n name = tempname\n pindex = None\n\n if pindex: return r'\\mathrm{\\bf ' + base + r'_' + r'{'\\\n + name + r',' + pindex + r'}' + r'}'\n else: return r'\\mathrm{\\bf ' + base + r'_' + r'{'\\\n + name + r'}' + r'}'\n\n else: return n.id\n\n visitor = ModelLatexVisitor()\n visitor.model_states = self.states\n visitor.model_params = self.params\n\n strlist = []\n for i, ode in enumerate(self.odes):\n pt = ast.parse(str(ode))\n lhs = (r'\\frac{d\\mathrm{\\bf ' +\n self.states.listrep()[i] + r'}}{dt} &= ')\n\n strlist += [lhs + visitor.visit(pt.body[0].value) + r' \\\\']\n\n strlist[-1] = strlist[-1][:-2]\n\n return strlist", "def generate_string_latex(self):\n return '\\n'.join([at.generate_string_latex() for at in self.atom_list])", "def get_polyterms_w_xform(self):\n if self.polytermx_cache:\n return self.polytermx_cache\n greens = self.decompose_greens()\n self.polytermx_cache = []\n for (pp,hs,xi) in [self.poly_term_w_xi(t) for t in greens]:\n self.polytermx_cache += [(pp.full_simplify(), hs, xi)]\n\n return self.polytermx_cache", "def tautology(formula):\n return onallvaluations(formula)", "def termlist(self):\n stableTxt = ''\n testingTxt = ''\n unstableTxt = ''\n archaicTxt 
= ''\n \n queries = ''\n c_ids, p_ids, i_ids = self.vocab.azlist()\n tl = \"\"\"<div class=\"termlist\">\"\"\"\n \n # look, whether individuals are available\n if (len(self.vocab.individuals) > 0):\n tl = \"\"\"%s<h3>Classes, Properties and Individuals (full detail)</h3>\\n<div class='termdetails'><br />\\n\\n\"\"\" % tl\n self.concepttypes = \"Classes, Properties and Individuals\"\n self.concepttypes2 = \"class (categories or types), by property and by individual\"\n self.concepttypes3 = \"classes, properties and individuals\"\n else:\n tl = \"\"\"%s<h3>Classes and Properties (full detail)</h3>\\n<div class='termdetails'><br />\\n\\n\"\"\" % tl\n self.concepttypes = \"Classes and Properties\"\n self.concepttypes2 = \"class (categories or types) and by property\"\n self.concepttypes3 = \"classes and properties\"\n\n \n # danbri hack 20100101 removed: href=\"http://www.w3.org/2003/06/sw-vocab-status/ns#%s\" pending discussion w/ libby and leigh re URIs\n \n # first classes, then properties\n eg = \"\"\"<div class=\"specterm\" id=\"%s\" about=\"%s\" typeof=\"%s\">\n <h3>%s: %s</h3> \n <em property=\"rdfs:label\" >%s</em> - <span property=\"rdfs:comment\" >%s</span> <br /><table style=\"th { float: top; }\">\n <tr><th>Status:</th>\n <td><span property=\"vs:status\" >%s</span></td></tr>\n %s\n %s\n </table>\n %s\n <p style=\"float: right; font-size: small;\">[<a href=\"#%s\">#</a>] <!-- %s --> [<a href=\"#glance\">back to top</a>]</p>\n <br/>\n </div>\"\"\"\n \n # for individuals\n ig = \"\"\"<div class=\"specterm\" id=\"%s\" about=\"%s\" typeof=\"%s\">\n <h3>%s: %s</h3> \n <em property=\"dc:title\" >%s</em> - <span property=\"dc:description\" >%s</span> <br /><table style=\"th { float: top; }\">\n <tr><th>Status:</th>\n <td><span property=\"vs:status\" >%s</span></td></tr>\n %s\n </table>\n %s\n <p style=\"float: right; font-size: small;\">[<a href=\"#%s\">#</a>] <!-- %s --> [<a href=\"#glance\">back to top</a>]</p>\n <br/>\n </div>\"\"\"\n \n # replace this if you want validation queries: xxx danbri\n # <p style=\"float: right; font-size: small;\">[<a href=\"#term_%s\">permalink</a>] [<a href=\"#queries_%s\">validation queries</a>] [<a href=\"#glance\">back to top</a>]</p>\n # todo, push this into an api call (c_ids currently setup by az above)\n # classes\n for term in self.vocab.classes:\n # strings to use later\n domainsOfClass = ''\n rangesOfClass = ''\n \n #class in domain of -> only for classes included in this ontology specification\n g = self.vocab.graph\n \n q = 'SELECT ?d ?l WHERE {?d rdfs:domain <%s> . ?d rdfs:label ?l } ' % (term.uri)\n \n relations = g.query(q)\n startStr = '<tr><th>Properties include:</th>\\n'\n \n contentStr = ''\n for (domain, label) in relations:\n dom = Term(domain)\n # danbri hack 20100101\n # termStr = \"\"\"<a href=\"#term_%s\">%s</a>\\n\"\"\" % (dom.id, label)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (dom.id, dom.id)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n domainsOfClass = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n # class in range of -> only for classes included in this ontology specification\n q2 = 'SELECT ?d ?l WHERE {?d rdfs:range <%s> . 
?d rdfs:label ?l } ' % (term.uri)\n relations2 = g.query(q2)\n startStr = '<tr><th>Used with:</th>\\n'\n \n contentStr = ''\n for (range, label) in relations2:\n ran = Term(range)\n # termStr = \"\"\"<a href=\"#term_%s\">%s</a>\\n\"\"\" % (ran.id, label)\n # danbri hack 20100101 better to use exact IDs here\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (ran.id, ran.id)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n rangesOfClass = \"%s <td> %s</td></tr> \" % (startStr, contentStr)\n \n # class sub class of -> handles only \"real\" super classes\n subClassOf = ''\n restriction = ''\n \n q = 'SELECT ?sc ?l WHERE {<%s> rdfs:subClassOf ?sc . ?sc rdfs:label ?l } ' % (term.uri)\n \n relations = g.query(q)\n startStr = '<tr><th>Sub class of</th>\\n'\n \n contentStr = ''\n contentStr2 = ''\n for (subclass, label) in relations:\n sub = Term(subclass)\n termStr = \"\"\"<span rel=\"rdfs:subClassOf\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (subclass, sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n subClassOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n # else:\n q1 = 'SELECT ?sc WHERE {<%s> rdfs:subClassOf ?sc } ' % (term.uri)\n \n relations = g.query(q1)\n ordone = False\n for (row) in relations:\n subclass = row[0]\n subclassnice = self.vocab.niceName(subclass)\n # print(\"subclass \",subclass)\n # print(\"subclassnice \",subclassnice)\n # check niceName result\n # TODO: handle other sub class types (...) currently owl:Restriction only\n colon = subclassnice.find(':')\n #print(\"ns uri \", str(self.vocab._get_uri()))\n if(subclass.find(str(self.vocab._get_uri())) < 0):\n if (colon > 0):\n termStr = \"\"\"<span rel=\"rdfs:subClassOf\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (subclass, subclass, subclassnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n #print(\"must be super class from another ns: \", subclassnice)\n elif (ordone == False):\n # with that query I get all restrictions of a concept :\\\n # TODO: enable a query with bnodes (_:bnode currently doesn't work :( )\n # that's why the following code isn't really nice\n q2 = 'SELECT ?orsc ?or ?orv WHERE { <%s> rdfs:subClassOf ?orsc . ?orsc rdf:type <http://www.w3.org/2002/07/owl#Restriction> . 
?orsc ?or ?orv }' % (term.uri)\n \n print(\"try to fetch owl:Restrictions with query \", q2)\n orrelations = g.query(q2)\n startStr2 = '<tr><th class=\"restrictions\">Restriction(s):</th>\\n'\n orpcounter = 0\n orsubclass = ''\n contentStr3 = ''\n termStr1 = ''\n termStr2 = ''\n prop = ''\n oronproperty = ''\n orproperty = ''\n orpropertyvalue = ''\n orscope = ''\n for (orsc, orp, orpv) in orrelations:\n orproperty2 = ''\n orpropertyvalue2 = ''\n orscope2 = ''\n if (orsubclass == \"\"):\n print(\"initialize orsubclass with \", orsc)\n orsubclass = orsc\n if(orsubclass != orsc):\n termStr1 = \"\"\"<span about=\"%s\" rel=\"rdfs:subClassOf\" resource=\"[_:%s]\"></span>\\n\"\"\" % (term.uri, orsubclass)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"owl:Restriction\"></span>The property \n <span about=\"[_:%s]\" rel=\"owl:onProperty\" href=\"%s\"><a href=\"#%s\">%s</a></span> must be set <em>%s</em> \n <span about=\"[_:%s]\" property=\"%s\" datatype=\"xsd:nonNegativeInteger\" >%s</span> time(s)\"\"\" % (orsubclass, orsubclass, oronproperty, prop.id, prop.type, orscope, orsubclass, orproperty, orpropertyvalue)\n \n contentStr2 = \"%s %s %s %s<br/>\" % (contentStr2, termStr1, termStr2, contentStr3)\n print(\"change orsubclass to\", orsc)\n orsubclass = orsc\n contentStr3 = ''\n orpcounter = 0\n termStr1 = ''\n termStr2 = ''\n prop = ''\n oronproperty = ''\n orproperty = ''\n orpropertyvalue = ''\n orscope = ''\n \n print(\"orp \", orp)\n print(\"orpv\", orpv)\n if (str(orp) == \"http://www.w3.org/2002/07/owl#onProperty\"):\n oronproperty = orpv\n prop = Term(orpv)\n prop.type = self.vocab.niceName(orpv)\n print(\"found new owl:Restriction\")\n print(\"write onproperty property\")\n elif ((str(orp) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\") & (str(orp) != \"http://www.w3.org/2002/07/owl#onProperty\")):\n if (orpcounter == 0):\n orproperty = self.vocab.niceName(orp)\n # <- that must be a specific cardinality restriction\n orpropertyvalue = orpv\n if (str(orp) == \"http://www.w3.org/2002/07/owl#cardinality\"):\n orscope = \"exactly\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#minCardinality\"):\n orscope = \"at least\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#maxCardinality\"):\n orscope = \"at most\"\n print(\"write 1st cardinality of restriction\")\n else:\n orproperty2 = self.vocab.niceName(orp)\n # <- that must be another specific cardinality restriction\n orpropertyvalue2 = orpv\n if (str(orp) == \"http://www.w3.org/2002/07/owl#cardinality\"):\n orscope2 = \"exactly\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#minCardinality\"):\n orscope2 = \"at least\"\n if (str(orp) == \"http://www.w3.org/2002/07/owl#maxCardinality\"):\n orscope2 = \"at most\"\n print(\"write another cardinality of restriction\")\n orpcounter = orpcounter + 1\n else:\n print(\"here I am with \", orp)\n \n if (str(orproperty2) != \"\"):\n termStr3 = \"\"\" and <em>%s</em> \n <span about=\"[_:%s]\" property=\"%s\" >%s</span> time(s)\"\"\" % (orscope2, orsubclass, orproperty2, orpropertyvalue2)\n contentStr3 = \"%s %s\" % (contentStr3, termStr3)\n \n # write also last/one restriction\n termStr1 = \"\"\"<span about =\"%s\" rel=\"rdfs:subClassOf\" resource=\"[_:%s]\"></span>\\n\"\"\" % (term.uri, orsubclass)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"owl:Restriction\"></span>The property \n <span about=\"[_:%s]\" rel=\"owl:onProperty\" href=\"%s\"><a href=\"#%s\">%s</a></span> must be set <em>%s</em> \n <span about=\"[_:%s]\" property=\"%s\" datatype=\"xsd:nonNegativeInteger\" 
>%s</span> time(s)\"\"\" % (orsubclass, orsubclass, oronproperty, prop.id, prop.type, orscope, orsubclass, orproperty, orpropertyvalue)\n \n contentStr2 = \"%s %s %s %s\\n\" % (contentStr2, termStr1, termStr2, contentStr3)\n \n ordone = True\n print(\"owl restriction modelling done for\", term.uri)\n \n if contentStr != \"\":\n subClassOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n if contentStr2 != \"\":\n restriction = \"%s <td> %s </td></tr>\" % (startStr2, contentStr2)\n \n # class has sub class -> handles only \"real\" super classes\n hasSubClass = ''\n \n q = 'SELECT ?sc ?l WHERE {?sc rdfs:subClassOf <%s>. ?sc rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Has sub class</th>\\n'\n \n contentStr = ''\n for (subclass, label) in relations:\n sub = Term(subclass)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasSubClass = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n q = 'SELECT ?sc WHERE {?sc rdfs:subClassOf <%s> } ' % (term.uri)\n \n relations = g.query(q)\n for (row) in relations:\n subclass = row[0]\n subclassnice = self.vocab.niceName(subclass)\n #print(\"has subclass \", subclass)\n #print(\"has subclassnice \", subclassnice)\n # check niceName result\n colon = subclassnice.find(':')\n if(subclass[0].find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<a href=\"%s\">%s</a>\\n\"\"\" % (subclass, subclassnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasSubClass = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n \n # is defined by\n classIsDefinedBy = ''\n \n q = 'SELECT ?idb WHERE { <%s> rdfs:isDefinedBy ?idb } ' % (term.uri)\n relations = g.query(q)\n startStr\t = '\\n'\n \n contentStr = ''\n for (isdefinedby) in relations:\n termStr = \"\"\"<span rel=\"rdfs:isDefinedBy\" href=\"%s\"></span>\\n\"\"\" % (isdefinedby)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n classIsDefinedBy = \"%s <tr><td> %s </td></tr>\" % (startStr, contentStr)\n \n \n # disjoint with\n isDisjointWith = ''\n \n q = 'SELECT ?dj ?l WHERE { <%s> <http://www.w3.org/2002/07/owl#disjointWith> ?dj . 
?dj rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Disjoint With:</th>\\n'\n \n contentStr = ''\n for (disjointWith, label) in relations:\n termStr = \"\"\"<span rel=\"owl:disjointWith\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (disjointWith, label, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n isDisjointWith = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # owl class\n oc = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#Class> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">OWL Class</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:Class\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#Class\"></span>\"\"\"\n oc = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # rdfs class\n rc = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2000/01/rdf-schema#Class> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">RDFS Class</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"rdfs:Class\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2000/01/rdf-schema#Class\"></span>\"\"\"\n rc = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # dcterms agent class\n dctac = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://purl.org/dc/terms/AgentClass> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">DCTerms Agent Class</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"dcterms:AgentClass\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"ttp://purl.org/dc/terms/AgentClass\"></span>\"\"\"\n dctac = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n # end\n\n dn = os.path.join(self.basedir, \"doc\")\n filename = os.path.join(dn, term.id + \".en\")\n s = ''\n try:\n f = open (filename, \"r\")\n s = f.read()\n except:\n s = ''\n\n # if we want validation queries this is where it looks for them.\n filename = os.path.join(dn, term.id + \".sparql\")\n fileStr = ''\n try:\n f = open (filename, \"r\")\n fileStr = f.read()\n fileStr = \"<h4><a name=\\\"queries_\" + term.id + \"\\\"></a>\" + term.id + \" Validation Query</h4><pre>\" + cgi.escape(ss) + \"</pre>\"\n except:\n fileStr = ''\n\n queries = queries + \"\\n\" + fileStr\n sn = self.vocab.niceName(term.uri)\n s = termlink(s)\n\n # danbri added another term.id 20010101 and removed term.status\n # ATTENTION: writing all class descriptions into template here\n zz = eg % (term.id, term.uri, term.type, \"Class\", sn, term.label, term.comment, term.status, domainsOfClass, rangesOfClass + subClassOf + restriction + hasSubClass + classIsDefinedBy + isDisjointWith + oc + rc + dctac, s, term.id, term.id)\n\n ## we add to the relevant string - stable, unstable, testing or archaic\n if(term.status == \"stable\"):\n stableTxt = stableTxt + zz\n if(term.status == \"testing\"):\n testingTxt = testingTxt + zz\n if(term.status == \"unstable\"):\n unstableTxt = unstableTxt + zz\n if(term.status == \"archaic\"):\n archaicTxt = archaicTxt + zz\n if((term.status == None) or (term.status == \"\") or (term.status == \"unknown\")):\n archaicTxt = archaicTxt + zz\n\n ## then add the whole thing to the main tl string\n tl = tl + \"<h2>Classes</h2>\\n\"\n tl = \"%s %s\" % (tl, stableTxt + \"\\n\" + testingTxt + \"\\n\" + unstableTxt + \"\\n\" + archaicTxt)\n tl = tl + \"<h2>Properties</h2>\\n\"\n\n # properties\n 
stableTxt = ''\n testingTxt = ''\n unstableTxt = ''\n archaicTxt = ''\n\n for term in self.vocab.properties:\n domainsOfProperty = ''\n rangesOfProperty = ''\n\n # domain of properties\n g = self.vocab.graph\n q = 'SELECT ?d ?l WHERE {<%s> rdfs:domain ?d . ?d rdfs:label ?l } ' % (term.uri)\n # print(\"term.uri before \", term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Domain:</th>\\n'\n\n contentStr = ''\n contentStr3 = ''\n for (domain, label) in relations:\n dom = Term(domain)\n termStr = \"\"\"<span rel=\"rdfs:domain\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (domain, dom.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n q = 'SELECT ?d WHERE {<%s> rdfs:domain ?d } ' % (term.uri)\n\n relations = g.query(q)\n for (row) in relations:\n domain = row[0]\n domainnice = self.vocab.niceName(domain)\n # print(\"domain \",domain)\n # print(\"domainnice \",domainnice)\n # check niceName result\n # TODO: handle other domain types\n colon = domainnice.find(':')\n if(domain.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<span rel=\"rdfs:domain\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (domain, domain, domainnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n else:\n # that will be a huge hack now\n # 1st: pick out the union domain and its list bnode\n q2 = 'SELECT ?d ?url ?urli ?urlipt WHERE {<%s> rdfs:domain ?d . ?d <http://www.w3.org/2002/07/owl#unionOf> ?url . ?url ?urlipt ?urli } ' % (term.uri)\n print(\"try to fetch union domain with \", q2)\n relations2 = g.query(q2)\n \n contentStr2 = ''\n termStr2 = ''\n listbnode = ''\n urfirstlistitem = ''\n urnextlistbnode = ''\n urfirstlistitemnice = ''\n domainbnode = ''\n for (domain, list, listitem, listitempropertytype) in relations2:\n # print(\"list \", list , \" :: listitem \" , listitem , \" :: listitempropertytype \" , listitempropertytype)\n listbnode = list\n domainbnode = domain\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n print(\"listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n print(\"urnextlistbnode \", urnextlistbnode)\n \n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\n <span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (listbnode, listbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice, listbnode, urnextlistbnode)\n contentStr2 = \"%s %s\" % (contentStr2, termStr2)\n \n # 2nd: go down the list and collect all list items\n if(urnextlistbnode != \"\"):\n oldlistbnode = ''\n termstr3 = ''\n while (str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n q3 = 'SELECT ?urnlbn ?urlipt ?urli WHERE {?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <%s> . ?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> ?urnlbn . 
?urnlbn ?urlipt ?urli } ' % (urfirstlistitem)\n print(\"try to fetch more lists with \" , q3)\n relations3 = g.query(q3)\n \n oldlistbnode = urnextlistbnode\n for (urnlbn, listitempropertytype, listitem) in relations3:\n print(\"what to do next with urnlbn \" , urnlbn , \" :: listitempropertytype \" , listitempropertytype , \" :: listitem \" , listitem , \" :: urnextlistbnode \" , urnextlistbnode)\n # to check the bnode of the list in the union domain\n if(str(urnlbn) == str(oldlistbnode)):\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\"\"\" % (oldlistbnode, oldlistbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice)\n print(\"new listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n if(str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n else:\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" href=\"%s\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n print(\"new urnextlistbnode \", urnextlistbnode) \n contentStr2 = \"%s or %s %s\" % (contentStr2, termStr2, termStr3) \n print(\"here I am\")\n termStr = \"\"\"<span rel=\"rdfs:domain\" resource=\"[_:%s]\"></span>\n <span about=\"[_:%s]\" typeof=\"owl:Class\"></span>\n <span about=\"[_:%s]\" rel=\"owl:unionOf\" resource=\"[_:%s]\"></span>\"\"\" % (domainbnode, domainbnode, domainbnode, listbnode)\n contentStr3 = \"%s %s %s\" % (contentStr3, termStr, contentStr2)\n\n # merge together the results of both queries\n if contentStr3 != \"\":\n contentStr = \"%s %s\" % (contentStr, contentStr3)\n if contentStr != \"\":\n domainsOfProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # range of properties\n q2 = 'SELECT ?r ?l WHERE {<%s> rdfs:range ?r . ?r rdfs:label ?l } ' % (term.uri)\n relations2 = g.query(q2)\n startStr = '<tr><th>Range:</th>\\n'\n contentStr = ''\n contentStr3 = ''\n for (range, label) in relations2:\n ran = Term(range)\n termStr = \"\"\"<span rel=\"rdfs:range\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (range, ran.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n q = 'SELECT ?r WHERE {<%s> rdfs:range ?r } ' % (term.uri)\n\n relations = g.query(q)\n for (row) in relations:\n range = row[0]\n rangenice = self.vocab.niceName(range)\n # print(\"range \",range)\n # print(\"rangenice \",rangenice)\n # check niceName result\n # TODO: handle other range types\n colon = rangenice.find(':')\n if(range.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<span rel=\"rdfs:range\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (range, range, rangenice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n else:\n # that will be a huge hack now\n # 1st: pick out the union range and its list bnode\n q2 = 'SELECT ?r ?url ?urli ?urlipt WHERE {<%s> rdfs:range ?r . ?r <http://www.w3.org/2002/07/owl#unionOf> ?url . 
?url ?urlipt ?urli } ' % (term.uri)\n print(\"try to fetch union range with \", q2)\n relations2 = g.query(q2)\n \n contentStr2 = ''\n termStr2 = ''\n listbnode = ''\n urfirstlistitem = ''\n urnextlistbnode = ''\n urfirstlistitemnice = ''\n rangebnode = ''\n for (range, list, listitem, listitempropertytype) in relations2:\n # print(\"list \", list , \" :: listitem \" , listitem , \" :: listitempropertytype \" , listitempropertytype)\n listbnode = list\n rangebnode = range\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n print(\"listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n print(\"urnextlistbnode \", urnextlistbnode)\n \n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\n <span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (listbnode, listbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice, listbnode, urnextlistbnode)\n contentStr2 = \"%s %s\" % (contentStr2, termStr2)\n \n # 2nd: go down the list and collect all list items\n if(urnextlistbnode != \"\"):\n oldlistbnode = ''\n termstr3 = ''\n while (str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n q3 = 'SELECT ?urnlbn ?urlipt ?urli WHERE {?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <%s> . ?lbn <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> ?urnlbn . ?urnlbn ?urlipt ?urli } ' % (urfirstlistitem)\n print(\"try to fetch more lists with \" , q3)\n relations3 = g.query(q3)\n \n oldlistbnode = urnextlistbnode\n for (urnlbn, listitempropertytype, listitem) in relations3:\n print(\"what to do next with urnlbn \" , urnlbn , \" :: listitempropertytype \" , listitempropertytype , \" :: listitem \" , listitem , \" :: urnextlistbnode \" , urnextlistbnode)\n # to check the bnode of the list in the union range\n if(str(urnlbn) == str(oldlistbnode)):\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#first\"):\n urfirstlistitem = listitem\n urfirstlistitemnice = self.vocab.niceName(urfirstlistitem)\n termStr2 = \"\"\"<span about=\"[_:%s]\" typeof=\"rdf:Description\"></span>\n <span about=\"[_:%s]\" rel=\"rdf:first\" href=\"%s\"><a href=\"%s\">%s</a></span>\"\"\" % (oldlistbnode, oldlistbnode, urfirstlistitem, urfirstlistitem, urfirstlistitemnice)\n print(\"new listitem \", urfirstlistitem)\n if (str(listitempropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#rest\"):\n urnextlistbnode = listitem\n if(str(urnextlistbnode) != \"http://www.w3.org/1999/02/22-rdf-syntax-ns#nil\"):\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" resource=\"[_:%s]\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n else:\n termStr3 = \"\"\"<span about=\"[_:%s]\" rel=\"rdf:rest\" href=\"%s\"></span>\"\"\" % (oldlistbnode, urnextlistbnode)\n print(\"new urnextlistbnode \", urnextlistbnode)\n \n contentStr2 = \"%s or %s %s\" % (contentStr2, termStr2, termStr3)\n \n print(\"here I am\")\n \n termStr = \"\"\"<span rel=\"rdfs:range\" resource=\"[_:%s]\"></span>\n <span about=\"[_:%s]\" typeof=\"owl:Class\"></span>\n <span about=\"[_:%s]\" rel=\"owl:unionOf\" resource=\"[_:%s]\"></span>\"\"\" % (rangebnode, rangebnode, rangebnode, listbnode)\n contentStr3 = \"%s %s %s\" % (contentStr3, termStr, contentStr2)\n\n # merge 
together the results of both queries\n if contentStr3 != \"\":\n contentStr = \"%s %s\" % (contentStr, contentStr3)\n if contentStr != \"\":\n rangesOfProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n # property sub property of -> only for property included in this ontology specification\n subPropertyOf = ''\n\n q = 'SELECT ?sp ?l WHERE {<%s> rdfs:subPropertyOf ?sp . ?sp rdfs:label ?l } ' % (term.uri)\n # print(\"term.uri \", term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Sub property of</th>\\n'\n\n contentStr = ''\n for (subproperty, label) in relations:\n sub = Term(subproperty)\n termStr = \"\"\"<span rel=\"rdfs:subPropertyOf\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (subproperty, sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n subPropertyOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n q1 = 'SELECT ?sp WHERE {<%s> rdfs:subPropertyOf ?sp } ' % (term.uri)\n \n relations = g.query(q1)\n for (row) in relations:\n subproperty = row[0]\n subpropertynice = self.vocab.niceName(subproperty)\n # check niceName result\n colon = subpropertynice.find(':')\n if(subproperty.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<span rel=\"rdfs:subPropertyOf\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (subproperty, subproperty, subpropertynice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n print(\"must be super property from another ns\")\n \n if contentStr != \"\":\n subPropertyOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n # property has sub property -> only for property included in this ontology specification\n hasSubProperty = ''\n\n q = 'SELECT ?sp ?l WHERE {?sp rdfs:subPropertyOf <%s>. ?sp rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Has sub property</th>\\n'\n\n contentStr = ''\n for (subproperty, label) in relations:\n sub = Term(subproperty)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (sub.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n hasSubProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # property inverse property of -> only for property included in this ontology specification\n inverseOf = ''\n\n q = 'SELECT ?ip ?l WHERE {<%s> <http://www.w3.org/2002/07/owl#inverseOf> ?ip . ?ip rdfs:label ?l } ' % (term.uri)\n # print(\"term.uri \", term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Inverse property of</th>\\n'\n\n contentStr = ''\n for (inverseproperty, label) in relations:\n ipnice = self.vocab.niceName(inverseproperty)\n colon = ipnice.find(':')\n # check wether explicite defined inverse property or anonymous defined inverse property\n if colon > 0:\n inverse = Term(inverseproperty)\n termStr = \"\"\"<span rel=\"owl:inverseOf\" href=\"%s\"><a href=\"#%s\">%s</a></span>\\n\"\"\" % (inverseproperty, inverse.id, label)\n #print(\"inverse property must be explicitly defined\")\n else:\n q2 = 'SELECT ?ipt WHERE {<%s> <http://www.w3.org/2002/07/owl#inverseOf> ?ip . ?ip rdfs:label ?l . 
?ip rdf:type ?ipt } ' % (term.uri)\n relations2 = g.query(q2)\n\n contentStr2 = ''\n iptcounter = 0\n termStr2 = ''\n for (row) in relations2:\n inversepropertytype = row[0]\n print(\"inversepropertytype \", inversepropertytype)\n iptype = ''\n termStr3 = ''\n iptypenice = self.vocab.niceName(inversepropertytype)\n if (str(inversepropertytype) == \"http://www.w3.org/1999/02/22-rdf-syntax-ns#Property\"):\n iptype = \"RDF Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#ObjectProperty\"):\n iptype = \"Object Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#DatatypeProperty\"):\n iptype = \"Datatype Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#InverseFunctionalProperty\"):\n iptype = \"Inverse Functional Property\"\n if (str(inversepropertytype) == \"http://www.w3.org/2002/07/owl#FunctionalProperty\"):\n iptype = \"Functional Property\"\n if (iptype != \"\"):\n termStr3 = \"\"\"<span about=\"[_:%s]\" typeof=\"%s\"><strong>%s</strong></span>\"\"\" % (inverseproperty, iptypenice, iptype)\n if (iptcounter > 0):\n termStr2 = \"%s, %s\" % (termStr2, termStr3)\n else:\n termStr2 = termStr3\n iptcounter = iptcounter + 1\n if (termStr2 != \"\"):\n contentStr2 = \"(%s)\" % (termStr2)\n termStr = \"\"\"<span rel=\"owl:inverseOf\" resource=\"[_:%s]\">the anonymous defined property with the label\n \\'<em about=\"[_:%s]\" property=\"rdfs:label\">%s</em>\\'</span>\\n\"\"\" % (inverseproperty, inverseproperty, label)\n termStr = \"%s %s\" % (termStr, contentStr2)\n print(\"inverse property must be anonymous defined\")\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n inverseOf = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n # print(\"write inverse property\")\n\n # property has inverse property -> only for property included in this ontology specification\n hasInverseProperty = ''\n\n q = 'SELECT ?ip ?l WHERE {?ip <http://www.w3.org/2002/07/owl#inverseOf> <%s>. 
?ip rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Has inverse property</th>\\n'\n\n contentStr = ''\n for (inverseproperty, label) in relations:\n inverse = Term(inverseproperty)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (inverse.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n hasInverseProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n # print(\"write has inverse property\")\n\n\n # is defined by\n propertyIsDefinedBy = ''\n\n q = 'SELECT ?idb WHERE { <%s> rdfs:isDefinedBy ?idb } ' % (term.uri)\n relations = g.query(q)\n startStr = '\\n'\n\n contentStr = ''\n for (row) in relations:\n isdefinedby = row[0]\n termStr = \"\"\"<span rel=\"rdfs:isDefinedBy\" href=\"%s\"></span>\\n\"\"\" % (isdefinedby)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n propertyIsDefinedBy = \"%s <tr><td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # equivalent property\n equivalentProperty = ''\n\n q = 'SELECT ?ep WHERE { <%s> <http://www.w3.org/2002/07/owl#equivalentProperty> ?ep } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Equivalent Property</th>'\n\n contentStr = ''\n for (row) in relations:\n equiprop = row[0]\n equipropnice = self.vocab.niceName(equiprop)\n termStr = \"\"\"<span rel=\"owl:equivalentProperty\" href=\"%s\"><a href=\"%s\">%s</a></span>\\n\"\"\" % (equiprop, equiprop, equipropnice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n equivalentProperty = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n # rdf property\n rp = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/1999/02/22-rdf-syntax-ns#Property> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">RDF Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"rdf:Property\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#Property\"></span>\"\"\"\n rp = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # object property\n op = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#ObjectProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Object Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:ObjectProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#ObjectProperty\"></span>\"\"\"\n op = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # datatype property\n dp = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#DatatypeProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Datatype Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:DatatypeProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#DatatypeProperty\"></span>\"\"\"\n dp = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n\n # inverse functional property\n ifp = ''\n termStr = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#InverseFunctionalProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Inverse Functional Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:InverseFunctionalProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#InverseFunctionalProperty\"></span>\"\"\"\n ifp = \"%s <td> %s 
</td></tr>\" % (startStr, termStr)\n\n\n # functonal property\n fp = ''\n\n q = 'SELECT * WHERE { <%s> rdf:type <http://www.w3.org/2002/07/owl#FunctionalProperty> } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th colspan=\"2\">Functional Property</th>\\n'\n\n if (len(relations) > 0):\n if (str(term.type) != \"owl:FunctionalProperty\"):\n termStr = \"\"\"<span rel=\"rdf:type\" href=\"http://www.w3.org/2002/07/owl#FunctionalProperty\"></span>\"\"\"\n fp = \"%s <td> %s </td></tr>\" % (startStr, termStr)\n\n # end\n\n dn = os.path.join(self.basedir, \"doc\")\n filename = os.path.join(dn, term.id + \".en\")\n\n s = ''\n try:\n f = open (filename, \"r\")\n s = f.read()\n except:\n s = ''\n\n sn = self.vocab.niceName(term.uri)\n s = termlink(s)\n\n # danbri added another term.id 20010101\n # ATTENTION: writing all property descriptions into template here\n zz = eg % (term.id, term.uri, term.type, \"Property\", sn, term.label, term.comment, term.status, domainsOfProperty, rangesOfProperty + subPropertyOf + hasSubProperty + inverseOf + hasInverseProperty + propertyIsDefinedBy + equivalentProperty + rp + op + dp + ifp + fp, s, term.id, term.id)\n\n ## we add to the relevant string - stable, unstable, testing or archaic\n if(term.status == \"stable\"):\n stableTxt = stableTxt + zz\n if(term.status == \"testing\"):\n testingTxt = testingTxt + zz\n if(term.status == \"unstable\"):\n unstableTxt = unstableTxt + zz\n if(term.status == \"archaic\"):\n archaicTxt = archaicTxt + zz\n if((term.status == None) or (term.status == \"\") or (term.status == \"unknown\")):\n archaicTxt = archaicTxt + zz\n\n ## then add the whole thing to the main tl string\n tl = \"%s %s\" % (tl, stableTxt + \"\\n\" + testingTxt + \"\\n\" + unstableTxt + \"\\n\" + archaicTxt)\n ## tl = \"%s %s\" % (tl, zz)\n\n\n # ATTENTION: let's begin with the individual stuff here\n # do this only, when individuals are available\n if (len(self.vocab.individuals) > 0):\n tl = tl + \"<h2>Individuals</h2>\\n\"\n\n # individuals\n stableTxt = ''\n testingTxt = ''\n unstableTxt = ''\n archaicTxt = ''\n\n for term in self.vocab.individuals:\n # individual has type\n hasType = ''\n \n q = 'SELECT ?t ?l WHERE {<%s> rdf:type ?t. 
?t rdfs:label ?l } ' % (term.uri)\n relations = g.query(q)\n startStr = '<tr><th>Type:</th>\\n'\n \n contentStr = ''\n for (type, label) in relations:\n t = Term(type)\n termStr = \"\"\"<a href=\"#%s\">%s</a>\\n\"\"\" % (type.id, label)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasType = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n \n q = 'SELECT ?t WHERE {<%s> rdf:type ?t } ' % (term.uri)\n \n relations = g.query(q)\n for (type) in relations:\n typenice = self.vocab.niceName(type)\n print(\"has type \", type)\n print(\"has typenice \", typenice)\n # check niceName result\n colon = typenice.find(':')\n if(type.find(str(self.vocab._get_uri())) < 0):\n if colon > 0:\n termStr = \"\"\"<a href=\"%s\">%s</a>\\n\"\"\" % (type, typenice)\n contentStr = \"%s %s\" % (contentStr, termStr)\n \n if contentStr != \"\":\n hasType = \"%s <td> %s </td></tr>\" % (startStr, contentStr)\n\n\n # is defined by\n individualIsDefinedBy = ''\n\n q = 'SELECT ?idb WHERE { <%s> rdfs:isDefinedBy ?idb } ' % (term.uri)\n relations = g.query(q)\n startStr = '\\n'\n\n contentStr = ''\n for (isdefinedby) in relations:\n termStr = \"\"\"<span rel=\"rdfs:isDefinedBy\" href=\"%s\"></span>\\n\"\"\" % (isdefinedby)\n contentStr = \"%s %s\" % (contentStr, termStr)\n\n if contentStr != \"\":\n individualIsDefinedBy = \"%s <tr><td> %s </td></tr>\" % (startStr, contentStr)\n\n # end\n\n dn = os.path.join(self.basedir, \"doc\")\n filename = os.path.join(dn, term.id + \".en\")\n\n s = ''\n try:\n f = open (filename, \"r\")\n s = f.read()\n except:\n s = ''\n\n sn = self.vocab.niceName(term.uri)\n s = termlink(s)\n\n # ATTENTION: writing all individual descriptions into template here\n zz = ig % (term.id, term.uri, term.type, \"Individual\", sn, term.label, term.comment, term.status, hasType + individualIsDefinedBy, s, term.id, term.id)\n\n ## we add to the relevant string - stable, unstable, testing or archaic\n if(term.status == \"stable\"):\n stableTxt = stableTxt + zz\n if(term.status == \"testing\"):\n testingTxt = testingTxt + zz\n if(term.status == \"unstable\"):\n unstableTxt = unstableTxt + zz\n if(term.status == \"archaic\"):\n archaicTxt = archaicTxt + zz\n if((term.status == None) or (term.status == \"\") or (term.status == \"unknown\")):\n archaicTxt = archaicTxt + zz\n \n ## then add the whole thing to the main tl string\n tl = \"%s %s\" % (tl, stableTxt + \"\\n\" + testingTxt + \"\\n\" + unstableTxt + \"\\n\" + archaicTxt)\n\n ## ensure termlist tag is closed\n return(tl + \"\\n\" + queries + \"</div>\\n</div>\")", "def printTerm4(self):\n vprint=[]\n counter=0\n for x in self.pl:\n if self.pl[x] != ['I']:\n counter=counter+1\n vprint += '\\sigma_'\n cosa=self.pl[x][0]\n vprint += self.pl[x]\n vprint += '^'\n vprint += str(x)\n vprint=''.join(vprint)\n return self.c,vprint,counter", "def _repr_latex_(self):\n return unit_format.Latex.to_string(self)", "def evaluate(self, env):\n if self.ident in env.functions:\n arg_vals = [expr.evaluate(env) for expr in self.args]\n try:\n out = env.functions[self.ident](*arg_vals)\n except Exception, exc:\n # Function raised exception! Maybe inlining the name of\n # the exception will help debug.\n return u'<%s>' % unicode(exc)\n return unicode(out)\n else:\n return self.original", "def _repr_latex_(self):\n return f\"${self._reprlatex}$\"", "def __exp_tree(self):\n print(style.YELLOW(\"[ ¬ : ! 
][ ∨ : | ][ ∧ : & ][ → : > ][ ↔ : ~ ][ ⊤ : 1 ][ ⊥ : 0 ]\") + style.RESET(\"\"))\n expr = input(style.BLUE(\"Insert your expression(with the help of the table above: \") + style.RESET(\"\"))\n expr = self.__convert_str(expr)\n print(expr)\n form = WFPropositionalFormula(expr)\n form.is_WFF()\n form.store_as_exp_tree()\n form.print_exp_tree()", "def test_algebra_print():\n\n # The language of constructors we are allowed to use with this context. \n lang = frozenset([Var, Add, Sub, Mul, Val])\n\n @term_algebra(lang)\n class PPrint():\n \"\"\"\n For every term in our language we write a function that can add some\n info to its key value store.\n\n Because we traverse in order, this will flesh out those pieces of\n information in a nice bottom-up manner. \n \"\"\"\n\n def _init_algebra(self, ctxt):\n pass\n def _init_pass(self, ctxt):\n pass\n def _end_pass(self, ctxt):\n return False\n def _end_algebra(self, ctxt):\n return None\n\n def run_add(self, ident : 'ID[Add]', val : 'Add[ID]'):\n ident[\"pp\"] = \"(\" + val.exp_a[\"pp\"] + \" + \" + val.exp_b[\"pp\"] + \")\"\n\n def run_sub(self, ident : 'ID[Sub]', val : 'Sub[ID]') -> None:\n ident[\"pp\"] = \"(\" + val.exp_a[\"pp\"] + \" - \" + val.exp_b[\"pp\"] + \")\"\n \n def run_mul(self, ident : 'ID[Mul]', val : 'Mul[ID]') -> None:\n ident[\"pp\"] = \"(\" + val.exp_a[\"pp\"] + \" * \" + val.exp_b[\"pp\"] + \")\"\n\n def run_var(self, ident : 'ID[Var]', val : 'Var[ID]') -> None:\n ident[\"pp\"] = val.name \n\n def run_val(self, ident : 'ID[Val]', val : 'Val[ID]') -> None:\n ident[\"pp\"] = repr(val.val) \n\n\n ctxt = Context(\"Foo\", _term_language=lang)\n\n x = ctxt.insert_fresh(Var('X'))\n y = ctxt.insert_fresh(Var('Y'))\n\n c = ctxt.insert(Add(x,Mul(y,Val(5))))\n\n printer = PPrint()\n\n ctxt.run_algebra(printer)\n\n assert x[\"pp\"] == \"X\"\n assert c[\"pp\"] == \"(X + (Y * 5))\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generic terminal state check, true when maximum depth is reached or the game has ended.
def is_terminal(depth, board): return depth <= 0 or board.is_game_over()
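A small usage sketch for the terminal check above follows; FakeBoard is a hypothetical stand-in (the real ConnectFourBoard API is not shown in this row), included only so the snippet runs on its own.

# Usage sketch for the terminal check above. FakeBoard is a made-up stand-in
# whose game ends after three moves; no real board class is assumed.

def is_terminal(depth, board):
    # Copied from the document field above so the snippet is self-contained.
    return depth <= 0 or board.is_game_over()

class FakeBoard:
    def __init__(self, moves=0):
        self.moves = moves

    def is_game_over(self):
        # Toy rule: the game is over once three moves have been played.
        return self.moves >= 3

print(is_terminal(0, FakeBoard()))   # True:  depth budget exhausted
print(is_terminal(2, FakeBoard(3)))  # True:  game already decided
print(is_terminal(2, FakeBoard(1)))  # False: keep searching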
[ "def terminal_test(self, state) -> bool:\n return state.depth == self.depth_limit or is_end_game(state)", "def _is_exit_from_terminal_state(self, curr_state, next_state, curr_is_done, next_is_done):\n return next_is_done and curr_is_done# and (next_state == curr_state)", "def is_win(self):\n if self._is_terminal:\n return self.board[self.player_goal_idx] > self.board[self.opponent_goal_idx]", "def is_terminal(self):\n return self.left is None and self.right is None", "def endState(self):\n return not(self.state.winner() == -1 and len(self.state.getLegalAction(self.state.agent)) > 0)", "def is_goal(state):\n return sum(sum(state, [])) == 1", "def is_winning_state(self, state):\n return self.game.verify_winning_state(state)", "def is_goal(self):\n\n return self.depth == len(self.grid) * len(self.grid[0]) - 1", "def exitLevelCheck(self):\n if(self.current_tile() == None):\n coords = self.coordinates()\n x, y = coords[0], coords[1]\n x_dir, y_dir = 0, 0\n level_dimensions = self.current_level.get_dimensions()\n if x <= 0: x_dir = -1\n elif x >= level_dimensions[0]: x_dir = 1\n if y <= 0: y_dir = -1\n elif y >= level_dimensions[1]: y_dir = 1\n direction = (x_dir, y_dir)\n x -= x_dir\n y -= y_dir\n if self.current_level.next_level_exists(self.current_level.global_coords( (x, y) ), direction) :\n self.exit_level(coords)\n return True\n if self.current_level.next_dungeon_exists(direction):\n self.exit_dungeon(coords)\n return True\n else:\n if self.rect.left < 0: \n self.rect.left = 0\n self.xvel = 0\n if self.rect.right >= 32*level_dimensions[0]: \n self.rect.right = 32*level_dimensions[0]\n self.xvel = 0\n if self.rect.top < 0: \n self.rect.top = 0\n self.yvel = 0\n if self.rect.bottom >= 32*level_dimensions[1]: \n self.rect.bottom = 32*level_dimensions[1]\n self.onGround = True\n self.yvel = 0\n return False", "def is_goal(self, state):\n return state == self.goal", "def isTerminal(self, suitNum):\n return ((self._tileID == 1) or (self._tileID == 9)) and self._suitID <= suitNum", "def is_end_state(self):\n actions = self.get_legal_actions()\n if not actions:\n return True\n return False", "def get_win_state(self) -> bool:\n\n # Directions to check for, first tuple is vertical checks, 2nd tuple is horizontal checks, 3rd and 4th are\n # the two varying diagonal checks\n for delta_row, delta_col in [(1, 0), (0, 1), (1, 1), (1, -1)]:\n consecutive_moves = 1\n\n # This loops allows us to switch directions when we hit a boundary.\n for delta in (1, -1):\n # Calculate the direction (positive or negative) for the position\n delta_row *= delta\n delta_col *= delta\n\n # Compute the next row based on the existing position\n next_row = self.current_move['row'] + delta_row\n next_col = self.current_move['column'] + delta_col\n\n # Once we have our direction, we will keep incrementing in that direction until we hit a boundary, an\n # opponent's position, or a win condition.\n while 0 <= next_row < self.row_count and 0 <= next_col < self.column_count:\n # Player token here is the identifier of '1, 0, or None', indicating a specific player or no move\n if self.board_state[next_row][next_col] == self.current_player_token:\n consecutive_moves += 1\n else:\n break\n if consecutive_moves == self.win_length:\n return True\n\n # Keep tallying up the counts, and we may revert to the parent 'for' loop to check the other\n # direction and keep tallying up 'consecutive_moves'\n next_row += delta_row\n next_col += delta_col\n\n return False", "def is_end_of_game(self):\n pass", "def is_step(self):\n if 
self.get_level() == 3:\n return True\n else:\n return False", "def get_win_state(self):\n if self.step_count >= GAME_MAX_STEP / self.game_steps_per_update:\n return 1\n elif self.last_obs.observation[PLAYER][ARMY_COUNT] > 10:\n return 2\n else:\n return 0", "def goal_test(self, current):\n\n if current.state == self.goal_state:\n return True\n else:\n return False", "def is_game_over(self, state):\n result = self.outcome(state)\n return not result == 2", "def is_draw(self) -> bool:\n return self.ended and self.winner is None", "def __check_state(self):\n\n # check for tie\n if len([badge for badge in self.grid.grid.values() if badge == ' ']) == 0:\n print(f'Game Over! No moves remain, it is a tie!')\n self.game_over = True\n\n # check for a horizontal win\n for ind in self.grid.row_indices:\n self.__check_for_win(list(zip([ind]*self.grid.cols(), self.grid.col_indices)))\n\n # check for a vertical win\n for ind in self.grid.col_indices:\n self.__check_for_win(list(zip(self.grid.row_indices, [ind]*self.grid.rows())))\n\n # check for a left to right diagonal win\n self.__check_for_win(list(zip(self.grid.row_indices, self.grid.col_indices)))\n\n # check for a right to left diagonal win\n self.__check_for_win(list(zip(self.grid.row_indices, self.grid.col_indices.__reversed__())))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do a minimax search to the specified depth on the specified board. board: the ConnectFourBoard instance to evaluate. depth: the depth of the search tree (measured as the maximum distance from a leaf to the root). eval_fn (optional): the evaluation function used to give a value to a leaf of the tree; see "focused_evaluate" in the lab for an example. Returns an integer: the number of the column that the search determines you should add a token to.
def minimax(board, depth, eval_fn = basic_evaluate, get_next_moves_fn = get_all_next_moves, is_terminal_fn = is_terminal, verbose = True): raise NotImplementedError
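The document field above is only a stub (it raises NotImplementedError). Below is a hedged, minimal depth-limited minimax sketch consistent with the docstring; it assumes get_next_moves_fn(board) yields (column, new_board) pairs and that eval_fn scores a board from the perspective of the player about to move. It is an illustration, not the lab's reference solution.

# Hedged sketch of the minimax described above (not the reference solution).
# Assumptions: get_next_moves_fn(board) yields (column, new_board) pairs,
# is_terminal_fn(depth, board) is the terminal test from the previous row, and
# eval_fn(board) scores the board for the player who is about to move on it.

def minimax_sketch(board, depth, eval_fn, get_next_moves_fn, is_terminal_fn):
    def search(b, d):
        # Value of board b for the player to move, looking d plies ahead.
        if is_terminal_fn(d, b):
            return eval_fn(b)
        # A child is scored from the opponent's point of view, so negate it.
        return max(-search(child, d - 1) for _, child in get_next_moves_fn(b))

    best_column, best_value = None, None
    for column, child in get_next_moves_fn(board):
        value = -search(child, depth - 1)
        if best_value is None or value > best_value:
            best_column, best_value = column, value
    return best_column   # the column number the search recommends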
[ "def minimax(board, depth, eval_fn=basic_evaluate,\n verbose=False):\n\n best_val = None\n\n for move, new_board in board.get_all_next_moves():\n val = -1 * minimax_find_board_value(new_board, depth - 1, eval_fn)\n if best_val == None or val > best_val[0]:\n best_val = (val, move, new_board)\n\n if verbose:\n print(\"MINIMAX: Decided on (row,col) {} with rating {}\".format(best_val[1], best_val[0]))\n\n return best_val[1]", "def minimax(board, depth, eval_fn=basic_evaluate,\n verbose=False):\n\n best_val = None\n\n for move, new_board in board.get_all_next_moves_done():\n val = -1 * minimax_find_board_value(new_board, depth - 1, eval_fn)\n if best_val == None or val > best_val[0]:\n best_val = (val, move, new_board)\n\n if verbose:\n print(\"MINIMAX: Decided on (row,col) {} with rating {}\".format(best_val[1], best_val[0]))\n\n return best_val[1]", "def minimax(self, board, depth, alpha, beta, team):\n\n # Terminate if depth limit has been reached\n if depth == 0:\n # Get the score for this board\n score = self.evaluate(board, self._player_type)\n return score\n # # Terminate if BLACK has won (we need to set a positive threshold?)\n # if score > 2000:\n # return score\n # # Terminate if WHITE has won (set a negative threshold?)\n # if score < -2000:\n # return score\n\n # Create a State Space Generator to generate all legal moves of\n # the resulting board state\n state_space_gen = self.build_state_space_generator(board, team)\n all_legal_moves = state_space_gen.generate_all_legal_moves()\n\n # If player to move is MAX\n if team == self._player_type:\n # Variable to store the best possible score for this Node\n max_eval = StateSpaceGenerator.MIN\n\n # Loop through all legal moves in the resulting state and find the\n # best move by recursively calling minimax\n for move in all_legal_moves:\n # Create a board object to create child node\n max_board = StateSpaceGenerator.build_board(state_space_gen)\n\n # Create a list for pieces to be moved in this move notation\n pieces_to_move = []\n # Get move direction as local variable\n move_enum = move[len(move) - 1]\n\n # Get all the pieces to be moved (exclude the move direction enum)\n for i in range(len(move) - 1):\n pieces_to_move.append(move[i])\n\n # Move the piece\n max_board.move_piece(move_enum, pieces_to_move)\n\n board_configuration = StateSpaceGenerator.generate_board_configuration(max_board)\n\n if self._player_type == PieceType.WHITE:\n transposition_key = 'w ' + board_configuration\n else:\n transposition_key = 'b ' + board_configuration\n\n # Check if this state exists in Transposition table\n if transposition_key in StateSpaceGenerator.TRANSPOSITION_TABLE:\n eval = StateSpaceGenerator.TRANSPOSITION_TABLE.get(transposition_key)\n\n max_eval = max(max_eval, eval)\n\n # Alpha-Beta pruning\n alpha = max(alpha, eval)\n if beta <= alpha:\n break\n else:\n # Find the minimax score for resulting board state\n if self._player_type == PieceType.WHITE:\n eval = self.minimax(max_board, depth - 1, alpha, beta, PieceType.BLACK)\n else:\n eval = self.minimax(max_board, depth - 1, alpha, beta, PieceType.WHITE)\n\n # Add heuristic score to transposition table\n StateSpaceGenerator.TRANSPOSITION_TABLE[transposition_key] = eval\n\n # Set best to eval if it is greater\n max_eval = max(max_eval, eval)\n\n # Alpha-Beta pruning\n alpha = max(alpha, eval)\n if beta <= alpha:\n break\n\n return max_eval\n # If player is MIN\n else:\n minEval = StateSpaceGenerator.MAX\n\n for move in all_legal_moves:\n # Create a board object to create child node\n 
min_board = StateSpaceGenerator.build_board(state_space_gen)\n\n # Create a list for pieces to be moved in this move notation\n pieces_to_move = []\n # Get move direction as local variable\n move_enum = move[len(move) - 1]\n\n # Get all the pieces to be moved (exclude the move direction enum)\n for i in range(len(move) - 1):\n pieces_to_move.append(move[i])\n\n # Move the piece\n min_board.move_piece(move_enum, pieces_to_move)\n\n board_configuration = StateSpaceGenerator.generate_board_configuration(min_board)\n\n if self._player_type == PieceType.WHITE:\n transposition_key = 'b ' + board_configuration\n else:\n transposition_key = 'w ' + board_configuration\n\n # Check if this state exists in Transposition table\n if transposition_key in StateSpaceGenerator.TRANSPOSITION_TABLE:\n eval = StateSpaceGenerator.TRANSPOSITION_TABLE.get(transposition_key)\n\n minEval = min(minEval, eval)\n\n # Alpha-Beta pruning\n beta = min(beta, eval)\n if beta <= alpha:\n break\n else:\n # Find the minimax score for resulting board state\n if self._player_type == PieceType.WHITE:\n eval = self.minimax(min_board, depth - 1, alpha, beta, PieceType.WHITE)\n else:\n eval = self.minimax(min_board, depth - 1, alpha, beta, PieceType.BLACK)\n\n # Add heuristic score to transposition table\n StateSpaceGenerator.TRANSPOSITION_TABLE[transposition_key] = eval\n\n # Set minEval to eval if lesser\n minEval = min(minEval, eval)\n\n # Alpha-Beta pruning\n beta = min(beta, eval)\n if beta <= alpha:\n break\n\n return minEval", "def min_max(board_state, side, max_depth, evaluation_func=evaluate):\n best_score = None\n best_score_move = None\n\n moves = list(available_moves(board_state))\n if not moves:\n # this is a draw\n return 0, None\n\n for move in moves:\n new_board_state = apply_move(board_state, move, side)\n winner = has_winner(new_board_state)\n if winner != 0:\n return winner * 10000, move\n else:\n if max_depth <= 1:\n score = evaluation_func(new_board_state)\n else:\n score, _ = min_max(new_board_state, -side, max_depth - 1)\n if side > 0:\n if best_score is None or score > best_score:\n best_score = score\n best_score_move = move\n else:\n if best_score is None or score < best_score:\n best_score = score\n best_score_move = move\n return best_score, best_score_move", "def minimax_alphabeta(board_state, depth, MaxPlayer, alpha, beta, prev_move=0, top_tree=False):\n board_copy = copy.deepcopy(board_state)\n if prev_move != 0:\n board_copy = combine_single_move(board_copy, prev_move[0], prev_move[1], prev_move[2], prev_move[3])\n\n if depth == 0 or ai_score(board_copy) == float(\"-infinity\") or ai_score(board_copy) == float(\"infinity\"):\n #print(\"Leaf Node Board: Depth: \" + str(depth) + str(board_copy))\n return ai_score(board_copy)\n board_copy = copy.deepcopy(board_state)\n if MaxPlayer:\n best_val = float(\"-infinity\")\n moves = possible_moves(board_copy, 0)\n # sort moves by ones who kill pieces first\n moves = sorted(moves, key=lambda x: x[1], reverse=True)\n if top_tree:\n best_move = []\n old_piece = []\n\n for move in moves:\n clean_move = move[0]\n #print(\"Max Depth \" + str(depth) + \" Board: \" + str(board_copy))\n val = minimax_alphabeta(copy.deepcopy(board_copy), depth - 1, False, alpha, beta, clean_move)\n #print(\"Score of \" + str(val))\n if val > best_val:\n best_move = clean_move\n if len(move) == 3:\n # this contains values about the piece that died in this process\n old_piece = move[2]\n\n best_val = max(best_val, val)\n alpha = max(best_val, alpha)\n\n if beta <= alpha:\n break\n if 
top_tree:\n return [[best_move, old_piece], best_val]\n else:\n return best_val\n else:\n # min player\n best_val = float(\"infinity\")\n moves = possible_moves(board_copy, 1)\n # sort moves by ones who kill pieces first\n moves = sorted(moves, key=lambda x: x[1], reverse=True)\n for move in moves:\n move = move[0]\n move_board = copy.deepcopy(board_copy)\n move_board = combine_moves(move_board, move[0], move[1], move[2], move[3], prev_move[0], prev_move[1], prev_move[2], prev_move[3])\n val = minimax_alphabeta(move_board, depth - 1, True, alpha, beta)\n #print(\"Min Depth \" + str(depth) + \" Board: \" + str(move_board))\n #print(\"Score of \" + str(val))\n\n best_val = min(best_val, val)\n beta = min(best_val, beta)\n if beta <= alpha:\n break\n return best_val", "def search(self, depth, rack, pid):\n possibleMoves = [] # prepare you possible moves\n \n if(pid == 1): # determine players\n enemypid = 2\n else:\n enemypid = 1\n \n for i in range(len(rack)): # fill out the possible moves\n if self.isPossibleMove(rack, i):\n temp = self.makeMove(rack, i, pid)\n possibleMoves.append(temp)\n\n if (depth == 0 or len(possibleMoves) == 0 or self.isGameOver(rack)): # if the game is over, finish\n return self.calcStateValue(rack, pid)\n\n alpha = -1000000\n for child in possibleMoves: # go through the children\n if (child == None):\n print(\"no child, how did we let this happen!?\")\n alpha = max(alpha, -self.search(depth-1, child, enemypid)) # keep only the child with the greatest alpha\n return alpha", "def minimax(\n depth: int, node_index: int, is_max: bool, scores: list[int], height: float\n) -> int:\n\n if depth < 0:\n raise ValueError(\"Depth cannot be less than 0\")\n\n if not scores:\n raise ValueError(\"Scores cannot be empty\")\n\n if depth == height:\n return scores[node_index]\n\n return (\n max(\n minimax(depth + 1, node_index * 2, False, scores, height),\n minimax(depth + 1, node_index * 2 + 1, False, scores, height),\n )\n if is_max\n else min(\n minimax(depth + 1, node_index * 2, True, scores, height),\n minimax(depth + 1, node_index * 2 + 1, True, scores, height),\n )\n )", "def depth_first_branch_and_bound_search(start_grid, use_manhattan):\n limit = INFINITY # Initialize limit to infinity\n\n \"\"\"\n Open list contains the live nodes \n \"\"\"\n open_list = PriorityQueue()\n root = Node(start_grid, goal_grid, use_manhattan, '')\n open_list.insert(root)\n result_node = None\n num_nodes_expanded = 0\n start_time = time.perf_counter()\n exec_time_str = '' # Execution time string for measuring execution time in seconds rounded to 4 decimal places\n\n \"\"\"\n While the open_list is not empty,\n Pull out the min f cost node.\n if the node is equal to the goal state, update the limit to the solution's f value\n Otherwise, generate the children, iterate through each child and only insert those elements into the queue with\n an f value <= the current limit\n \n \"\"\"\n while open_list.size() > 0:\n min_node = open_list.delete()\n num_nodes_expanded += 1\n if min_node.h == 0:\n if min_node.f <= limit:\n end_time = time.perf_counter()\n exec_time = end_time - start_time\n exec_time_str = f'Execution Time: {exec_time:0.4f} seconds'\n limit = min_node.f\n result_node = min_node\n else:\n successors = min_node.generate_successors()\n temp_list = PriorityQueue()\n for successor in successors:\n if successor.f <= limit:\n temp_list.insert(successor)\n while temp_list.size() > 0:\n open_list.insert(temp_list.delete())\n\n trace_and_print(root, result_node, num_nodes_expanded, exec_time_str)", 
"def compute_heuristic(self, global_board: np.ndarray, depth: int) -> float:\n return sum([self._precalc_boards[b] for b in global_board]) / depth", "def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:\n row_length, col_length = len(grid), len(grid[0])\n if (\n min(row, col) < 0\n or row == row_length\n or col == col_length\n or (row, col) in visit\n or grid[row][col] == 1\n ):\n return 0\n if row == row_length - 1 and col == col_length - 1:\n return 1\n\n visit.add((row, col))\n\n count = 0\n count += depth_first_search(grid, row + 1, col, visit)\n count += depth_first_search(grid, row - 1, col, visit)\n count += depth_first_search(grid, row, col + 1, visit)\n count += depth_first_search(grid, row, col - 1, visit)\n\n visit.remove((row, col))\n return count", "def get_best_move(board, scores):\n emptys = board.get_empty_squares()\n best = emptys[0]\n ite = 1\n while (ite < len(emptys)):\n if (scores[emptys[ite][0]][emptys[ite][1]] > scores[best[0]][best[1]]):\n best = emptys[ite]\n ite += 1\n return best", "def validate_knight(self, piece, board):\n\t\tmoves = {}\n\n\t\tif piece.row+2 <=7:\n\t\t\tmoves.update(self._traverse_vertically(piece.row+2, piece, board))\n\n\t\tif piece.row-2 >=0:\n\t\t\tmoves.update(self._traverse_vertically(piece.row-2, piece, board))\n\n\t\tif piece.col+2 <= 7:\n\t\t\tmoves.update(self._traverse_horizontally(piece.col+2, piece, board))\n\n\t\tif piece.col-2 >=0:\n\t\t\tmoves.update(self._traverse_horizontally(piece.col-2, piece, board))\n\n\t\treturn moves", "def explore(board, steps=None):\n\n steps = steps or []\n graph = cols_to_graph(board.cols)\n for comp in graph.iter_comps():\n new_board = board.without(comp)\n explore(new_board, steps[:] + [comp[0].cell()])", "def search_for_errors_recursive(self, fen, depth=3):\n import chess # import Python chess library for comparison to find bugs\n\n def get_fen(board, move):\n board.push(move)\n move_fen = board.fen()\n board.pop()\n return move_fen\n\n if depth == 0:\n return 1\n\n chess_board = chess.Board(fen)\n chess_moves = sorted([get_fen(chess_board, move) for move in chess_board.generate_legal_moves()])\n moves = sorted([Chess.encode_fen(move) for move in Chess.get_possible_moves(Chess.parse_fen(fen))])\n\n if len(moves) != len(chess_moves):\n raise AssertionError\n\n total = 0\n for move in chess_moves:\n total += self.search_for_errors_recursive(move, depth - 1)\n return total", "def quiescentSearch(board: chess.Board, alpha: int, beta: int, evaluator: Evaluator):\n score = evaluator.evaluate(board)\n if score >= beta:\n return beta\n if score > alpha:\n alpha = score\n\n for move in board.legal_moves:\n if board.is_capture(move):\n board.push(move)\n score = -quiescentSearch(board, -beta, -alpha, evaluator)\n board.pop()\n\n if score >= beta:\n return beta\n if score > alpha:\n alpha = score\n return alpha", "def find_board(self, board):\n for b in self.boards():\n if b.match(board): return b\n return None", "def evaluator1(board: list[list[int]]) -> int:\r\n return sum(board[line][column] for line in range(8) for column in range(8))", "def findChessboard(self, dimensions = (8, 5), subpixel = True):\n corners = cv.FindChessboardCorners(self._getEqualizedGrayscaleBitmap(), dimensions, cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_NORMALIZE_IMAGE + cv.CALIB_CB_FAST_CHECK )\n if(len(corners[1]) == dimensions[0]*dimensions[1]):\n if (subpixel):\n spCorners = cv.FindCornerSubPix(self.getGrayscaleMatrix(), corners[1], (11, 11), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 
10, 0.01))\n else:\n spCorners = corners[1]\n return FeatureSet([ Chessboard(self, dimensions, spCorners) ])\n else:\n return None", "def solve_board(board):\n if find_next_empty(board) != -1:\n print('Solving...')\n solve_tile(board, find_next_empty(board))\n else:\n print('Board already solved')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the distance between self and a test image. Returns a scalar value.
def compute_distance(self, image): diffs = image - self._array total_dist = np.sqrt(np.sum(diffs**2)) return total_dist
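A quick usage check for the distance computation above; the toy arrays are made up for illustration, and the result is just the Euclidean (L2) norm of the pixel-wise difference, so it matches numpy.linalg.norm.

# Usage check: the compute_distance above is the Euclidean (L2) distance over
# all pixels. The toy arrays below are made up purely for illustration.
import numpy as np

reference = np.zeros((4, 4))        # stand-in for self._array
test_image = np.ones((4, 4))        # stand-in for the probe image

diffs = test_image - reference
dist = np.sqrt(np.sum(diffs ** 2))  # same formula as the document field above

assert np.isclose(dist, np.linalg.norm(diffs))  # identical to the L2 norm
print(dist)                                     # 4.0 for this toy input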
[ "def get_distance(self, index):\n return (np.linalg.norm(self.image.astype('float') - self.population[index].image.astype('float'))) / (\n self.image.shape[0] * self.image.shape[1])", "def testDistance(self):\n\n # testList holds a couple 3-tuple (variable1, variable2, result)\n basicTestList = [(chr(170) * 48, chr(85) * 48, long((chr(255) * 48).encode('hex'), 16))]\n\n for test in basicTestList:\n result = Distance(test[0])(test[1])\n self.failIf(result != test[2], 'Result of _distance() should be %s but %s returned' %\n (test[2], result))", "def get_distance(self, train):\n grid_nr = self.get_grid_nr(train)\n if grid_nr == 'error' or 'distance' not in self.points[grid_nr]:\n return 0\n dist = self.points[grid_nr]['distance']\n if 'x' in self.points[grid_nr]:\n x_dist = train['x']-self.points[grid_nr]['x']\n y_dist = train['y']-self.points[grid_nr]['y']\n if self.points[grid_nr]['incoming'] == 0: # top\n y_dist += 30\n elif self.points[grid_nr]['incoming'] == 1: # bottom\n y_dist -= 30\n elif self.points[grid_nr]['incoming'] == 2: # left\n x_dist += 30\n elif self.points[grid_nr]['incoming'] == 3: # right\n x_dist -= 30\n dist += np.sqrt(np.power(x_dist, 2)+np.power(y_dist, 2))/100\n return dist", "def compute_distance(self, X):\n num_train = self.X_train.shape[0]\n num_test = X.shape[0]\n dists = np.zeros((num_test, num_train), dtype=X.dtype)\n for i in range(num_test):\n dists[i] = np.sum((X[i] - self.X_train)**2, axis=1)\n return dists", "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n # print('X.shape', X.shape)\n # print('X_train.shape', self.X_train.shape)\n for i in range(num_test):\n #######################################################################\n # TODO: #\n # Compute the l2 distance between the ith test point and all training #\n # points, and store the result in dists[i, :]. #\n # Do not use np.linalg.norm(). 
#\n #######################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Array of pixels [---R---G---B---]\n # Let total pixels (R + G + B pixels) = p\n # shape = (1, p)\n test_example = X[i]\n\n # X.train.shape = (num_train, p)\n # Broadcasts the test example with the training examples matrix\n diff_squares = np.square(test_example - self.X_train)\n # if i == 0:\n # print('diff_squares.shape', diff_squares.shape)\n # print('test_example[0]', test_example)\n # print('train_example[0]', self.X_train[0])\n # print('diff_squares[0]', diff_squares[0][0])\n\n # In each row, sum across the colums\n # axis=0, sum across rows (go down columns)\n # axis=1, sum across columns (go across row)\n sm = np.sum(diff_squares, axis=1, keepdims=True)\n # if i == 0:\n # print('sm.shape', sm.shape)\n assert sm.shape == (num_train, 1)\n\n temp = np.sqrt(sm)\n # if i == 0:\n # print('temp.shape', temp.shape)\n\n # Transpose column vector temp to row vector\n dists[i, :] = temp.T\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return dists", "def _compute_distance(self) -> np.ndarray:\n loc = np.expand_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n self.loc_diff = m-m.transpose(0, 2, 1)\n return np.linalg.norm(self.loc_diff, axis=0)", "def distance(self):\n S = self.get_array('S')\n return numpy.cumsum(2*S)-S", "def distance(state):\n x = state.knight_pos[0]\n y = state.knight_pos[1]\n state.dist = min(x,state.nRows - x - 1) + min(y,state.nCols - y - 1)", "def get_distance(self):\n assert self._distance_from_target != -1, \"Non e' ancora stata settata la distanza dal target\"\n\n return self._distance_from_target", "def __distance(self, figure):\n if isinstance(self.ground, str):\n return figure.distance_to_refpoint(self.ground)\n if self.ground.repr == 'cartesian':\n return self.ground.distance_cartesian(figure)\n else:\n return self.ground.distance_spherical(figure)", "def calc_distance(self):\n total_distance = sum([connection.distance for connection in self.get_true_connections()])\n return total_distance", "def nose_to_target_dist(self):\n return np.linalg.norm(self.nose_to_target())", "def distance(self, other: \"Point\") -> float:\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def __call__(self, samples, new_sample):\n\t\tdistance = np.sum(np.abs(samples - new_sample), axis=-1)\n\t\treturn distance", "def distance(image1, image2):\n m1 = 1\n m2 = 2\n assert image1.size() == image2.size()\n flat_1 = [col for dim in image1.pixels for row in dim for col in row]\n flat_2 = [col for dim in image2.pixels for row in dim for col in row]\n dist = [((flat_1[i] - flat_2[i]) ** m2) for i in range(len(flat_1))]\n return sum(dist) ** (m1/m2)", "def compare(self) -> float:\n if self.img_ref is None or self.img_restored is None:\n raise TypeError(\"Please load a reference image and call \"\n \"'restore' method before comparing.\")\n\n img_a = self.img_restored.astype(float)\n img_b = self.img_ref.astype(float)\n rmse = np.sqrt(np.power(img_a - img_b, 2.0).sum()\n / np.prod(self.img_restored.shape))\n return rmse", "def distance(self, src, tar, c):\n src, tar = self.proj(src, c), self.proj(tar, c)\n dist = self.sqdist(src, tar, c)\n dist = tf.expand_dims(dist, -1)\n return dist", "def euclidean_distance(self):\n\t\treturn math.sqrt(math.pow((self.goal[0]-self.pos.position.x),2) + math.pow((self.goal[1]-self.pos.position.y),2))", "def distance(x, y):\n return 
np.linalg.norm(x - y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare one image to the learned images, and update the values for correctly and wrongly predicted images.
def learn_one_image(images_learned, image_features, image_value, lam_val): for image in images_learned: # compare image pass
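The document field above is only a skeleton (the comparison body is pass, and lam_val is unused). One plausible completion is sketched below as a nearest-neighbour tally; the LearnedImage fields and the update rule are assumptions for illustration, not a reconstruction of the original author's intent.

# Illustrative sketch only: the skeleton above does not say how images are
# compared or what gets updated, so the LearnedImage fields, the
# nearest-neighbour vote and the ignored lam_val are all assumptions.
from dataclasses import dataclass
import numpy as np

@dataclass
class LearnedImage:
    features: np.ndarray
    value: int
    correct: int = 0
    wrong: int = 0

def learn_one_image_sketch(images_learned, image_features, image_value):
    # Compare the new image against every learned image; the closest one
    # (Euclidean distance over feature vectors) casts the prediction.
    best = min(images_learned,
               key=lambda img: np.linalg.norm(img.features - image_features))
    # Update the tallies of correctly / wrongly predicted images.
    if best.value == image_value:
        best.correct += 1
    else:
        best.wrong += 1
    return best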
[ "def predict_single():\n path = 'outputs/gray/img-8-epoch-29.jpg'\n img = Image.open(path)\n img = img.resize((224,224))\n img_original = np.array(img)\n\n gray = rgb2gray(img_original)\n x = TF.to_tensor(gray).float()\n x.unsqueeze_(0)\n model = ColorizationUpsampling()\n model.load_state_dict(torch.load('checkpoints/model-epoch-22-losses-0.002910.pth',\n map_location=torch.device('cpu')))\n\n output = model(x)\n\n output = output.detach()\n color_image = torch.cat((x[0], output[0]), 0).numpy()\n color_image = color_image.transpose((1, 2, 0)) # rescale for matplotlib\n color_image[:, :, 0:1] = color_image[:, :, 0:1] * 100\n color_image[:, :, 1:3] = color_image[:, :, 1:3] * 255 - 128\n color_image = lab2rgb(color_image.astype(np.float16))\n\n color_image_bgr = color_image.astype(np.float32)\n color_image_bgr = cv2.cvtColor(color_image_bgr, cv2.COLOR_RGB2BGR)\n color_image_bgr = cv2.resize(color_image_bgr, (380, 240))\n\n normalized_array = (color_image_bgr - np.min(color_image_bgr)) / (\n np.max(color_image_bgr) - np.min(color_image_bgr)) # this set the range from 0 till 1\n color_image_bgr = (normalized_array * 255).astype(np.uint8)\n gray = cv2.resize(gray, (380, 240))\n gray = np.stack((gray,) * 3, axis=-1)\n\n gray = (gray - np.min(gray)) / (\n np.max(gray) - np.min(gray)) # this set the range from 0 till 1\n gray = (gray * 255).astype(np.uint8)\n vis = np.concatenate((gray, color_image_bgr), axis=1)\n\n frame_normed = np.array(vis, np.uint8)\n\n cv2.imwrite(path[:-4]+\"out.jpg\", frame_normed)\n cv2.imshow(\"out\", frame_normed)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def cnn_predict():\n\n x_test, y_test, file_name_test_list = load_test_set()\n\n model = cnn()\n\n weight_path = Path(config[\"weight_file\"])\n if weight_path.exists() is False:\n log.error(\"Not found weight file %s. Aborting.\" % (weight_path))\n sys.exit(1)\n\n model.load_weights(weight_path)\n\n y_predicted = model.predict(x_test)\n correct_count = 0\n total_count = x_test.shape[0]\n for i in range(total_count):\n # Ground truth\n # Convert the file name to a string that contains only the ground trugh classes\n name = file_name_test_list[i]\n underscore_pos = name.find(\"_\")\n if underscore_pos < 0:\n log.warning(\"Invalid image file name. 
Missing classification marker for file %s\" % (name))\n continue\n\n classes = name[0:underscore_pos]\n actual = \"\"\n if DogClassMarker.AIMEE in classes:\n actual = actual + DogClassMarker.AIMEE\n if DogClassMarker.MADDIE in classes:\n actual = actual + DogClassMarker.MADDIE\n if DogClassMarker.OLIVIA in classes:\n actual = actual + DogClassMarker.OLIVIA\n if DogClassMarker.PINK in classes:\n actual = actual + DogClassMarker.PINK\n if len(actual) == 0:\n actual = \"_\"\n\n # Prediction\n # Convert the predicted classes contained in the vector to a string.\n # Before conversion, round down or round up values to 0 or 1 except for the mid-range number.\n # A mid-range number is counted as a \"mismatch\".\n v = y_predicted[i]\n\n low_threshold_flag = v < 0.3\n v[low_threshold_flag] = 0\n\n high_threshold_flag = v > 0.7\n v[high_threshold_flag] = 1\n\n predicted = \"\"\n if v[DogClassIndex.AIMEE] == 1:\n predicted = predicted + DogClassMarker.AIMEE\n if v[DogClassIndex.MADDIE] == 1:\n predicted = predicted + DogClassMarker.MADDIE\n if v[DogClassIndex.OLIVIA] == 1:\n predicted = predicted + DogClassMarker.OLIVIA\n if v[DogClassIndex.PINK] == 1:\n predicted = predicted + DogClassMarker.PINK\n if len(predicted) == 0:\n predicted = \"_\"\n\n # Compare the ground-truth classification string and the predicted classification string\n # Count only the complete match as the match. Do not count the partial match.\n if actual == predicted:\n correct_count = correct_count + 1\n\n print(\"Total count: %d\" % (total_count))\n print(\"Correct count (complete match only): %d\" % (correct_count))\n print(\"Accuracy: %f percent\" % (correct_count * 100 / total_count))", "def validate(self,X_validate,y_validate,j):\n y_pred = self.classify1(X_validate)\n count = 0\n length = y_pred.shape\n X3 = []\n Y3 = []\n X4 = []\n Y4 = []\n for i in range(0,length[0]):\n if y_pred[i] != y_validate[i]:\n X3.append(X_validate[i])\n Y3.append(y_validate[i])\n count +=1\n #print(j)\n if j != \"lda\" :\n if j!=\"kernel_lda\":\n length = y_pred.shape\n count = 0\n X3 = []\n Y3 = []\n count1 = 0\n count = 0\n for i in range(0,length[0]):\n if y_pred[i] != y_validate[i]:\n X3.append(X_validate[i])\n Y3.append(y_validate[i])\n count +=1\n else:\n X4.append(X_validate[i])\n Y4.append(y_validate[i])\n count1 +=1\n \n #print(Y3)\n\n X3 = np.array(X3)\n #print(X3.shape)\n\n N,H,W, C = count,32,32,3\n X3 = X3.reshape((N,H,W,C))\n\n\n Y3 = np.array(Y3)\n print(\"wrong classified images\")\n plt.imshow(X3[0])\n plt.show()\n plt.imshow(X3[1])\n plt.show()\n \n X4 = np.array(X4)\n #print(X3.shape)\n\n N,H,W, C = count1,32,32,3\n X4 = X4.reshape((N,H,W,C))\n print(\"correct classified images\")\n\n\n Y4 = np.array(Y4)\n plt.imshow(X4[0])\n plt.show()\n plt.imshow(X4[1])\n plt.show()\n \n # plt.imshow(X3[2])\n X3 = []\n Y3 = []\n X4 = []\n Y4 = []\n\n\n \n \n return self.confusion_matrix(y_validate,y_pred),accuracy_score(y_validate,y_pred),f1_score(y_validate,y_pred,average=\"macro\"),count/length[0],precision_score(y_validate,y_pred,average=\"macro\")", "def test(self, file_dir=\"training_data\"):\n print(\"loading testing data\")\n test_data = MNIST(file_dir)\n img, lbl = test_data.load_testing()\n\n correct = 0\n for i in range(0, len(img)):\n self.classify(img[i])\n b = np.where(self.activations[-1] == max(self.activations[-1]))[0][0]\n c = lbl[i]\n if (np.where(self.activations[-1] == max(self.activations[-1]))[0][0]) == lbl[i]:\n correct += 1\n\n print(str((correct / len(img)) * 100) + \" % accuracy\")", "def update_accuracies(self, 
decoded, base_model_outputs, true_labels, mask):\n self.num_match_attempts += decoded.size(0)\n max_decoded = torch.max(decoded, dim=2)[1]\n max_outputs = torch.max(base_model_outputs, dim=2)[1]\n\n self.num_reconstruction_match += torch.sum(\n (max_decoded == max_outputs) * mask).item()\n self.num_overall_match += torch.sum(\n (max_decoded == true_labels) * mask).item()", "def checkpoint(img_name):\n base = cv2.imread(os.path.join(base_path,img_name+'.png'))\n\n if os.path.exists(test_img):\n test = cv2.imread(os.path.join(test_img,img_name+'.png'))\n else:\n return(\"No test_img folder is found ,First run\")\n\n diff = cv2.subtract(base,test)\n #cv2.imshow(\"Difference\",diff)\n b,g,r = cv2.split(diff)\n if cv2.countNonZero(b)==0 and cv2.countNonZero(g)==0 and cv2.countNonZero(r)==0:\n print(\"images are same\")\n return(\"Visual Testing Pass\")\n else:\n print(\"Images are Different\")\n #cv2.imshow(\"Difference\",diff)\n #cv2.imwrite(os.path.join(\"diff_results\",img_name+'.png'),diff)\n #imgray = cv2.cvtColor(test,cv2.COLOR_BGR2GRAY)\n #ret,thresh = cv2.threshold(imgray,127,255,0)\n #contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n #cv2.drawContours(diff, contours, -1, (0,255,0), 1)\n #cv2.imwrite(os.path.join(\"diff_results\",img_name+'.png'),diff)\n #cv2.imshow(\"Difference\",diff)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n base_img = Image.open(os.path.join(base_path,img_name+'.png'))\n test_imge = Image.open(os.path.join(test_img,img_name+'.png'))\n result_img = black_or_b(base_img, test_imge)\n result_img.show()\n return(\"Visual Testing Failed\")", "def reid(image_pairs):\n FLAGS.batch_size = image_pairs.shape[1]\n is_train = tf.placeholder(tf.bool, name='is_train')\n weight_decay = 0.0005\n images = tf.placeholder(tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')\n images1, images2 = preprocess(images)\n logits = network(images1, images2, weight_decay)\n inference = tf.nn.softmax(logits)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n feed_dict = {images: image_pairs, is_train: False}\n predictions = sess.run(inference, feed_dict=feed_dict)\n # show_matches_score(image_pairs, predictions)\n return predictions[:, 0].reshape(1, -1)", "def test_point_label(self):\n\t\t\n\t\t# Loading data\n\t\tfor root, dirs, files in os.walk(self.test_scenes):\n\t\t\tsuccess_files = [x for x in files if x.startswith('success') and x.endswith('.jpg')]\n\t\t\tfail_files = [x for x in files if x.startswith('fail') and x.endswith('.jpg')]\n\t\t\tsuccess_states = []\n\t\t\tfor success in success_files:\n\t\t\t\tstates = open(self.test_scene_states, 'r')\n\t\t\t\tstate_line = [line for line in states if success in line][0]\n\t\t\t\tstate = [float(x) for x in state_line.split()[1:5]]\n\t\t\t\tsuccess_states.append(state)\n\t\t\t\tstates.close()\n\t\t\tfail_states = []\n\t\t\tfor fail in fail_files:\n\t\t\t\tstates = open(self.test_scene_states, 'r')\n\t\t\t\tstate_line = [line for line in states if fail in line][0]\n\t\t\t\tstate = [float(x) for x in state_line.split()[1:5]]\n\t\t\t\tfail_states.append(state)\n\t\t\t\tstates.close()\n\t\t\tsuccess_images = [cv2.imread(os.path.join(self.test_scenes, im_file), 1) for im_file in success_files]\n\t\t\tfail_images = [cv2.imread(os.path.join(self.test_scenes, im_file), 1) for im_file 
in fail_files]\n\t\t\tsuccess_labels = []\n\n\t\t\t# Process images\n\t\t\tfor i in range(len(success_images)):\n\t\t\t\tim = success_images[i]\n\t\t\t\tstate = success_states[i]\n\t\t\t\tpoint_label = self.reward_function(im, state, dist=False, success=True)\n\t\t\t\tsuccess_labels.append(point_label)\n\t\t\tfail_labels = []\n\t\t\tfor i in range(len(fail_images)):\n\t\t\t\tim = fail_images[i]\n\t\t\t\tstate = fail_states[i]\n\t\t\t\tpoint_label = self.reward_function(im, state, dist=False, success=False)\n\t\t\t\tfail_labels.append(point_label)\n\t\t\tacc = np.mean(np.concatenate([np.array(success_labels) == 1, np.array(fail_labels) == 0]))\n\t\t\treturn acc", "def updatePredictions(self,img,predictions,threshold):\n\n #update lasts images prediction with last image predicition\n self.predictions[self.pred_index] = predictions.flatten()\n #compute the average for each lot\n self.logger.debug('Time filtering prediction')\n #return results and draw circles based on the average\n avg_predictions = np.mean(self.predictions,axis=0)\n results = self.drawPredicts(img,avg_predictions,threshold) \n if self.pred_index == len(self.predictions) - 1:\n self.pred_index = 0\n else:\n self.pred_index += 1\n return results", "def get_wrong_labels_and_images(self, correct_labels):\n if not isinstance(correct_labels, list):\n correct_labels = [correct_labels]\n\n shape = (len(correct_labels), *self.image_shape)\n wrong_image_arrays = np.zeros(shape)\n wrong_labels = []\n selected_wrong_indices = []\n \n for i,label in enumerate(correct_labels):\n all_labels = self.labels.copy()\n all_labels.remove(label)\n wrong_label = random.choice(all_labels)\n wrong_labels.append(wrong_label)\n \n wrong_image_indices = self.label_to_indices[wrong_label]\n wrong_image_index = random.choice(wrong_image_indices)\n wrong_image_arrays[i] = self._load_image_as_np_array(wrong_image_index)\n selected_wrong_indices.append(wrong_image_index)\n \n return wrong_labels, tf.convert_to_tensor(wrong_image_arrays)", "def compare_images(gold_dir, pred_dir, dump=False, clobber=False):\n unmatched_files = []\n missing_files = []\n gold_files = os.listdir(gold_dir)\n # n = 100\n for i, fname in enumerate(gold_files):\n # if i>=n:\n # break\n if not os.path.exists(os.path.join(pred_dir, fname)):\n missing_files.append(fname)\n unmatched_files.append(fname)\n continue\n img_gold = trim_image(np.asarray(Image.open(os.path.join(gold_dir, fname)).convert('L')))\n img_gold = np.asarray(img_gold < 255, dtype=np.uint)\n img_pred = trim_image(np.asarray(Image.open(os.path.join(pred_dir, fname)).convert('L')))\n img_pred = np.asarray(img_pred < 255, dtype=np.uint)\n if (img_gold.shape != img_pred.shape) or ((img_gold != img_pred).sum() != 0):\n unmatched_files.append(fname)\n\n if dump:\n with open(os.path.join(os.path.dirname(pred_dir), 'unmatched_files2.txt'), 'w') as f:\n for fname in unmatched_files:\n f.write('%s\\n' % fname)\n with open(os.path.join(os.path.dirname(pred_dir), 'missing_files2.txt'), 'w') as f:\n for fname in missing_files:\n f.write('%s\\n' % fname)\n total = len(gold_files) # min(n, len(gold_files))\n matched = total - len(unmatched_files)\n print('%d (%.2f%%) out of %d images matched binary pixel by binary pixel' % (matched, (matched*100.0)/(total*1.0), total))\n return total, matched, unmatched_files, missing_files", "def test_predict_future_reward(self):\n good_sequence = [\n ([0,0,0,0],1,[0,0,0,1]),\n ([0,0,0,1],0,[1,0,1,0]),\n ([1,0,1,0],1,[1,1,1,1]),\n ]\n bad_sequence = [\n ([0,0,0,0],0,[1,0,0,1]),\n 
([1,0,0,1],1,[0,0,1,0]),\n ([0,0,1,0],1,[0,1,1,1]),\n ]\n def expand(r, final_reward):\n results = []\n for i,(state,action,new_state) in enumerate(r):\n record = {\n 'state': np.array(state,'f'),\n 'new_state': np.array(new_state,'f'),\n 'action': action,\n 'done': i >= len(r),\n 'reward': final_reward\n }\n results.append(record)\n assert results[-1]['reward'] == final_reward\n return results \n records = expand(good_sequence,1.0) + expand(bad_sequence,-1.0)\n print(records)\n records = records * 256\n model = main.build_model(env)\n main.train_model( model, records, env, batch_size=8)\n for (state,action,new_state) in good_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) == action, (state,action,prediction)\n \n for (state,action,new_state) in bad_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) != action, (state,action,prediction)", "def train(self, images, labels, load):\n \n PATH='./trained.pickle'\n\n if os.path.isfile(PATH) and load:\n print 'Loading already existing training values from ' + PATH\n with open('trained.pickle') as f:\n self.classes, self.prClass, self.prPixelGivenClass = pickle.load(f)\n else:\n self.prClass = [0 for i in range(10)]\n self.classes = [i for i in range(10)]\n self.prPixelGivenClass = [[0 for i in range(14*14)] for j in range(10)]\n \n for i in range(len(labels)):\n self.prClass[labels[i]] += 1 # Count how many times a class appears in the labels list.\n for j in range(len(images[i])):\n if images[i][j] < 100:\n self.prPixelGivenClass[labels[i]][j] += 1 # For every class, count how many times\n # a pixel is black.\n \n for i in range(len(self.prPixelGivenClass)):\n for j in range(len(self.prPixelGivenClass[i])):\n self.prPixelGivenClass[i][j] /= float(self.prClass[i]) # Divide the count of black pixels\n # by the number of times a class\n # appears, to get a percentage.\n self.prClass[i] /= float(len(images)) # Divide the number of times a class\n # appears, by the total number of classes\n # to get a percentage.\n \n print ''\n for i in range(len(self.prClass)): # some useful output that shows the probability of each class.\n print 'Pr(C=' + str(i) + ') = ' + str(self.prClass[i])[:5]\n # print 'Probabilites of the individual pixel in this class:' \"\"Commented because we now have\n # self.print_ascii_probabilities(self.prPixelGivenClass[i]) \"\"'heat-maps' for each image\n # print''\n print ''\n with open('trained.pickle', 'w') as f:\n pickle.dump([self.classes, self.prClass, self.prPixelGivenClass], f)", "def predict_own(image: Any) -> Any:\n model = load_own()\n original_dims = (image.shape[1], image.shape[0])\n L_orig = cv.split(cv.cvtColor(image, cv.COLOR_RGB2LAB))[0]\n input = prepare_image(image)\n prediction = predict_image(model, input)\n prediction = (255 * prediction).astype(\"uint8\")\n pred_resized = cv.resize(prediction, original_dims)\n _, a_pred, b_pred = cv.split(cv.cvtColor(pred_resized, cv.COLOR_RGB2LAB))\n final_lab = cv.merge((L_orig, a_pred, b_pred))\n final_rgb = cv.cvtColor(final_lab, cv.COLOR_LAB2RGB)\n return final_rgb", "def evaluate_one_fold(directory, dataset, w_path, mode):\n dname = dataset[-1]\n path = os.path.join(w_path, directory, dname)\n ld = os.listdir(path)\n if \"label\" in ld:\n # 単一resolutionの場合はそのままlabelディレクトリ読み込み\n pred_path = tl.getFilelist(\n os.path.join(path, \"label\"), \".png\"\n )\n else:\n # multi resolutionの場合は、最も解像度が高いディレクトリを読み込み\n tmp = 0\n for d in ld:\n if \"label\" in d and int(d[-1]) > tmp:\n tmp = int(d[-1])\n di = \"label\" + 
str(tmp)\n pred_path = tl.getFilelist(\n os.path.join(path, di), \".png\"\n )\n pred_path.sort()\n\n jaccard = []\n dice = []\n tpr = []\n tnr = []\n acc = []\n class_j = []\n\n # インスタンス化するために適当なパスを読み込み\n if \"ips\" in path:\n img_list, true_path = tl.load_datapath(dataset, mode=mode)\n labels = [1, 2, 3]\n else:\n img_list, true_path = tl.load_datapath(dataset, mode=mode)\n labels = [1, 2]\n DL = Patch_DataLoader(img_list, true_path)\n for pred, true in zip(pred_path, true_path):\n pred_name, _ = os.path.splitext(pred.split(\"/\")[-1])\n true_name, _ = os.path.splitext(true.split(\"/\")[-1])\n assert pred_name == true_name\n\n y_pred = np.array(Image.open(pred), int)\n y_true = np.array(Image.open(true), int)\n y_true = DL.image2label(y_true, evaluate=True)\n\n # out of region of evaluation\n oor = ~(y_true == 0) * 1\n y_pred = y_pred * oor\n j, d, tp, tn, a, c_j = evaluate_one_image(y_true, y_pred, labels)\n class_j.append(c_j)\n jaccard.append(j)\n dice.append(d)\n tpr.append(tp)\n tnr.append(tn)\n acc.append(a)\n jaccard = sum(jaccard) / len(jaccard)\n dice = sum(dice) / len(dice)\n tpr = sum(tpr) / len(tpr)\n tnr = sum(tnr) / len(tnr)\n acc = sum(acc) / len(acc)\n class_j = np.asarray(class_j)\n return jaccard, dice, tpr, tnr, acc, class_j", "def state_this_is_train_or_new_label(self, label):\n num_images = self.check_num_images_label(label)\n if num_images == \"-5\":\n print(\"\\nS> Please, show me more examples of {0}.\".format(label))\n self.save_img(label)\n elif num_images == \"=5\":\n print(\"\\nS> I am learning {0}.\".format(label))\n self.train_new_label(label)\n else:\n print(\"\\nS> I am updating my systems on {0}.\".format(label))\n self.train_model(label)", "def predict_images_in_directory(\n self, test_images_path, check_invalid_images_flag=False\n ):\n\n test_images_path = glob.glob(test_images_path + \"\\**.jpg\")\n\n for image_path in test_images_path:\n # Check if the image is invalid/faulty\n if check_invalid_images_flag:\n invalid = check_for_invalid_image(image_path)\n if invalid[0]:\n print(f\"faulty image ({image_path})...\")\n print(invalid[1])\n continue\n\n image = cv2.imread(image_path)\n print(\"Test image path:\", image_path)\n team, player_no = self.predict_image(image, predict_flag=True)\n team_name = [key for key, val in self.teams_dic.items() if val == team]\n player_no_name = [\n key for key, val in self.players_dic.items() if val == player_no\n ]\n print(\n f\"Predictions - Team: {team_name[0]}, Player_no: {player_no_name[0]}\\n\"\n )", "def coco_in_test_to_train():\n with open('data/refvg/image_data_split3000_100_slim.json') as f:\n info = json.load(f)\n to_move = set()\n for img in info:\n if img['split'] == 'test' and img['coco_id'] is not None:\n to_move.add(img['image_id'])\n img['split'] = 'train'\n print('%d imgs to move from test to train' % len(to_move))\n\n with open('data/refvg/image_data_split3000_100_slim_nococo.json', 'w') as f:\n json.dump(info, f)\n print(len(info))\n print('data/refvg/image_data_split3000_100_slim_nococo.json saved.')\n\n with open('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_input_test.json') as f:\n ref = json.load(f)\n new_ref = [t for t in ref if t['image_id'] in to_move]\n with open('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_input_train.json') as f:\n ref = json.load(f)\n print(len(ref))\n ref += new_ref\n print(len(ref))\n with open('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_input_train_nococo.json', 'w') as f:\n json.dump(ref, f)\n 
print('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_input_train_nococo.json saved.')\n\n with open('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_test.json') as f:\n ref = json.load(f)\n new_ref = [t for t in ref if t['image_id'] in to_move]\n with open('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_train.json') as f:\n ref = json.load(f)\n print(len(ref))\n ref += new_ref\n print(len(ref))\n with open('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_train_nococo.json', 'w') as f:\n json.dump(ref, f)\n print('data/refvg/amt_result/refer_filtered_instance_refine_slim_nodup_train_nococo.json saved.')", "def augment_input(x, y):\n path = './images/train'\n n_classes = 43\n min_ex = 35\n #list of classes that are symmetric horizontally or vertically,\n #meaning that if you flip them 180 degrees then they will be the same traffic sign.\n #This were selected manually.\n symmetric_h = [12, 15, 17, 26, 40]\n #All these classes will have double the images then.\n for cla in symmetric_h:\n if y[cla] <= min_ex:\n x , y = rotate_img(x, y, path, cla, lenet=True)\n\n #Now this is a list of classes whose convert into another class when flipped\n #they convert to is always the next one.\n #For example: turn left converts in turn right when flipped\n flip_change = [19, 33, 36, 38]\n for cla in flip_change:\n if y[cla] <= min_ex:\n #if in the current there are not enough images, rotate from the next\n x , y = flip_img(x, y, path, cla+1, lenet=True)\n if y[cla+1] <= min_ex:\n #if on the next there are not enough images, rotate from the current\n x, y = flip_img(x, y, path, cla, lenet=True)\n\n #Now images when flipped stay the same class.\n flippable = [11, 12, 13, 15, 17, 18, 22, 26, 30, 35, 40]\n for cla in flippable:\n if y[cla] <= min_ex:\n #if in the current there are not enough images, rotate from the next\n x, y = flip_img(x, y, path, cla,lenet=True)\n\n #Note: I print a lot of stuff here because this part is kinda slow, so I print in order to see\n #if everything is running smoothly. It transforms in batches, that is, it transforms all the\n #images in a folder each iteration.\n for cla in range(43):\n #Do random transforms until I have all classes with at least 35 images.\n print('Current class:')\n print(cla)\n counts, unique = np.unique(y, return_counts=True)\n examples = unique[cla]\n i=0\n while examples <= min_ex:\n print('Current amount of batch transformations:')\n print(i)\n x, y = transform_img(x, y, path, cla)\n counts, unique = np.unique(y, return_counts=True)\n examples = unique[cla]\n print('Current amount of images in class:')\n print(examples)\n i+=1\n\n return x, y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Command line tool for adding virtual markers to Coda Motion C3D files from the information in exported Mat files. The tool assumes the c3d and mat files have the same filename but different extensions. If called without arguments, the tool will find all matching c3d/mat files in the working directory.
def main(c3dfile, overwrite): if not c3dfile: click.confirm('Combine all C3D/Mat files in current dir?', abort=True) filelist = [(f, os.path.splitext(f)[0]+'.mat') for f in os.listdir('.') if f.upper().endswith('.C3D')] elif os.path.isfile(c3dfile): matfile = os.path.splitext(c3dfile)[0]+'.mat' if not os.path.isfile(matfile): raise click.UsageError('No mat file found matching {}' ''.format(c3dfile)) filelist = [(c3dfile, matfile)] else: raise click.UsageError('No such file {}'.format(c3dfile)) filelist = [(str(f), str(m)) for f, m in filelist if os.path.exists(f) and os.path.exists(m)] for c3dfile, matfile in filelist: postfix = '' if overwrite else '_updated' new_c3d = combine_files(c3dfile, matfile, postfix=postfix) print('Updated: {}'.format(new_c3d))
[ "def main():\n dirlist = ['./']\n dir_path = os.getcwd()\n names = os.listdir(dir_path)\n for n in names:\n if 'Location' in n:\n dirlist.append(n)\n if '-fmt' in sys.argv:\n ind = sys.argv.index(\"-fmt\")\n fmt = sys.argv[ind+1]\n else:\n fmt = 'png'\n if '-f' in sys.argv:\n ind = sys.argv.index(\"-f\")\n filelist = [sys.argv[ind+1]]\n else:\n filelist = os.listdir(dir_path)\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n for loc in dirlist:\n print('working on: ', loc)\n os.chdir(loc) # change working directories to each location\n crd = 's'\n if 'er_samples.txt' in filelist: # find coordinate systems\n samps, file_type = pmag.magic_read(\n 'er_samples.txt') # read in data\n # get all none blank sample orientations\n Srecs = pmag.get_dictitem(samps, 'sample_azimuth', '', 'F')\n if len(Srecs) > 0:\n crd = 'g'\n if 'magic_measurements.txt' in filelist: # start with measurement data\n print('working on measurements data')\n data, file_type = pmag.magic_read(\n 'magic_measurements.txt') # read in data\n if loc == './':\n # get all the blank location names from data file\n data = pmag.get_dictitem(data, 'er_location_name', '', 'T')\n # looking for zeq_magic possibilities\n # get all none blank method codes\n AFZrecs = pmag.get_dictitem(\n data, 'magic_method_codes', 'LT-AF-Z', 'has')\n # get all none blank method codes\n TZrecs = pmag.get_dictitem(\n data, 'magic_method_codes', 'LT-T-Z', 'has')\n # get all none blank method codes\n MZrecs = pmag.get_dictitem(\n data, 'magic_method_codes', 'LT-M-Z', 'has')\n # get all dec measurements\n Drecs = pmag.get_dictitem(data, 'measurement_dec', '', 'F')\n # get all dec measurements\n Irecs = pmag.get_dictitem(data, 'measurement_inc', '', 'F')\n Mkeys = ['measurement_magnitude', 'measurement_magn_moment',\n 'measurement_magn_volume', 'measurement_magn_mass']\n for key in Mkeys:\n Mrecs = pmag.get_dictitem(\n data, key, '', 'F') # get intensity data\n if len(Mrecs) > 0:\n break\n # potential for stepwise demag curves\n if len(AFZrecs) > 0 or len(TZrecs) > 0 or len(MZrecs) > 0 and len(Drecs) > 0 and len(Irecs) > 0 and len(Mrecs) > 0:\n print('zeq_magic.py -fsp pmag_specimens.txt -sav -fmt ' +\n fmt+' -crd '+crd)\n os.system('zeq_magic.py -sav -fmt '+fmt+' -crd '+crd)\n # looking for thellier_magic possibilities\n if len(pmag.get_dictitem(data, 'magic_method_codes', 'LP-PI-TRM', 'has')) > 0:\n print('thellier_magic.py -fsp pmag_specimens.txt -sav -fmt '+fmt)\n os.system('thellier_magic.py -sav -fmt '+fmt)\n # looking for hysteresis possibilities\n if len(pmag.get_dictitem(data, 'magic_method_codes', 'LP-HYS', 'has')) > 0: # find hyst experiments\n print('quick_hyst.py -sav -fmt '+fmt)\n os.system('quick_hyst.py -sav -fmt '+fmt)\n if 'pmag_results.txt' in filelist: # start with measurement data\n data, file_type = pmag.magic_read(\n 'pmag_results.txt') # read in data\n print('number of datapoints: ', len(data))\n if loc == './':\n # get all the concatenated location names from data file\n data = pmag.get_dictitem(data, 'er_location_names', ':', 'has')\n print('number of datapoints: ', len(data), loc)\n print('working on pmag_results directions')\n SiteDIs = pmag.get_dictitem(\n data, 'average_dec', \"\", 'F') # find decs\n print('number of directions: ', len(SiteDIs))\n SiteDIs = pmag.get_dictitem(\n SiteDIs, 'average_inc', \"\", 'F') # find decs and incs\n print('number of directions: ', len(SiteDIs))\n # only individual results - not poles\n SiteDIs = pmag.get_dictitem(SiteDIs, 'data_type', 'i', 'has')\n print('number of directions: ', 
len(SiteDIs))\n # tilt corrected coordinates\n SiteDIs_t = pmag.get_dictitem(\n SiteDIs, 'tilt_correction', '100', 'T')\n print('number of directions: ', len(SiteDIs))\n if len(SiteDIs_t) > 0:\n print('eqarea_magic.py -sav -crd t -fmt '+fmt)\n os.system('eqarea_magic.py -sav -crd t -fmt '+fmt)\n elif len(SiteDIs) > 0 and 'tilt_correction' not in SiteDIs[0].keys():\n print('eqarea_magic.py -sav -fmt '+fmt)\n os.system('eqarea_magic.py -sav -fmt '+fmt)\n else:\n SiteDIs_g = pmag.get_dictitem(\n SiteDIs, 'tilt_correction', '0', 'T') # geographic coordinates\n if len(SiteDIs_g) > 0:\n print('eqarea_magic.py -sav -crd g -fmt '+fmt)\n os.system('eqarea_magic.py -sav -crd g -fmt '+fmt)\n else:\n SiteDIs_s = pmag.get_dictitem(\n SiteDIs, 'tilt_correction', '-1', 'T') # sample coordinates\n if len(SiteDIs_s) > 0:\n print('eqarea_magic.py -sav -crd s -fmt '+fmt)\n os.system('eqarea_magic.py -sav -crd s -fmt '+fmt)\n else:\n SiteDIs_x = pmag.get_dictitem(\n SiteDIs, 'tilt_correction', '', 'T') # no coordinates\n if len(SiteDIs_x) > 0:\n print('eqarea_magic.py -sav -fmt '+fmt)\n os.system('eqarea_magic.py -sav -fmt '+fmt)\n print('working on pmag_results VGP map')\n VGPs = pmag.get_dictitem(\n SiteDIs, 'vgp_lat', \"\", 'F') # are there any VGPs?\n if len(VGPs) > 0: # YES!\n os.system(\n 'vgpmap_magic.py -prj moll -res c -sym ro 5 -sav -fmt png')\n print('working on pmag_results intensities')\n os.system(\n 'magic_select.py -f pmag_results.txt -key data_type i T -F tmp.txt')\n os.system(\n 'magic_select.py -f tmp.txt -key average_int 0. has -F tmp1.txt')\n os.system(\n \"grab_magic_key.py -f tmp1.txt -key average_int | awk '{print $1*1e6}' >tmp2.txt\")\n data, file_type = pmag.magic_read('tmp1.txt') # read in data\n locations = pmag.get_dictkey(data, 'er_location_names', \"\")\n histfile = 'LO:_'+locations[0]+'_intensities_histogram:_.'+fmt\n os.system(\n \"histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f tmp2.txt -F \" + histfile)\n print(\n \"histplot.py -b 1 -xlab 'Intensity (uT)' -sav -f tmp2.txt -F \" + histfile)\n os.system('rm tmp*.txt')\n if 'rmag_hysteresis.txt' in filelist: # start with measurement data\n print('working on rmag_hysteresis')\n data, file_type = pmag.magic_read(\n 'rmag_hysteresis.txt') # read in data\n if loc == './':\n # get all the blank location names from data file\n data = pmag.get_dictitem(data, 'er_location_name', '', 'T')\n hdata = pmag.get_dictitem(data, 'hysteresis_bcr', '', 'F')\n hdata = pmag.get_dictitem(hdata, 'hysteresis_mr_moment', '', 'F')\n hdata = pmag.get_dictitem(hdata, 'hysteresis_ms_moment', '', 'F')\n # there are data for a dayplot\n hdata = pmag.get_dictitem(hdata, 'hysteresis_bc', '', 'F')\n if len(hdata) > 0:\n print('dayplot_magic.py -sav -fmt '+fmt)\n os.system('dayplot_magic.py -sav -fmt '+fmt)\n # if 'er_sites.txt' in filelist: # start with measurement data\n # print 'working on er_sites'\n #os.system('basemap_magic.py -sav -fmt '+fmt)\n if 'rmag_anisotropy.txt' in filelist: # do anisotropy plots if possible\n print('working on rmag_anisotropy')\n data, file_type = pmag.magic_read(\n 'rmag_anisotropy.txt') # read in data\n if loc == './':\n # get all the blank location names from data file\n data = pmag.get_dictitem(data, 'er_location_name', '', 'T')\n # get specimen coordinates\n sdata = pmag.get_dictitem(\n data, 'anisotropy_tilt_correction', '-1', 'T')\n # get specimen coordinates\n gdata = pmag.get_dictitem(\n data, 'anisotropy_tilt_correction', '0', 'T')\n # get specimen coordinates\n tdata = pmag.get_dictitem(\n data, 
'anisotropy_tilt_correction', '100', 'T')\n if len(sdata) > 3:\n print('aniso_magic.py -x -B -crd s -sav -fmt '+fmt)\n os.system('aniso_magic.py -x -B -crd s -sav -fmt '+fmt)\n if len(gdata) > 3:\n os.system('aniso_magic.py -x -B -crd g -sav -fmt '+fmt)\n if len(tdata) > 3:\n os.system('aniso_magic.py -x -B -crd t -sav -fmt '+fmt)\n if loc != './':\n os.chdir('..') # change working directories to each location", "def main():\n\n allowed_parameters = ('e-the', 'e-phi', 'l-the', 'l-phi', 'w-ang')\n\n phits_default = 'phits'\n convert_default = 'convert'\n if os.name in ( 'nt', 'os2' ): \n phits_default = 'c:/phits/bin/phits_c.exe' # !!! should use just phits.exe here and set correct PATH\n convert_default = 'convert.exe'\n\n parser = argparse.ArgumentParser(description=main.__doc__, epilog=\"Homepage: https://github.com/kbat/mc-tools\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('parameter', type=str, help='Parameter to rotate', choices=allowed_parameters) #, metavar='(e-the|e-phi)')\n parser.add_argument('nimages', type=int, default='10', help='Number of images per full revolution (360 deg) of selected parameter')\n parser.add_argument('input_file', type=argparse.FileType('rt'), help='PHITS input file')\n parser.add_argument('-o', type=str, dest='outname', help='If specified, the produced images will be merged in the animated GIF file with this name.', required=False)\n parser.add_argument('-copt', type=str, dest='copt', help='Options passed to the convert tool used to convert from EPS to GIF. See the \"Format conversion\" section of the ImageMagick manual: http://www.imagemagick.org', required=False, default='-rotate 90 -density 100x100')\n parser.add_argument('-aopt', type=str, dest='aopt', help='Options passed to the convert tool used to produce the animated GIF file. 
See the \"Format conversion\" section of the ImageMagick manual for details: http://www.imagemagick.org', required=False, default='-delay 5 -dispose background')\n parser.add_argument('-epsname', type=str, dest='epsname', help='Name of the EPS file produced by the [t-3dshow] tally', required=False, default='3dshow.eps')\n parser.add_argument('-phits', type=str, dest='phits', help='PHITS executable', required=False, default=phits_default)\n parser.add_argument('-convert', type=str, dest='convert', help='ImageMagic\\'s convert executable', required=False, default=convert_default)\n parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbosity', help='Explain what is being done')\n\n arguments = parser.parse_args()\n tmpdir = 'rotate3d-gifs' # should not be allowed as an argument since this folder is being purged by the script (a user can set an existing folder here)\n tmpinp = os.path.join(tmpdir, 'panimate.phits')\n\n input_data = arguments.input_file.readlines() # open(fname_in).readlines()\n \n output_data = []\n\n angle0 = 0\n angle = 0\n angleStep = 360/arguments.nimages # [deg]\n isFirst = True\n\n if not os.path.exists(tmpdir): \n os.mkdir(tmpdir)\n else: # remove the .gif files from tmpdir\n for the_file in os.listdir(tmpdir):\n file_path = os.path.join(tmpdir, the_file)\n try:\n if os.path.isfile(file_path) and file_path[-4:] == '.gif':\n os.unlink(file_path)\n except (Exception, e):\n print (e)\n\n\n for istep in range(arguments.nimages):\n isFound = False # True if arguments.parameter is found\n del output_data[:]\n for i, line in enumerate(input_data):\n words = line.split()\n if len(words)>=3:\n if words[0] == 'icntl':\n words[2] = str(11) # 3dshow\n line = \" \".join(words) + '\\n'\n\n if isFirst: # get the value of initial angle\n if words[0] == arguments.parameter:\n angle = float(words[2])\n angle0 = angle # the value of the parameter set in the input file. 
we will start rotation from this position\n isFirst = False\n isFound = True\n else: # rotate\n if words[0] == arguments.parameter:\n angle = angle0+istep*angleStep\n words[2] = str(angle)\n line = \" \".join(words) + '\\n'\n isFound = True\n \n output_data.append(line)\n if not isFound:\n print(\"ERROR: '%s' not found in the input file.\" % arguments.parameter) # file=sys.stderr\n return 1\n\n tmp_file = open(tmpinp, 'w+')\n for line in output_data:\n tmp_file.write(line)\n tmp_file.close()\n command = \"%s < %s\" % (arguments.phits, tmpinp)\n if arguments.verbosity: print ( \"Producing the image at %.0f deg:\\t%s\" % (angle, command) )\n os.system(command)\n# os.system(\"grep -iH error $(ls -1rt |tail -1)\") # check the output file for the errors\n gifname = os.path.join(tmpdir, \"%.3d.gif\" % istep)\n command = \"%s %s %s %s\" % (arguments.convert, arguments.copt, arguments.epsname, gifname)\n if arguments.verbosity: print ( command )\n os.system(command)\n if not os.path.isfile(arguments.epsname) or not os.path.isfile(gifname): return 2\n\n if arguments.verbosity: print ( 'The output files are in %s' % tmpdir )\n\n if arguments.outname:\n if arguments.outname[-4:] == '.gif':\n command = '%s %s %s/*.gif %s' % (arguments.convert, arguments.aopt, tmpdir, arguments.outname)\n if arguments.verbosity:\n print ( \"Generating the gif-animation file '%s': %s\" % (arguments.outname, command) )\n os.system(command)\n\n return 0", "def load_c3d(self):\n\n print(\"Loading c3d..\")\n\n basic_filter = \"*.c3d\"\n last_directory = pm.optionVar(q=\"lastPeelC3dDir\")\n\n # open file browser\n self.c3d_files = pm.fileDialog2(fm=4, fileFilter=basic_filter, dialogStyle=2, dir=last_directory)\n\n print(\"Found these c3d..\", self.c3d_files)\n\n if self.c3d_files is None or len(self.c3d_files) == 0:\n return\n\n self.populate_c3d_table()", "def main():\n parser = argparse.ArgumentParser(\n description='Create tonella mp3s ready for sd card')\n parser.add_argument('path_to_yaml_file',\n help='yaml file with figurine config',\n nargs=\"+\")\n parser.add_argument('--path_src',\n help='folder where source mp3s are cached',\n default=\".\")\n parser.add_argument('--path_dest',\n help='folder where final mp3s are saved',\n default=\".\")\n\n args = parser.parse_args()\n\n for yaml_file in args.path_to_yaml_file:\n print(\"process %s\" % yaml_file)\n create_mp3s(yaml_file, args.path_src, args.path_dest)", "def main():\n\tif len(sys.argv) == 3:\n\t\tfilename = sys.argv[1]\n\t\tthreshold = int(sys.argv[2])\n\t\tcollection = atom.AtomCollection(filename, atom.Atom.INPUT_MARK_SUR)\n\t\tpatches = collection.get_surface_patches(threshold)\n\t\tfor atom_collection in patches:\n\t\t\tatom_collection.write_to_file()\n\telse:\n\t\tprint \"Usage: \" + sys.argv[0] + \" <mark_sur pdb> <distance threshold>\"", "def main(_, infile, outdir, params):\n outname_template = join(outdir, params['suffix'] + '_%Y%m%d%H.nc')\n\n dimpairs = [('lev', # name in src\n 'level'), # name in dst\n ('lat',\n 'lat'),\n ('lon',\n 'lon'),\n ('ilev',\n 'ilev')]\n\n dim_copiers = [DimensionCopier(src_name, dst_name)\n for src_name, dst_name in dimpairs]\n\n varpairs_to_copy = [(['CH3CHO_VMR_inst', 'GLYALD_VMR_inst'],\n 'ALD'),\n ('CO_VMR_inst', # name in src, lists added toghether\n 'CO'), # name in dst\n ('CRESOL_VMR_inst',\n 'CSL'),\n ('C2H6_VMR_inst',\n 'ETH'),\n ('GLYOXAL_VMR_inst',\n 'GLY'),\n ('H2O2_VMR_inst',\n 'H2O2'),\n ('C3H8_VMR_inst',\n 'HC3'),\n ('HNO3_VMR_inst',\n 'HNO3'),\n ('BIGALK_VMR_inst',\n 'HC5'),\n ('CH2O_VMR_inst',\n 'HCHO'),\n 
('HO2NO2_VMR_inst',\n 'HNO4'),\n ('HO2_VMR_inst',\n 'HO2'),\n ('ISOP_VMR_inst',\n 'ISO'),\n (['CH3COCH3_VMR_inst',\n 'HYAC_VMR_inst',\n 'MEK_VMR_inst'],\n 'KET'),\n (['MVK_VMR_inst', 'MACR_VMR_inst'],\n 'MACR'),\n ('CH3COCHO_VMR_inst',\n 'MGLY'),\n ('MPAN_VMR_inst',\n 'MPAN'),\n ('N2O5_VMR_inst',\n 'N2O5'),\n ('NH3_VMR_inst',\n 'NH3'),\n ('NO_VMR_inst',\n 'NO'),\n ('NO2_VMR_inst',\n 'NO2'),\n ('NO3_VMR_inst',\n 'NO3'),\n ('OH_VMR_inst',\n 'OH'),\n ('C2H4_VMR_inst',\n 'OL2'),\n ('ONIT_VMR_inst',\n 'ONIT'),\n ('CH3OOH_VMR_inst',\n 'OP1'),\n ('C2H5OOH_VMR_inst',\n 'OP2'),\n ('CH3COOH_VMR_inst',\n 'ORA2'),\n ('O3_VMR_inst',\n 'OZONE'),\n ('CH3COOOH_VMR_inst',\n 'PAA'),\n ('PAN_VMR_inst',\n 'PAN'),\n ('SO2_VMR_inst',\n 'SO2'),\n ('T',\n 'T'),\n ('TOLUENE_VMR_inst',\n 'TOL'),\n ('DUST1',\n 'VSOILA'),\n ('DUST2',\n 'VSOILB'),\n ('DUST3',\n 'VSOILC')]\n\n varpairs_to_copy_dimchange = [('NH4_VMR_inst',\n 'VNH4Jm'),\n (['OC1_VMR_inst', 'OC2_VMR_inst'],\n 'VORG1Jm'),\n ('SO4_VMR_inst',\n 'VSO4Jm'),\n (['CB1_VMR_inst', 'CB2_VMR_inst'],\n 'VSOOTJ')]\n\n for time_index in range(Dataset(infile).dimensions['time'].size):\n # Have to give dimensions explicitly because 'lev' changes to 'level'\n # Have to give var_val_indices explicitly because we only copy one\n # time index\n spacial_variable_options = {'var_args': {'dimensions': ('time',\n 'level',\n 'lat',\n 'lon')},\n 'var_val_indices': np.s_[time_index, :]}\n\n # 3D variables that simply get copied\n var_opts = [{'src_names': src,\n 'dst_name': dst,\n **spacial_variable_options}\n for src, dst in varpairs_to_copy]\n\n # 3D variables with dimchange to mol/mol\n var_opts += [{'src_names': src,\n 'dst_name': dst,\n 'var_attrs': {'units': 'mol/mol'},\n **spacial_variable_options}\n for src, dst in varpairs_to_copy_dimchange]\n\n # Others\n var_opts += [{'src_names': 'lat',\n 'dst_name': 'lat'},\n {'src_names': 'lev',\n 'dst_name': 'level',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'lon',\n 'dst_name': 'lon'},\n {'src_names': 'P0',\n 'dst_name': 'P0'},\n {'src_names': 'PS',\n 'dst_name': 'PSURF',\n 'var_args': {'dimensions': ('time', 'lat', 'lon')},\n 'var_val_indices': np.s_[time_index, :]},\n {'src_names': 'hyam',\n 'dst_name': 'hyam',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'hybm',\n 'dst_name': 'hybm',\n 'var_args': {'dimensions': ('level', )}},\n {'src_names': 'ilev',\n 'dst_name': 'ilev'}]\n\n var_copiers = [VariableCopier(**kwargs)\n for kwargs in var_opts]\n\n extract_data(infile,\n time_index,\n dim_copiers,\n var_copiers,\n outname_template)", "def test_cli_describes_model_file(monkeypatch):\n params = [\"overreact\", \"data/ethane/B97-3c/model.jk\"]\n monkeypatch.setattr(\"sys.argv\", params)\n cli.main()", "def run(self, args):\n print('kubic-ci project ({})'.format(self.get_dotci3_path()))", "def main(_):\n # Construct list containing input images\n images, filenames = build_list_from_filepaths(FLAGS.input_dir)\n # Perform augmentation\n batch_augment(images, filenames, 5)", "def main():\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n # initialize variables from command line + defaults\n FIG = {} # plot dictionary\n FIG['demag'] = 1 # demag is figure 1\n in_file = pmag.get_named_arg_from_sys(\"-f\", default_val=\"measurements.txt\")\n plot_by = pmag.get_named_arg_from_sys(\"-obj\", default_val=\"loc\")\n name_dict = {'loc': 'location', 'sit': 'site',\n 'sam': 'sample', 'spc': 'specimen'}\n plot_key = name_dict[plot_by]\n LT = \"LT-\" + pmag.get_named_arg_from_sys(\"-LT\", \"AF\") + \"-Z\"\n if 
LT == \"LT-T-Z\":\n units, dmag_key = 'K', 'treat_temp'\n elif LT == \"LT-AF-Z\":\n units, dmag_key = 'T', 'treat_ac_field'\n elif LT == 'LT-M-Z':\n units, dmag_key = 'J', 'treat_mw_energy'\n else:\n units = 'U'\n no_norm = pmag.get_flag_arg_from_sys(\"-N\")\n norm = 0 if no_norm else 1\n no_plot = pmag.get_flag_arg_from_sys(\"-sav\")\n plot = 0 if no_plot else 1\n fmt = pmag.get_named_arg_from_sys(\"-fmt\", \"svg\")\n XLP = pmag.get_named_arg_from_sys(\"-XLP\", \"\")\n dir_path = pmag.get_named_arg_from_sys(\"-WD\", os.getcwd())\n spec_file = pmag.get_named_arg_from_sys(\"-fsp\", default_val=\"specimens.txt\")\n samp_file = pmag.get_named_arg_from_sys(\"-fsa\", default_val=\"samples.txt\")\n site_file = pmag.get_named_arg_from_sys(\"-fsi\", default_val=\"sites.txt\")\n\n # create contribution and add required headers\n fnames = {\"specimens\": spec_file, \"samples\": samp_file, 'sites': site_file}\n contribution = nb.Contribution(dir_path, single_file=in_file,\n custom_filenames=fnames)\n file_type = list(contribution.tables.keys())[0]\n print(len(contribution.tables['measurements'].df), ' records read from ', in_file)\n # add plot_key into measurements table\n if plot_key not in contribution.tables['measurements'].df.columns:\n contribution.propagate_name_down(plot_key, 'measurements')\n data_container = contribution.tables[file_type]\n # pare down to only records with useful data\n # grab records that have the requested code\n data_slice = data_container.get_records_for_code(LT)\n # and don't have the offending code\n data = data_container.get_records_for_code(XLP, incl=False, use_slice=True,\n sli=data_slice, strict_match=False)\n\n # make sure quality is in the dataframe\n if 'quality' not in data.columns:\n data['quality'] = 'g'\n # get intensity key and make sure intensity data is not blank\n intlist = ['magn_moment', 'magn_volume', 'magn_mass']\n IntMeths = [col_name for col_name in data.columns if col_name in intlist]\n # get rid of any entirely blank intensity columns\n for col_name in IntMeths:\n if not data[col_name].any():\n data.drop(col_name, axis=1, inplace=True)\n IntMeths = [col_name for col_name in data.columns if col_name in intlist]\n if len(IntMeths) == 0:\n print('No intensity headers found')\n sys.exit()\n\n int_key = IntMeths[0] # plot first intensity method found - normalized to initial value anyway - doesn't matter which used\n data = data[data[int_key].notnull()]\n # make list of individual plots\n # by default, will be by location_name\n plotlist = data[plot_key].unique()\n plotlist.sort()\n pmagplotlib.plot_init(FIG['demag'], 5, 5)\n # iterate through and plot the data\n for plt in plotlist:\n plot_data = data[data[plot_key] == plt].copy()\n if plot:\n print(plt, 'plotting by: ', plot_key)\n if len(plot_data) > 2:\n title = plt\n spcs = []\n spcs = plot_data['specimen'].unique()\n for spc in spcs:\n INTblock = []\n spec_data = plot_data[plot_data['specimen'] == spc]\n for ind, rec in spec_data.iterrows():\n INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, rec['quality']])\n if len(INTblock) > 2:\n pmagplotlib.plotMT(FIG['demag'], INTblock,\n title, 0, units, norm)\n\n if not plot:\n files = {}\n for key in list(FIG.keys()):\n files[key] = title + '_' + LT + '.' 
+ fmt\n pmagplotlib.saveP(FIG, files)\n #sys.exit()\n else:\n pmagplotlib.drawFIGS(FIG)\n prompt = \" S[a]ve to save plot, [q]uit, Return to continue: \"\n ans = input(prompt)\n if ans == 'q':\n sys.exit()\n if ans == \"a\":\n files = {}\n for key in list(FIG.keys()):\n files[key] = title + '_' + LT + '.' + fmt\n pmagplotlib.saveP(FIG, files)\n pmagplotlib.clearFIG(FIG['demag'])", "def main():\n parser = argparse.ArgumentParser(description=\"Get points clouds and save them.\")\n parser.add_argument(\"save_dir\", help=\"Save destination dir.\")\n args = parser.parse_args()\n\n camera_data = CameraData()\n run_inference = RunInference()\n camera_visualizer = CameraVisualizer()\n\n camera_data.start()\n\n while True:\n color = camera_data.color\n depth = camera_data.depth\n\n if not color or not depth:\n continue\n\n color_image, depth_data, pred_mask, pred_mask_colors, depth_data_array = \\\n run_inference.run_inference(color, depth, True)\n\n camera_visualizer.visualize(color_image,\n depth_data,\n pred_mask_colors,\n CLASS_LABELS_AND_COLORS,\n pred_mask,\n depth_data_array,\n args.save_dir)", "def main(args):\n from glob import glob\n from os.path import join, basename, exists\n from os import makedirs\n import numpy as np\n import shutil\n import sys\n\n if not exists(args.output):\n makedirs(args.output)\n\n # Facial encodings\n image_paths = glob(join(args.input, '*.jpg'))\n\n if len(image_paths) == 0:\n print \"No jpg images found in {}, exiting...\".format(args.input)\n sys.exit(0)\n\n facial_encodings = compute_facial_encodings(image_paths)\n\n # Save facial encodings\n with open(join(args.output, 'facial_encodings.npy'), 'w') as outfile:\n np.save(outfile, facial_encodings)\n\n # Compute facial clusters, return as sorted\n sorted_clusters = cluster_facial_encodings(facial_encodings)\n\n print \"Created {} clusters:\".format(len(sorted_clusters))\n for idx, cluster in enumerate(sorted_clusters):\n print \" - cluster {} size {}\".format(idx, len(cluster))\n\n # Save clusters\n with open(join(args.output, 'facial_clusters.npy'), 'w') as outfile:\n np.save(outfile, sorted_clusters)\n\n # Copy image files to cluster folders\n for idx, cluster in enumerate(sorted_clusters):\n cluster_dir = join(args.output, str(idx).zfill(4))\n if not exists(cluster_dir):\n makedirs(cluster_dir)\n for path in cluster:\n shutil.copy(path, join(cluster_dir, basename(path)))\n\n print \"Saved results to {}\".format(args.output)", "def main(args):\n mnist = load_mnist(val_seed=123)\n\n if not args.no_training:\n train_network(mnist, 1, args.outdir)\n train_network(mnist, 2, args.outdir)\n\n model1 = load_network(1, args.outdir)\n model2 = load_network(2, args.outdir)\n encoder1, decoder1 = split_network(model1)\n encoder2, decoder2 = split_network(model2)\n\n create_montage(mnist, model1, model2, args.outdir)\n create_scatter(mnist, encoder1, decoder1, args.outdir)\n do_experiment_on_model1_rules(decoder1, args.outdir)\n do_experiment_on_model2_rules(mnist, encoder2, decoder2, args.outdir)", "def cli(\n folder_location, index_file_dir, hdu_index_file, obs_index_file, recreate, debug\n):\n if folder_location is None:\n click.secho(\n \"No DL3 fits files path specified. \"\n \"Assume the current \"\n \"folder is the location of DL3 files.\",\n fg=\"yellow\",\n )\n folder_location = os.getcwd()\n if index_file_dir is None:\n click.secho(\n \"No output path specified. 
\"\n \"Assume the current folder is the output path.\",\n fg=\"yellow\",\n )\n index_file_dir = os.getcwd()\n\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n if not recreate:\n if os.path.exists(f\"{index_file_dir}/obs-index.fits.gz\"):\n logging.info(f\"Existing {index_file_dir}/obs-index.fits.gz\")\n logging.info(\"Remove before continuing or use recreate option -r\")\n return\n if os.path.exists(f\"{index_file_dir}/hdu-index.fits.gz\"):\n logging.info(f\"Existing {index_file_dir}/hdu-index.fits.gz\")\n logging.info(\"Remove before continuing or use recreate option -r\")\n return\n\n logging.debug(\"Start by searching all DL3 files in:\\n{}\".format(folder_location))\n\n __fits_files = [\n _file[:-1] for _file in list(os.popen(f\"ls {folder_location}/[0-9]*.fits*\"))\n ]\n print(len(__fits_files))\n if len(__fits_files) == 0:\n logging.info(\"No FITS files found, trying Eventdisplay-style DL3 archive folder.\")\n __fits_files = [\n _file[:-1] for _file in list(os.popen(f\"ls {folder_location}/[0-9]*/[0-9]*.fits*\"))\n ]\n fits_files = [\n f\n for f in __fits_files\n if f.find(obs_index_file) == -1 and f.find(hdu_index_file) == -1\n ]\n if not fits_files:\n logging.error(\"No fits files found\")\n return\n\n logging.info(\"Found the following fits files:\")\n for f in fits_files:\n logging.info(\" -> {0}\".format(f))\n\n logging.info(\n f\"Generating index files {index_file_dir}/obs-index.fits.gz and {index_file_dir}/hdu-index.fits.gz\"\n )\n create_obs_hdu_index_file(\n fits_files, index_file_dir, hdu_index_file, obs_index_file\n )", "def makeCat(args):\n\n descr_text = 'Create merged catalog of sources from multiple tables'\n parser = argparse.ArgumentParser(description=descr_text)\n parser.add_argument('repoDir', help='Name of repository directory')\n parser.add_argument('visits', help='Visit selector')\n parser.add_argument('-c', '--ColFile', help='File of space-separated columns of interest')\n args = parser.parse_args()\n\n visits = args.visits\n if args.ColFile is None:\n cols = DEFAULT_COLS\n else:\n with open(args.ColFile, 'rb') as f:\n # The file of column names should contain only one row.\n for row in csv.reader(f, delimiter=' '):\n cols = row\n\n print '#' + ' '.join(cols) + ' filter'\n\n butler = dafPersist.Butler(args.repoDir)\n# vList = [dict(visit=int(v)) for v in visits.split('^')]\n vList = []\n for v in visits.split(\"^\"):\n mat = re.search(r\"^(\\d+)\\.\\.(\\d+)(?::(\\d+))?$\", v)\n if mat:\n v1 = int(mat.group(1))\n v2 = int(mat.group(2))\n v3 = mat.group(3); v3 = int(v3) if v3 else 1\n for v in range(v1, v2 + 1, v3):\n vList.append(dict(visit=v))\n else:\n vList.append(dict(visit=int(v)))\n \n for dataId in (vList):\n if not butler.datasetExists(\"src\", **dataId):\n continue\n\n srcs = butler.get(\"src\", **dataId)\n filter = butler.queryMetadata(\"calexp\", \"filter\", visit=dataId['visit'])[0]\n\n vecs = []\n for col in cols:\n if col.endswith(\".ra\") or col.endswith(\".dec\"):\n v = np.rad2deg(srcs.get(col))\n elif re.search(r\"\\.err\\.(xx|yy|xy)$\", col):\n field, which = re.search(r\"^(.*\\.err)\\.(xx|yy|xy)$\", col).groups()\n key = srcs.schema.find(field).key\n key = key[0,0] if which == \"xx\" else key[1,1] if which == \"yy\" else key[0, 1]\n\n v = srcs.get(key)\n else:\n v = srcs.get(col)\n\n vecs.append(v)\n\n for vals in zip(*vecs):\n print ' '.join([str(el) for el in vals]) + ' ' + filter", "def _main(argv):\n flag = flags.FLAGS\n flag.showprefixforinfo = False\n if len(argv) < 3:\n raise 
app.UsageError(\n 'Invalid number of arguments; expected 2 or more, got %d' %\n (len(argv) - 1))\n\n build_graph(argv[1:-1], argv[-1], flag.similarity_threshold,\n flag.id_feature_name, flag.embedding_feature_name)", "def cli(path, name, fnirt_path, output_path, anchors):\n\n # Data validation\n _, ext = os.path.splitext(output_path)\n if ext != \".mat\":\n sys.exit('Need to specify an output path with .mat file name. You provided {0}.'\n .format(output_path))\n if os.path.exists(output_path):\n sys.exit('Output path {0} already exists.'.format(output_path))\n\n # Parse and convert anchor indexes to Python indexes\n anchors = [int(x) - 1 for x in anchors.split(\",\")]\n\n # Load the image file\n data = hdf5storage.loadmat(path)\n if not name in data: # Validate the dataset name\n sys.exit('Dataset not found in file at {0}.'.format(path))\n\n # Load the dataset\n img = data[name]\n img = np.absolute(data[name])\n # img = img / img.flatten().max() * 100 # FSL likes signal intensities 0–100\n\n # Create a temporary directory and do the work\n with tf.TemporaryDirectory() as tmp_path:\n # Convert .mat to .nii\n mat_to_nii(img, tmp_path)\n # Register the data\n reg_data(fnirt_path, tmp_path, anchors, img.shape[3])\n # Load the registered data\n reg_img = load_reg_vols(tmp_path, anchors, img.shape)\n # Save the registered data\n hdf5storage.savemat(output_path, {\"registeredImages\": reg_img})", "def write_3d_output():\n changes = 0\n no_changes = \"\\n No updates to be made after running the rfam-3d-seed-alignments script.\\n\"\n with open(add_3d_git_output, \"r\") as output:\n contents = output.readlines()\n for line in contents:\n if \"data/output\" in line:\n changes += 1\n line = line.replace(\"data/output/\", \"\")\n line = line.replace(\".sto\", \"\")\n if \"A\" in line:\n added = \"The following families have newly added 3D information: \\n\"\n line = line.replace(\"A\", \"\")\n added += line\n elif \"M\" in line:\n modified = \"\\nThe following families have been updated with 3D information: \\n\"\n line = line.replace(\"M\", \"\")\n modified += line\n\n today_date = str(datetime.date.today())\n pdb_txt = \"{dir}/pdb_families_{date}.txt\".format(dir=pdb_files, date=today_date)\n\n with open(pdb_txt, \"a\") as pdb_file:\n if changes == 0:\n pdb_file.write(no_changes + \"\\n\")\n else:\n if modified:\n pdb_file.write(modified + \"\\n\")\n if added:\n pdb_file.write(added + \"\\n\")", "def _main():\r\n\r\n print('Enabling required add-ons (better collada, fbx, rigify)')\r\n _enable_required_plugins()\r\n\r\n cwd = os.getcwd()\r\n print('Base path: \\033[94m' + cwd + '\\033[0m')\r\n\r\n argv = sys.argv\r\n argv = argv[argv.index(\"--\") + 1:] # get all args after \"--\"\r\n\r\n # Target path for the .fbx we're going to export\r\n outpath = argv[0]\r\n print('Output path: \\033[94m' + outpath + '\\033[0m')\r\n\r\n # Meshes that should be exported\r\n export_masks = argv[1:]\r\n\r\n _make_all_layers_visible() # meshes might be on different layers\r\n meshes_to_export = _get_meshes_matching_masks(export_masks)\r\n\r\n print('Selected meshes:')\r\n for mesh in meshes_to_export:\r\n print('\\t\\033[94m' + mesh.name + '\\033[0m')\r\n\r\n if len(meshes_to_export) == 0:\r\n print('\\t\\033[93mWARNING: No meshes matching any of the specified names/wildcards\\033[0m')\r\n\r\n _clear_selection() # clear the saved selection\r\n _select_meshes(meshes_to_export)\r\n\r\n # Apply all modifiers (the Collada exporter has such an option, but it's broken)\r\n print('Applying modifiers:')\r\n for mesh in 
meshes_to_export:\r\n print('\\t\\033[94m' + mesh.name + '\\033[0m')\r\n _apply_all_modifiers_except_armature(mesh);\r\n\r\n # If any masks didn't have matches, tell the user\r\n _print_masks_not_matching_masks(export_masks)\r\n\r\n # Figure out which export format the user wants to use\r\n filename, file_extension = os.path.splitext(outpath)\r\n file_extension = file_extension.lower()\r\n\r\n # Export the scene\r\n print('Exporting to \\033[94m' + outpath + '\\033[0m')\r\n if file_extension == '.fbx':\r\n _export_scene_to_fbx(outpath)\r\n elif file_extension == '.dae':\r\n _export_scene_to_collada(outpath)\r\n else:\r\n die('Only FBX and Collada (.dae) are supported at this time')\r\n\r\n print('\\033[92mCompleted!\\033[0m')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the parameter of this ParameterStatusDTO. The name of the Parameter.
def parameter(self, parameter): self._parameter = parameter
[ "def set_parameter(self, parameter):\n self.parameter = parameter", "def set_parameter(self, a_name, a_value):\n self.parameters[str(a_name)] = a_value\n return self", "def setup_parameter(self, parameter, value):\n self.__dict__[parameter] = value", "def set_parameter(cls, param_name, config):\n if config == None:\n if param_name in cls.parameters:\n del cls.parameters[param_name]\n else:\n cls.parameters[param_name] = config", "def set_param(self, name, value):\n param = self._find_param(name)\n if param is not None:\n try:\n param.value = value\n except:\n pass\n else:\n return True\n return False", "def setParameterDescriptor(self, parameterDescriptor: cern.japc.core.ParameterDescriptor) -> None:\n ...", "def name(self, name_):\n if name_ is None:\n name_ = PARAMETER_NAME_DEFAULT\n elif isinstance(name_, str):\n name_ = name_.strip()\n if name_ == '':\n name_ = PARAMETER_NAME_DEFAULT\n if len(name_) > PARAMETER_NAME_PREFIX_MAX_LEN:\n raise ValueError(\"The length of the '{}' name should be less than {}.\".\n format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))\n else:\n raise ValueError(\"The type of the Parameter's name should be 'string' or 'None', \"\n \"but got {}.\".format(type(name_)))\n\n if _is_role_worker() and self.cache_enable:\n _reinsert_hash_table_size(name_, self.param_info.name)\n self.param_info.name = name_", "def set_parambyname(self, name, value):\n if not name in self.set_param_map.keys():\n msg = 'Object {type} does not have parameter {param}'.format(type=self.__class__, param=name)\n raise KeyError(msg)\n else:\n self.set_param_map[name](value)", "def set_parameter(self, parameter, value):\n if not self.data:\n self.data = {}\n\n self.data[parameter.argument] = (\n parameter.name,\n parameter.model,\n value.pk,\n )\n\n self.save()", "def set_default_parameter_name(self, name):\n if 'parameter_name' not in self.attr:\n self.attr['parameter_name'] = name", "def setValue(self, parameterValue: cern.japc.value.ParameterValue) -> None:\n ...", "def set_hyper_parameter(self, name: str, value) -> None:\n self._hyper_parameters[name] = value\n return", "def setparam(self, param, value):\n\t\treturn self.__command(\"param.set %s %s\" % (param, value))", "def set_parameter(self, parameter: str, component: str, value: dict, *, vhost: str = None):\n vhost = vhost if vhost is not None else self.vhost\n endpoint = self.build_url(\"/parameters/{component}/{vhost}/{parameter}\",\n component=component, vhost=vhost, parameter=parameter)\n return self.request('put', endpoint, data=value)", "def replace_with_parameter(\n self, name: T.Union[int, str], param: Parameter\n ):\n return replace_with_parameter(self, name, param)", "def parameter(self, identifier, name, value, parameterDescription=None, other_attributes=None):\n if parameterDescription is not None:\n self.description(identifier, parameterDescription)\n return self.new_record(VOPROV_CONFIGURATION_PARAMETER , identifier, {\n VOPROV_ATTR_NAME: name,\n VOPROV_ATTR_VALUE: value,\n PROV_LABEL: name + \" = \" + value\n }, other_attributes)", "def setStagedParameter(self, value, parameter, leg=0):\n if parameter in self.staged[__tp__][leg]:\n self.staged[__tp__][leg][parameter] = value\n else:\n raise ValueError", "def set_evaluation_parameter(self, parameter_name, parameter_value):\n self._expectation_suite.evaluation_parameters.update(\n {parameter_name: parameter_value}\n )", "def setNameAttribute(self, name: 'char const *') -> \"void\":\n return _coin.ScXMLParamElt_setNameAttribute(self, name)", "def set_from_value(self, name, 
value):\n retval = False\n\n if name not in self._param_dict:\n raise InstrumentParameterException(\n 'Unable to set parameter %s to %s: parameter %s not an dictionary' % (name, value, name))\n\n if ((self._param_dict[name].value.f_format == NortekProtocolParameterDict.word_to_string) or\n (self._param_dict[name].value.f_format == NortekProtocolParameterDict.double_word_to_string)):\n if not isinstance(value, int):\n raise InstrumentParameterException(\n 'Unable to set parameter %s to %s: value not an integer' % (name, value))\n elif self._param_dict[name].value.f_format == NortekProtocolParameterDict.convert_datetime_to_words:\n if not isinstance(value, list):\n raise InstrumentParameterException('Unable to set parameter %s to %s: value not a list' % (name, value))\n\n if value != self._param_dict[name].value.get_value():\n log.debug(\"old value: %s, new value: %s\", self._param_dict[name].value.get_value(), value)\n retval = True\n self._param_dict[name].value.set_value(value)\n\n return retval" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a 3d plot of the given 3d points.
def plot_3d(pts): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') xs, ys, zs = zip(*pts) ax.scatter(xs, ys, zs, c='r', marker='o') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.show()
[ "def plot_3d(x, y, z, df, cmap = plt.cm.seismic_r):\n\n fig = plt.figure(figsize = (10, 10))\n \n ax = fig.add_subplot(111, projection='3d')\n \n # 3d scatterplot\n ax.scatter(df[x], df[y],\n df[z], c = df[z], \n cmap = cmap, s = 40)\n\n # Plot labeling\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n ax.set_zlabel(z)\n\n plt.title('{} as function of {} and {}'.format(\n z, x, y), size = 18);", "def draw(xs,ys,zs):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n xs,ys = np.meshgrid(xs,ys)\n ax.plot_surface(xs,ys,zs)\n plt.show()", "def scatter3D(X, Y, Z):\n print('Plot in 3D...')\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X, Y, Z, c=np.abs(Z), cmap=cm.coolwarm)\n ax.set_xlabel('M (slope)')\n ax.set_ylabel('B (intercept)')\n ax.set_zlabel('Z Label')\n plt.show()", "def point_3d(self, x, y, z):\n self._point_3d(x, y, z)", "def plot_triangle_mesh_3d(triangles, vertices, *args, **kwargs):\n fig = kwargs.pop(\"fig\", None)\n unique = (fig is None)\n if fig is None:\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_trisurf(vertices[:, 0], vertices[:, 1], triangles, vertices[:, 2], *args, **kwargs)\n if unique:\n plt.show()", "def plot_3D(self, with_triangulation=False):\n plotly = ensure_plotly()\n\n plots = []\n\n vertices = self.tri.vertices\n if with_triangulation:\n Xe, Ye, Ze = [], [], []\n for simplex in self.tri.simplices:\n for s in itertools.combinations(simplex, 2):\n Xe += [vertices[i][0] for i in s] + [None]\n Ye += [vertices[i][1] for i in s] + [None]\n Ze += [vertices[i][2] for i in s] + [None]\n\n plots.append(plotly.graph_objs.Scatter3d(\n x=Xe, y=Ye, z=Ze, mode='lines',\n line=dict(color='rgb(125,125,125)', width=1),\n hoverinfo='none'\n ))\n\n Xn, Yn, Zn = zip(*vertices)\n colors = [self.data[p] for p in self.tri.vertices]\n marker = dict(symbol='circle', size=3, color=colors,\n colorscale='Viridis',\n line=dict(color='rgb(50,50,50)', width=0.5))\n\n plots.append(plotly.graph_objs.Scatter3d(\n x=Xn, y=Yn, z=Zn, mode='markers',\n name='actors', marker=marker,\n hoverinfo='text'\n ))\n\n axis = dict(\n showbackground=False,\n showline=False,\n zeroline=False,\n showgrid=False,\n showticklabels=False,\n title='',\n )\n\n layout = plotly.graph_objs.Layout(\n showlegend=False,\n scene=dict(xaxis=axis, yaxis=axis, zaxis=axis),\n margin=dict(t=100),\n hovermode='closest')\n\n fig = plotly.graph_objs.Figure(data=plots, layout=layout)\n\n return plotly.offline.iplot(fig)", "def show_3d(self, colors=None, colormap='gray', scale_factor=0.02, \n display=True):\n \n from enthought.mayavi import mlab\n \n if colors is None:\n colors = self[2,:]\n\n # I want at most 50K points\n stride = 1 + len(self)/50000\n\n pts = self[:,::stride]\n colors = colors[::stride]\n\n # Draw clusters in point cloud\n fig = mlab.figure()\n mlab.points3d(pts[0,:], pts[1,:], pts[2,:], \\\n colors, colormap=colormap, figure=fig, \\\n scale_mode='none', scale_factor=scale_factor)\n\n mlab.view(180,180)\n \n if show:\n mlab.show() \n else:\n return fig", "def plotGlobe3D():", "def ThreeDPositionPlot(self):\r\n try:\r\n numberOfParticles = len(self.LoadSim.Simulation[0])\r\n lengthOfSimulation = len(self.LoadSim.Time)\r\n # creates a list of three lists per particle.\r\n inputData = [[[], [], []] for i in range(numberOfParticles)]\r\n for i in range(lengthOfSimulation):\r\n for j in range(numberOfParticles):\r\n for k in range(3):\r\n inputData[j][k].append(self.LoadSim.Simulation[i][j].position[k])\r\n\r\n fig = plt.figure()\r\n ax = 
fig.gca(projection='3d')\r\n for j in range(numberOfParticles):\r\n ax.plot(inputData[j][0], inputData[j][1], inputData[j][2]\r\n , label='%s'%(self.LoadSim.Simulation[0][j].name))\r\n plt.title(\"Position of particles over time\")\r\n ax.set_xlabel(\"x position (m)\"), ax.set_ylabel(\"y position (m)\"), ax.set_zlabel(\"z position (m)\")\r\n ax.legend()\r\n plt.savefig(\"%s 3D position.jpg\"%(self.fileName))\r\n plt.show()\r\n\r\n except:\r\n AttributeError\r\n print(\"You cannot plot this figure with the data you have provided.\")", "def plt_3d_slices(slice_dict):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection = '3d')\n num_slices = len(slice_dict)\n \n for key in slice_dict:\n intersects = slice_dict[key]\n idx = 0\n while idx < len(intersects):\n if intersects[idx].shape[0] == 1:\n x_vec, y_vec, z_vec = intersects[idx][0][0], intersects[idx][0][1], intersects[idx][0][2]\n ax.scatter(x_vec, y_vec, z_vec, color = 'green', s = 1)\n if intersects[idx].shape[0] == 2:\n x_vec, y_vec, z_vec = [], [], []\n x_points, y_points, z_points = intersects[idx][:,0], intersects[idx][:,1], intersects[idx][:,2] \n x_vec, y_vec, z_vec = np.hstack((x_vec, x_points)), np.hstack((y_vec,y_points)), np.hstack((z_vec, z_points))\n ax.plot(x_vec, y_vec, z_vec ,color = 'green')\n if intersects[idx].shape[0] == 3:\n x_vec, y_vec, z_vec = [], [], []\n x_points, y_points, z_points = intersects[idx][:,0], intersects[idx][:,1], intersects[idx][:,2] \n x_vec, y_vec, z_vec = np.hstack((x_vec, x_points)), np.hstack((y_vec,y_points)), np.hstack((z_vec, z_points))\n ax.plot(x_vec, y_vec, z_vec, color = 'blue')\n idx+=1\n plt.show()", "def show_palpation_point_cloud(data_file):\n\n coords = np.array([])\n\n with open(data_file, 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n coord = np.array([\n float(row[\"arm_position_x\"]),\n float(row[\"arm_position_y\"]),\n float(row[\"arm_position_z\"])\n ])\n coords = np.append(coords, coord)\n\n coords = coords.reshape(-1, 3)\n\n X, Y = np.meshgrid(\n np.arange(\n min(coords[:, 0])-0.05,\n max(coords[:, 0])+0.05,\n 0.05\n ),\n np.arange(\n min(coords[:, 1])-0.05,\n max(coords[:, 1])+0.05,\n 0.05\n )\n )\n\n (A, B, C), error = get_best_fit_plane(coords)\n Z = A*X + B*Y + C\n\n # plot points and fitted surface\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.2)\n ax.scatter(coords[:, 0], coords[:, 1], coords[:, 2], c='r', s=20)\n\n plt.xlabel('X')\n plt.ylabel('Y')\n ax.set_zlabel('Z')\n ax.legend()\n plt.show()", "def plot_scatter_all(dataset, scale_q=1.0): \n \n ax = plt.axes(projection='3d')\n ax.view_init(22, 45)\n for key in dataset.keys():\n X = dataset[key][0]\n u = dataset[key][1]\n ax.scatter3D(X[:,0], X[:,1]*scale_q, u[:,0], color='r')\n plt.xlabel(\"x\")\n plt.ylabel(\"q\")\n plt.title(\"Data\")", "def surface_plot():\n X = np.linspace(-2, 2, 100)\n Y = np.linspace(-1, 3, 100)\n [x, y] = np.meshgrid(X, Y)\n z = h(x, y)\n\n plt.style.use('classic')\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=plt.cm.viridis, linewidth=0, antialiased=False)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n ax.set_zlabel('$h(x, y)$')\n plt.show()", "def drawGeometry(posSrc, posMic):\r\n fig = plt.figure(figsize=(8, 6))\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter3D(posMic[:,0], posMic[:,1], posMic[:,2], marker='.')\r\n ax.scatter3D(posSrc[:,0], posSrc[:,1], posSrc[:,2], marker='*')\r\n ax.set_xlabel(\"x (m)\")\r\n 
ax.set_ylabel(\"y (m)\")\r\n ax.set_zlabel(\"z (m)\")\r\n plt.show()", "def plot3d(solver_obj):\r\n if not hasattr(solver_obj, '_euler_path'):\r\n print('An euler_path attribute has not been calculated')\r\n return False\r\n\r\n x = solver_obj.euler_path[:, 0]\r\n y = solver_obj.euler_path[:, 1]\r\n z = solver_obj.euler_path[:, 2]\r\n\r\n fig = plt.figure(figsize=(15, 8))\r\n\r\n p1 = plt.subplot2grid((1, 6), (0, 0), colspan=4, projection='3d')\r\n p2 = plt.subplot2grid((1, 6), (0, 4), colspan=2)\r\n\r\n plt.sca(p1) # select p1 for plotting\r\n d = np.zeros(len(x))\r\n col = np.zeros(len(d))\r\n d[1:] = np.sqrt(((x[1:] - x[:-1])**2 +\r\n (y[1:] - y[:-1])**2 +\r\n (z[1:] - z[:-1])**2)) # calculate euclidean distance in 3d\r\n for i in range(0, len(d), 1):\r\n col[i] = ((d[i] - d.min()) / (d.max() - d.min())) # normalize distance\r\n\r\n\r\n points = np.array([x, y, z]).T.reshape(-1, 1, 3)\r\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\r\n lc = Line3DCollection(segments, array=col, cmap='jet',\r\n norm=plt.Normalize(0.0, 1.0),\r\n linewidth=0.3, alpha=1)\r\n\r\n p1.add_collection3d(lc)\r\n fig.colorbar(lc, label='relative euclidean distance', shrink=0.5)\r\n p1.set_title('3D plot for Lorenz attractor')\r\n p1.set_xlabel('x')\r\n p1.set_ylabel('y')\r\n p1.set_zlabel('z')\r\n p1.set_xlim(x.min(), x.max())\r\n p1.set_ylim(y.min(), y.max())\r\n p1.set_zlim(z.min(), z.max())\r\n\r\n plt.sca(p2) # select p2 for plotting\r\n\r\n # printing parameter values to p4\r\n p2.text(0.5, 0.9, 'Parameters:',\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.text(0.5, 0.8, r'$\\sigma = {}$'.format(round(solver_obj.sigma, 4)),\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.text(0.5, 0.7, r'$\\beta = {}$'.format(round(solver_obj.beta, 4)),\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.text(0.5, 0.6, r'$\\rho = {}$'.format(round(solver_obj.rho, 4)),\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.text(0.5, 0.5, r'$init = {}$'.format(solver_obj.init),\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.text(0.5, 0.4, r'$N (steps) = {}$'.format(solver_obj.N),\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.text(0.5, 0.3, r'$t (stepzise) = {}$'.format(round(solver_obj.t, 4)),\r\n verticalalignment='top', horizontalalignment='center', fontsize=12)\r\n p2.get_xaxis().set_visible(False) # hide x axis\r\n p2.get_yaxis().set_visible(False) # hide y axis\r\n\r\n return fig", "def plotPath_3D(self, seq, poses_gt, poses_result, plot_path_dir):\n from mpl_toolkits.mplot3d import Axes3D\n\n start_point = [[0], [0], [0]]\n fontsize_ = 8\n style_pred = 'b-'\n style_gt = 'r-'\n style_O = 'ko'\n\n poses_dict = {} \n poses_dict[\"Ours\"] = poses_result\n if poses_gt:\n poses_dict[\"Ground Truth\"] = poses_gt\n\n fig = plt.figure(figsize=(8,8), dpi=110)\n ax = fig.gca(projection='3d')\n\n for key,_ in poses_dict.items():\n plane_point = []\n for frame_idx in sorted(poses_dict[key].keys()):\n pose = poses_dict[key][frame_idx]\n plane_point.append([pose[0,3], pose[2,3], pose[1,3]])\n plane_point = np.asarray(plane_point)\n style = style_pred if key == 'Ours' else style_gt\n plt.plot(plane_point[:,0], plane_point[:,1], plane_point[:,2], style, label=key) \n plt.plot(start_point[0], start_point[1], start_point[2], style_O, label='Start Point')\n\n xlim = ax.get_xlim3d()\n ylim = ax.get_ylim3d()\n zlim = ax.get_zlim3d()\n xmean = 
np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n plot_radius = max([abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean),\n (ylim, ymean),\n (zlim, zmean))\n for lim in lims])\n ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n ax.legend()\n # plt.legend(loc=\"upper right\", prop={'size':fontsize_}) \n ax.set_xlabel('x (m)', fontsize=fontsize_)\n ax.set_ylabel('z (m)', fontsize=fontsize_)\n ax.set_zlabel('y (m)', fontsize=fontsize_)\n ax.view_init(elev=20., azim=-35)\n\n png_title = \"{}_path_3D\".format(seq)\n plt.savefig(plot_path_dir+\"/\"+png_title+\".png\", bbox_inches='tight', pad_inches=0.1)\n pdf = matplotlib.backends.backend_pdf.PdfPages(plot_path_dir + \"/\" + png_title + \".pdf\") \n fig.tight_layout()\n pdf.savefig(fig) \n # plt.show()\n plt.close()", "def plots_3d_z(self, roots, param_x, param_y, param_z=None, max_z=None, **kwargs):\n roots = makeList(roots)\n param_z = self.get_param_array(roots[0], param_z)\n if max_z is not None and len(param_z) > max_z:\n param_z = param_z[:max_z]\n param_x, param_y = self.get_param_array(roots[0], [param_x, param_y])\n sets = [[param_x, param_y, z] for z in param_z if z != param_x and z != param_y]\n return self.plots_3d(roots, sets, **kwargs)", "def plot_pose(pose):\n import mpl_toolkits.mplot3d.axes3d as p3\n _CONNECTION = [\n [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6], [0, 7], [7, 8],\n [8, 9], [9, 10], [8, 11], [11, 12], [12, 13], [8, 14], [14, 15],\n [15, 16]]\n\n # fig = plt.figure()\n # ax = fig.gca(projection='3d')\n for c in _CONNECTION:\n col = '#%02x%02x%02x' % joint_color(c[0])\n ax.plot([pose[0, c[0]], pose[0, c[1]]],\n [pose[1, c[0]], pose[1, c[1]]],\n [pose[2, c[0]], pose[2, c[1]]], c=col)\n\n\n for j in range(pose.shape[1]):\n col = '#%02x%02x%02x' % joint_color(j)\n ax.scatter(pose[0, j], pose[1, j], pose[2, j],\n c=col, marker='*', edgecolor=col)\n smallest = pose.min()\n largest = pose.max()\n ax.set_xlim3d(smallest, largest)\n ax.set_ylim3d(smallest, largest)\n ax.set_zlim3d(smallest, largest)\n\n return fig", "def view_projection(points_3d: np.ndarray,\n focal_length: float,\n bounds_sphere_ratio: float = 500,\n plotter: \"pv.Plotter\" = None) -> \"pv.Plotter\":\n import pyvista as pv\n points_proj = project_points(points_3d, focal_length)\n points_proj_3d = -np.concatenate([points_proj, np.full(\n (points_proj.shape[0], 1), focal_length)], axis=1)\n all_points = np.concatenate([points_3d, points_proj_3d], axis=0)\n bounds_size = np.max(nd_width(all_points))\n\n pv_points = pv_spheres_from_numpy(\n all_points, bounds_size / bounds_sphere_ratio)\n edges = np.stack([np.arange(points_3d.shape[0]),\n points_3d.shape[0] + np.arange(points_3d.shape[0])], axis=1)\n\n if plotter is None:\n plotter = pv.Plotter()\n plotter.add_mesh(pv_points)\n plotter.add_mesh(pv_line_from_numpy(all_points, edges))\n return plotter" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds whether a product is available in a particular warehouse.
def get_product_available(self):
    print("\n\n\n\n in get_product_available")
    if self._context is None:
        self._context = {}
    location_obj = self.env['stock.location']
    warehouse_obj = self.env['stock.warehouse']
    shop_obj = self.env['sale.shop']
    states = self._context.get('states', [])
    what = self._context.get('what', ())
    if not self._ids:
        ids = self.search([])
    res = {}.fromkeys(ids, 0.0)
    if not self._ids:
        return res
    if self._context.get('shop', False):
        warehouse_id = shop_obj.read(['warehouse_id'])['warehouse_id'][0]
        if warehouse_id:
            self._context['warehouse'] = warehouse_id
    if self._context.get('warehouse', False):
        lot_id = warehouse_obj.read(['lot_stock_id'])['lot_stock_id'][0]
        if lot_id:
            self._context['location'] = lot_id
    if self._context.get('location', False):
        if type(self._context['location']) == type(1):
            location_ids = [self._context['location']]
        elif type(self._context['location']) in (type(''), type(u'')):
            location_ids = location_obj.search(
                [('name', 'ilike', self._context['location'])])
        else:
            location_ids = self._context['location']
    else:
        location_ids = []
        wids = warehouse_obj.search([])
        if not wids:
            return res
        for w in warehouse_obj.browse(wids):
            location_ids.append(w.lot_stock_id.id)

    # build the list of ids of children of the location given by id
    if self._context.get('compute_child', True):
        child_location_ids = location_obj.search(
            [('location_id', 'child_of', location_ids)])
        location_ids = child_location_ids or location_ids

    # this will be a dictionary of the product UoM by product id
    product2uom = {}
    uom_ids = []
    for product in self.read(['uom_id']):
        product2uom[product['id']] = product['uom_id'][0]
        uom_ids.append(product['uom_id'][0])
    # this will be a dictionary of the UoM resources we need for conversion
    # purposes, by UoM id
    uoms_o = {}
    for uom in self.env['uom.uom'].browse(uom_ids):
        uoms_o[uom.id] = uom

    results = []
    results2 = []

    from_date = self._context.get('from_date', False)
    to_date = self._context.get('to_date', False)
    date_str = False
    date_values = False
    where = [tuple(location_ids), tuple(
        location_ids), tuple(ids), tuple(states)]
    if from_date and to_date:
        date_str = "date>=%s and date<=%s"
        where.append(tuple([from_date]))
        where.append(tuple([to_date]))
    elif from_date:
        date_str = "date>=%s"
        date_values = [from_date]
    elif to_date:
        date_str = "date<=%s"
        date_values = [to_date]
    if date_values:
        where.append(tuple(date_values))

    prodlot_id = self._context.get('prodlot_id', False)
    prodlot_clause = ''
    if prodlot_id:
        prodlot_clause = ' and prodlot_id = %s '
        where += [prodlot_id]

    # TODO: perhaps merge in one query.
    if 'in' in what:
        # all moves from a location out of the set to a location in the set
        self._cr.execute(
            'select sum(product_qty), product_id, product_uom '
            'from stock_move '
            'where location_id NOT IN %s '
            'and location_dest_id IN %s '
            'and product_id IN %s '
            'and state IN %s ' +
            (date_str and 'and ' + date_str + ' ' or '') + ' ' +
            prodlot_clause +
            'group by product_id,product_uom', tuple(where))
        results = self._cr.fetchall()
    if 'out' in what:
        # all moves from a location in the set to a location out of the set
        self._cr.execute(
            'select sum(product_qty), product_id, product_uom '
            'from stock_move '
            'where location_id IN %s '
            'and location_dest_id NOT IN %s '
            'and product_id IN %s '
            'and state in %s ' +
            (date_str and 'and ' + date_str + ' ' or '') + ' ' +
            prodlot_clause +
            'group by product_id,product_uom', tuple(where))
        results2 = self._cr.fetchall()

    # Get the missing UoM resources
    uom_obj = self.env['uom.uom']
    uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)
    if self._context.get('uom', False):
        uoms += [self._context['uom']]
    uoms = filter(lambda x: x not in uoms_o.keys(), uoms)
    if uoms:
        uoms = uom_obj.browse(list(set(uoms)))
        for o in uoms:
            uoms_o[o.id] = o

    # TOCHECK: before change uom of product, stock move line are in old
    # uom.
    self._context.update({'raise-exception': False})
    # Count the incoming quantities
    for amount, prod_id, prod_uom in results:
        amount = uom_obj._compute_qty_obj(
            uoms_o[prod_uom], amount,
            uoms_o[self._context.get('uom', False) or product2uom[prod_id]])
        res[prod_id] += amount
    # Count the outgoing quantities
    for amount, prod_id, prod_uom in results2:
        amount = uom_obj._compute_qty_obj(
            uoms_o[prod_uom], amount,
            uoms_o[self._context.get('uom', False) or product2uom[prod_id]])
        res[prod_id] -= amount
    return res
[ "def get_product_available(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n states = context.get('states',[])\n what = context.get('what',())\n if not ids:\n #ids = self.search(cr, uid, [])\n ids = self.pool.get('product.product').search(cr, uid, [])\n res = {}.fromkeys(ids, 0.0)\n if not ids:\n return res\n\n # TODO: write in more ORM way, less queries, more pg84 magic\n if context.get('shop', False):\n cr.execute('select warehouse_id from sale_shop where id=%s', (int(context['shop']),))\n res2 = cr.fetchone()\n if res2:\n context['warehouse'] = res2[0]\n\n if context.get('warehouse', False):\n cr.execute('select lot_stock_id from stock_warehouse where id=%s', (int(context['warehouse']),))\n res2 = cr.fetchone()\n if res2:\n context['location'] = res2[0]\n\n if context.get('location', False):\n if type(context['location']) == type(1):\n location_ids = [context['location']]\n elif type(context['location']) in (type(''), type(u'')):\n location_ids = self.pool.get('stock.location').search(cr, uid, [('name','ilike',context['location'])], context=context)\n else:\n location_ids = context['location']\n else:\n location_ids = []\n wids = self.pool.get('stock.warehouse').search(cr, uid, [], context=context)\n for w in self.pool.get('stock.warehouse').browse(cr, uid, wids, context=context):\n location_ids.append(w.lot_stock_id.id)\n\n # build the list of ids of children of the location given by id\n if context.get('compute_child',True):\n child_location_ids = self.pool.get('stock.location').search(cr, uid, [('location_id', 'child_of', location_ids)])\n location_ids = child_location_ids or location_ids\n else:\n location_ids = location_ids\n\n uoms_o = {}\n product2uom = {}\n #for product in self.browse(cr, uid, ids, context=context):\n for product in self.pool.get('product.product').browse(cr, uid, ids, context=context):\n product2uom[product.id] = product.uom_id.id\n uoms_o[product.uom_id.id] = product.uom_id\n\n results = []\n results2 = []\n\n from_date = context.get('from_date',False)\n to_date = context.get('to_date',False)\n date_str = False\n date_values = False\n where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]\n if from_date and to_date:\n date_str = \"date>=%s and date<=%s\"\n where.append(tuple([from_date]))\n where.append(tuple([to_date]))\n elif from_date:\n date_str = \"date>=%s\"\n date_values = [from_date]\n elif to_date:\n date_str = \"date<=%s\"\n date_values = [to_date]\n\n prodlot_id = context.get('prodlot_id', False)\n\n # TODO: perhaps merge in one query.\n if date_values:\n where.append(tuple(date_values))\n if 'in' in what:\n # all moves from a location out of the set to a location in the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id NOT IN %s '\\\n 'and location_dest_id IN %s '\\\n 'and product_id IN %s '\\\n '' + (prodlot_id and ('and prodlot_id = ' + str(prodlot_id)) or '') + ' '\\\n 'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\\\n 'group by product_id,product_uom',tuple(where))\n results = cr.fetchall()\n if 'out' in what:\n # all moves from a location in the set to a location out of the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id IN %s '\\\n 'and location_dest_id NOT IN %s '\\\n 'and product_id IN %s '\\\n '' + (prodlot_id and ('and prodlot_id = ' + str(prodlot_id)) or '') + ' '\\\n 'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' 
'\\\n 'group by product_id,product_uom',tuple(where))\n results2 = cr.fetchall()\n uom_obj = self.pool.get('product.uom')\n uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)\n if context.get('uom', False):\n uoms += [context['uom']]\n\n uoms = filter(lambda x: x not in uoms_o.keys(), uoms)\n if uoms:\n uoms = uom_obj.browse(cr, uid, list(set(uoms)), context=context)\n for o in uoms:\n uoms_o[o.id] = o\n #TOCHECK: before change uom of product, stock move line are in old uom.\n context.update({'raise-exception': False})\n for amount, prod_id, prod_uom in results:\n amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] += amount\n for amount, prod_id, prod_uom in results2:\n amount = uom_obj._compute_qty_obj(cr, uid, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] -= amount\n return res", "def getAvailableKnownInStoreItems(self,store_id):\n cursor = self.mydb.cursor()\n query = \"SELECT sku,Price,location FROM Walmart{} WHERE availability=1 and price!=-1\".format(store_id)\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n return result", "def is_products_available(self):\n self.get_recommended_products()\n if (len(self.recommended_products) >= 4):\n return True\n return False", "def has_a_product(obj):\n return \"products\" in obj and len(obj[\"products\"]) > 0", "def calc_is_jackpot(self) -> Optional[bool]:\n try:\n products_qualities = [\n product.ore_type.quality_class == OreQualityClass.EXCELLENT\n for product in self.products.select_related(\"ore_type\").all()\n ]\n except (ObjectDoesNotExist, AttributeError):\n return None\n else:\n if not products_qualities:\n return None\n return all(products_qualities)", "def check_product_existence_in_db(self, product_ref):\n product_in_db = Product.objects.filter(ref=product_ref).exists()\n return product_in_db", "def in_stock(self):\n return self.product.in_stock", "def test_product_installed(self):\n installer = getToolByName(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('reptheory.policy'))", "def current_warehouse(self):\n try:\n return self.warehousefurniture_set.get(active=True)\n except ObjectDoesNotExist:\n return None", "def product_exist(self, code):\r\n\r\n if code in self.products:\r\n return True\r\n else:\r\n return False", "def _check_available_product_reminder(self, card, last_state):\n current_prod = card.products if card.products else []\n old_prod = last_state.products if last_state.products else []\n\n for product in current_prod:\n if product not in old_prod:\n return True\n\n # Check autoloads\n current_pending = card.pending if card.pending else []\n old_pending = last_state.pending if last_state.pending else []\n\n for pending in current_pending:\n if pending in old_pending:\n return True\n\n return False", "def _check_product(self, cr, uid, ids, context=None):\n all_prod = []\n res = True\n for bom in self.browse(cr, uid, ids, context=context):\n if bom.product_id.id in all_prod:\n res = False\n break\n all_prod.append(bom.product_id.id)\n return res", "def any_excess(self, sell_set):\n for product in sell_set:\n if self.excess_stock(product):\n return product\n return None", "def is_databroker_available(self):\n return db is not None", "def show_available_products(self):\n prods = {}\n with MongoDB() as database:\n for prod in database[\"products\"].find({\"qty_avail\": 
{\"$gt\": \"0\"}}):\n prods[prod[\"prod_id\"]] = {\"desc\": prod[\"description\"], \"prod_type\": \\\n prod[\"prod_type\"], \"qty_avail\": \\\n prod[\"qty_avail\"]}\n return prods", "def test_plone_app_dexterity_installed(self):\n qi = self.portal.portal_quickinstaller\n self.assertTrue(qi.isProductInstalled('plone.app.dexterity'))", "def _get_availability_for_order(cls, name):\n if name.lower() in cls.warehouse:\n return cls.warehouse[name.lower()]['_quantity']", "def HasqWGI(self):\n return self.__has('qWGI')", "def can_track_allocations(self):\n return self.product.get_product_class().track_stock" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inject game events as test data to PubSub.
def _inject_pubsub_game_events(self, topic, message_count):
    logging.debug(
        'Injecting %d game events to topic %s', message_count, topic.name)
    for _ in range(message_count):
        self.pub_client.publish(
            topic.name,
            (self.INPUT_EVENT % self._test_timestamp).encode('utf-8'))
[ "def test_generate_event(self):\n pass", "def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(driver, True)\n\n # Start validating data particles\n self.assert_particle_published(driver, self.VALID_SAMPLE_01, self.assert_particle_sample, True)\n self.assert_particle_published(driver, self.VALID_SAMPLE_02, self.assert_particle_sample_2, True)\n\n # validate status particles\n self.assert_particle_published(driver, self.VALID_STATUS_01, self.assert_status_particle, True)", "def testInit(self):\n event_tester = EventTester()\n self.assertEqual(event_tester.events, [])", "def setup(self, aggregator, count, evCfg, db):\n self.aggregator = aggregator\n self.events = []\n self.db = db\n for i in range(0, count):\n self.events.append(Event(message = \"test \"+str(i), additional = evCfg))", "def test_game_event():\n\n event = events.get(1)\n game = games.get(1)\n\n event.games.append(game)\n\n assert game in event.games", "def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(driver, True)\n\n # Start validating data particles\n self.assert_particle_published(driver, self.VALID_SAMPLE, self.assert_particle_sample, True)\n self.assert_particle_published(driver, self.VALID_GETHD_RESPONSE, self.assert_particle_hardware, True)\n self.assert_particle_published(driver, self.VALID_GETCC_RESPONSE, self.assert_particle_calibration, True)\n self.assert_particle_published(driver, self.VALID_GETSD_RESPONSE, self.assert_particle_status, True)\n self.assert_particle_published(driver, self.VALID_GETCD_RESPONSE, self.assert_particle_configuration, True)", "def init_debug_pub(self):\n namespace = '/aimbot_' + self.team_side + \"/players/ally\" + str(self.num) + '/'\n self.publishers['des_pos'] = rospy.Publisher(namespace + 'des_pos', Pose2D, queue_size=10)\n self.publishers['obsv_pos'] = rospy.Publisher(namespace + 'obsv_pos', Pose2D, queue_size=10)\n self.publishers['robot_vel'] = rospy.Publisher(namespace + 'robot_vel', Twist, queue_size=10)\n self.publishers['wheel_vel1'] = rospy.Publisher(namespace + 'wheel_vel1', Float32, queue_size=10)\n self.publishers['wheel_vel2'] = rospy.Publisher(namespace + 'wheel_vel2', Float32, queue_size=10)\n self.publishers['wheel_vel3'] = rospy.Publisher(namespace + 'wheel_vel3', Float32, queue_size=10)", "def test_publish_message(self):\n pass", "def send_test_event_notification(Notification=None, TestEventType=None):\n pass", "def test_get_multi_run_events(self):\n pass", "def test_get_run_events(self):\n pass", "def test_search_events(self):\n pass", "def test_pubsub(nsproxy, serializer, message):\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a0.bind('PUB', alias='pub', serializer=serializer)\n a1.connect(addr, handler=set_received)\n while not a1.get_attr('received'):\n a0.send('pub', message)\n time.sleep(0.1)\n assert a1.get_attr('received') == message", "def test_on():\n obs = Observable()\n nose.assert_false(obs.events)\n\n def on_test():\n pass\n\n obs.on(\"on_test\", on_test)\n nose.assert_in(on_test, obs.events[\"on_test\"])", "def publisher():\n backend = get_backend(\"school_backends\", BACKEND, CHANNEL, \"my.app\")\n for x in range(0, 100):\n data = {\"foo\": \"bar\", 
\"nested\": [{\"foo\": \"baz\"}]}\n\n print(\"-----------------------\")\n publish(backend, random.choice(events), data)\n sleep_time = random.choice(range(0, 10))\n print(\"Next publication in {}\".format(sleep_time))\n time.sleep(sleep_time)", "def test_subscriptions_dict(self):\n self.dispatcher._handlers[\"notification\"] = {\"some_key\": \"some_value\"}\n self.assertDictEqual(self.dispatcher.subscriptions,\n self.dispatcher._handlers[\"notification\"])", "def __init__(self):\n self._events = self._create_event_objects()", "def test_onPublish(self):\n\n xml = \"\"\"\n <iq type='set' to='pubsub.example.org'\n from='user@example.org'>\n <pubsub xmlns='http://jabber.org/protocol/pubsub'>\n <publish node='test'/>\n </pubsub>\n </iq>\n \"\"\"\n\n def publish(requestor, service, nodeIdentifier, items):\n self.assertEqual(JID('user@example.org'), requestor)\n self.assertEqual(JID('pubsub.example.org'), service)\n self.assertEqual('test', nodeIdentifier)\n self.assertEqual([], items)\n return defer.succeed(None)\n\n self.service.publish = publish\n return self.handleRequest(xml)", "def test_event_subscription_cache(sock_dir):\n with eventpublisher_process(str(sock_dir)):\n with salt.utils.event.MasterEvent(str(sock_dir), listen=True) as me:\n me.subscribe(\"evt1\")\n me.fire_event({\"data\": \"foo1\"}, \"evt1\")\n me.fire_event({\"data\": \"foo2\"}, \"evt2\")\n evt2 = me.get_event(tag=\"evt2\")\n evt1 = me.get_event(tag=\"evt1\")\n _assert_got_event(evt2, {\"data\": \"foo2\"})\n _assert_got_event(evt1, {\"data\": \"foo1\"})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }