| query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | negatives (sequencelengths 19–20) | metadata (dict) |
|---|---|---|---|
Removes the 'tmp' directory if there is one under the given `builddpath`. | def clean_tmp(builddpath: str):
tmpdpath = os.path.join(builddpath, "tmp")
if os.path.isdir(tmpdpath):
shutil.rmtree(tmpdpath) | [
"def remove_testdir(tmpdir):\n yield\n if tmpdir.check():\n tmpdir.remove()",
"def delete_tmp_dir(self,name):\r\n\t\tnorm_name = os.path.normpath(name)\r\n\t\trel_path = os.path.relpath(norm_name, os.path.abspath(VDOM_CONFIG[\"TEMP-DIRECTORY\"]))\r\n\t\tif rel_path.find('/')>=0 or rel_path.find('\\\\')>=0:\r\n\t\t\traise VDOM_exception_file_access(\"Provided file name is invalid\")\t\r\n\t\tshutil.rmtree(name)",
"def clean():\n rm_rf(cwd/'_build')",
"def remove_temp_dir(self):\n if os.path.isdir(self.temp_dir):\n shutil.rmtree(self.temp_dir)",
"def delete_project_temp_root():\n if ENV_TEST_DIR:\n # If the environment variable is configured, delete its contents before the tests.\n if TEMP_ROOT_PATH.exists():\n shutil.rmtree(str(TEMP_ROOT_PATH))\n TEMP_ROOT_PATH.mkdir()\n\n yield\n\n if not ENV_TEST_DIR:\n # If the environment variable is not configured, then a random temp dir will be used;\n # its contents should be deleted after the tests.\n shutil.rmtree(str(TEMP_ROOT_PATH))",
"def cleanup_files(base_dir, builder):\n builder.run_root('rm -rf /build')",
"def clear_dir(self, path_=\".temp/\"):\n try:\n shutil.rmtree(path_)\n os.mkdir(path_)\n except:\n os.mkdir(path_)",
"def CleanBuildOutputDirectory(self):\n PrintStatus('Removing '+ self.BuildOutputRootDir())\n if os.path.isdir(self.BuildOutputRootDir()):\n _SmartDeleteDirectory(self.BuildOutputRootDir())",
"def delete_tmp_files():\n try:\n os.remove(settings.CACHE_ARCHIVE_NAME)\n except OSError:\n pass\n try:\n for root, dirs, files in os.walk(settings.CACHE_UNPACK_DIRECTORY, topdown=False):\n [os.remove(os.path.join(root, name)) for name in files]\n [os.rmdir(os.path.join(root, name)) for name in dirs]\n except OSError:\n pass",
"def clean():\n title(\"Cleaning build dir...\")\n Mambo(CWD).clean_build_dir()\n done()",
"def remove_fetched_workflows_dir(tmpdir: str) -> None:\n if tmpdir and os.path.isdir(tmpdir):\n shutil.rmtree(tmpdir)",
"def MakeCleanDir(path_to_dir):\n if os.path.exists(path_to_dir):\n shutil.rmtree(path_to_dir)\n # create new one\n os.makedirs(path_to_dir)\n # return the path\n path_to_dir = os.path.abspath(os.path.join(os.getcwd(), path_to_dir))\n return path_to_dir",
"def remove_dag_run_tmp_data(dag_run):\n\n logging.info(f\"Searching tmp data for dag_id: {dag_run.dag_id}, run_id: {dag_run.run_id}\")\n tmp_folder_set = set()\n for ti in dag_run.get_task_instances():\n logging.info(f\"Task: {ti.task_id}, execution_date: {ti.execution_date}, pid: {ti.pid}, state: {ti.state}\")\n try:\n logging.debug(\" - searching for tmp_folder in the report file\")\n report_location = ti.xcom_pull(task_ids=ti.task_id)\n tmp_folder_set.add(load_yaml(report_location)[\"tmp_folder\"])\n except Exception:\n logging.debug(\" - XCom was empty, or report file has been already deleted, or it's missing tmp_folder field\")\n for tmp_folder in tmp_folder_set:\n try:\n logging.info(f\"Removing tmp data from {tmp_folder}\")\n shutil.rmtree(tmp_folder)\n except Exception as ex:\n logging.error(f\"Failed to delete {tmp_folder}\\n {ex}\")",
"def test_library_configuration_temporary_directory_cleanup(self):\n import gc\n from oodi.configuration import Configuration\n\n configuration = Configuration()\n self.assertIsNone(configuration.__tmp_dir__)\n\n configuration.get_temporary_file_path('foo')\n tmp_dir = configuration.__tmp_dir__.name\n self.assertTrue(os.path.isdir(tmp_dir))\n\n del (configuration)\n gc.collect()\n self.assertFalse(os.path.isdir(tmp_dir))",
"def _delete_temp_folder(self):\n temp = os.path.normpath(self.Pub2SD + '/Temp')\n if os.path.exists(temp):\n shutil.rmtree(temp)\n self.qr.put(('STATUS', \"Deleting old temporary folder.\"))",
"def temp_repo_path():\n # Get a temporary repository path\n repo_path = tempfile.mkdtemp()\n # Yield the temporary repository path\n yield repo_path\n # After remove the remove repository path\n if os.path.exists(repo_path):\n shutil.rmtree(repo_path, True)",
"def _remove_folder(tile_path):\n shutil.rmtree(tile_path)",
"def clean_up() -> None:\n rmtree(TEMP)\n Path.unlink(ROOT_DIR.joinpath(ZIP_NAME))",
"def handle_tmp_dir(config):\n handle_env_var_config(config, 'MET_TMP_DIR', 'TMP_DIR')\n\n # create temp dir if it doesn't exist already\n # this will fail if TMP_DIR is not set correctly and\n # env MET_TMP_DIR was not set\n mkdir_p(config.getdir('TMP_DIR'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy all the Artie Libraries into the given folder. | def copy_artie_libs(dest):
libpath = os.path.join(repo_root(), "libraries")
libs = [os.path.join(libpath, d) for d in os.listdir(libpath) if os.path.isdir(os.path.join(libpath, d)) and d != "base-image"]
for lib in libs:
destpath = os.path.join(dest, os.path.basename(lib))
if not os.path.exists(destpath):
logging.info(f"Trying to copy {lib} to {destpath}")
try:
shutil.copytree(lib, destpath)
except FileExistsError:
# Race condition - someone beat us to it
pass | [
"def copy_to_a3(self):\n\t\tprint_blue(\"Copying addon to Arma 3 folder.\")\n\n\t\treg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)\n\t\ttry:\n\t\t\tk = winreg.OpenKey(reg, r\"SOFTWARE\\Wow6432Node\\Bohemia Interactive\\Arma 3\")\n\t\t\ta3_path = winreg.EnumValue(k, 1)[1]\n\t\t\twinreg.CloseKey(k)\n\t\texcept IOError:\n\t\t\tprint_error(\"Could not find Arma 3's directory in the registry.\")\n\n\t\tif os.path.exists(a3_path):\n\t\t\ttry:\n\t\t\t\tshutil.rmtree(os.path.join(a3_path, \"Mods\", self.project), True)\n\t\t\t\tshutil.copytree(os.path.join(self.release_dir, self.project), os.path.join(a3_path, \"Mods\", self.project))\n\t\t\texcept IOError:\n\t\t\t\tprint_error(\"Could not copy files. Is Arma 3 running?\")",
"def setup():\n for file in glob.glob('{}*'.format(TEST_FILE_DIR)):\n new_dest = file.replace(TEST_FILE_DIR, '')\n shutil.copy(file, new_dest)",
"def set_library_dirs (self, dirs):\r\n self.library_dirs = copy (dirs)",
"def copyRaws(self):\n #make a new dir\n path = 'tmp'\n try:\n os.mkdir(path)\n except:\n for d in glob.glob('./%s/*.*' % path):\n os.remove(d)\n\n for fle in glob.glob('./raw/*_raw.fits'):\n shutil.copy(fle, path)\n\n for fle in glob.glob('./support/*_spt.fits'):\n shutil.copy(fle, path)\n\n for fle in glob.glob('./asn/*_asn.fits'):\n shutil.copy(fle, path)\n\n #change the current working directory to tmp\n os.chdir(os.getcwd() + '/' + path)\n iraf.chdir(os.getcwd())",
"def combine_themes(folder: str):\n sources = os.listdir(folder)\n for source in sources:\n filename = join(folder, source)\n if os.path.isdir(filename):\n combine_pieces(filename)",
"def copyFLTs(self):\n #make a new dir\n path = 'tmp'\n try:\n os.mkdir(path)\n except:\n for d in glob.glob('./%s/*.*' % path):\n os.remove(d)\n\n for fle in glob.glob('./opus/*_flt.fits'):\n shutil.copy(fle, path)\n\n for fle in glob.glob('./support/*_spt.fits'):\n shutil.copy(fle, path)\n\n for fle in glob.glob('./asn/*_asn.fits'):\n shutil.copy(fle, path)\n\n #change the current working directory to tmp\n os.chdir(os.getcwd() + '/' + path)\n iraf.chdir(os.getcwd())",
"def fetch_all_from(self, src_directory):\n copy_tree(src_directory.abspath, self.abspath)",
"def load_directories(self):\n self.SRC_DIR = Path(__file__).parent / \"src\"\n self.ASSETS_DIR = self.SRC_DIR / \"assets\"\n self.IMAGES = self.ASSETS_DIR / \"images\" \n self.MAPS = self.ASSETS_DIR / \"maps\"\n self.SFX = self.ASSETS_DIR / \"sfx\"\n self.MUSIC = self.ASSETS_DIR / \"music\"",
"def copy_dlls(self, dest_dir: str) -> None:\n cfg = self.cfg\n logging.info('Copying Mingw-w64 runtime DLLs')\n for dll_name in self.dlls:\n if dll_name not in self.dll_paths:\n raise util.ToolchainBuildError(\n 'Required DLL {} not found in {}'.format(dll_name,\n self.search_dirs))\n dll_path = self.dll_paths[dll_name]\n dest_path = os.path.join(dest_dir, dll_name)\n if cfg.verbose:\n logging.info('Copying %s to %s', dll_path, dest_path)\n shutil.copy(dll_path, dest_path)",
"def StageDependencies(self, destination_dir):\n assert self.needed is not None\n for arch_file in self.needed.values():\n source = arch_file.path\n destination = os.path.join(destination_dir, arch_file.url)\n\n if (os.path.normcase(os.path.realpath(source)) ==\n os.path.normcase(os.path.realpath(destination))):\n continue\n\n # make sure target dir exists\n MakeDir(os.path.dirname(destination))\n\n Trace('copy: %s -> %s' % (source, destination))\n shutil.copy2(source, destination)",
"def _copy_static_folder(self):\n static_path = os.path.join(BASE_DIR, self.paths['static'])\n os.chdir(static_path)\n for path, subdirs, files in os.walk(os.curdir):\n output_path = os.path.join(BASE_DIR, self.paths['output'],\n path.replace('./', ''))\n for filename in files:\n original_file = os.path.join(path, filename)\n output_file = os.path.join(output_path, filename)\n copy(original_file, output_file)\n os.chdir(BASE_DIR)",
"def _copy_files(self, deployment_dir, oozie_xml, oozie_properties):\n\n self._create_file(deployment_dir, self.job.XML_FILE_NAME, oozie_xml)\n self._create_file(deployment_dir, 'job.properties', data='\\n'.join(['%s=%s' % (key, val) for key, val in oozie_properties.items()]))\n\n # List jar files\n files = []\n lib_path = self.fs.join(deployment_dir, 'lib')\n if hasattr(self.job, 'nodes'):\n for node in self.job.nodes:\n jar_path = node.data['properties'].get('jar_path')\n if jar_path:\n if not jar_path.startswith('/'): # If workspace relative path\n if jar_path.startswith('s3a://') or jar_path.startswith('abfs://'):\n jar_path = jar_path\n else:\n jar_path = self.fs.join(self.job.deployment_dir, jar_path)\n if not jar_path.startswith(lib_path): # If not already in lib\n files.append(jar_path)\n\n if USE_LIBPATH_FOR_JARS.get():\n # Add the jar files to the oozie.libpath\n if files:\n files = list(set(files))\n LOG.debug(\"Adding to oozie.libpath %s\" % files)\n if self.properties.get('oozie.libpath'):\n files.append(self.properties['oozie.libpath'])\n self.properties['oozie.libpath'] = ','.join(files)\n else:\n # Copy the jar files to the workspace lib\n if files:\n for jar_file in files:\n LOG.debug(\"Updating %s\" % jar_file)\n if jar_file.startswith('s3a://') or jar_file.startswith('abfs://'):\n jar_lib_path = jar_file\n else:\n jar_lib_path = self.fs.join(lib_path, self.fs.basename(jar_file))\n # Refresh if needed\n if self.fs.exists(jar_lib_path) and self.fs.exists(jar_file):\n stat_src = self.fs.stats(jar_file)\n stat_dest = self.fs.stats(jar_lib_path)\n if hasattr(stat_src, 'fileId') and hasattr(stat_dest, 'fileId') and stat_src.fileId != stat_dest.fileId:\n self.fs.remove(jar_lib_path, skip_trash=True)\n self.fs.copyfile(jar_file, jar_lib_path)",
"def _copy_storage(self):\n lib = self.db.get_library()\n src = self.lib.get_storage_path()\n dst = os.path.join(self.out_dir, 'storage')\n fswalker = FileSystemCopyWalker(lib, src, dst, self.fscopier)\n logger.info('copying storage {} -> {}'.format(src, dst))\n ZoteroObject.walk(lib, fswalker)",
"def copy_dir(root_src_dir, root_dst_dir):\n\n for src_dir, dirs, files in os.walk(root_src_dir):\n dst_dir = src_dir.replace(root_src_dir, root_dst_dir, 1)\n\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n for file_ in files:\n src_file = os.path.join(src_dir, file_)\n dst_file = os.path.join(dst_dir, file_)\n if os.path.exists(dst_file): \n # If we have .pbf's that clash, we want to merge them \n # together and write them to the new directory as dst_dir\n\n if file_ == \"metadata.json\":\n os.remove(dst_file)\n else:\n print \"\\n Merging tiles to... \" + dst_file\n\n with open(src_file, 'rb') as f:\n data = f.read()\n decoded_data1 = mapbox_vector_tile.decode(data)\n\n with open(dst_file, 'rb') as f:\n data = f.read()\n decoded_data2 = mapbox_vector_tile.decode(data)\n \n for k, v in decoded_data2.items():\n if k in decoded_data1:\n decoded_data1[k][\"features\"] += decoded_data2[k][\"features\"]\n else:\n decoded_data1[k] = decoded_data2[k] \n\n listofdict = []\n for k, v in decoded_data1.items():\n dic = {\n 'name': k,\n 'features': decoded_data1[k][\"features\"]\n }\n listofdict.append(dic)\n\n encoded_data = mapbox_vector_tile.encode(listofdict)\n with open(dst_file, 'w') as f:\n f.write(encoded_data)\n else: \n shutil.copy(src_file, dst_dir)",
"def _copy_newlib_headers_and_libs(self, source_dir: str,\n destination_dir: str) -> None:\n cfg = self.cfg\n join = os.path.join\n # pylint: disable=too-many-nested-blocks\n try:\n if os.path.exists(source_dir):\n if cfg.verbose:\n logging.info('Copying %s to %s', source_dir,\n destination_dir)\n for root, _, files in os.walk(source_dir):\n dst_dir = root.replace(source_dir, destination_dir, 1)\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n for file_ in files:\n src_file = join(root, file_)\n dst_file = join(dst_dir, file_)\n if os.path.exists(dst_file):\n if os.path.samefile(src_file, dst_file):\n continue\n os.remove(dst_file)\n shutil.copy(src_file, dst_dir)\n else:\n logging.error('Does not exist: %s', source_dir)\n raise util.ToolchainBuildError\n except shutil.Error as ex:\n raise util.ToolchainBuildError from ex",
"def folder_manager(dictionary):\n # Make Data Source folder\n if not os.path.isdir('data_source'):\n os.system('mkdir data_source')\n # Make folder containing reference genome\n if not os.path.isdir('ref_genome'):\n os.system('mkdir ref_genome')\n # make folder containing data for PCA/merging/zipping\n if not os.path.isdir('data_sourcer'):\n os.system('mkdir data_sourcer')\n # Copy reference genome to directory\n os.system(\"cp -r \" + dictionary['reference'] + \"* ref_genome\")",
"def prepare_visualization_directory():\n src = html_source_path\n dst = os.path.abspath(os.path.expanduser(\"~/.netwulf/\"))\n\n # always copy source files to the subdirectory\n copy_tree(src, dst)",
"def copy_build_files(base_dir, builder):\n builder.run_root('if [ ! -e /build ]; then mkdir /build; fi')\n for f in BUILD_FILES:\n # /build is used instead of /tmp here because /tmp can be bind-mounted\n # during build on Singularity (and the copied files are hidden by this\n # mount).\n builder.copy_root(os.path.join(base_dir, f), '/build')\n builder.run_root('chmod +x /build/*.sh')\n\n builder.copy_user(os.path.join(base_dir, 'environment.sh'),\n '/casa')\n builder.run_user('chmod a+rx /casa/environment.sh')\n builder.run_user('echo \"{\\\\\"image_id\\\\\": \\\\\"%s\\\\\", '\n '\\\\\"image_version\\\\\": \\\\\"%s\\\\\"}\"'\n ' > /casa/image_id' % (builder.image_id,\n builder.image_version))\n\n builder.copy_user(os.path.join(base_dir, 'bashrc'),\n '/casa')\n builder.copy_root(os.path.join(base_dir, 'entrypoint'),\n '/usr/local/bin/')\n builder.run_root('chmod a+rx /usr/local/bin/entrypoint')",
"def copy_code(self, src=\"./\", dst=\"./code/\"):\n for file in os.listdir(src):\n file_split = file.split('.')\n if len(file_split) >= 2 and file_split[1] == \"py\":\n if not os.path.isdir(dst):\n os.mkdir(dst)\n src_file = os.path.join(src, file)\n dst_file = os.path.join(dst, file)\n try:\n shutil.copyfile(src=src_file, dst=dst_file)\n except:\n print \"copy file error! src: %s, dst: %s\" % (src_file, dst_file)\n elif os.path.isdir(file):\n deeper_dst = os.path.join(dst, file)\n\n self.copy_code(src=os.path.join(src, file), dst=deeper_dst)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the default build location. | def default_build_location():
return os.path.join(repo_root(), "build-artifacts") | [
"def getDefaultOutputPath(self):\n return self.session.request('bootcdbuilder/defaults')",
"def _output_directory_default(self):\n return os.getcwd()",
"def _get_default_path(self):\n #return os.path.join(cfg.DATA_DIR, 'SNUBH_BUS')\n return cfg.DATA_DIR",
"def get_llvm_build_dir():\n # First check for custom provided build\n env_var_name = 'LLVM_BUILD'\n if env_var_name in os.environ:\n return os.environ[env_var_name]\n\n # Then default to project build\n return os.path.join(SCRIPT_DIR, '../../dependencies/llvm-project/build')",
"def getBuildDir(self):\n default = 'build'\n pathstr = self.getCustom('Build', 'builddir', default)\n pathstr = self._getAbsPath(pathstr)\n\n return pathstr",
"def get_build_base_folder(self):\n return os.path.join(self.folder, \"build\")",
"def default_config_path():\n return os.path.join(get_config_home(), 'config')",
"def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_path)",
"def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_im_path)",
"def default_config():\n path = os.path.join(googkit_root(), DEFAULT_CONFIG)\n if not os.path.exists(path):\n msg = 'Default config file is not found: {path}'.format(path=path)\n raise GoogkitError(msg)\n\n return path",
"def BuildCWD(self):\n return ROOT_DIR",
"def get_base_directory() -> str:\n return SO5CGConfig.base_directory \\\n if SO5CGConfig.base_directory is not None \\\n else expanduser(SO5CGConfig.default_base_directory)",
"def build_config_location(self) -> str:\n return f\"{self.build_variant}/{self.revision}/generate_tasks/burn_in_tests_multiversion_gen-{self.build_id}.tgz\"",
"def most_recent_app_dir(self):\n build_dir = {\n 'xcodebuild': 'xcodebuild',\n 'ninja': 'out',\n }[self.compiler]\n\n platform = {\n 'device': 'iphoneos',\n 'simulator': 'iphonesimulator',\n }[self.platform]\n\n return self.m.path.join(\n 'src',\n build_dir,\n '%s-%s' % (self.configuration, platform),\n )",
"def GetBuildPath(self):\n return self._GetAttribute(self._BUILD_ATTR)",
"def _fname_geotxt_default(self):\n dir_detector = os.path.abspath(os.path.dirname(__file__))\n return '%s/../%s' % (dir_detector, self._path_geo_default)",
"def get_workdir(self, default=None):\n return getnattr(self._raw, [\"settings\", \"workdir\"], default)",
"def default_base_dir():\n cwd = Path('.').resolve()\n\n pwd = os.environ.get('PWD')\n if pwd is None:\n return cwd\n\n pwd = Path(pwd)\n if not pwd.is_absolute():\n return cwd\n\n if cwd != pwd.resolve():\n return cwd\n\n return pwd",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'ftdata')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the default test results location. | def default_test_results_location():
return os.path.join(repo_root(), "test-results") | [
"def get_results_path(self):\n\n return constants[\"RESULTS_BASE_PATH\"] / self.get_module_path()",
"def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'ftdata')",
"def get_analysis_results_path(self):\n if self.config:\n try:\n return self.config.get(\"Analysis\", \"results_path\")\n except Exception, why:\n self._error_parse(why)\n return None\n else:\n return None",
"def get_default_path():\n\n # Try to read from app.yaml; if we can't find it, return the GAEUnit\n # default '/test'\n try:\n gae_config_file = open('/Users/jon/Projects/stremorshort/app.yaml', 'r')\n except IOError as e: pass\n else:\n loaded = yaml.load(gae_config_file.read())\n gae_config_file.close()\n\n for handler in loaded['handlers']:\n if 'script' in handler and handler['script'].startswith('gaeunit'):\n return re.sub(r'[^\\w/]', '', handler['url'])\n\n return '/test'",
"def get_result_path():\n return os.getcwd() + '/' + _result_folder",
"def _output_directory_default(self):\n return os.getcwd()",
"def getDefaultOutputPath(self):\n return self.session.request('bootcdbuilder/defaults')",
"def test_custom_results_path(self):\n\n support.create_project(self, 'lucius')\n\n project = cd.project.get_internal_project()\n project_data = self.read_project_file()\n project_data['path_results'] = project.source_directory\n self.write_project_file(project_data)\n\n self.assertTrue(project.refresh(force=True), 'should have refreshed')\n self.assertEqual(project.source_directory, project.results_path)",
"def get_result_folder():\n return _result_folder",
"def setting_default_out_dir(self):\n root_dir = Path.cwd() # Setting root directory.\n\n data_dir = root_dir / \"data\" / \"makeup_splits\" # Setting data directory.\n\n return data_dir",
"def par_results_path(self) -> str:\n return os.path.join(self.par_rundir_path(), \"results\",\n self.get_setting(\"vlsi.core.technology\"),\n self.top_module)",
"def get_default_result_file_name(self):\n backtestResultsFolder = 'Backtest Results'\n symbol = 'Imported' if not self.symbol else self.symbol\n dateString = datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n resultFile = f'{symbol}_backtest_results_{\"_\".join(self.interval.lower().split())}-{dateString}.txt'\n os.chdir('../')\n\n if not os.path.exists(backtestResultsFolder):\n os.mkdir(backtestResultsFolder)\n os.chdir(backtestResultsFolder)\n\n counter = 0\n previousFile = resultFile\n\n while os.path.exists(resultFile):\n resultFile = f'({counter}){previousFile}'\n counter += 1\n\n return resultFile",
"def get_default_examples_folder(self):\n return join(self._niftynet_home, 'examples')",
"def _get_default_data_dir_name():\n return _get_path(DATA_DIR)",
"def _get_default_path(self):\n #return os.path.join(cfg.DATA_DIR, 'SNUBH_BUS')\n return cfg.DATA_DIR",
"def def_report_path():\n if os.name == 'nt':\n return(getwindoc())\n else:\n return(os.getenv(\"HOME\"))",
"def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_path)",
"def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_im_path)",
"def make_default_data_path():\n return os.path.relpath(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"..\", \"..\", \"data\"))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random string suitable for a temporary directory. | def get_random_dirname() -> str:
return "tempdir-" + "".join(random.choices(string.ascii_letters, k=8)) | [
"def get_random_sting():\n return \"random string\"",
"def randstring():\n return binascii.b2a_hex(os.urandom(15)).upper()",
"def tempname(length, lowercase=False):\n\n chars = string.ascii_lowercase + string.digits\n if not lowercase:\n chars += string.ascii_uppercase\n random_part = ''.join(random.choice(chars) for _ in range(length))\n randomname = 'tmp_' + random_part\n\n return randomname",
"def non_existant_tempfile():\n with tempfile.TemporaryDirectory() as testdir:\n yield os.path.join(testdir, str(random.random()))",
"def generate_random_username():\n return os.urandom(100).hex()[:RANDOM_USERNAME_LENGTH]",
"def urandom(size: int) -> str:\n ...",
"def _make_random_filename(base_dir='',suffix='',num_chars=20):\n all = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n rand_region = ''.join([choice(all) for i in range(num_chars)])\n return path.join(base_dir,rand_region+suffix)",
"def _create_random_string(length=16) -> str:\n chars = string.ascii_letters + string.digits\n import os\n return ''.join(chars[ord(os.urandom(1)) % len(chars)] for i in range(length))",
"def create_temp_filename(prefix, suffix):\n global FILENAME_CHARS\n global FILENAME_NUM_RANDOM_CHARS\n name = \"{}{}{}\".format(\n prefix,\n \"\".join(np.random.choice(FILENAME_CHARS, FILENAME_NUM_RANDOM_CHARS)),\n suffix\n )\n filename = \"/tmp\"\n while os.path.exists(filename):\n filename = os.path.join(\"/tmp\", name)\n return filename",
"def random_filename():\n filegen = faker.Faker()\n return filegen.file_name().title()",
"def generate_safe_random_filename(extension=\"txt\"):\n name = uuid.uuid4()\n filename = base64.urlsafe_b64encode(name.bytes).decode(\"utf-8\").rstrip(\"=\\n\")\n return \"{filename}.{extension}\".format(filename=filename, extension=extension)",
"def get_secure_random_string(size):\r\n value = os.urandom(size)\r\n value = binascii.hexlify(value)\r\n value = value.decode('utf-8')[:size]\r\n return value",
"def _temp_path(self) -> str:\n directory = os.path.dirname(self.path)\n filename = (\n f\".{getpass.getuser()}\"\n f\"_{int(time.time())}\"\n f\"_{str(uuid.uuid4())[:8]}\"\n f\"_{os.path.basename(self.path)}\"\n )\n return os.path.join(directory, filename)",
"def create_file_name():\n # This generates a name that is between 3 to 63 chars long\n return str(uuid.uuid4())",
"def __generate_random():\n random_hash = ''.join(\n (\n random.choice(string.ascii_letters + string.digits + string.punctuation)\n )\n for _ in range(16)\n )\n return random_hash",
"def _temp_file_name():\n f_name = 'local-{}.temp'.format(threading.get_ident())\n\n return os.path.join(os.path.sep, 'tmp', f_name)",
"def random_string():\n rs = (''.join(random.choice(string.ascii_uppercase)\n for i in range(16)))\n\n return rs",
"def get_random_by_string(s):\n sum = reduce(lambda x, y: x+(y*37), [ord(c) for c in s])\n return float(sum % 360) / 360\n # Initialize random gen by server name hash\n #random.seed(s)\n #return random.random()",
"def _secure_imagename():\n return '_'.join([datetime.now().strftime(\"%Y%m%d%H%M%S%f\"), _random_letters()])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the file names (without .py) of all the task modules for dynamic import. | def get_task_modules():
task_folder = os.path.join(repo_root(), "artietool", "tasks")
return [os.path.splitext(fname)[0] for fname in os.listdir(task_folder) if os.path.splitext(fname)[-1] == ".py"] | [
"def getTaskModules(callback = None):\n\tpl = getPluginLoader()\n\treturn pl.getModules(\"Task\", callback = callback, moduleType = \"Task module\")",
"def get_import_paths() -> List[str]:\n return _redun_import_paths",
"def _obtain_imports(self):\n imports = ''\n for model in self.models:\n imports += model.get_import()\n return imports",
"def inspect_module_names(self) -> Set[str]:\n modules = []\n pattern_1 = r\"import\\s+(?P<module>\\w+)\"\n pattern_2 = r\"from\\s+(?P<module>\\w+)\"\n if not self._is_package:\n with open(str(self.root_filename), \"r\") as file:\n for line in file.readlines():\n m = re.match(pattern_1, line)\n if m:\n module = m.group(\"module\")\n modules.append(module)\n pass\n m = re.match(pattern_2, line)\n if m:\n module = m.group(\"module\")\n modules.append(module)\n pass\n pass\n pass\n pass\n else:\n # pattern = r\"import\\s+(?P<module>\\w+)\"\n for path, _, filenames in walk(str(self.root)):\n dir_path = self.root.joinpath(path)\n for filename in filenames:\n abs_path = dir_path.joinpath(filename)\n\n if not str(abs_path).endswith(\".py\"):\n continue\n pass\n modules.append(filename)\n pass\n pass\n return set(modules)",
"def get_test_names():\n pattern = re.compile('.*test_(.*)')\n return [mod[0] for mod in get_test_modules()]",
"def _iter_module_files():\n for module in list(sys.modules.values()):\n filename = getattr(module, '__file__', None)\n if filename:\n if filename[-4:] in ('.pyo', '.pyc'):\n filename = filename[:-1]\n yield filename",
"def _get_module_names(self, search_path=None):\r\n def generate_name(name):\r\n return pr.Name(self.GlobalNamespace, [(name, inf_pos)],\r\n inf_pos, inf_pos, self.import_stmt)\r\n\r\n names = []\r\n inf_pos = float('inf'), float('inf')\r\n # add builtin module names\r\n if search_path is None:\r\n names += [generate_name(name) for name in sys.builtin_module_names]\r\n\r\n if search_path is None:\r\n search_path = self._sys_path_with_modifications()\r\n for module_loader, name, is_pkg in pkgutil.iter_modules(search_path):\r\n names.append(generate_name(name))\r\n return names",
"def get_names_of_src_files(self):\r\n assert self.__is_valid, \"No valid run path: \" + self.__run_path\r\n return self.__names_of_src_files",
"def _get_modules_names(package):\n\n return sorted(\n map(operator.itemgetter(1),\n pkgutil.walk_packages(package.__path__,\n '{0}.'.format(package.__name__))))",
"def __get_all_scrapers_modules(self):\n\n modules = []\n \n file = os.path.realpath(__file__)\n folder = os.path.dirname(file)\n\n for filename in os.listdir(folder + \"/../scrapers\"):\n if filename.endswith(\"Scraper.py\"):\n modules.append(filename[:-3])\n\n return modules",
"def get_installed_modules_to_add_to_installed_apps():\n\treturn [item for item in os.listdir(BASE_DIR) if item.startswith('module_')]",
"def get_app_module_name_list(modules):\n return [app_module['name'] for app_module in modules if 'name' in app_module]",
"def sysfiles(self):\n search_mod_names = filter(\n re.compile(self.match).match,\n list(sys.modules.keys()),\n )\n mods = map(sys.modules.get, search_mod_names)\n return set(filter(None, map(self._file_for_module, mods)))",
"def get_submodule_names():\n return _SUBMODULE_NAMES",
"def _get_module_names():\n result = _stats_collection.get_collection().find_one({'_id': {'subsystem_id': _user.get('scope'), 'type': 'aggregated'}})\n return sorted(list(result['per_module'].keys())) if result else []",
"def get_modules(self, name) -> List[str]:\n results = self._execute(\n models.Name.search_by_name_like.select(\"module\", \"source\"), (name,)\n ).fetchall()\n return sort_and_deduplicate(results)",
"def get_all_names():\n for filename in glob.glob(os.path.join(REQ_DIR, 'requirements-*.txt-raw')):\n basename = os.path.basename(filename)\n yield basename[len('requirements-'):-len('.txt-raw')]",
"def list_py(path = None):\n if(path == None):\n path =os.getcwd()\n return [fname for fname in os.listdir(path)\n if os.path.isfile(fname)\n if fname.endswith('.py')]",
"def __get_powercli_modules_names(powercli_zip):\n if not isinstance(powercli_zip, ZipFile):\n raise TypeError(\"The argument 'powercli_zip' isn't of type 'ZipFile'.\")\n\n modules_names = []\n for single_file in powercli_zip.namelist():\n module_match = match(consts.POWERCLI_MODULE_NAME_REGEX, single_file)\n \n if not module_match:\n continue\n \n module_name = module_match.groups()[0]\n if module_name not in modules_names:\n modules_names.append(module_name)\n \n return modules_names"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds and retrieves the Task from the tasks list based on its name. Returns None if can't find it. | def find_task_from_name(name: str, tasks):
for t in tasks:
if t.name == name:
return t
return None | [
"def getTaskByID(self,taskID : str):\n if self.taskLock.acquire():\n try:\n for item in self.tasks:\n if item.ID == taskID:\n return item\n return None\n finally:\n self.taskLock.release()",
"def get_task(self, task_name):",
"def _get_task_item(self, name, machine):\n if name in self.tasks:\n for task in self.tasks[name]:\n if task['machine'] == machine:\n return task\n return None",
"def get_a_task(self, task_name):\n raise NotImplementedError",
"def get_task_by_id(self,task_id): \n return self.tasks.get_task_by_id(task_id = task_id)",
"def fetch_latest_task(self, named_task):\n task_list, _ = _FlyteClientManager(\n _platform_config.URL.get(), insecure=_platform_config.INSECURE.get()\n ).client.list_tasks_paginated(\n named_task,\n limit=1,\n sort_by=_common.Sort(\"created_at\", _common.Sort.Direction.DESCENDING),\n )\n return task_list[0] if task_list else None",
"def get_task_from_name(task_name: str) -> Type[BaseTask]:\n matching_task = [cls for cls in BaseTask.__subclasses__() if cls.name == task_name]\n\n if len(matching_task) != 1:\n raise ResourceError(\"Invalid task name\")\n\n return matching_task[0]",
"def by_name(self, name):\n name_like = \"%{}%\".format(name)\n with self.cursor() as cursor:\n cursor.execute(\"SELECT TASK, NAME, DESCRIPTION FROM\"\n \" TASK WHERE NAME LIKE ?\", (name_like,))\n return [Task.map_row(row) for row in cursor.fetchall()]",
"def by_id(self, id_):\n with self.cursor() as cursor:\n cursor.execute(\"SELECT TASK, NAME, DESCRIPTION FROM TASK WHERE\"\n \" TASK = ?\", (id_,))\n row = cursor.fetchone()\n if not row:\n raise KeyError(\"No Task with id: {}\".format(id_))\n return Task.map_row(row)",
"def get_next(self):\n if len(self.tasks) == 0:\n return None\n else:\n task = self.get_next_task(self.tasks)\n return task",
"def fetch_task(self, project: str = None, domain: str = None, name: str = None, version: str = None) -> FlyteTask:\n if name is None:\n raise user_exceptions.FlyteAssertion(\"the 'name' argument must be specified.\")\n task_id = _get_entity_identifier(\n self.client.list_tasks_paginated,\n ResourceType.TASK,\n project or self.default_project,\n domain or self.default_domain,\n name,\n version,\n )\n admin_task = self.client.get_task(task_id)\n flyte_task = FlyteTask.promote_from_model(admin_task.closure.compiled_task.template)\n flyte_task._id = task_id\n return flyte_task",
"def get_input_task(self, name=''):\n port = self.get_input(name).other\n if port is None:\n return None\n return port.task",
"def _read_task(self, task_str):\n return self._read_tasks(task_str)[0]",
"def getGoogleTaskByTaskListAndId(self, taskList, taskId):\n service = self.getCredentials()\n task = service.tasks().get(tasklist=taskList, task=taskId).execute()\n return task",
"def _find_task(self, sync_inc_id, payload):\n response = self.get_incident_tasks(sync_inc_id)\n\n for task in response:\n # pick a number of comparison fields to ensure duplicate\n if task['name'] == payload['name'] and task['cat_name'] == payload['cat_name']:\n return task['id']\n\n return None",
"def get_task(selection=None, ask_details=True):\n # Check that the selection is satisfiable.\n if selection is not None and not selection:\n raise ValueError(\"Cannot ask for a task from an empty selection.\")\n\n task = None\n print(\"What is the task? \")\n if selection is not None:\n restricted = True\n else:\n selection = session.tasks\n restricted = False\n\n str2task = dict()\n int2task = dict()\n for task_index, a_task in enumerate(selection):\n # FIXME Distinguish between different tasks with the same string\n # representation.\n task_str = str(a_task)\n str2task[task_str] = a_task\n int2task[str(task_index)] = a_task\n if restricted:\n Cli.choosefrom(int2task)\n\n shown_selection = False\n taskname = input(\"> \")\n while not taskname or taskname == \"?\":\n if restricted:\n Cli.choosefrom(int2task)\n else:\n if selection:\n shown_selection = True\n Cli.choosefrom(int2task,\n msg=\"You can choose from the existing \"\n \"tasks:\")\n else:\n print(\"There are currently no tasks defined.\")\n taskname = input(\"> \")\n # Should the task be one of existing Task tasks,\n if restricted:\n # Find the Task task.\n while taskname not in str2task and taskname not in int2task:\n if not taskname or taskname == \"?\":\n Cli.choosefrom(int2task)\n else:\n print(\"Sorry, this task was not on the menu. Try again.\")\n taskname = input(\"> \")\n if taskname in int2task:\n task = int2task[taskname]\n else:\n task = str2task[taskname]\n # If we are asking for a new task,\n else:\n if shown_selection:\n if taskname in int2task:\n task = int2task[taskname]\n elif taskname in str2task:\n task = str2task[taskname]\n if task is None:\n # Create a new task, asking for optional details.\n project = Cli.get_project(\n prompt=\"What project does it belong to?\")\n task = Task(taskname, project)\n if ask_details:\n print(\"Estimated time?\")\n time = input(\"> \").strip()\n print(\"Deadline?\")\n deadline = input(\"> \").strip()\n if time:\n task.time = parse_timedelta(time)\n if deadline:\n task.deadline = parse_datetime(\n deadline, tz=session.config['TIMEZONE'])\n return task",
"def next(self, task):\n uuid = str(task.uuid)\n for idx, otask in enumerate(self.tasks[:-1]):\n if otask.uuid == uuid:\n if self.tasks[idx + 1].status != 'SUCCESS':\n return self.tasks[idx + 1]\n else:\n uuid = self.tasks[idx + 1].uuid",
"def get_queue(self, task_name):",
"def get_task(self, tid):\n\t\tquery = \"\"\"\n\t\t\tSELECT t.*, c.path as ctx_path\n\t\t\tFROM Task t JOIN Context c\n\t\t\tON t.context = c.id\n\t\t\tWHERE t.id = ?\n\t\t\"\"\"\n\t\tc = self.connection.cursor()\n\t\tc.execute(query, (tid,))\n\t\trow = c.fetchone()\n\t\tif row is None:\n\t\t\treturn None\n\t\treturn row"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a location we can use for scratch stuff. | def get_scratch_location():
scratch_location = os.path.join(repo_root(), "tmp")
if not os.path.isdir(scratch_location):
os.makedirs(scratch_location, exist_ok=True) # Hopefully nip any race conditions in the bud
return scratch_location | [
"def Location(self) -> str:",
"def full_bed_location():\n return \"tests/test_data/full_bed.bed\"",
"def getFrom(self) -> ghidra.program.util.ProgramLocation:\n ...",
"def printable_location(self):\n return '\"{0}\" ({1})'.format(\n concise_path(self.base_dir), self.pyver)",
"def create_world_locator():\n loc = \"PinGuide_World_LOC\"\n if not mc.objExists(\"PinGuide_World_LOC\"):\n mc.spaceLocator(n = loc)[0]\n return loc",
"def generate_location_basic(self):\n xloc = np.random.rand(1)*self.width\n yloc = np.random.rand(1)*self.height\n self.loc = (xloc[0], yloc[0])\n return xloc[0], yloc[0]",
"def __get_file_root_location(self):\n\n return self.main_location",
"def location(self) -> str:\n if self.__expanded_launch_file_path is None:\n # get_launch_description() has not been called yet\n return ' + '.join([str(sub) for sub in self.__launch_file_path])\n return self.__expanded_launch_file_path",
"def find_base_path():\n if platform.system() == 'windows':\n base_path = os.path.join('K:', 'ptestbend')\n else:\n base_path = os.path.join('/mnt','K', 'ptestbend')\n return base_path",
"def location():\n import sys\n end = \"\\n\" if sys.stdout.isatty() else \"\"\n print(path.scriptdir, end=end)",
"def location(self) -> ConsoleLinkLocation:\n return self.__location",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def short_relative_path_from_here(self):\n return self.__class__(os.getcwd()).short_relative_path_to(self)",
"def getTo(self) -> ghidra.program.util.ProgramLocation:\n ...",
"def generate_random_location(self): \n snake = self.snake.get_locations()\n stones = self.stones.get_locations()\n apples = self.apples.get_locations()\n already_taken_space = snake + stones + apples + self.wall\n xy = (0,0)\n while True:\n xy = (random.randrange(0, screen_width, grid_size.x), random.randrange(0, screen_height, grid_size.y))\n if xy not in already_taken_space:\n break\n return xy",
"def short_relative_path_to_here(self):\n return self.short_relative_path_to(os.getcwd())",
"def location(self):\n return self._redunda.location",
"def dna_reference_location():\n return \"/home/user/git_private/data/reference/hs38.fa\"",
"def def_report_path():\n if os.name == 'nt':\n return(getwindoc())\n else:\n return(os.getenv(\"HOME\"))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the git tag of the Artie repo. | def git_tag() -> str:
p = subprocess.run("git log --format='%h' -n 1".split(' '), capture_output=True)
p.check_returncode()
return p.stdout.decode('utf-8').strip().strip("'") | [
"def git_tag():\n return execute(\"git tag\", capture=True).split('\\n')",
"def get_latest_tag() -> str:\n return exec_cmd(\"git describe --tags --abbrev=0\").strip()",
"def last_git_tag(cwd: str) -> str:\n res = subproc.run(\n \"git describe --abbrev=0\".split(),\n capture_stdout=True,\n cwd=cwd,\n errmsg=f\"Failed to get most recent tag from repo at {cwd}.\"\n )\n tag = res.stdout.decode().strip()\n res = subproc.run(\n [\"git\", \"rev-list\", \"-n\", \"1\", tag],\n capture_stdout=True,\n cwd=cwd,\n errmsg=f\"Failed to get SHA for tag {tag}.\",\n )\n return res.stdout.decode().strip()",
"def latest_github_tag():\n release_tag = os.getenv(\"NOVA_AGENT_RELEASE\")\n if release_tag:\n return release_tag\n\n import json\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]",
"def get_latest_tag():\n url = \"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest\"\n logger.info(\"Requesting tag information: %s\", url)\n response = requests.get(url)\n logger.info(\"Response url: %s\", response.url)\n tag = response.url.rsplit(\"/\", 1)[-1]\n logger.info(\"Tag: '%s'\", tag)\n return tag",
"def fx_git_last_tag():\n result = tbx.run(\"git --no-pager tag --sort=taggerdate\")\n tag_l = result.strip().split(\"\\n\")\n latest_tag = tag_l[-1] if 0 < len(tag_l) else \"\"\n return latest_tag",
"def tag_object(tag):\n # We can't use ar.get_tags because that returns the commit's hexsha,\n # not the tag's, and ar.get_hexsha is limited to commit objects.\n return ar.call_git_oneline(\n [\"rev-parse\", \"refs/tags/{}\".format(tag)], read_only=True)",
"def _get_latest_tag(**kwargs):\n try:\n # If the git command output is my-tag-14-g0aed65e,\n # then the return value will become my-tag.\n return (\n _check_output(\n [\"git\", \"describe\", \"--tags\", \"--long\"],\n **kwargs,\n )\n .decode(\"utf-8\")\n .strip()\n .rsplit(\"-\", maxsplit=2)[0]\n )\n except subprocess.CalledProcessError:\n return None",
"def retrieve_git_info():\n # Is Git installed?\n try:\n subprocess.call(['git', '--version'],\n stdout=subprocess.PIPE)\n except OSError:\n return None\n\n # Decide whether this is a release\n p = subprocess.Popen(\n ['git', 'describe', '--tags', '--candidates=0', 'HEAD'],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n p.wait()\n if p.returncode == 0:\n tag = p.stdout.read()\n logger.debug('Most recent tag: ' + tag)\n if tag.startswith('tulip-'):\n return 'release'\n if len(tag) >= 2 and tag.startswith('v'):\n try:\n int(tag[1])\n return 'release'\n except ValueError:\n pass\n\n # Otherwise, return commit hash\n p = subprocess.Popen(\n ['git', 'log', '-1', '--format=%H'],\n stdout=subprocess.PIPE\n )\n p.wait()\n sha1 = p.stdout.read()\n logger.debug('SHA1: ' + sha1)\n return sha1",
"def get_git_revision():\n try:\n import git\n except ImportError:\n return UNKNOWN\n try:\n path = os.path.dirname(__file__)\n gitrepo = git.Git(path)\n return gitrepo.rev_list(\"HEAD\").splitlines()[0]\n except git.GitCommandError:\n return UNKNOWN",
"def _git_tags(self):\n git_cmd = [self.command, 'tag', '--points-at', 'HEAD']\n return \", \".join(line.strip() for line in self.runCommand(git_cmd)[0].strip().splitlines())",
"def get_best_candidate_tag(rev: str, git_repo: str) -> str:\n tags = cmd_output(\n 'git', *NO_FS_MONITOR, 'tag', '--points-at', rev, cwd=git_repo,\n )[1].splitlines()\n for tag in tags:\n if '.' in tag:\n return tag\n return rev",
"def repository_tag_url(namespace, repository, tag_name):\n return _BASE_URL_V2 % ('repositories/%s/%s/refs/tags/%s' % (namespace, repository, tag_name))",
"def get_image_tag(image_version):\n return \"%s-latest\" % str(image_version)",
"def autoincrement_version():\n latest_tag = run_command(['git', 'describe', '--abbrev=0'])\n if latest_tag == \"\":\n msg_info(\"There are no tags yet in this repository.\")\n version = \"1\"\n elif \".\" in latest_tag:\n version = latest_tag.replace(\"v\", \"\").split(\".\")[0] + \".\" + str(int(latest_tag[-1]) + 1)\n else:\n version = int(latest_tag.replace(\"v\", \"\")) + 1\n return version",
"def tag(self):\n return self._tag",
"def get_last_tag(self, sha=None, match=None):\n options = ['--abbrev=0']\n if match:\n options += ['--match', match]\n if sha:\n options.append(sha)\n tag = self.git('describe', *options).stdout.read()\n return tag.decode().strip()",
"def get_latest_repo(tag):\n ks = koji.ClientSession(KOJIHUB)\n pathinfo = koji.PathInfo(topdir=\"\")\n\n repo = ks.getRepo(tag, state=koji.REPO_READY)\n repo_id = repo[\"id\"]\n path = pathinfo.repo(repo_id, tag)\n click.echo(\"{} {}\".format(repo_id, urljoin(KOJIPKGS, path)))",
"def get_git_tags():\n # type: () -> typing.Tuple[str, str]\n try:\n global _GITMETADATA_TAGS\n if _GITMETADATA_TAGS is not None:\n return _GITMETADATA_TAGS\n\n config = GitMetadataConfig()\n\n if config.enabled:\n repository_url, commit_sha = _get_tags_from_env(config)\n log.debug(\"git tags from env: %s %s\", repository_url, commit_sha)\n if not repository_url or not commit_sha:\n pkg_repository_url, pkg_commit_sha = _get_tags_from_package(config)\n log.debug(\"git tags from package: %s %s\", pkg_repository_url, pkg_commit_sha)\n if not repository_url:\n repository_url = pkg_repository_url\n if not commit_sha:\n commit_sha = pkg_commit_sha\n\n log.debug(\"git tags: %s %s\", repository_url, commit_sha)\n _GITMETADATA_TAGS = repository_url, commit_sha\n else:\n log.debug(\"git tags disabled\")\n _GITMETADATA_TAGS = (\"\", \"\")\n return _GITMETADATA_TAGS\n except Exception:\n log.debug(\"git tags failed\", exc_info=True)\n return \"\", \"\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the absolute path of the root of the Artie repository. | def repo_root() -> str:
thisdir = os.path.dirname(os.path.abspath(__file__))
root = os.path.join(thisdir, "..")
if not os.path.isdir(root):
raise FileNotFoundError("The Artie directory seems to have been altered in a way that I can't understand.")
return os.path.abspath(root) | [
"def _get_arc_root():\n return os.path.abspath(\n os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))",
"def full_repository(self):\n base = self.base_repository\n if base:\n if not base.endswith('/'):\n base += '/'\n return urlparse.urljoin(base, self.repository)\n else:\n return self.repository",
"def _get_artifactory_base():\n return os.environ.get('PYBEL_ARTIFACTORY_BASE', _default_artifactory_base).rstrip('/')",
"def get_repositories_path(self):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../packages\"))",
"def get_git_root():\n\n rpath = git.Repo('.', search_parent_directories=True).working_tree_dir\n rpath = rpath + '/'\n return rpath",
"def get_root_path(self):\n mock_cmd = self._mock_cmd('--print-root-path')\n output = check_output(mock_cmd)\n return output.rstrip()",
"def platform_root(self):\n return os.getcwd()",
"def root_uri(self) -> str:\n return pulumi.get(self, \"root_uri\")",
"def get_aidegen_root_dir():\n return os.path.join(get_android_root_dir(), constant.AIDEGEN_ROOT_PATH)",
"def get_mo_root_dir():\n return os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), os.pardir))",
"def path_on_server(self):\n\n # change dev_base if necessary\n if ConfigHandler.cfg.wb_new == \"True\":\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI41\n else:\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI40\n\n # if on Linux, we have to subtract local share base from development folder\n # -> the local share base acts like the drive letter on windows\n if platform.system() == 'Linux':\n tmp = self.projectfolder.replace(ConfigHandler.cfg.local_share_base, \"\")\n else:\n tmp = self.projectfolder\n\n if platform.system() == \"Windows\":\n # remove drive letter\n return oPB.DEV_BASE + tmp[2:].replace(\"\\\\\", \"/\")\n else:\n # replace possible double '/' with single '/'\n return (oPB.DEV_BASE + \"/\" + tmp).replace(\"//\", \"/\")\n\n \"\"\"\n if tmp.startswith(repo_base):\n return tmp\n else:\n if tmp.strip() != \"\":\n ret = (repo_base + \"/\" + tmp + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"a\", ret)\n return ret\n else:\n ret = (repo_base + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"b\", ret)\n return ret\n \"\"\"",
"def get_inner_fileserver_root():\n\n return seahub.settings.INNER_FILE_SERVER_ROOT",
"def __get_file_root_location(self):\n\n return self.main_location",
"def abspath(self):\r\n if self._abspath is None:\r\n self._abspath = os.path.abspath(self.path)\r\n return self._abspath",
"def __repositoryBaseUrl(self):\n CraftCore.debug.trace(\"VersionSystemSourceBase __repositoryBaseUrl\")\n # @todo move to SvnSource\n server = CraftCore.settings.get(\"General\", \"KDESVNSERVER\", \"svn://anonsvn.kde.org\")\n\n return server + \"/home/kde/\"",
"def root_path():\n return Root()",
"def __root_directory__(config) :\n path_config = config.get('ContentPaths', {})\n return os.path.realpath(path_config.get('PService', os.path.join(os.environ['HOME'], '.toxaway')))",
"def index_abspath(self) -> str:\n return getattr(self, '_ref_index_abspath', None) or self.get_file_from_workspace(self.index_filename)",
"def get_artella_python_folder():\n\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list of TODO information based on the given files. | def parse_TODOs(self, repo_PATH):
# https://gist.github.com/nickpascucci/1267938
# TODO : parser part 2
comments = []
todo_info = None
print ('repo path == ', repo_PATH)
for (dirpath, dirnames, filenames) in os.walk(repo_PATH, topdown=True, onerror=None, followlinks=False):
if not 'QuEST' in repo_PATH:
continue
print('files == ', filenames)
# sys.exit(1)
for f in filenames:
if f == 'TODO.md':
continue
lineN = 0
curr = os.path.join(dirpath, f)
try:
with open(curr, 'r') as file:
for line in file:
line = line.strip()
if line.startswith("# TODO"):
elements = line.split(':')
print ('elements == ', elements)
proj = repo_PATH.replace('./repos/', '')
todo_info = ( repo_PATH.replace('./repos/', ''),
elements[1].strip(),
curr.replace('/repos/', ''), lineN)
if todo_info is not None:
comments.append(todo_info)
lineN += 1
except Exception as e:
print (e); continue;
print ('\n\n\ncomments == ', comments)
return comments
'''
# TODO : append /TODOs/ to /TODO.md/
comments = []
# TODO : recursively search all files in repo_PATH
for source_filename in repo_PATH:
source_file = open(source_filename, "r")
line_number = 0
for line in source_file:
line_number += 1
line = line.strip()
# TODO : add support for /TODOs/ appended to the end of a line
if line.startswith("# TODO"):
elements = line.split(":")
todo_info = (elements[2],
elements[1],
source_filename,
str(line_number),
elements[3].strip())
comments.append(todo_info)
return comments
''' | [
"def listToDo():\n with open(\"todo.txt\") as toDoFile:\n tasks = toDoFile.readlines()\n if len(tasks) > 0:\n for index, task in enumerate(tasks, 1):\n print(\"[{0}] {1}\".format(\n len(tasks) - index + 1, task.strip(\"\\n\")))\n else:\n print(\"There are no pending todos!\")",
"def get_todos(fpath, checked):\n assert type(fpath) is str\n assert os.path.exists(fpath)\n todos = []\n todo_nb = []\n with open(fpath, 'r') as f:\n lines=f.readlines()\n for l_nb, line in enumerate(lines):\n idx, mk_idx = has_checklist_marker(line, checked)\n if idx is not None and mk_idx is not None:\n todos.append( line[idx:].strip() )\n todo_nb.append(l_nb)\n\n return todos, todo_nb",
"def todo_tasks(self):\n for todo in (self._directory / 'todo').iterdir():\n with todo.open('rb') as f:\n yield dill.load(f)",
"def read_tasks_and_print():\n if not _TASK_FILE.is_file():\n print(\"No existing tasks! Try to add some.\")\n return\n \n with open(_TASK_FILE) as f:\n task_dict = json.load(f)\n todo_list = task_dict['todo']\n review_list = task_dict['review']\n\n if 'daily' not in task_dict:\n task_dict['daily'] = []\n daily_list = task_dict['daily']\n\n cprint(\"Daily Tasks:\", 'grey', 'on_yellow', end='\\n')\n _print_daily_task_list(daily_list)\n\n cprint(\"Todos:\", 'grey', 'on_green', end='\\n')\n _print_list(todo_list)\n \n cprint(\"Reviews:\", 'white', 'on_blue', end='\\n')\n _print_list(review_list)",
"def report():\n with open(\"todo.txt\") as toDoFile:\n remTasks = toDoFile.readlines()\n with open(\"done.txt\") as doneFile:\n completedTasks = doneFile.readlines()\n print(\"{0} Pending : {1} Completed : {2}\".format(\n date.today(), len(remTasks), len(completedTasks)))",
"def parse_notes():\n notes = []\n for note_filename in os.listdir(NOTES_DIR):\n # Parse note file\n assert note_filename.endswith(\".txt\")\n note = parse(os.path.join(NOTES_DIR, note_filename))\n assert note_filename == note[\"id\"] + \".txt\", note_filename\n notes.append(note)\n return notes",
"def sort_tasks():\n\n done_re = re.compile(r\"\\s@(done|cancell?ed)\")\n reminder_re = re.compile(r\"([^\\s\\\"`'\\(\\[])remind\\((.*)(\\s\\\"(.*?)\\\")?\\)\")\n\n task_section = False\n pending_tasks = []\n reminding_tasks = []\n done_tasks = []\n\n for line in fileinput.input(filename, inplace=1):\n if \"## Tasks.todo\" in line:\n task_section = True\n print(line)\n elif task_section:\n if line.startswith(\"##\"):\n # If we've hit the log, then spew everything out\n task_section = False\n # Print out all pending and done tasks\n for task in pending_tasks:\n print(task, end=\"\")\n for task in reminding_tasks:\n print(task, end=\"\")\n for task in done_tasks:\n print(task, end=\"\")\n\n print(\"\\n\" + line, end=\"\")\n\n else:\n # Otherwise, accumulate the tasks, but don't print yet\n if line.strip() == \"\":\n continue\n if done_re.search(line):\n done_tasks.append(line)\n elif reminder_re.search(line):\n reminding_tasks.append(line)\n else:\n pending_tasks.append(line)\n else:\n print(line, end=\"\")",
"def get_new_unchecked_todos(fpath):\n todos, todo_lnb = get_todos(fpath, False)\n \n # check there isn't a hash already given. if so, drop it\n new_todos = []\n new_todo_lnb = []\n for idx, todo in enumerate(todos):\n if find_hex(todo) is None: # means that it's new\n new_todos.append(todo)\n new_todo_lnb.append(todo_lnb[idx])\n\n new_hashes = [generate_hex() for todo in new_todos]\n # for each todo, we should check that there's a time duration indicated\n new_todos_wtime = []\n for todo in new_todos:\n duration = find_duration(todo)\n if duration is None:\n print('WARNING: adding default duration of 30 m for {}'.format(todo))\n todo = add_placeholder_duration(todo)\n new_todos_wtime.append(todo)\n else:\n new_todos_wtime.append(todo) \n assert len(new_hashes) == len(new_todos_wtime)\n return list(zip(new_todos_wtime, new_hashes, new_todo_lnb))",
"def find_in_file(files: list, todo: bool, fixme: bool) -> None:\n for file in files:\n ext = '.' + file.split('.')[-1]\n try:\n comment_sign = ext_to_comment_sign(ext.lower())\n # if language supports only single line comments\n # comment_sign is instance of string\n #\n # if language supports multilines comments\n # comment_sign is instance of tuple which contains\n # (single line comment sign, multiline comment sing for start\n # and end of the comment)\n if comment_sign['multi']:\n\n comment_sign = (comment_sign['single'], comment_sign['multi'][0], comment_sign['multi'][1])\n\n searcher = TodoSearcherM(todo=todo,\n fixme=fixme,\n comment_sign=comment_sign)\n searcher.search(file, single_file=True)\n else:\n comment_sign = comment_sign['single']\n searcher = TodoSearcherS(todo=todo,\n fixme=fixme,\n comment_sign=comment_sign)\n searcher.search(file, single_file=True)\n except CommentSignNotFound as e:\n print(e.message)",
"def show_notes(self):\n\t\tprint(\"You have the following to-do notes added: \\n\")\n\t\tfor n, note in enumerate(glob.glob(self.dir_address + '\\\\*.txt')):\n\t\t\ttitle = note.split('\\\\')\n\t\t\ttitle_name = title[-1].strip(\".txt\")\n\t\t\tprint(f\"{n+1}. {title_name}\")",
"def gen_task_desc(**kwargs):\n logger = logging.getLogger(__name__)\n\n suppressempty = kwargs[\"suppressempty\"]\n blend = kwargs[\"blend_info\"][\"blend\"]\n tasksprefix = kwargs[\"blend_info\"][\"tasksprefix\"]\n blend_dependencies = kwargs[\"blend_dependencies\"]\n\n\n task_desc_path = \"taskdesc-sec.template\"\n logger.debug(\"Opening file {0} to write\".format(task_desc_path))\n with open(task_desc_path,'w') as fout:\n\n for task in sorted(blend_dependencies.keys()): \n\n if blend_dependencies[task]['Leaf'] == 'false':\n continue\n\n if suppressempty and blend_dependencies[task][\"haspackages\"] == 0:\n if blend_dependencies[task]['test_always_lang']:\n logger.debug(\"Print empty task {0} because Test-always-lang is set\\n\".format(task))\n else:\n logger.debug(\"The metapackage {2} will not be created because {0} dependant are in the pool and suppressempty was set {1}\\n\".format(blend_dependencies[task][\"haspackages\"], suppressempty, task))\n continue\n\n fout.write(\"Task: {0}-{1}\\n\".format(tasksprefix, task))\n fout.write(\"Section: {0}\\n\".format(blend));\n fout.write(\"Description: {0}\\n\".format(blend_dependencies[task][\"description\"]))\n fout.write(\"{0}\".format(blend_dependencies[task][\"long_description\"])) #Already contains a newline\n fout.write(\"Relevance: 10\\n\")\n\n if blend_dependencies[task][\"Enhances\"]:\n fout.write(\"Enhances: {0}\\n\".format(blend_dependencies[task][\"Enhances\"]))\n\n if blend_dependencies[task][\"metapackage\"]:\n #No use listing a metapackage as a key package, if no metapackage exist.\n fout.write(\"Key: \\n\");\n fout.write(\" {0}-{1}\\n\".format(tasksprefix, task))\n\n fout.write(\"Packages: list\\n \")\n for header in [\"Depends\", \"Recommends\"]:\n if not blend_dependencies[task][header]:\n continue \n fout.write(\"{0}\".format(\"\\n \".join(sorted(blend_dependencies[task][header]))))\n fout.write(\"\\n\")\n\n fout.write(\"\\n\")",
"def _create_nodes_with_file_names(self, files, dirpath, level):\n files = [f for f in files if not self._controler.should_ignore(f)]\n return [self.create_node(dirpath, f, level) for f in files]",
"def format_files(files=None, from_git=True):\n\n for file_in in files:\n\n file_in = os.path.abspath(file_in)\n\n print 'checking %s' % file_in\n\n if not os.path.exists(file_in):\n print 'missing file %s' % file_in\n\n continue\n\n file_out = tempfile.mktemp()\n try:\n\n import PythonTidy\n\n reload(PythonTidy)\n\n # monkeypach constants\n # TODO: make this more configurable\n\n PythonTidy.COL_LIMIT = 120\n PythonTidy.MAX_SEPS_FUNC_DEF = 8\n PythonTidy.MAX_SEPS_FUNC_REF = 8\n\n PythonTidy.tidy_up(file_in=file_in, file_out=file_out)\n\n # overwrite original file only if hash differs\n\n if hashfile(file_in) != hashfile(file_out):\n print ' tidying %s' % file_in\n shutil.copymode(file_in, file_out)\n shutil.copy(file_out, file_in)\n\n # re-add modified file to git if required\n\n if from_git:\n execute('git add %s' % file_in)\n finally:\n os.remove(file_out)",
"def _write_entries(po_files, languages, msgid, msgstrs, metadata, comment):\n start = re.compile(r'^[\\s]+')\n end = re.compile(r'[\\s]+$')\n for i, lang in enumerate(languages):\n meta = ast.literal_eval(metadata)\n entry = polib.POEntry(**meta)\n entry.tcomment = comment\n entry.msgid = msgid\n if msgstrs[i]:\n start_ws = start.search(msgid)\n end_ws = end.search(msgid)\n entry.msgstr = str(start_ws.group() if start_ws else '') + \\\n unicode(msgstrs[i].strip()) + \\\n str(end_ws.group() if end_ws else '')\n else:\n entry.msgstr = ''\n po_files[lang].append(entry)",
"def migrate(ctx, interactive, sync, map_project, map_tag):\n\n if sync:\n ctx.invoke(synchronize)\n\n tasks = todoist.items.all()\n io.important(f'Starting migration of {len(tasks)} tasks...')\n for idx, task in enumerate(tasks):\n data = {}\n tid = data['tid'] = task['id']\n name = data['name'] = task['content']\n\n # Log message and check if exists\n io.important(f'Task {idx + 1} of {len(tasks)}: {name}')\n if check_task_exists(tid):\n io.info(f'Already exists (todoist_id={tid})')\n continue\n\n # Project\n p = todoist.projects.get_by_id(task['project_id'])\n project_hierarchy = [p]\n while p['parent_id']:\n p = todoist.projects.get_by_id(p['parent_id'])\n project_hierarchy.insert(0, p)\n\n project_name = '.'.join(p['name'] for p in project_hierarchy)\n project_name = utils.try_map(\n map_project,\n project_name\n )\n data['project'] = utils.maybe_quote_ws(project_name)\n\n # Priority\n data['priority'] = utils.parse_priority(task['priority'])\n\n # Tags\n data['tags'] = [\n utils.try_map(map_tag, todoist.labels.get_by_id(l_id)['name'])\n for l_id in task['labels']\n ]\n\n # Dates\n data['entry'] = utils.parse_date(task['date_added'])\n data['due'] = utils.parse_due(utils.try_get_model_prop(task, 'due'))\n data['recur'] = parse_recur_or_prompt(utils.try_get_model_prop(task, 'due'))\n\n if not interactive:\n add_task(**data)\n else:\n add_task_interactive(**data)",
"def list(config):\n store = api_todo.Todo()\n #tasks = api_sort(store.ls())\n tasks = store.ls()\n headers = ['id', 'Priority', 'done', 'description']\n data = []\n for el in tasks:\n identifier, content, _, _, active, priority = el\n data.append([identifier, priority, \"\" if active else \"X\", content])\n console.show_table(data, headers, 'tasks')",
"def getTaskDictionary(includeTagList, excludeTagList, dateList, \\\n taskLabelList):\n taskDict = dict()\n extractedTagsList = list()\n extractedAuthorList = list()\n diaryDir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__))\n for date in dateList:\n relativeDateDir = 'entries/' + str(date.year) + '/' + \\\n str(date.month).zfill(2) + '/' + str(date.day).zfill(2)\n # The file name of a task must match the pattern YYYYMMDD_XXXI.tex\n # where XXX are optional initials (letters a-zA-Z) and I is a number.\n fileNamePattern = re.compile(r'^' + str(date.year) + \\\n str(date.month).zfill(2) + str(date.day).zfill(2) + \\\n '_([a-zA-Z]*)([0-9]+)\\.tex$')\n # Retrieve a sorted list of all files and folders in relativeDateDir\n filesAndFoldersList = \\\n sorted(os.listdir(diaryDir + '/' + relativeDateDir))\n validTaskPathList = list()\n for fileOrFolder in filesAndFoldersList:\n relativeTaskPath = relativeDateDir + '/' + fileOrFolder\n taskPath = diaryDir + '/' + relativeTaskPath\n if os.path.isfile(taskPath) and \\\n re.match(fileNamePattern, fileOrFolder):\n # If the taskLabelList is not empty, check if the file name\n # is in the list\n if len(taskLabelList)==0 or fileOrFolder[:-4] in taskLabelList:\n extractedTags = extractTagsFromValidTask(taskPath, \\\n includeTagList, excludeTagList)\n if len(extractedTags)>0:\n extractedAuthors = extractAuthorsFromTask(taskPath)\n if len(extractedAuthors)>0:\n extractedAuthorList.extend(extractedAuthors)\n validTaskPathList.append(relativeTaskPath)\n extractedTagsList.extend(extractedTags)\n # If a least one task path has been added, add it to the dictionary\n if len(validTaskPathList)>0:\n taskDict[date] = validTaskPathList\n # return the task dictionary and the unique extracted tags and authors\n return taskDict, sorted(list(set(extractedTagsList))), \\\n sorted(list(set(extractedAuthorList)))",
"def __init__(self):\n\n # build entries for current files\n for f in glob.glob(\"*\"):\n self[f] = FileComment(f)\n\n # add saved comments\n saved = saved_comments()\n for fn in sorted(self, reverse=True):\n self[fn].comment = saved.comment_for_name(fn, remove=True)",
"def _outputFileLists(self, idef=('slice1', 'slice2', 'slice3')):\n for id in idef:\n out = id + 'filelist'\n fh = open(out, 'w')\n for file in self.files:\n tmp = file[:-4] + id\n fh.write(tmp + '\\n')\n fh.close()\n self.log.info('Writing %s' % out)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the summation of the integers from low to high. | def summation2(low, high):
total = 0
for number in range(low, high + 1):
total += number
return total | [
"def get_sum(a, b):\n return sum(range(min(a, b), max(a, b) + 1))",
"def rangeSumBST(self, root: Optional[TreeNode], low: int, high: int) -> int:\n ans = 0\n t = [root]\n while t:\n node = t.pop()\n if node:\n if low <= node.val <= high:\n ans += node.val\n if low < node.val:\n t.append(node.left)\n if node.val < high:\n t.append(node.right)\n\n return ans",
"def sum_range(num):\n if num == 0:\n return 0\n return num + sum_range(num - 1)",
"def sumRange(self, i, j):\n if self.nums == []:\n return 0\n\n return self.dp[j] - self.dp[i] + self.nums[i]",
"def sumOfMultiples(upper_bound, m1, m2):\n\ts = 0\n\t#add sums of each multiple\n\ts += m1 * sumOfConsInts((upper_bound-1) / m1) \n\ts += m2 * sumOfConsInts((upper_bound-1) / m2) \n\t#subtract sums of both multiples to avoid double counting\n\ts -= m1 * m2 * sumOfConsInts((upper_bound-1) / m1 / m2)\n\treturn s",
"def firstnsum(n):\n\treturn sum(range(n+1))",
"def sumAmicableNumbers(upperBound):\n\tsumm = 0\n\tfor a in xrange(1, upperBound):\n\t\tb = d(a)\n\t\tif a != b and d(b) == a:\n\t\t\tsumm += a\n\treturn summ",
"def sum_of_odd_integers(start, end):\n if start % 2 == 0:\n start += 1\n\n sum_of_numbers = 0\n\n for number in range(start, end+1, 2):\n sum_of_numbers += number\n\n return sum_of_numbers",
"def two_sum(int_array, lb, ub):\n hash_int = {}\n sum_hash = {}\n for x in int_array:\n if x >= ub: continue\n hash_int[x] = True\n\n for x in int_array:\n if x >= ub: continue\n for sum_x_y in range(lb, ub + 1):\n y = sum_x_y - x\n if x < y and y in hash_int:\n # print '%s + %s = %s' % (x, y, sum_x_y)\n sum_hash[sum_x_y] = True\n return len(sum_hash)",
"def sumRange(self, i, j):\n bucket_size, nums, table = NumArray.bucket_size, self.nums, self.table\n \n if i == j:\n return nums[i]\n \n vsum = 0\n now = i\n \n while now <= j:\n if now % bucket_size != 0:\n for k in range(now, min(j+1, now-(now%bucket_size) + bucket_size)):\n vsum += nums[k]\n now -= (now%bucket_size)\n else:\n if now+bucket_size-1 <= j:\n vsum += table[now//bucket_size]\n else:\n for k in range(now, j+1):\n vsum += nums[k]\n \n now += bucket_size\n \n return vsum",
"def _sum(a, i, j):\n if i > j: # T(n) = 0 \n return 0\n if i == j: # T(n) = 1\n return a[i]\n mid = (i+j)//2\n return _sum(a, i, mid) + _sum(a, mid+1, j)",
"def square_of_sums(upper_bound):\n # Use Gauss' insight about n/2 * (n + 1).\n return (upper_bound * (upper_bound + 1) / 2) ** 2",
"def sum_to(limit):\n return sum(primes_up_to(limit))",
"def sum_list(numbers: list) -> int:\n\n return sum(numbers)",
"def SimpleAdding(num):\n\n # code goes here\n return sum([x for x in range(num+1)])",
"def sum(n):\n return summation_using_accumulate(n, lambda x: x)",
"def sum(xs):\r\n return reduce(add, xs)",
"def sum_integer(number: int) -> int:\n\n return sum_list([int(i) for i in convert_int(number)])",
"def square_of_sum(num):\n return sum(range(num + 1))**2"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a graph, return False if the target node is not in the graph; otherwise return True. Uses depth-first search. | def node_in_graph(graph, target_node, start=None, visited=None):
    # Avoid a shared mutable default argument for the visited set.
    if visited is None:
        visited = set()
if start == target_node:
return True
    # Seed the stack with the start node itself rather than iterating over it.
    stack = [start]
while stack:
cur = stack.pop()
visited.add(cur)
for child in graph[cur]:
if child == target_node:
return True
if child not in visited:
stack.append(child)
return False | [
"def if_conn(graph):\n\n nodes = graph.nodes()\n first_node = nodes[0]\n last_node = nodes[-1]\n return nx.has_path(graph, first_node, last_node)",
"def depth_first_search(graph, start, v):\n\n visited = set()\n stack = [start]\n\n while stack:\n curr = stack.pop()\n if curr == v:\n return True\n\n for node in graph[curr]:\n if node not in visited:\n stack.append(node)\n\n return False",
"def has_path(G, source, target):\n try:\n nx.shortest_path(G, source, target)\n except nx.NetworkXNoPath:\n return False\n return True",
"def allvisited(nodes):\n for node in nodes:\n if not node.visited: \n return False\n return True",
"def is_graph(graph: Graph) -> bool:\r\n (n_vertices, edges) = graph\r\n for edge in edges:\r\n for vertex in edge:\r\n if not 1 <= vertex <= n_vertices:\r\n return False\r\n if edge[0] == edge[1]:\r\n return False\r\n return True",
"def check_if_path_exists(graph, start, end):\n return bfs(graph, start, end) is not None",
"def __contains__ (self, target):\n node = self.root\n while node:\n rc = node.compareTo(target)\n if rc > 0:\n node = node.left\n elif rc < 0:\n node = node.right\n else:\n return True\n \n \n return False",
"def is_walk(g, walk):\n\n for v in walk: # O(k)\n if not g.is_vertex(v): # O(1)\n return False\n\n if len(walk) == 0:\n return False\n\n # Note, can reduce the running time of the entire function\n # to O(k) if we implement the method is_edge to run in O(1) time.\n # This is a good exercise to think about.\n for node in range(0, len(walk) - 1): # O(k)\n if not g.is_edge((walk[node], walk[node + 1])): # O(d)\n return False\n\n return True",
"def graph_traverse(graph, node): \n visited = set()\n def _graph_traverse(n):\n visited.add(n)\n for neighbor in graph[n]:\n if neighbor not in visited:\n _graph_traverse(neighbor)\n _graph_traverse(node)\n return visited",
"def Search(graph, root):\n \n visited = []\n stack = []\n stack.append(root)\n while stack:\n node = stack.pop()\n if node not in visited:\n visited.append(node)\n for item in reversed(sorted(graph.neighbors(node))):\n if item not in visited:\n stack.append(item)\n return visited",
"def route_exists(self, node1, node2):\n stack = Stack()\n for node in self.get_nodes():\n node.visited = False\n stack.push(node1)\n while not stack.is_empty():\n node = stack.pop()\n if node:\n for child in node.get_children():\n if not child.visited:\n if child is node2:\n return True\n else:\n stack.push(child)\n node.visited = True\n return False",
"def has_vertex(self,v):\n return v in self.graph",
"def is_tree(self):\n\n if not(self.is_connected()):\n return False\n\n seen = []\n nodes_to_visit = [self.nodes[0]]\n\n # We keep track of what the last node we have seen is to not backtrack\n last_node = None\n while nodes_to_visit:\n # If we have traversed all the nodes without seeing a duplicate\n # we have a tree\n curr_node = nodes_to_vist.pop()\n if curr_node in seen:\n return False\n seen.append(curr_node)\n\n # Pick all the next nodes except for the one just visited\n next_nodes = [node for node in self.adjacency_list[curr_node]\n if node != last_node]\n nodes_to_visit.extend(next_nodes)\n last_node = curr_node\n return True",
"def has_dependencies(node, dag):\n for downstream_nodes in dag.values():\n if node in downstream_nodes:\n return True\n return False",
"def graph_search(problem, fringe):\n visited = [] # store nodes visited that should not be visited again\n fringe.push(Node(problem.getStartState()))\n while not fringe.isEmpty():\n node = fringe.pop()\n visited.append(node.state)\n if problem.isGoalState(node.state):\n return node.get_actions()\n for subNode in node.expand(problem):\n if subNode.state not in visited:\n\t fringe.push(subNode)\n return None",
"def is_connected(G, node, visited, depth_limit=0):\n \n if depth_limit == 20:\n return False\n if all(list(visited.values())):\n return True\n visited[node] = True\n connected = []\n for child in G[node]:\n connected += [is_connected(G, child, visited, depth_limit+1)]\n return all(connected)",
"def check_graph():\n return None",
"def is_connected(graph, node):\n queue = {node}\n hop_count = 0\n\n while True:\n # when nothing in the queue then exit loop\n try:\n node = queue.pop()\n except KeyError:\n break\n if node.visited is False:\n node.visited = True\n hop_count += 1\n queue.update([\n n for n in node.get_all_edges()\n if n.visited is False\n ])\n print(hop_count, len(graph))\n return hop_count == len(graph), graph",
"def IsNode(self, *args):\n return _snap.PUNGraph_IsNode(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure UserMessage rows with historical flag are also considered for read receipts. | def test_historical_usermessages_read_flag_not_considered(self) -> None:
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
stream_name = "test stream"
self.subscribe(cordelia, stream_name)
message_id = self.send_stream_message(cordelia, stream_name, content="foo")
self.login("hamlet")
# Have hamlet react to the message to
# create a historical UserMessage row.
reaction_info = {
"emoji_name": "smile",
}
result = self.client_post(f"/json/messages/{message_id}/reactions", reaction_info)
self.assert_json_success(result)
# Ensure UserMessage row with historical and read flags exists
user_message = UserMessage.objects.get(user_profile=hamlet, message_id=message_id)
self.assertTrue(user_message.flags.historical)
self.assertTrue(user_message.flags.read)
result = self.client_get(f"/json/messages/{message_id}/read_receipts")
self.assert_json_success(result)
self.assertIn(hamlet.id, result.json()["user_ids"]) | [
"def mark_as_unread(self):\n if self.read_at:\n self.read_at = None\n return self.save(query=True)",
"def get_unread_messages(self, user):\n return Message.objects.filter(recipient=user, read_at=None)",
"def mark_as_read(self, message):\n\n if message.read_at is None:\n message.read_at = timezone.now()\n message_read.send(sender=message, from_user=message.sender, to=message.recipient)\n message.save()",
"def mark_unread(self, user, message_id):\n pass",
"def check_messages(self):\n messages = self.intrade.get_messages(self.last_message_check)\n self.last_message_check = int(messages['@timestamp'])\n return True if 'msg' in messages else False",
"def clearReadStatus(message):",
"def mark_as_read(self):\r\n self.hasBeenRead = True",
"def is_read(self):\n return self.read_at is not None",
"def _AllMandatesAccepted(self, user):\n if self._mandates:\n for mandate in self._mandates:\n if mandate.IsAcceptedByTrader(user) is False:\n return False\n return True",
"def mark_as_read(self):\n self.has_been_read = True",
"def topic_is_unread(topic, topicsread, user, forumsread=None):\n if not user.is_authenticated():\n return False\n\n read_cutoff = datetime.utcnow() - timedelta(\n days=flaskbb_config[\"TRACKER_LENGTH\"])\n\n # disable tracker if read_cutoff is set to 0\n if read_cutoff == 0:\n return False\n\n # check read_cutoff\n if topic.last_post.date_created < read_cutoff:\n return False\n\n # topicsread is none if the user has marked the forum as read\n # or if he hasn't visited yet\n if topicsread is None:\n # user has cleared the forum sometime ago - check if there is a new post\n if forumsread and forumsread.cleared is not None:\n return forumsread.cleared < topic.last_post.date_created\n\n # user hasn't read the topic yet, or there is a new post since the user\n # has marked the forum as read\n return True\n\n # check if there is a new post since the user's last topic visit\n return topicsread.last_read < topic.last_post.date_created",
"def test_mark_notification_as_read(self):\n url = f\"{self.base_url}/read/{self.notification.id}/\"\n response = self.client.get(url, **self.restaurant_headers)\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.data['status'])\n notification = NotificationModel.objects.get(id=1)\n self.assertEqual(notification.status, \"R\")",
"def mark_as_read(self):\n self.read = True\n self.save()",
"def get_unread_messages(self):\n return self.messages.all().filter(is_read=False)",
"def can_read__after(self, *args, **kwargs):\n return self._check_default('can_read')",
"def test_read_at_time(self):\n participant = self.thread.participants.get(user=self.mortimer)\n participant.read_thread()\n self.assertTrue(participant.read_at is not None, \"There is a read_at date. Message was read\")",
"async def mark_all_notifs_read(self) -> int:\n url = self.get_api_url(\n TYPE_NOTIFICATION, action=\"UserNotification.MarkAllRead()\", format=\"json\")\n async with self._session.post(url) as resp:\n return (await resp.json())[\"d\"][\"count\"]",
"def unread_mail(self):\n em = email.Email()\n emails = em.request(today = False, prnt = True)\n if emails:\n for lines in emails:\n self.log.info(lines)\n else:\n mws.recover()\n tc_fail(\"Failed requesting Unread Mail\")",
"async def received_client_read_marker(\n self, room_id: str, user_id: str, event_id: str\n ) -> None:\n\n async with self.read_marker_linearizer.queue((room_id, user_id)):\n existing_read_marker = await self.store.get_account_data_for_room_and_type(\n user_id, room_id, ReceiptTypes.FULLY_READ\n )\n\n should_update = True\n # Get event ordering, this also ensures we know about the event\n event_ordering = await self.store.get_event_ordering(event_id)\n\n if existing_read_marker:\n try:\n old_event_ordering = await self.store.get_event_ordering(\n existing_read_marker[\"event_id\"]\n )\n except SynapseError:\n # Old event no longer exists, assume new is ahead. This may\n # happen if the old event was removed due to retention.\n pass\n else:\n # Only update if the new marker is ahead in the stream\n should_update = event_ordering > old_event_ordering\n\n if should_update:\n content = {\"event_id\": event_id}\n await self.account_data_handler.add_account_data_to_room(\n user_id, room_id, ReceiptTypes.FULLY_READ, content\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find the most popular goods, then set the popular good identifiers to one and all other good identifiers to zero. | def process_orders(self):
for person_orders in self.orders.values():
non_zero_count = np.count_nonzero(person_orders)
if non_zero_count < self.num_popular_ids:
non_zero_ind = person_orders.argsort()[::-1][:non_zero_count]
sub_index = []
for index in self.most_popular_good_ids:
if index not in non_zero_ind:
sub_index.append(index)
non_zero_count += 1
if non_zero_count == self.max_good_id:
break
if non_zero_count < self.num_popular_ids:
sub_index.extend(
np.random.randint(self.max_good_id + 1,
size=(self.num_popular_ids -
non_zero_count)).tolist()
)
person_orders[sub_index] = 1
else:
indices = person_orders.argsort()[::-1][:self.num_popular_ids]
not_in_indices = [x for x in range(len(person_orders))
if x not in indices]
person_orders[not_in_indices] = 0
person_orders[indices] = 1 | [
"def update_counts(self, great = 0, good = 0, bad = 0, miss = 0):\n \n self.great = great\n self.good = good\n self.bad = bad\n self.miss = miss\n self.perfect = self.total_count() - (miss + bad + good + great)",
"def frequent_itemset(transactions, minsup):\n pass",
"def good_count(self, good_count):\n\n self._good_count = good_count",
"def reduce_products(data, top_percent): \n # number of products\n n_of_products = data.product_id.nunique()\n\n # output\n print('Total Number of Products: {0}'.format(n_of_products))\n\n # 20% is the regular percentage of reducing the products\n top_20 = int(n_of_products * top_percent)\n\n # select the top products\n n_of_products_bought = data.product_id.value_counts()\n prod_f = n_of_products_bought.nlargest(top_20)\n top_products = prod_f.index\n\n # filter the transactions only for the top products\n data = data[(data.product_id.isin(top_products))]\n\n # output\n print('Number of Products after reduction: {0}'.format(top_20))\n\n return data",
"def _setcover_greedy_new(candidate_sets_dict, items=None, set_weights=None,\n item_values=None, max_weight=None):\n if len(candidate_sets_dict) == 0:\n # O(1) optimal solution, we did it!\n return {}\n\n solution_cover = {}\n solution_weight = 0\n\n if items is None:\n items = list(set(it.chain(*candidate_sets_dict.values())))\n\n # Inverted index\n item_to_keys = {item: set() for item in items}\n # This is actually a fair bit faster than the non-comprehension version\n [item_to_keys[item].add(key)\n for key, vals in candidate_sets_dict.items()\n for item in vals]\n\n # If set_weights or item_values not given use the length as defaults\n if set_weights is None:\n get_weight = len\n else:\n # TODO: we can improve this with bookkeeping\n def get_weight(solution_cover):\n return sum(set_weights[key] for key in solution_cover.keys())\n\n if item_values is None:\n get_value = len\n else:\n def get_value(vals):\n return sum(item_values[v] for v in vals)\n if max_weight is None:\n max_weight = get_weight(candidate_sets_dict)\n\n avail_covers = OrderedDict([\n (key, set(vals))\n for key, vals in sorted(candidate_sets_dict.items())\n ])\n avail_totals = OrderedDict([\n (key, get_value(vals))\n for key, vals in avail_covers.items()\n ])\n\n print('avail_covers = {}'.format(ub.urepr(avail_covers, nl=1)))\n print('avail_totals = {}'.format(ub.urepr(avail_totals, nl=1)))\n\n # While we still need covers\n while solution_weight < max_weight and len(avail_covers) > 0:\n # Find candiate set with the most valuable uncovered items\n chosen_key = ub.argmax(avail_totals)\n if avail_totals[chosen_key] <= 0:\n # needlessly adding value-less covering set\n break\n\n print('-----')\n print('CHOOSE COVER SET = {!r}'.format(chosen_key))\n\n # Add values in this key to the cover\n chosen_items = avail_covers[chosen_key]\n solution_cover[chosen_key] = candidate_sets_dict[chosen_key]\n\n # Update the solution weight\n chosen_weight = (1 if set_weights is None else set_weights[chosen_key])\n solution_weight += chosen_weight\n\n # Remove chosen covering set from available options\n del avail_covers[chosen_key]\n del avail_totals[chosen_key]\n\n # For each chosen item, find the other sets that it belongs to\n modified_keys = set()\n for item in chosen_items:\n # Update the inverted index\n new_keys = item_to_keys[item]\n new_keys.remove(chosen_key)\n item_to_keys[item] = new_keys\n # And mark the non-chosen reamining cover sets as modified\n modified_keys.update(new_keys)\n # Then update and recompute the value of the modified sets\n for key in modified_keys:\n avail_covers[key].difference_update(chosen_items)\n newval = get_value(avail_covers[key])\n avail_totals[key] = newval\n\n print('avail_covers = {}'.format(ub.urepr(avail_covers, nl=1)))\n print('avail_totals = {}'.format(ub.urepr(avail_totals, nl=1)))\n\n print('solution_cover = {!r}'.format(solution_cover))\n return solution_cover",
"def update_popularity_of_word( item ):\n item.PopularityOfWord = 0\n\n if item.ExplainationExamplesRaw is not None:\n item.PopularityOfWord += len( item.ExplainationExamplesRaw ) * 5\n\n if item.RelatedTerms is not None:\n item.PopularityOfWord += len( item.RelatedTerms )\n\n other_cost = 0\n\n if item.Translation_DE is not None:\n other_cost += len( item.Translation_DE )\n\n if item.Translation_EN is not None:\n other_cost += len( item.Translation_EN )\n\n if item.Translation_ES is not None:\n other_cost += len( item.Translation_ES )\n\n if item.Translation_FR is not None:\n other_cost += len( item.Translation_FR )\n\n if item.Translation_IT is not None:\n other_cost += len( item.Translation_IT )\n\n if item.Translation_PT is not None:\n other_cost += len( item.Translation_PT )\n\n if item.Translation_RU is not None:\n other_cost += len( item.Translation_RU )\n\n if item.Holonymy is not None:\n other_cost += len( item.Holonymy )\n\n if item.Troponymy is not None:\n other_cost += len( item.Troponymy )\n\n if item.Hypernymy is not None:\n other_cost += len( item.Hypernymy )\n\n if item.Hyponymy is not None:\n other_cost += len( item.Hyponymy )\n\n if item.Meronymy is not None:\n other_cost += len( item.Meronymy )\n\n if item.Synonymy is not None:\n other_cost += len( item.Synonymy )\n\n if item.Antonymy is not None:\n other_cost += len( item.Antonymy )\n\n item.PopularityOfWord += 1 if other_cost else 0",
"def greedily_fill(knap, items, i_sorted_by_cost_weight, i_sorted_by_cost, i_sorted_by_weight):\n i = 0\n while i < len(items):\n item = items[i_sorted_by_cost_weight[i]]\n if (item['cost'] + knap.cost) > MAX_COST or (item['weight'] + knap.weight) > MAX_WEIGHT:\n pass\n else:\n knap.add_items(items, [i_sorted_by_cost_weight[i]])\n\n item = items[i_sorted_by_cost[i]]\n if (item['cost'] + knap.cost) > MAX_COST or (item['weight'] + knap.weight) > MAX_WEIGHT:\n pass\n else:\n knap.add_items(items, [i_sorted_by_cost[i]])\n\n item = items[i_sorted_by_weight[i]]\n if (item['cost'] + knap.cost) > MAX_COST or (item['weight'] + knap.weight) > MAX_WEIGHT:\n pass\n else:\n knap.add_items(items, [i_sorted_by_weight[i]])\n i += 1",
"def majority_stockholders(self, hotel):\r\n players_with_stocks = self.players_with_stocks(hotel)\r\n max_stocks = max([s for p, s in players_with_stocks])\r\n return set([p for p, s in players_with_stocks if s == max_stocks])",
"def minority_stockholders(self, hotel):\r\n not_majority_shareholders = \\\r\n [(p, s) for p, s in self.players_with_stocks(hotel)\r\n if p not in self.majority_stockholders(hotel)]\r\n if len(not_majority_shareholders) == 0:\r\n return set([])\r\n max_stocks = max([s for p, s in not_majority_shareholders])\r\n return set([p for p, s in not_majority_shareholders if s == max_stocks])",
"def calcul_moyenne_good(self):\n\t\t\t\n\t\tfor mot in self.mots:\n\t\t\tsomme = 0.0\n\t\t\ttot = 0.0\n\t\t\tfor pred in graph.getInNodes(mot):\n\t\t\t\tsomme += self.good[pred]\n\t\t\t\ttot += 1.0\n\t\t\n\t\t\tself.good[mot]\t= somme/tot",
"def test_edit_product_popularity(self):\n self.open_filters()\n self.add_col_to_grid('is_popular')\n old_popular_state, new_popular_state = self.perform_checkbox_toggle('is_popular')\n\n self.assertNotEqual(new_popular_state, old_popular_state)",
"def goods(take_me, free = False):\n global winningConditions\n goods_lst = []\n for n in take_me:\n condition_n = [condition for condition in winningConditions if n in condition] #all winning conditions contains n\n for cond in condition_n: #for all conditions contains n\n goods_lst += [m for m in cond if m != n] #add to goods_lst the two others positions to win with n\n if free:\n goods_free = [n for n in goods_lst if n in free] #list of good and free positions\n return goods_free\n else:\n return goods_lst",
"def really_goods(take_me, free):\n global winningConditions\n goods_lst = []\n for n in take_me:\n condition_n = [condition for condition in winningConditions if n in condition] #all winning conditions contains n\n for cond in condition_n: #for all conditions contains n\n p, q = [m for m in cond if m != n] #list of the two others positions to win with n\n if p in free and q in free: #If the missing positions to win are free\n goods_lst.append(p)\n goods_lst.append(q)\n return goods_lst",
"def most_popular(self, n):\n return dict(collections.Counter([x[2] for x in self.tags]).most_common(n))",
"def _process_empty_predictions(self, predictions):\n for prediction in predictions:\n if not prediction:\n prediction.extend(self._parser.most_popular_good_ids)",
"def test_dontCountItemsInMultipleOffers(self):\n basket = self.createBasket()\n\n basket.addItem(\"beans\")\n basket.addItem(\"beans\")\n basket.addItem(\"beans\")\n basket.addItem(\"chickpeas\")\n basket.addItem(\"spaghetti hoops\")\n\n # Three-for-two should win, so no cheapest-free savings should happen here\n self.assertEqual(basket.total(), 2 * beans.price() + chickpeas.price() + spaghettiHoops.price())\n self.assertEqual(basket.savings(), beans.price())",
"def test_products_like_with_better_score(self):\r\n # p002 # best score => no one better => 0 results\r\n p002 = prd.Product.objects.get(code='0000000000002')\r\n p002_categories = p002.categories.all()\r\n p002_nutrition_grade = p002.nutrition_grade\r\n\r\n raws = prd.Product.objects.filter(\r\n categories__in=p002_categories,\r\n nutrition_grade__lt=p002_nutrition_grade)\r\n self.assertEqual(len(raws), 0)\r\n\r\n # p001 # worst score => 3 others are better\r\n p001 = prd.Product.objects.get(code='0000000000001')\r\n p001_categories = p001.categories.all()\r\n p001_nutrition_grade = p001.nutrition_grade\r\n\r\n raws = prd.Product.objects.filter(\r\n categories__in=p001_categories,\r\n nutrition_grade__lt=p001_nutrition_grade)\r\n self.assertEqual(len(raws), 3)",
"def not_good_count(self, not_good_count):\n\n self._not_good_count = not_good_count",
"def reset_counts_quicksort_randomized():\r\n # reset comparison and exchange counts for next run\r\n global QSR_COMP\r\n global QSR_EX\r\n QSR_COMP = 0\r\n QSR_EX = 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fetches a price ticker, a statistical summary of a specific market calculated over the past 24 hours | async def fetch_ticker(self, symbol: str, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market) | [
"def run(self):\n\n self.sleep_if_market_not_available()\n\n LOG_INSTANCE.info(f\"Retrieving {self.ticker} price\")\n self.reset_cache()\n\n # curls and save intraday data\n intraday_price_so_far = self.retrieve_start_price()\n self.cache_intraday_ticker_data(intraday_price_so_far)\n latest_price = self.get_latest_price_from_cache()\n LOG_INSTANCE.info(\"Retrieved Latest Intraday data for %s: %s\", self.ticker, latest_price)\n\n self.save_start_price_to_file(intraday_price_so_far)\n # we will stop here for now for saving data\n current_hour = excalibur.time_conversion.get_current_hour()\n\n # import ipdb\n # ipdb.set_trace()\n\n # TODO: intraday disconnected, then we will have one hour carries all daily data, we need to address this issue\n\n while True:\n # if market is closed or is weekend, or is market holidays, we will just keep sleeping\n if mini_midas.common.is_market_not_available():\n LOG_INSTANCE.debug('Market Closed,sleeping....Zzzz...')\n time.sleep(65)\n continue\n\n ticker_minute_data = self.get_ticker_price()\n LOG_INSTANCE.info(\"%s intraday: %s\", self.ticker, ticker_minute_data)\n self.cache_ticker_minute_data(ticker_minute_data)\n\n # save current cached prices every hour\n new_hour = excalibur.time_conversion.get_current_hour()\n if current_hour != new_hour:\n self.save_current_cached_data()\n self.clear_intraday_prices()\n # sleep 1 minute before retry\n time.sleep(65)",
"def fetchData(exchange, symbol, timeframe, since=None, limit=None):\n\n # Supported exchanges\n exchange_list = ['binance','bitfinex','bytetrade','ftx','kraken','poloniex','upbit','acx','bequant','bigone','bitforex','bitkk','bitz','btcalpha','coinex','crex24','digifinex','gateio','hitbtc2','huobipro','huobiru','kucoin','lbank','okex','okex3','stex','upbit','whitebit','zb']\n \n # Get our Exchange\n try:\n exchange = getattr (ccxt, exchange) ()\n\n # In case exchange is not supported by ccxt\n except AttributeError:\n print('-'*36,' ERROR ','-'*35)\n print('Exchange \"{}\" not found. Please check the exchange is supported.'.format(exchange))\n print('Supported exchanges are:')\n print(exchange_list)\n print('-'*80)\n quit()\n \n # Check if fetching of OHLC Data is supported\n if exchange.has[\"fetchOHLCV\"] != True:\n print('-'*36,' ERROR ','-'*35)\n print('{} does not support fetching OHLC data. Please use another exchange'.format(exchange))\n print('-'*80)\n quit()\n \n # Check requested timeframe is available. If not return a helpful error.\n if (not hasattr(exchange, 'timeframes')) or (timeframe not in exchange.timeframes):\n print('-'*36,' ERROR ','-'*35)\n print('The requested timeframe ({}) is not available from {}\\n'.format(timeframe,exchange))\n print('Available timeframes are:')\n for key in exchange.timeframes.keys():\n print(' - ' + key)\n print('-'*80)\n quit()\n \n # Check if the symbol is available on the Exchange\n exchange.load_markets()\n if symbol not in exchange.symbols:\n print('-'*36,' ERROR ','-'*35)\n print('The requested symbol ({}) is not available from {}\\n'.format(symbol,exchange))\n print('Available symbols are:')\n for key in exchange.symbols:\n print(' - ' + key)\n print('-'*80)\n quit()\n\n # Get data\n data = exchange.fetch_ohlcv(symbol, timeframe, since, limit)\n header = ['Timestamp', 'open', 'high', 'low', 'close', 'volume']\n df = pd.DataFrame(data, columns=header)\n\n # Convert Timestamp to date\n df.Timestamp = df.Timestamp/1000 # Timestamp is 1000 times bigger than it should be in this case\n df['date'] = pd.to_datetime(df.Timestamp,unit='s')\n\n # Drop timestamp and replace it by date\n df = df[['date', 'open', 'high', 'low', 'close', 'volume']]\n\n # The default values are string, so convert these to numeric values\n df['open'] = pd.to_numeric(df['open'])\n df['high'] = pd.to_numeric(df['high'])\n df['low'] = pd.to_numeric(df['low'])\n df['close'] = pd.to_numeric(df['close'])\n df['volume'] = pd.to_numeric(df['volume'])\n\n # Returned DataFrame should consists of columns: index starting from 0, date as datetime, open, high, low, close, volume in numbers\n return df",
"def _callApi(ticker):\n global last_request_time\n global local_cache\n if ticker in local_cache:\n return local_cache[ticker]\n result = {}\n # Track the number of 503s in case the server is down for an extended period.\n count_503s = 0\n attempts = 0\n while 'Time Series (Daily)' not in result and attempts < 120:\n attempts += 1\n sleep(max(\n 0,\n Config.MIN_TIME_BETWEEN_CALLS - (time() - last_request_time)))\n raw_result = requests.get(Config.BASE_REQUEST + ticker)\n last_request_time = time()\n try:\n if raw_result.status_code == 503:\n count_503s += 1\n if count_503s >= 60:\n raise IOError('Too many 503s from API.')\n continue\n result = raw_result.json()\n except ValueError as e:\n print(raw_result)\n raise e\n\n if attempts >= 120:\n raise IOError('Could not get ticker %s' % ticker)\n\n # Extract the date-price pairs.\n data = {}\n for date in result['Time Series (Daily)']:\n data[date] = _stringToDecimal(\n result['Time Series (Daily)'][date]['5. adjusted close'])\n\n # Convert the date-dict to an ordered one.\n ordered_date_dict = OrderedDict(sorted(data.items(), key=lambda t: t[0]))\n cache_data = {ticker: ordered_date_dict}\n cache_data[ticker].update({'_timestamp': time()})\n\n local_cache[ticker] = cache_data\n\n return cache_data",
"def prices(ticker):\n\n # Contact API\n api_key = os.environ.get(\"FMP_API_KEY\")\n url = f\"https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?serietype=line&apikey={api_key}\"\n response = urlopen(url)\n prices = response.read().decode(\"utf-8\")\n return json.loads(prices)",
"def lookup_prices(symbol: str,\n period: int = 2,\n period_type: str = \"month\",\n frequency: int = 1,\n frequency_type: str = \"daily\",\n end_date: str = \"\",\n num_entries_to_analyze: int = 40) -> pd.DataFrame:\n\n if end_date == \"\":\n end_date = int(round(time.time() * 1000))\n else:\n end_date = int(\n round(datetime.datetime.strptime(end_date, '%m-%d-%Y').timestamp() * 1000))\n\n endpoint = f\"https://api.tdameritrade.com/v1/marketdata/{symbol}/pricehistory\"\n payload = {\n 'apikey': config.config['AMERITRADE']['API_KEY'],\n 'period': period,\n 'periodType': period_type,\n 'frequency': frequency,\n 'frequencyType': frequency_type,\n 'endDate': end_date,\n 'needExtendedHoursData': 'false',\n }\n\n # TODO: Add more exception handling\n try:\n content = requests.get(url=endpoint, params=payload)\n except requests.exceptions.ProxyError:\n print(\"ProxyError, maybe you need to connect to to your proxy server?\")\n sys.exit()\n\n try:\n data = content.json()\n except json.decoder.JSONDecodeError:\n print(\"Error, API Request Returned: \" + str(content))\n print(\"Endpoint: \" + endpoint)\n print(\"payload:: \" + str(payload))\n return None\n\n candle_data = pd.DataFrame.from_records(data['candles'])\n\n if candle_data.empty:\n return None\n\n candle_data = candle_data[['datetime', 'open', 'high', 'low', 'close', 'volume']]\n candle_data = candle_data[-num_entries_to_analyze:]\n candle_data = pd.DataFrame.reset_index(candle_data, drop=True)\n\n # Convert datetime TODO: Understand the different timestamps used\n candle_data['datetime'] = mdates.epoch2num(candle_data['datetime'] / 1000)\n\n return candle_data",
"def Forecast(ticker, type = 'market', api='iex', start='1/1/2017', end=None):\n\n\tdf = pdr.DataReader(ticker, api, start, end)\n\tnew = pd.DataFrame()\n\tif api == 'quandl':\n\t\topen = 'AdjOpen'\n\t\tclose = 'AdjClose'\n\t\thigh = 'AdjHigh'\n\t\tlow = 'AdjLow'\n\t\tvolume = 'AdjVolume'\n\tif api == 'iex':\n\t\topen = 'open'\n\t\tclose = 'close'\n\t\thigh = 'high'\n\t\tlow = 'low'\n\t\tvolume = 'volume'\n\tif type == 'market':\n\t\tnew = new.append(df[close])\n\t\tnew = new.T\n\t\tnew['ds'] = new.index\n\t\tnew['y'] = new[close]\n\t\tcols = new.columns.tolist()\n\t\tcols.remove(close)\n\t\tnew = new[cols]\n\tm = Prophet(changepoint_prior_scale=.1999)\n\tm.fit(new)\n\tfuture = m.make_future_dataframe(periods=7)\n\tforecast = m.predict(future)\n\tprint(\"Yesterday's closing price:\", df[close][-1])\n\tprint(\"Prediction:\", '\\n', forecast[['ds', 'trend','yhat_lower', 'yhat_upper']])\n\tforecast['avg'] = (forecast['yhat_upper'] +forecast['yhat_lower']) / 2\n\tavg = forecast[['ds', 'avg']]\n\tprint(avg)\n\t# forecast.to_excel(ticker + '__' + '7DayForecast.xlsx')\n\tm.plot(forecast)\n\tplt.title(ticker)\n\tplt.show(block=False)\n\t# m.plot_components(forecast)\n\treturn forecast",
"def get_current_price():\n try:\n return exchange.fetch_ticker(conf.pair)['bid']\n\n except (ccxt.ExchangeError, ccxt.AuthenticationError, ccxt.ExchangeNotAvailable, ccxt.RequestTimeout) as error:\n log.error('Got an error %s %s, retrying in about 5 seconds...', type(error).__name__, str(error.args))\n sleep_for(4, 6)\n return get_current_price()",
"def get_market_ticker(self, pair):\n\n pair = self.format_pair(pair)\n return self.api(\"ticker/\" + pair)",
"def download_historical_prices(symbol):\r\n auth_token = 'g1CWzGxxg2WxNVbV5n9y'\r\n\r\n # add exchange prefix to symbol name\r\n futures_info = get_futures_info()\r\n prefix = futures_info['Exchange'].loc[futures_info['Symbol'] == symbol[:-5]].values[0] # strip off month and year\r\n full_name = prefix + '/' + symbol\r\n\r\n prices = pd.DataFrame()\r\n try:\r\n # download prices from quandl using full_name\r\n prices = quandl.get(full_name, authtoken=auth_token)\r\n prices = prices['Settle']\r\n # add contract_sort in order to sort by year then by month using contract name\r\n prices = pd.DataFrame({'Settle': pd.Series(prices),\r\n 'Contract': symbol,\r\n 'Contract_Sort': symbol[-4:] + symbol[-5:-4] + symbol[:-5]})\r\n except:\r\n pass\r\n return prices",
"def fetch_price(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n # Note: This is day-ahead prices\n if not session:\n session = requests.session()\n if zone_key in ENTSOE_PRICE_DOMAIN_OVERRIDE:\n domain = ENTSOE_PRICE_DOMAIN_OVERRIDE[zone_key]\n else:\n domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]\n # Grab consumption\n parsed = parse_price(query_price(domain, session, target_datetime=target_datetime))\n if parsed:\n data = []\n prices, currencies, datetimes = parsed\n for i in range(len(prices)):\n data.append({\n 'zoneKey': zone_key,\n 'datetime': datetimes[i].datetime,\n 'currency': currencies[i],\n 'price': prices[i],\n 'source': 'entsoe.eu'\n })\n\n return data",
"def call_api(self, crypto = \"ADA\"):\r\n now = datetime.datetime.utcnow()\r\n before = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)\r\n \r\n start = calendar.timegm(before.timetuple()) * 1000\r\n end = calendar.timegm(now.timetuple()) * 1000\r\n \r\n CANDLE_DATA = f'https://api.kraken.com/0/public/OHLC'\r\n pair = f'{crypto}USD'\r\n PARAMS = {\r\n 'pair' : pair,\r\n 'interval' : '1',\r\n 'since' : end\r\n }\r\n \r\n \r\n response = requests.get(url = CANDLE_DATA, params = PARAMS)\r\n \r\n while response.status_code != 200:\r\n time.sleep(2)\r\n response = requests.get(url = CANDLE_DATA, params = PARAMS)\r\n \r\n # Get the most important information from the response recieved\r\n data = response.json().get('result').get(pair)[0]\r\n \r\n return data",
"async def get_ticker(self, pair: Pair) -> MarketTicker:\n data = await self.api.get(\n \"public/get-ticker\", {\"instrument_name\": pair.name}\n )\n return MarketTicker.from_api(pair, data[0])",
"def tickPrice(self, TickerId, tickType, price, canAutoExecute):\n for security in self.data: \n if security.req_real_time_price_id==TickerId:\n self.data[security].datetime=self.stime\n self.log.debug(__name__ + ', ' + str(TickerId) + \", \" + MSG_TABLE[tickType]\n + \", \" + str(security.symbol) + \", price = \" + str(price))\n if tickType==1: #Bid price\n self.data[security].bid_price = price\n self.update_DataClass(security, 'bid_price_flow', price)\n elif tickType==2: #Ask price\n self.data[security].ask_price = price\n self.update_DataClass(security, 'ask_price_flow', price) \n elif tickType==4: #Last price\n self.data[security].price = price\n self.update_DataClass(security, 'last_price_flow', price) \n elif tickType==6: #High daily price\n self.data[security].daily_high_price=price\n elif tickType==7: #Low daily price\n self.data[security].daily_low_price=price\n elif tickType==9: #last close price\n self.data[security].daily_prev_close_price = price\n elif tickType == IBCpp.TickType.OPEN:\n self.data[security].daily_open_price = price\n\n #if (self.stime_previous is None or self.stime - \n #self.stime_previous > self.barSize):\n # # the begining of the bar\n # self.data[security].open_price=self.data[security].bid_price\n # self.data[security].high=self.data[security].bid_price\n # self.data[security].low=self.data[security].bid_price\n #else:\n # if tickType==4 and price>self.data[security].high: #Bid price\n # self.data[security].high=price\n # if tickType==4 and price<self.data[security].low: #Bid price\n # self.data[security].low=price",
"def fetch_ticker(ticker_name,timerange=''):\r\n ticker_name= str.upper(ticker_name)\r\n if timerange!=\"\" and timerange!=\"full\":\r\n raise ValueError(\"The parameter timerange can only accept the values: '' of 'full'.\")\r\n if timerange==\"\":\r\n timerange=\"compact\"\r\n try:\r\n url= \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&apikey=ROB5U3GW2F12PUIH&datatype=csv&outputsize={}\".format(ticker_name, timerange)\r\n ready_data_ticker = pd.read_csv(url,sep=',', encoding='utf8', parse_dates=['timestamp'],dayfirst=True, index_col='timestamp')\r\n ready_data_ticker = ready_data_ticker.fillna(method='ffill')\r\n \r\n if os.path.exists(\"./data\")==False:\r\n os.makedirs(\"./data\")\r\n if os.path.exists(\"./data/{}.csv\".format(ticker_name))==False:\r\n ready_data_ticker.to_csv(\"./data/{}.csv\".format(ticker_name))\r\n except ValueError:\r\n raise ValueError(\"Sorry. The ticker_name that you choise is not found in the database of the site 'alphavantage'.\")",
"def get_stock_prices(self, ticker, time_period=False, limit=False, time_normalized=False, dataframe=False):\n cursor = self.db.cursor()\n sql = \"SELECT `date`, `open`, `high`, `low`, `close`, `volume` FROM `stock_data` WHERE `ticker` = '{}'\".format(\n ticker)\n\n if time_period:\n\n if type(time_period) != list or len(time_period) != 2:\n return False\n\n if not time_period[0]:\n\n earliest_timestamp = \"SELECT `date` FROM `stock_data` WHERE `ticker` = '{}' ORDER BY `date` ASC LIMIT 1;\".format(\n ticker)\n cursor.execute(earliest_timestamp)\n results = cursor.fetchall()\n\n if not results:\n raise Exception(\n \"Ticker value `{}` not found\".format(ticker))\n\n time_period[0] = results[0]['date']\n time_period[0] = time_period[0].strftime(\n \"%Y:%m:%d 00:00:00\")\n\n sql = sql + \\\n \" AND `date` BETWEEN '{}' AND '{}'\".format(\n time_period[0], time_period[1])\n if limit:\n sql = sql + \" LIMIT {}\".format(limit)\n sql = sql + \" ORDER BY `date` DESC;\"\n cursor.execute(sql)\n results = cursor.fetchall()\n cursor.close()\n\n if not results:\n\n return False\n\n if time_normalized:\n\n result_datetimes = [x['date'] for x in results]\n norm_results = []\n times = [results[0]['date'], results[len(results) - 1]['date']]\n\n diff = (times[0] - times[1]).days\n for d in range(diff):\n dt = times[len(times) - 1] + datetime.timedelta(days=d)\n if dt not in result_datetimes:\n norm_results.append({\n \"ticker\": ticker,\n \"date\": dt,\n \"open\": np.nan,\n \"high\": np.nan,\n \"low\": np.nan,\n \"close\": np.nan,\n \"volume\": np.nan})\n else:\n idx = result_datetimes.index(dt)\n norm_results.append(results[idx])\n results = norm_results\n\n if dataframe:\n results = DataFrame(results)\n\n return results",
"def get_prices_data(\n metrics: dict,\n market: Market,\n query_window: Tuple[datetime, datetime],\n resolution: str,\n forecast_horizon: timedelta,\n) -> Tuple[pd.DataFrame, pd.DataFrame, dict]:\n\n market_name = \"\" if market is None else market.name\n\n # Get price data\n price_bdf: tb.BeliefsDataFrame = Price.collect(\n [market_name],\n query_window=query_window,\n resolution=resolution,\n belief_horizon_window=(None, timedelta(hours=0)),\n )\n price_df: pd.DataFrame = simplify_index(\n price_bdf, index_levels_to_columns=[\"belief_horizon\", \"source\"]\n )\n\n if not price_bdf.empty:\n metrics[\"realised_unit_price\"] = price_df[\"event_value\"].mean()\n else:\n metrics[\"realised_unit_price\"] = np.NaN\n\n # Get price forecast\n price_forecast_bdf: tb.BeliefsDataFrame = Price.collect(\n [market_name],\n query_window=query_window,\n resolution=resolution,\n belief_horizon_window=(forecast_horizon, None),\n source_types=[\"user\", \"forecasting script\", \"script\"],\n )\n price_forecast_df: pd.DataFrame = simplify_index(\n price_forecast_bdf, index_levels_to_columns=[\"belief_horizon\", \"source\"]\n )\n\n # Calculate the price metrics\n if not price_forecast_df.empty and price_forecast_df.size == price_df.size:\n metrics[\"expected_unit_price\"] = price_forecast_df[\"event_value\"].mean()\n metrics[\"mae_unit_price\"] = calculations.mean_absolute_error(\n price_df[\"event_value\"], price_forecast_df[\"event_value\"]\n )\n metrics[\"mape_unit_price\"] = calculations.mean_absolute_percentage_error(\n price_df[\"event_value\"], price_forecast_df[\"event_value\"]\n )\n metrics[\"wape_unit_price\"] = calculations.weighted_absolute_percentage_error(\n price_df[\"event_value\"], price_forecast_df[\"event_value\"]\n )\n else:\n metrics[\"expected_unit_price\"] = np.NaN\n metrics[\"mae_unit_price\"] = np.NaN\n metrics[\"mape_unit_price\"] = np.NaN\n metrics[\"wape_unit_price\"] = np.NaN\n return price_df, price_forecast_df, metrics",
"def download_market_data(tickers):\n\n for ticker in tickers:\n url = f\"https://poloniex.com/public\"\n params = {\n 'command': 'returnChartData',\n 'currencyPair': ticker,\n 'start': 1493669112,\n 'end': 9999999999,\n 'period': 1800\n }\n query_string = parse.urlencode(params)\n save_response(ticker, f\"{url}?{query_string}\")\n time.sleep(1)",
"def acquire_data(ticker):\n to = 'BTC'\n if ticker == 'BTC':\n print(to)\n to = 'USD'\n print(to)\n\n if not os.path.exists('coins_dfs/{}.csv'.format(ticker)):\n print('Getting {}'.format(ticker))\n df = prices(ticker, to)\n print('Got {}'.format(ticker))\n df.to_csv('coins_dfs/{}.csv'.format(ticker))\n else:\n print('Already have {}'.format(ticker))",
"async def _async_get_price(exchange: ExchangeClient, pair: str) -> Tuple[str, Decimal]:\n ticker = await exchange.fetch_ticker(pair)\n return exchange.id, Decimal(\n exchange.price_to_precision(\n pair,\n # \"last\" is an alias to \"close\"\n ticker.get(\"last\"),\n )\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fetch the trading fees for multiple markets | async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result | [
"async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.privateGetFees(params)\n #\n # {\n # \"maker_fee_rate\": \"0.0050\",\n # \"taker_fee_rate\": \"0.0050\",\n # \"usd_volume\": \"43806.92\"\n # }\n #\n maker = self.safe_number(response, 'maker_fee_rate')\n taker = self.safe_number(response, 'taker_fee_rate')\n result = {}\n for i in range(0, len(self.symbols)):\n symbol = self.symbols[i]\n result[symbol] = {\n 'info': response,\n 'symbol': symbol,\n 'maker': maker,\n 'taker': taker,\n 'percentage': True,\n 'tierBased': True,\n }\n return result",
"async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.v3PrivateGetAccountinfo(params)\n #\n # {\n # \"success\": True,\n # \"data\": {\n # \"applicationId\": \"dsa\",\n # \"account\": \"dsa\",\n # \"alias\": \"haha\",\n # \"accountMode\": \"MARGIN\",\n # \"leverage\": 1,\n # \"takerFeeRate\": 1,\n # \"makerFeeRate\": 1,\n # \"interestRate\": 1,\n # \"futuresTakerFeeRate\": 1,\n # \"futuresMakerFeeRate\": 1,\n # \"otpauth\": True,\n # \"marginRatio\": 1,\n # \"openMarginRatio\": 1,\n # \"initialMarginRatio\": 1,\n # \"maintenanceMarginRatio\": 1,\n # \"totalCollateral\": 1,\n # \"freeCollateral\": 1,\n # \"totalAccountValue\": 1,\n # \"totalVaultValue\": 1,\n # \"totalStakingValue\": 1\n # },\n # \"timestamp\": 1673323685109\n # }\n #\n data = self.safe_value(response, 'data', {})\n maker = self.safe_string(data, 'makerFeeRate')\n taker = self.safe_string(data, 'takerFeeRate')\n result = {}\n for i in range(0, len(self.symbols)):\n symbol = self.symbols[i]\n result[symbol] = {\n 'info': response,\n 'symbol': symbol,\n 'maker': self.parse_number(Precise.string_div(maker, '10000')),\n 'taker': self.parse_number(Precise.string_div(taker, '10000')),\n 'percentage': True,\n 'tierBased': True,\n }\n return result",
"async def fetch_trading_fee(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = await self.privateGetFeeInfo(self.extend(request, params))\n #\n # {\n # \"maker_fee\": \"0.00250000\",\n # \"taker_fee\": \"0.00500000\",\n # \"thirty_day_volume\": \"0\"\n # }\n #\n return {\n 'info': response,\n 'symbol': symbol,\n 'maker': self.safe_number(response, 'maker_fee'),\n 'taker': self.safe_number(response, 'taker_fee'),\n }",
"def fees(self):\n\t\tdat = self.conn.call('GET', '/api/fees/').json()\n\t\tdeposit = float(dat['data']['deposit_fee'])\n\t\toutgoing = float(dat['data']['outgoing_fee'])\n\t\treturn (deposit, outgoing)",
"def request_fundamentals(stock_index):\n items = [\n ['l1', 'Last Price'],\n ['y', 'Dividend Yield'],\n ['r', 'Price/Earnings'],\n ['e', 'Earnings/Share'],\n ['b4', 'Book Value'],\n ['j', '52 week low'],\n ['k', '52 week high'],\n ['j1', 'Market Cap'],\n ['j4', 'EBITDA'],\n ['p5', 'Price/Sales'],\n ['p6', 'Price/Book'],\n ['f6','Float']\n ] \n params = ''.join([ x[0] for x in items ])\n url = 'http://download.finance.yahoo.com/d/quotes.csv?'\n #edgar = 'http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK='\n\n reader = csv.reader(open(data_dir + stock_index +'/'+ stock_index +'_atoms.csv'))\n outrows = [ row for row in reader ]\n symbols = [ row[0] for row in outrows[1:] ]\n\n #outrows[0] += [ item[1] for item in items ] + ['SEC Filings']\n outrows[0] += [ item[1] for item in items ]\n \n print('Getting fundamentals of stocks in {}'.format(stock_index))\n for idx in range(0,len(symbols),20):\n query = url + 's=' + '+'.join(symbols[idx:idx+20]) + '&f=' + params\n fo = urlopen(query)\n tmpcsv = csv.reader(fo)\n rows = [ row for row in tmpcsv ]\n for count, row in enumerate(rows):\n realidx = idx + count + 1\n # change n/a to empty cell\n row = [ x.replace('N/A', '') for x in row ]\n # market cap and ebitda have 'B' or 'M' in them sometimes\n row[7] = correctToBillions(row[7])\n row[8] = correctToBillions(row[8])\n # add the edgar link\n #row.append(edgar + symbols[realidx-1])\n outrows[realidx] = outrows[realidx] + row\n #print('Processed: %s rows' % (idx + 20))\n\n output_dir = data_dir + stock_index + '/' + todays_date_mmddyy() + '/'\n fo = open(output_dir + 'fundm_'+ todays_date_mmddyy() +'.csv', 'w')\n writer = csv.writer(fo, lineterminator='\\n')\n writer.writerows(outrows)\n fo.close()",
"def do_get_fees(self):\n request = self._request.get_fees()\n return self._send_get_first_result(request)",
"async def _update_trading_fees(self):\n \"\"\"\n {\n \"status\": \"success\",\n \"data\": {\n \"taker_fee_rates_x18\": [\n \"0\",\n \"300000000000000\",\n \"200000000000000\",\n \"300000000000000\",\n \"200000000000000\"\n ],\n \"maker_fee_rates_x18\": [\n \"0\",\n \"0\",\n \"0\",\n \"0\",\n \"0\"\n ],\n \"liquidation_sequencer_fee\": \"250000000000000000\",\n \"health_check_sequencer_fee\": \"100000000000000000\",\n \"taker_sequencer_fee\": \"25000000000000000\",\n \"withdraw_sequencer_fees\": [\n \"10000000000000000\",\n \"40000000000000\",\n \"0\",\n \"600000000000000\",\n \"0\"\n ]\n }\n }\n \"\"\"\n try:\n fee_rates = await self._get_fee_rates()\n taker_fees = {idx: fee_rate for idx, fee_rate in enumerate(fee_rates[\"taker_fee_rates_x18\"])}\n maker_fees = {idx: fee_rate for idx, fee_rate in enumerate(fee_rates[\"maker_fee_rates_x18\"])}\n # NOTE: This builds our fee rates based on indexed product_id\n for trading_pair in self._trading_pairs:\n product_id = utils.trading_pair_to_product_id(\n trading_pair=trading_pair, exchange_market_info=self._exchange_market_info[self._domain]\n )\n self._trading_fees[trading_pair] = {\n \"maker\": Decimal(utils.convert_from_x18(maker_fees[product_id])),\n \"taker\": Decimal(utils.convert_from_x18(taker_fees[product_id])),\n }\n except Exception:\n # NOTE: If failure to fetch, build default fees\n for trading_pair in self._trading_pairs:\n self._trading_fees[trading_pair] = {\n \"maker\": utils.DEFAULT_FEES.maker_percent_fee_decimal,\n \"taker\": utils.DEFAULT_FEES.taker_percent_fee_decimal,\n }",
"def get_all_markets(self):\n markets = [\n (i.primaryCurrency, i.secondaryCurrency,i.contractName, int(i.priceSource), i)\n for i in self.c.marketDataApi.get_all_price_markets().result\n ]\n df = pd.DataFrame(\n markets,\n columns=(\n [\"primarycurrency\", \"secondarycurrency\",\"contract\", \"pricesource\", \"marketobj\"]\n ),\n )\n df.drop_duplicates(inplace=True, ignore_index=True)\n df[\"Ticker\"] = df.primarycurrency.values + df.secondarycurrency.values\n return df",
"async def fetch_markets(self, params={}):\n response = await self.publicGetProducts(params)\n #\n # [\n # {\n # id: 'BTCAUCTION-USD',\n # base_currency: 'BTC',\n # quote_currency: 'USD',\n # base_min_size: '0.000016',\n # base_max_size: '1500',\n # quote_increment: '0.01',\n # base_increment: '0.00000001',\n # display_name: 'BTCAUCTION/USD',\n # min_market_funds: '1',\n # max_market_funds: '20000000',\n # margin_enabled: False,\n # fx_stablecoin: False,\n # max_slippage_percentage: '0.02000000',\n # post_only: False,\n # limit_only: False,\n # cancel_only: True,\n # trading_disabled: False,\n # status: 'online',\n # status_message: '',\n # auction_mode: False\n # },\n # {\n # id: 'BTC-USD',\n # base_currency: 'BTC',\n # quote_currency: 'USD',\n # base_min_size: '0.000016',\n # base_max_size: '1500',\n # quote_increment: '0.01',\n # base_increment: '0.00000001',\n # display_name: 'BTC/USD',\n # min_market_funds: '1',\n # max_market_funds: '20000000',\n # margin_enabled: False,\n # fx_stablecoin: False,\n # max_slippage_percentage: '0.02000000',\n # post_only: False,\n # limit_only: False,\n # cancel_only: False,\n # trading_disabled: False,\n # status: 'online',\n # status_message: '',\n # auction_mode: False\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'id')\n baseId, quoteId = id.split('-')\n # BTCAUCTION-USD vs BTC-USD conflict workaround, see the output sample above\n # baseId = self.safe_string(market, 'base_currency')\n # quoteId = self.safe_string(market, 'quote_currency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n status = self.safe_string(market, 'status')\n result.append(self.extend(self.fees['trading'], {\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': self.safe_value(market, 'margin_enabled'),\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': (status == 'online'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'base_increment'),\n 'price': self.safe_number(market, 'quote_increment'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': self.safe_number(market, 'min_market_funds'),\n 'max': None,\n },\n },\n 'info': market,\n }))\n return result",
"def fees_sell(self) -> float:\n txs = [t for t in self.__transactions if isinstance(t, CryptoSellTransaction)]\n return sum([t.fees for t in txs])",
"def prices(ticker):\n\n # Contact API\n api_key = os.environ.get(\"FMP_API_KEY\")\n url = f\"https://financialmodelingprep.com/api/v3/historical-price-full/{ticker}?serietype=line&apikey={api_key}\"\n response = urlopen(url)\n prices = response.read().decode(\"utf-8\")\n return json.loads(prices)",
"def fees_buy(self) -> float:\n txs = [t for t in self.__transactions if isinstance(t, CryptoBuyTransaction)]\n return sum([t.fees for t in txs])",
"def findFeesSecondary(txdict,df):\n\n cols = [\"galleryFee\",\"artistFee\",\"sellerFee\",\"sale\",\"timestamp\"]\n out = pd.DataFrame(columns=cols)\n for k,v in txdict.items():\n if len(v)==3:\n values = []\n for row in v:\n values.append(int(row[4])/(10**18))\n values.append(sum(values))\n values = sorted(values)\n values.append(v[0][1])\n out = out.append(pd.DataFrame([values],columns=cols),ignore_index=True)\n return out",
"def get_market_data(tickers):\n print(\"Downloading market data...\")\n df = yf.download(\n tickers=tickers,\n period=\"1mo\",\n interval=\"1d\",\n group_by=\"ticker\",\n auto_adjust=False,\n prepost=False,\n threads=True,\n proxy=None,\n )\n return df",
"def get_fee(market, price):\r\n return round(market.api.fees['trading']['taker'] * price,5)",
"def get_contracts(symbol, month, type):\n\ttarget = contract_urlmask % (symbol.strip(\"$\"), month, type)\n\t\t\n\tkey = \"%s-%s-%s\" % (symbol, month, type)\n\tif key in _ts_data_cache and (datetime.datetime.now() - _ts_data_cache[key][0]).total_seconds() / 60 < get_setting(\"DATA_STALE_TIMEOUT\"):\n\t\theader = _ts_data_cache[key][1]\n\t\tprice = Decimal(header[header.find(\"Last:\")+6:header.find(\" , \")])\n\t\treturn (price, _ts_data_cache[key][2])\n\t\n\t# Request\n\tif(get_setting(\"DEBUG\")): print(target)\n\tresponse = requests.get(target, cookies={'slogin' : get_setting(\"SLOGIN\")})\n\txhtml = response.text\n\n\t# Decode\n\tparser = HTMLTableParser()\n\tparser.feed(xhtml)\n\t\n\t# TODO: Check if no options available (raise exception or print error, schedule contract reload)\n\n\t# Modify list - [strike, bid, ask, odds]\n\ttables = parser.tables[6:-5]\n\tcontracts = [ [ Decimal(tables[i-1][1][0].split(\" \")[0]), Decimal(tables[i-1][1][1]), Decimal(tables[i][0][1]), int(tables[i][0][5][:-1]) ] for i in range(1, len(tables)) ]\n\t\n\t# Grab header with price and change\n\theader = tables[0][0][2].split(\"\\n\")[-1]\n\tstart_index = header.find(\"(\")\n\tend_index = header.find(\", V\")\n\theader = header[start_index:end_index]\n\t\n\t# Grab price\n\tprice = Decimal(header[header.find(\"Last:\")+6:header.find(\" , \")])\n\t\n\t_ts_data_cache[key] = (datetime.datetime.now(), header, contracts)\n\t\n\treturn (price, contracts)",
"def getExchangeFee(self):\n # get trade info from public API v3\n # info = requests.get(\"https://btc-e.com/api/3/info\").json()\n\n # fee = info['pairs']['btc_usd']['fee']\n\n return 0.2",
"def fees(self):\n return self._get_prop_value(self._FEES_KEY)",
"def collect_borrow_fees(countries=None):\n params = {}\n if countries:\n params[\"countries\"] = countries\n response = houston.post(\"/fundamental/stockloan/fees\", params=params)\n\n houston.raise_for_status_with_json(response)\n return response.json()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fetch the deposit address for a currency associated with this account | async def fetch_deposit_address(self, code: str, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
} | [
"async def get_deposit_address(self, **params):\r\n return await self.client_helper(\"get_deposit_address\", **params)",
"def get_deposit_address(self, coin):\r\n url = self.url_base + \"id=\" + self.user_id + '&deposit=' + str(coin)\r\n\r\n if self.debug == 1:\r\n print url\r\n\r\n try:\r\n result = requests.get(url, timeout=self.timeout)\r\n except requests.exceptions.RequestException as exception:\r\n print exception\r\n return \"ERROR\"\r\n\r\n return result.text",
"def test_get_deposit_address(self):\n pass",
"def get_withdraw_address(self, currency: str):\n\n self.check_currency(currency)\n params = {\n 'currency': currency\n }\n\n addresses = self.private_get_withdraw_address(params)\n\n return [WithdrawAddress(**address) for address in addresses]",
"def deposit(self, commitment_service_address, deposit_amount):",
"def get_address(self):\n return self.account.address",
"async def btc_address(self, *_):\n if self.bitcoin_address:\n return self.bitcoin_address\n return await self.exec(GetBTCAddress())",
"def get_address(self, address: str) -> Address:",
"async def create_deposit_address(self, code: str, params={}):\n await self.load_markets()\n currency = self.currency(code)\n accounts = self.safe_value(self.options, 'coinbaseAccounts')\n if accounts is None:\n accounts = await self.privateGetCoinbaseAccounts()\n self.options['coinbaseAccounts'] = accounts # cache it\n self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')\n currencyId = currency['id']\n account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)\n if account is None:\n # eslint-disable-next-line quotes\n raise InvalidAddress(self.id + \" createDepositAddress() could not find currency code \" + code + \" with id = \" + currencyId + \" in self.options['coinbaseAccountsByCurrencyId']\")\n request = {\n 'id': account['id'],\n }\n response = await self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))\n address = self.safe_string(response, 'address')\n tag = self.safe_string(response, 'destination_tag')\n return {\n 'currency': code,\n 'address': self.check_address(address),\n 'tag': tag,\n 'info': response,\n }",
"def current_address():\n return wallet['obj'].current_address",
"def get_contract_address(self) -> Address:\n return self.contract.address",
"def get_address_for_account(email):\n query = (email, )\n conn = sqlite3.connect(WALLET_KEYS)\n c = conn.cursor()\n c.execute('SELECT wallet_key from wallet_keys WHERE email=?', query)\n wallet_key = c.fetchone()[0]\n wallet = Wallet.Wallet.from_wallet_key(wallet_key)\n return wallet.address()",
"def deposit(self):\n pass",
"def ex_get_address(self, name, region=None):\r\n region = self._set_region(region) or self._find_zone_or_region(\r\n name, 'addresses', region=True, res_name='Address')\r\n request = '/regions/%s/addresses/%s' % (region.name, name)\r\n response = self.connection.request(request, method='GET').object\r\n return self._to_address(response)",
"def deposit():\n return DEFAULT_DEPOSIT",
"def get_balance(self, address=''):\n params = {'address': address}\n r = self._make_request('getBalance', **params)\n return r",
"def get_balance(self, address, chain_id=None, token_id=None, issuer_id=None):\n params = FATd.check_id_params(chain_id, token_id, issuer_id)\n params[\"address\"] = address\n return self._request(\"get-balance\", params)",
"def get_payout_address(account=None):\n return wallet['obj'].get_payout_address(account)",
"def fetch_account(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
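A minimal usage sketch for the fetch_deposit_address coroutine in the record above, assuming a hypothetical MyExchange class that mixes in that method; the class name, credentials, and close() call are illustrative and not part of the record:

import asyncio

async def main():
    # MyExchange is a hypothetical ccxt-style exchange exposing the coroutine above.
    exchange = MyExchange({'apiKey': '...', 'secret': '...'})
    try:
        deposit = await exchange.fetch_deposit_address('USDT')
        print(deposit['address'], deposit['tag'])  # address plus optional payment id
    finally:
        await exchange.close()  # assumed cleanup hook for the async client

asyncio.run(main())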
Compute the multiplicative modular inverse of `a` mod n. That is, solve the linear congruence a x \equiv 1 (mod n). On success, a solution tuple (base, mod), which represents the solution family `base + mod * k` with k ∈ Z, is returned. If no solution exists (that is, gcd(a, n) != 1), None is returned | def modinv(a: int, n: int) -> Optional[Tuple[int, int]]:
return solve_lincongr(a, 1, n, simplify=True) | [
"def modular_linear_equation_solver(a, b, n):\n d, x_prime, _ = recur_extended_euclid(a, n)\n if not b % d:\n x = x_prime*(b//d) % n\n return True, [(x + i*(n//d)) % n for i in range(d)]\n return False, []",
"def modinv(a, N):\n x, y, d = extended_Euclid(a, N)\n if d == 1:\n return x % N\n else:\n return float(\"nan\")",
"def mod_inv(n, div):\n if math.gcd(n, div) != 1:\n return None\n else:\n return pow(n, sh.phi(div)-1, div) # sh.phi finds phi(n)",
"def modulo_inverse(x: int, n: int) -> int:\n\n def egcd(a: int, b: int) -> typing.Tuple[int, int, int]:\n \"\"\"Euler's extended algorithm for GCD\"\"\"\n if a == 0:\n return b, 0, 1\n else:\n g, y, x = egcd(b % a, a)\n return g, x - (b // a) * y, y\n\n return (n + egcd(n, abs(x % n))[2]) % n",
"def invmodn(b, n):\r\n n0 = n\r\n b0 = b\r\n t0 = 0\r\n t = 1\r\n\r\n q = n0 // b0\r\n r = n0 - q * b0\r\n while r > 0:\r\n temp = t0 - q * t\r\n if (temp >= 0):\r\n temp = temp % n\r\n if (temp < 0):\r\n temp = n - (-temp % n)\r\n t0 = t\r\n t = temp\r\n n0 = b0\r\n b0 = r\r\n q = n0 // b0\r\n r = n0 - q * b0\r\n\r\n if b0 !=1:\r\n return None\r\n else:\r\n return t % n",
"def exp_mod(a: int, b: int, n: int) -> int:\n result = 1\n e = bin(b)\n for i in range(b.bit_length()):\n if e[len(e) - 1 - i] == \"1\":\n result *= a\n result %= n\n a **= 2\n a %= n\n return result",
"def mod_inverse(g: int, n: int) -> int:\n \n assert g > 0 and n > 1, \"Inappropriate values to compute inverse\"\n\n # g = g mod n\n # The inverse wouldn't change if the input was g or g mod p\n g = g % n\n\n # Inverse of g exists mod n iff gcd (g,n) == 1\n # In case the inverse exists it is v\n # where v is such that nu + gv = 1\n # This v is the one returned by the extended Euclidean algorithm\n _, v, g = extended_euclidean_algorithm(n, g)\n\n if g != 1:\n print(\"Inverse doesn't exist\")\n exit()\n else:\n # As v can be negative we take mod n\n # to make it more readable.\n v = v % n\n return v",
"def calculate_x_n_mod_p( x, p, x_n_mod_p=None, n=1 ):\n if x_n_mod_p is None: x_n_mod_p={}\n if n==0: return 1\n if n==1: return x\n if n in x_n_mod_p: return x_n_mod_p[n]\n a = n/2\n b = n-n/2\n x_a = calculate_x_n_mod_p( x,p,x_n_mod_p, a )\n x_b = calculate_x_n_mod_p( x,p,x_n_mod_p, b )\n r = (x_a * x_b) % p\n x_n_mod_p[n] = r\n return r",
"def son_congruentes_modulo(a,b,n):\n\treturn n_esmultiplode_m(a-b,n)",
"def pollard_pminus1_factor(n):\n # choose a > 1 (often a=2)\n a = 2\n\n # choose bound B\n bound = 2\n\n # Compute b = a^(B!)(mod n) as follows\n # Let b(1) = a (mod n) and\n # b(j) = b(j-1)^j (mod n) [from j=2 onwards I'm guessing]\n # Then, b(B) = b (mod n)\n while 1: # Since we only get composite numbers\n b = a % n\n # print \"b1 =\", b\n for j in range(2, bound):\n b = pow(b, j, n)\n # print \"b\" + str(j) + \" = \" + str(b)\n d = gcd(b-1, n) # Let d = gcd(b-1, n)\n # print \"d =\", d\n if 1 < d < n: # if 1 < d < n, d is a nontrivial factor of n\n return int(d), int(n/d)\n else:\n a += 1\n bound += 1",
"def relprimes(n,b=1):\n relprimes = []\n for i in range(1,n):\n if gcd(i,n)==1: relprimes.append(i)\n print(\" n-rp's: %s\" % (relprimes))\n relprimes = map(operator.mul,[b]*len(relprimes),relprimes)\n newremainders = map(operator.mod,relprimes,[n]*len(relprimes))\n print(\"b * n-rp's mod n: %s\" % newremainders)",
"def multiplicative_inverse(a, b, n):\n d, x, y = extended_euclidean(a, n)\n if b % d == 0:\n temp_x = (x * (b/d)) % n\n result = []\n for i in range(d):\n result.append((temp_x + i*(n/d)) % n)\n return result\n return []",
"def solution(n):\n try:\n n = int(n)\n except (TypeError, ValueError) as e:\n raise TypeError(\"Parameter n must be int or passive of cast to int.\")\n if n <= 0:\n raise ValueError(\"Parameter n must be greater or equal to one.\")\n i = 0\n while True:\n i += n * (n - 1)\n nfound = 0\n for j in range(2, n):\n if i % j != 0:\n nfound = 1\n break\n if nfound == 0:\n if i == 0:\n i = 1\n return i",
"def power_non_recursive(a, n):\n result = 1\n while n != 0:\n t = n % 2\n n = n // 2 \n if t == 1:\n result *= a\n a = a * a\n return result",
"def findPrime3Mod4(n): \n c=False\n while c==False:\n length = n\n a = getrandbits(length-2)\n a|=(1<<length-2-1)\n a=(a*4)+3\n c=isPrime(a)\n return a",
"def find_inverse(a, N):\n\ti = 1\n\twhile(True):\n\t\tif((a*i) % N == 1):\n\t\t\treturn i\n\t\ti += 1",
"def modinv(a, b):\n g, x, _ = xgcd(a, b)\n if g == 1:\n return x % b",
"def modpow(k, n, m):\n ans = 1\n while n:\n if n & 1:\n ans = (ans*k) % m\n k = (k*k) % m\n n >>= 1\n return ans",
"def euclideanModInverse(self, a, m):\r\n if a == 0 : \r\n return m, 0, 1\r\n gcd, x1, y1 = self.euclideanModInverse(m%a, a) \r\n x = y1 - (m//a) * x1 \r\n y = x1\r\n return gcd, x, y"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
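The record's modinv delegates to a solve_lincongr helper that is not included in this row; the following self-contained sketch shows the same (base, mod) contract computed directly with the extended Euclidean algorithm (the function name and assertions are illustrative):

from typing import Optional, Tuple

def modinv_sketch(a: int, n: int) -> Optional[Tuple[int, int]]:
    # Extended Euclid, tracking a coefficient x with a*x ≡ old_r (mod n) at every step.
    old_r, r = a, n
    old_x, x = 1, 0
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
    if old_r != 1:
        return None  # gcd(a, n) != 1, so no inverse exists (assumes n > 1)
    return (old_x % n, n)  # solutions are base + n*k for integer k

assert modinv_sketch(3, 7) == (5, 7)   # 3*5 = 15 ≡ 1 (mod 7)
assert modinv_sketch(4, 8) is None     # gcd(4, 8) = 4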
Indicates whether or not the achieved goal matches the desired goal. | def _is_success(self, achieved_goal, desired_goal):
# TODO: may need to tune parameters
return np.logical_and(
goal_distance(achieved_goal[..., :2], desired_goal[..., :2]) < 5e-3 * self.SCALING,
np.abs(achieved_goal[..., -1] - desired_goal[..., -1]) < 4e-3 * self.SCALING
).astype(np.float32) | [
"def success(self) -> bool:\n return self.status == \"completed\" and self.code == 0",
"def goal_test(self, current):\n\n if current.state == self.goal_state:\n return True\n else:\n return False",
"def is_goal(self):\r\n return np.array_equal(PuzzleState.SOLVED_PUZZLE, self.puzzle)",
"def goal_status(self, status, result):\n self.completion += 1\n\n # Goal reached\n if status == 3:\n rospy.loginfo(\"Goal succeeded\")\n\n # Goal aborted\n if status == 4:\n rospy.loginfo(\"Goal aborted\")\n\n # Goal rejected\n if status == 5:\n rospy.loginfo(\"Goal rejected\")",
"def check_goal(self):\n for prod, amount in self.goal.items():\n if self.inventory[prod][0] < amount:\n return None\n self.goal_achieved = True\n return None",
"def succeeded(self) -> bool:\n return pulumi.get(self, \"succeeded\")",
"def is_goal(self, state):\n return state == self.goal",
"def goal_test(self, node):\n return node.is_goal",
"def finished(self):\n return self.board == self.goal",
"def __return_success(self):\n\n success = -2\n while success not in (-1, 0, 1):\n success = int(input(f\"The agent could complete the task? -- (-1, 0 or 1) for (loss, neither loss nor win, win)\\nAnswer: \"))\n return success",
"def test_case_passed(self):\n self.__set_test_case_result(result='PASSED', message='')",
"def wasSuccess(self):\n return (self.exitCode == 0) and (self.status == \"Success\")",
"def checkGoalReach(self, goal):\n reachedGoal = False\n if pygame.Rect.contains(goal.rect, self.rect):\n logger.info(f\"Player reached the goal\")\n reachedGoal = True\n \n return reachedGoal",
"def goal(self):\n return self.goal",
"def score_a_goal(self):\n self.go_behind_ball_facing_goal(constants.dis_from_ball) #Todo this was changed\n\n intersect = self.get_intersect_robot_ball_with_goal()\n if(not intersect.is_empty): # if there is an intersection\n goal_pos = Position()\n goal_pos.update(intersect.x, intersect.y, 0)\n tol = 0.1 #Todo this was changed\n in_tol = self.dis_from_point_to_line(self.pos, self.ball_pos, goal_pos) < tol\n #if (self.theta_within_error(3) and in_tol):\n if(in_tol):\n #if (self.theta_within_error(3)):\n self.attack_ball()\n #else:\n #print(\"theta\", self.theta_within_error(3))\n #print(\"tol\", in_tol)",
"def success(self):\n return self.error_rate is not None and \\\n self.error_rate <= self.tolerance",
"def Success(self) -> bool:",
"def check_complete(self, msg):\n if self.is_completed:\n return\n\n if msg.data <= self.goal_tolerance and self.latch == False:\n self.set_latched()\n self.increment_navigations()\n self.utils.set_tag(name=self.test_name + \"_Successful_Nav_\" + str(self.successful_navigations), value=str(self.successful_navigations))\n if self.is_complete():\n self.is_completed = True\n self.utils.set_tag(name=self.test_name + \"_Status\", value=\"Passed\")\n self.utils.cancel_job()\n else:\n # request for new goal\n self.new_goal_request()\n elif msg.data > 1:\n self.set_unlatched()",
"def check_goal_reached(self):\n logger.debug(\"Checking current state against goal states\")\n for goal in self.goals:\n goal_id = goal['id']\n goal_key = goal['key']\n goal_value = goal['value']\n try:\n goal_host = goal['host']\n except:\n goal_host = '*'\n if (\n goal_key == 'current_host'\n and self.check_current_host_goal(goal_value)\n or goal_key == 'current_status'\n and self.check_current_status_goal(goal_value, goal_host)\n or goal_key == 'current_access'\n and self.check_current_access_goal(goal_value, goal_host)\n or goal_key == 'current_role'\n and self.check_current_role_goal(goal_value, goal_host)\n ):\n logger.debug(\"Met Goal %s!\", goal_id)\n return goal_id\n\n return None",
"def goal_test(self, state):\n return state.visited_indices == self.goal_visits"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
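The row above relies on a goal_distance helper and a SCALING attribute that are not included in the record; a hedged, self-contained sketch of the same success check, assuming the usual Euclidean-distance definition of goal_distance:

import numpy as np

SCALING = 1.0  # assumed constant; the record reads it from the environment instance

def goal_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # Assumed gym-robotics style helper: Euclidean distance over the last axis.
    return np.linalg.norm(a - b, axis=-1)

achieved = np.array([0.001, 0.002, 0.010])
desired = np.array([0.000, 0.000, 0.012])
success = np.logical_and(
    goal_distance(achieved[..., :2], desired[..., :2]) < 5e-3 * SCALING,
    np.abs(achieved[..., -1] - desired[..., -1]) < 4e-3 * SCALING,
).astype(np.float32)
print(success)  # 1.0: planar error ~2.2e-3 < 5e-3 and height error 2e-3 < 4e-3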
Samples a new goal and returns it. | def _sample_goal(self) -> np.ndarray:
goal = np.array(get_link_pose(self.obj_ids['fixed'][1], self._pegs[0])[0])
return goal.copy() | [
"def sample_goal(self):\n #TODO: We don't need this\n raise NotImplementedError",
"def sample_goal_params(self):\n pass",
"def _sample_achieved_goal(self, episode_transitions, transition_idx):\r\n if self.goal_selection_strategy == \"future\":\r\n # Sample a goal that was observed in the same episode after the current step\r\n selected_idx = np.random.choice(np.arange(transition_idx + 1, len(episode_transitions)))\r\n selected_transition = episode_transitions[selected_idx]\r\n elif self.goal_selection_strategy == \"final\":\r\n # Choose the goal achieved at the end of the episode\r\n selected_transition = episode_transitions[-1]\r\n else:\r\n raise ValueError(\"Invalid goal selection strategy,\"\r\n )\r\n ag = selected_transition[0]['achieved_goal']\r\n return ag",
"def _proposal(self, currval, params):\n\t\treturn self._sample_impl(params)",
"def sample_trajectory(self):\n ind = np.random.choice(self.N, 1, p=self.W[-1, :])\n return self.genealogy(ind)",
"def create_from_goal_state(cls, goal_state):\n if type(goal_state.position) == pyfvks.collision.RectOBB:\n # sample in goal region\n x_range = [goal_state.position.center()[0] - goal_state.position.r_x(),\n goal_state.position.center()[0] + goal_state.position.r_x()]\n y_range = [goal_state.position.center()[1] - goal_state.position.r_y(),\n goal_state.position.center()[1] + goal_state.position.r_y()]\n sample_x = x_range[0] + (x_range[1] - x_range[0]) * np.random.rand()\n sample_y = y_range[0] + (y_range[1] - y_range[0]) * np.random.rand()\n sampled_state = StateSamplerPosition.state_tuple(np.array([sample_x, sample_y]))\n else:\n raise Exception()\n return sampled_state",
"def randGenTarget(self, feature):\r\n\r\n mean = self.songData[feature].mean()\r\n std = self.songData[feature].std()\r\n\r\n num = random.randint(-1, 1)\r\n\r\n return mean + num*std",
"def sample(self) -> T:\n if self.args:\n return self.args[0]\n return next(iter(self.kwargs.values()))",
"def sample(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return ExponentialDistBase.sample(self)",
"def sample(self):\n self.exp_wt_start = np.random.choice(self.ps.int_exp_wt)\n self.lmbda = np.random.choice(self.ps.int_lambda_soft)\n myns = float('inf')\n while myns > self.ps.max_len:\n walk = self.make()\n myns = len(walk)\n return walk",
"def trigger_sampler():\n global bottle_capacity\n global aliquot_vol_mL\n global aliquots_in_bottle\n global vol_in_bottle\n global time_last_sample\n ## Set trigger to True\n trigger = True\n\n # DO NOT SAMPLE conditions\n # if aliquots_in_bottle >= bottle_capacity:\n # trigger = False # out of capacity - won't overfill bottle\n # elif is_being_tested():\n # trigger = False # script is being tested\n # elif setup_read(\"Recording\").upper() == \"OFF\":\n # trigger = False # if recording is off, do not sample\n\n # If conditions are met, then trigger the sampler\n if trigger == True:\n print ('Sampler Triggered')\n # increment the number of bottles used\n aliquots_in_bottle += 1\n vol_in_bottle = vol_in_bottle + aliquot_vol_mL\n # update the time of the last trigger\n time_last_sample = utime.time()\n # trigger sampler by pulsing output for 0.5 seconds\n power_control('SW1', True)\n utime.sleep(0.5)\n power_control('SW1', False)\n # write a log entry\n t = utime.localtime(time_scheduled())\n day, minute = str(t[2]), str(t[4])\n if len(day) == 1:\n day = '0' + day\n if len(minute) == 1:\n minute = '0' + minute\n sample_time = str(t[1]) + '/' + day + '/' + str(t[0]) + ' ' + str(t[3]) + ':' + minute\n reading = Reading(label=\"Triggered Sampler\", time=time_scheduled(),\n etype='E', value=aliquots_in_bottle,\n right_digits=0, quality='G') # 'E' = event, 'M' = measurement, 'D' = debug\n reading.write_log()\n ## Write display log entries\n global sample_log\n global bottle_num\n global sample_pacing\n pacing_units = setup_read(\"M1 Units\")\n sample_log[sample_time] = {'Pacing': '%.0f' % sample_pacing+pacing_units, 'Bottle#': str(int(bottle_num)),\n 'Aliquot#': str(int(aliquots_in_bottle)), 'SampleTime': sample_time}\n return True\n # If conditions are NOT met, then DONOT trigger the sampler\n else:\n return False # Sampler was NOT triggered.",
"def sampleGiven(self, value):\n assert self.sampler is not None\n return self.sampler.valueFor(self)",
"def newTrialHit(self, **attrlinks):\n return TrialHit(self, **attrlinks)",
"def sample(self):\n return np.random.dirichlet(self.alpha)",
"def sample(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return SliceSamplerBase.sample(self)",
"def create_sample(*args: Any, **kwargs: Any) -> SampleType:\n return cast(SampleType, Sample(*args, **kwargs))",
"def PlaySample(self, *args):\n return _wiimote.wiimote_PlaySample(self, *args)",
"def get_one(self, goal):\n if self.from_goals:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_goal = api_utils.get_resource('Goal', goal)\n policy.enforce(context, 'goal:get', rpc_goal, action='goal:get')\n\n return Goal.convert_with_links(rpc_goal)",
"def _sample(self, task_index, sample_index):\n return self._task(task_index)[sample_index]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Using the CUDA version (the NVCC version) and the target architectures, compute the nvcc architecture flags. | def cuda_select_nvcc_arch_flags(cuda_version, cuda_arch_list="Auto", detected=""):
cuda_known_gpu_architectures = ["Fermi", "Kepler", "Maxwell"]
cuda_common_gpu_architectures = ["3.0", "3.5", "5.0"]
cuda_limit_gpu_architecture = None
cuda_all_gpu_architectures = ["3.0", "3.2", "3.5", "5.0"]
if cuda_ver_cmp(cuda_version, "7.0") < 0:
cuda_limit_gpu_architecture = "5.2"
if cuda_ver_cmp(cuda_version, "7.0") >= 0:
cuda_known_gpu_architectures += ["Kepler+Tegra", "Kepler+Tesla", "Maxwell+Tegra"]
cuda_common_gpu_architectures += ["5.2"]
if cuda_ver_cmp(cuda_version, "8.0") < 0:
cuda_common_gpu_architectures += ["5.2+PTX"]
cuda_limit_gpu_architecture = "6.0"
if cuda_ver_cmp(cuda_version, "8.0") >= 0:
cuda_known_gpu_architectures += ["Pascal"]
cuda_common_gpu_architectures += ["6.0", "6.1"]
cuda_all_gpu_architectures += ["6.0", "6.1", "6.2"]
if cuda_ver_cmp(cuda_version, "9.0") < 0:
cuda_common_gpu_architectures += ["6.1+PTX"]
cuda_limit_gpu_architecture = "7.0"
if cuda_ver_cmp(cuda_version, "9.0") >= 0:
cuda_known_gpu_architectures += ["Volta"]
cuda_common_gpu_architectures += ["7.0"]
cuda_all_gpu_architectures += ["7.0", "7.2"]
if cuda_ver_cmp(cuda_version, "10.0") < 0:
cuda_common_gpu_architectures += ["7.0+PTX"]
cuda_limit_gpu_architecture = "7.5"
if cuda_ver_cmp(cuda_version, "10.0") >= 0:
cuda_known_gpu_architectures += ["Turing"]
cuda_common_gpu_architectures += ["7.5", "7.5+PTX"]
cuda_all_gpu_architectures += ["7.5"]
if cuda_ver_cmp(cuda_version, "11.0") < 0:
cuda_limit_gpu_architecture = "8.0"
if not cuda_arch_list:
cuda_arch_list = "Auto"
if cuda_arch_list == "All":
cuda_arch_list = cuda_known_gpu_architectures
elif cuda_arch_list == "Common":
cuda_arch_list = cuda_common_gpu_architectures
elif cuda_arch_list == "Auto":
if detected:
if isinstance(detected, list):
cuda_arch_list = detected
else:
cuda_arch_list = re.sub("[ \t]+", ";", detected).split(";")
if cuda_limit_gpu_architecture:
filtered_cuda_arch_list = []
for arch in cuda_arch_list:
if arch:
if cuda_arch_cmp(arch, cuda_limit_gpu_architecture) >= 0:
filtered_cuda_arch_list.append(cuda_common_gpu_architectures[-1])
else:
filtered_cuda_arch_list.append(arch)
cuda_arch_list = filtered_cuda_arch_list
else:
cuda_arch_list = cuda_common_gpu_architectures
elif isinstance(cuda_arch_list, str):
cuda_arch_list = re.sub("[ \t]+", ";", cuda_arch_list).split(";")
cuda_arch_list = sorted([x for x in set(cuda_arch_list) if x])
cuda_arch_bin = []
cuda_arch_ptx = []
for arch_name in cuda_arch_list:
arch_bin = []
arch_ptx = []
add_ptx = False
if arch_name.endswith("+PTX"):
add_ptx = True
arch_name = arch_name[:-len("+PTX")]
if re.fullmatch("""[0-9]+\.[0-9](\([0-9]+\.[0-9]\))?""", arch_name):
arch_bin = [arch_name]
arch_ptx = [arch_name]
else:
if arch_name == "Fermi": arch_bin=["2.0", "2.1(2.0)"]
elif arch_name == "Kepler+Tegra": arch_bin=["3.2"]
elif arch_name == "Kepler+Tesla": arch_bin=["3.7"]
elif arch_name == "Kepler": arch_bin=["3.0", "3.5"]; arch_ptx=["3.5"]
elif arch_name == "Maxwell+Tegra": arch_bin=["5.3"]
elif arch_name == "Maxwell": arch_bin=["5.0", "5.2"]; arch_ptx=["5.2"]
elif arch_name == "Pascal": arch_bin=["6.0", "6.1"]; arch_ptx=["6.1"]
elif arch_name == "Volta": arch_bin=["7.0"]; arch_ptx=["7.0"]
elif arch_name == "Turing": arch_bin=["7.5"]; arch_ptx=["7.5"]
else: raise ValueError("Unknown CUDA Architecture Name "+arch_name+
" in cuda_select_nvcc_arch_flags()!")
if not arch_bin:
raise ValueError("arch_bin wasn't set for some reason")
cuda_arch_bin += arch_bin
if add_ptx:
if not arch_ptx:
arch_ptx = arch_bin
cuda_arch_ptx += arch_ptx
cuda_arch_bin = re.sub ("\.", "", " ".join(cuda_arch_bin))
cuda_arch_ptx = re.sub ("\.", "", " ".join(cuda_arch_ptx))
cuda_arch_bin = re.findall("[0-9()]+", cuda_arch_bin)
cuda_arch_ptx = re.findall("[0-9]+", cuda_arch_ptx)
if cuda_arch_bin: cuda_arch_bin = sorted(list(set(cuda_arch_bin)))
if cuda_arch_ptx: cuda_arch_ptx = sorted(list(set(cuda_arch_ptx)))
nvcc_flags = []
nvcc_archs_readable = []
for arch in cuda_arch_bin:
m = re.match("""([0-9]+)\(([0-9]+)\)""", arch)
if m:
nvcc_flags += ["-gencode", "arch=compute_{},code=sm_{}".format(m[1], m[0])]
nvcc_archs_readable += ["sm_"+m[0]]
else:
nvcc_flags += ["-gencode", "arch=compute_"+arch+",code=sm_"+arch]
nvcc_archs_readable += ["sm_"+arch]
for arch in cuda_arch_ptx:
nvcc_flags += ["-gencode", "arch=compute_"+arch+",code=compute_"+arch]
nvcc_archs_readable += ["compute_"+arch]
return nvcc_flags, nvcc_archs_readable | [
"def _nvcc_gencode_options(cuda_version: int) -> List[str]:\n\n if sys.argv == ['setup.py', 'develop']:\n return []\n\n envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)\n if envcfg is not None and envcfg != 'current':\n return ['--generate-code={}'.format(arch)\n for arch in envcfg.split(';') if len(arch) > 0]\n if envcfg == 'current' and build.get_compute_capabilities() is not None:\n ccs = build.get_compute_capabilities()\n arch_list = [\n f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')\n for cc in ccs]\n else:\n # The arch_list specifies virtual architectures, such as 'compute_61',\n # and real architectures, such as 'sm_61', for which the CUDA\n # input files are to be compiled.\n #\n # The syntax of an entry of the list is\n #\n # entry ::= virtual_arch | (virtual_arch, real_arch)\n #\n # where virtual_arch is a string which means a virtual architecture and\n # real_arch is a string which means a real architecture.\n #\n # If a virtual architecture is supplied, NVCC generates a PTX code\n # the virtual architecture. If a pair of a virtual architecture and a\n # real architecture is supplied, NVCC generates a PTX code for the\n # virtual architecture as well as a cubin code for the real one.\n #\n # For example, making NVCC generate a PTX code for 'compute_60' virtual\n # architecture, the arch_list has an entry of 'compute_60'.\n #\n # arch_list = ['compute_60']\n #\n # For another, making NVCC generate a PTX code for 'compute_61' virtual\n # architecture and a cubin code for 'sm_61' real architecture, the\n # arch_list has an entry of ('compute_61', 'sm_61').\n #\n # arch_list = [('compute_61', 'sm_61')]\n #\n # See the documentation of each CUDA version for the list of supported\n # architectures:\n #\n # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation\n\n aarch64 = (platform.machine() == 'aarch64')\n if cuda_version >= 12000:\n arch_list = [('compute_50', 'sm_50'),\n ('compute_52', 'sm_52'),\n ('compute_60', 'sm_60'),\n ('compute_61', 'sm_61'),\n ('compute_70', 'sm_70'),\n ('compute_75', 'sm_75'),\n ('compute_80', 'sm_80'),\n ('compute_86', 'sm_86'),\n ('compute_89', 'sm_89'),\n ('compute_90', 'sm_90'),\n 'compute_90']\n if aarch64:\n # Jetson TX1/TX2 are excluded as they don't support JetPack 5\n # (CUDA 11.4).\n arch_list += [\n # ('compute_53', 'sm_53'), # Jetson (TX1 / Nano)\n # ('compute_62', 'sm_62'), # Jetson (TX2)\n ('compute_72', 'sm_72'), # Jetson (Xavier)\n ('compute_87', 'sm_87'), # Jetson (Orin)\n ]\n elif cuda_version >= 11080:\n arch_list = [('compute_35', 'sm_35'),\n ('compute_37', 'sm_37'),\n ('compute_50', 'sm_50'),\n ('compute_52', 'sm_52'),\n ('compute_60', 'sm_60'),\n ('compute_61', 'sm_61'),\n ('compute_70', 'sm_70'),\n ('compute_75', 'sm_75'),\n ('compute_80', 'sm_80'),\n ('compute_86', 'sm_86'),\n ('compute_89', 'sm_89'),\n ('compute_90', 'sm_90'),\n 'compute_90']\n if aarch64:\n # Jetson TX1/TX2 are excluded as they don't support JetPack 5\n # (CUDA 11.4).\n arch_list += [\n # ('compute_53', 'sm_53'), # Jetson (TX1 / Nano)\n # ('compute_62', 'sm_62'), # Jetson (TX2)\n ('compute_72', 'sm_72'), # Jetson (Xavier)\n ('compute_87', 'sm_87'), # Jetson (Orin)\n ]\n elif cuda_version >= 11040:\n # To utilize CUDA Minor Version Compatibility (`cupy-cuda11x`),\n # CUBIN must be generated for all supported compute capabilities\n # instead of PTX:\n # https://docs.nvidia.com/deploy/cuda-compatibility/index.html#application-considerations\n arch_list = [('compute_35', 'sm_35'),\n ('compute_37', 
'sm_37'),\n ('compute_50', 'sm_50'),\n ('compute_52', 'sm_52'),\n ('compute_60', 'sm_60'),\n ('compute_61', 'sm_61'),\n ('compute_70', 'sm_70'),\n ('compute_75', 'sm_75'),\n ('compute_80', 'sm_80'),\n ('compute_86', 'sm_86'),\n 'compute_86']\n if aarch64:\n # Jetson TX1/TX2 are excluded as they don't support JetPack 5\n # (CUDA 11.4).\n arch_list += [\n # ('compute_53', 'sm_53'), # Jetson (TX1 / Nano)\n # ('compute_62', 'sm_62'), # Jetson (TX2)\n ('compute_72', 'sm_72'), # Jetson (Xavier)\n ('compute_87', 'sm_87'), # Jetson (Orin)\n ]\n elif cuda_version >= 11010:\n arch_list = ['compute_35',\n 'compute_50',\n ('compute_60', 'sm_60'),\n ('compute_61', 'sm_61'),\n ('compute_70', 'sm_70'),\n ('compute_75', 'sm_75'),\n ('compute_80', 'sm_80'),\n ('compute_86', 'sm_86'),\n 'compute_86']\n elif cuda_version >= 11000:\n arch_list = ['compute_35',\n 'compute_50',\n ('compute_60', 'sm_60'),\n ('compute_61', 'sm_61'),\n ('compute_70', 'sm_70'),\n ('compute_75', 'sm_75'),\n ('compute_80', 'sm_80'),\n 'compute_80']\n elif cuda_version >= 10000:\n arch_list = ['compute_30',\n 'compute_50',\n ('compute_60', 'sm_60'),\n ('compute_61', 'sm_61'),\n ('compute_70', 'sm_70'),\n ('compute_75', 'sm_75'),\n 'compute_70']\n else:\n # This should not happen.\n assert False\n\n options = []\n for arch in arch_list:\n if type(arch) is tuple:\n virtual_arch, real_arch = arch\n options.append('--generate-code=arch={},code={}'.format(\n virtual_arch, real_arch))\n else:\n options.append('--generate-code=arch={},code={}'.format(\n arch, arch))\n\n return options",
"def architecture_flag(compiler, arch, os=None):\n if not compiler or not arch:\n return \"\"\n\n if str(compiler) in ['gcc', 'apple-clang', 'clang', 'sun-cc']:\n if str(arch) in ['x86_64', 'sparcv9', 's390x']:\n return '-m64'\n elif str(arch) in ['x86', 'sparc']:\n return '-m32'\n elif str(arch) in ['s390']:\n return '-m31'\n elif os == 'AIX':\n if str(arch) in ['ppc32']:\n return '-maix32'\n elif str(arch) in ['ppc64']:\n return '-maix64'\n return \"\"",
"def _set_simd_flags(conf):\n CXX = conf.env.get_flat(\"CXX\")\n flags = []\n # DEST_CPU should be set explicitly for clang cross-compilers\n cpu = conf.env[\"DEST_CPU\"]\n\n # Matches both g++ and clang++\n if \"g++\" in CXX or \"clang\" in CXX:\n # Test different compiler flags based on the target CPU\n if cpu == \"x86\" or cpu == \"x86_64\":\n flags += conf.mkspec_try_flags(\n \"cxxflags\", [\"-msse2\", \"-mssse3\", \"-msse4.2\", \"-mavx2\"]\n )\n elif cpu == \"arm\" or cpu == \"thumb\":\n flags += conf.mkspec_try_flags(\"cxxflags\", [\"-mfpu=neon\"])\n\n elif \"CL.exe\" in CXX or \"cl.exe\" in CXX:\n if cpu == \"x86\" or cpu == \"x86_64\" or cpu == \"amd64\":\n flags += conf.mkspec_try_flags(\"cxxflags\", [\"/arch:AVX2\"])\n\n elif \"em++\" in CXX:\n flags = []\n\n else:\n conf.fatal(\"Unknown compiler - no SIMD flags specified\")\n\n conf.env[\"CFLAGS_AYBABTU_SIMD\"] = flags\n conf.env[\"CXXFLAGS_AYBABTU_SIMD\"] = flags",
"def test_arch_target():\n\n arch(\"-t\")\n arch(\"--target\")\n arch(\"-f\", \"-t\")\n arch(\"-b\", \"-t\")",
"def has_arm_build(architectures):\n return 'aarch64' in architectures",
"def test_arch_operating_system():\n\n arch(\"-o\")\n arch(\"--operating-system\")\n arch(\"-f\", \"-o\")\n arch(\"-b\", \"-o\")",
"def test_arch():\n\n arch()\n arch(\"-f\")\n arch(\"--frontend\")\n arch(\"-b\")\n arch(\"--backend\")",
"def architecture():\n import platform\n return platform.architecture()[0][:-3]",
"def _compile_args_current_machine(self, arch, cpu):\n args = [\"-O3 -march=native -flto\"]\n\n if arch == \"x86_32\":\n args.append(\"-m32\")\n if \"SSE\" in cpu.features:\n args.append(\"-mfpmath=sse\")\n\n elif arch == \"x86_64\":\n args.append(\"-m64\")\n\n return \" \".join(args)",
"def OSArchitecture(self) -> Architecture:",
"def __get_target_architecture_from_context(self, benchmark_run):\n\n name_from_context = benchmark_run['context']['hdp_gcn_arch_name'].split(\":\")[0]\n if name_from_context in TARGET_ARCHITECTURES:\n return f'target_arch::{name_from_context}'\n else:\n raise RuntimeError(f\"ERROR: unknown hdp_gcn_arch_name: {name_from_context}\")",
"def system_cpu_arch(self) -> str:\n machine = platform.machine()\n if machine in [\"AMD64\", \"x86_64\"]:\n return \"x64\"\n else:\n return machine",
"def set_cc_opt_flags(environ_cp):\n if is_ppc64le():\n # gcc on ppc64le does not support -march, use mcpu instead\n default_cc_opt_flags = \"-mcpu=native\"\n elif is_windows():\n default_cc_opt_flags = \"/arch:AVX\"\n else:\n # On all other platforms, no longer use `-march=native` as this can result\n # in instructions that are too modern being generated. Users that want\n # maximum performance should compile TF in their environment and can pass\n # `-march=native` there.\n # See https://github.com/tensorflow/tensorflow/issues/45744 and duplicates\n default_cc_opt_flags = \"-Wno-sign-compare\"\n question = (\n \"Please specify optimization flags to use during compilation when\"\n ' bazel option \"--config=opt\" is specified [Default is %s]: '\n ) % default_cc_opt_flags\n cc_opt_flags = get_from_env_or_user_or_default(\n environ_cp, \"CC_OPT_FLAGS\", question, default_cc_opt_flags\n )\n for opt in cc_opt_flags.split():\n write_to_bazelrc(\"build:opt --copt=%s\" % opt)\n write_to_bazelrc(\"build:opt --host_copt=%s\" % opt)\n write_to_bazelrc(\"build:opt --define with_default_optimizations=true\")",
"def test_arch_platform():\n\n arch(\"-p\")\n arch(\"--platform\")\n arch(\"-f\", \"-p\")\n arch(\"-b\", \"-p\")",
"def archs(self, _args):\n print('{Style.BRIGHT}Available target architectures are:'\n '{Style.RESET_ALL}'.format(Style=Out_Style))\n for arch in self.ctx.archs:\n print(' {}'.format(arch.arch))",
"def getVersionFlags():\r\n return {'CCFLAGS':[ \r\n '-D SZG_MAJOR_VERSION=1',\r\n '-D SZG_MINOR_VERSION=4',\r\n '-D SZG_PATCH_VERSION=0'\r\n ]}",
"def _get_foreach_kernels_supported_devices() -> List[str]:\n return [\"cuda\", torch._C._get_privateuse1_backend_name()]",
"def _CipdPlatform(self):\n arch = 'amd64'\n if platform.machine() == 'arm64':\n arch = arm64\n if self.platform == 'win32':\n return 'windows-' + arch\n if self.platform == 'darwin':\n return 'mac-' + arch\n return 'linux-' + arch",
"def supported_archs(self):\n return self.SUPPORTED_ARCHS"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
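A usage sketch for the cuda_select_nvcc_arch_flags function defined in the record above; it also depends on cuda_ver_cmp and cuda_arch_cmp helpers that are not part of this row and are assumed here to compare dotted version strings numerically:

# With CUDA 10.2 and a single detected compute capability of 7.5, the "Auto"
# path keeps the detected architecture and emits one -gencode pair for it.
flags, readable = cuda_select_nvcc_arch_flags("10.2", "Auto", detected="7.5")
print(readable)  # expected: ['sm_75']
print(flags)     # expected: ['-gencode', 'arch=compute_75,code=sm_75']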
r""" Given a file handle to write in to (which should act like a Python `file` object), write out the landmark data. No value is returned. Writes out the LJSON format which is a verbose format that closely resembles the landmark group format. It describes semantic labels and connectivity between labels. The first axis of the format represents the image yaxis and is consistent with ordering within Menpo. | def LJSONExporter(landmark_group, file_handle, **kwargs):
lg_json = landmark_group.tojson()
# Add version string
lg_json['version'] = 2
# Convert nan values to None so that json correctly maps them to 'null'
points = lg_json['landmarks']['points']
# Flatten list
try:
ndim = len(points[0])
except IndexError:
ndim = 0
filtered_points = [None if np.isnan(x) else x
for x in itertools.chain(*points)]
# Recreate tuples
if ndim == 2:
lg_json['landmarks']['points'] = list(zip(filtered_points[::2],
filtered_points[1::2]))
elif ndim == 3:
lg_json['landmarks']['points'] = list(zip(filtered_points[::3],
filtered_points[1::3],
filtered_points[2::3]))
else:
lg_json['landmarks']['points'] = []
return json.dump(lg_json, file_handle, indent=4, separators=(',', ': '),
sort_keys=True, allow_nan=False) | [
"def ljson_exporter(lmk_points, filepath, **kwargs):\n\n lmk_points[np.isnan(lmk_points)] = None\n\n lmk_points = [list(_tmp) for _tmp in lmk_points]\n\n ljson = {\n 'version': 2,\n 'labels': [],\n 'landmarks': {\n 'points': lmk_points\n }\n }\n\n with open(filepath, \"w\") as file_handle:\n\n return json.dump(ljson, file_handle, indent=4, separators=(',', ': '),\n sort_keys=True, allow_nan=False, ensure_ascii=False)",
"def multilabel2jsonl(self):\n # Baseline of json line dictionary\n json_line_sample = {\n \"image_url\": self.base_url,\n \"label\": [],\n }\n\n # Read each annotation and convert it to jsonl line\n with open(self.label_file, \"r\") as labels:\n for i, line in enumerate(labels):\n # Skipping the title line and any empty lines.\n if i == 0 or len(line.strip()) == 0:\n continue\n line_split = line.strip().split(\",\")\n if len(line_split) != 2:\n print(f\"Skipping the invalid line: {line}\")\n continue\n json_line = dict(json_line_sample)\n json_line[\"image_url\"] += f\"images/{line_split[0]}\"\n json_line[\"label\"] = line_split[1].strip().split(\" \")\n\n self.jsonl_data.append(json_line)\n\n return self.jsonl_data",
"def export_landmark_file(landmark_group, fp, extension=None, overwrite=False):\n from .extensions import landmark_types\n\n _export(landmark_group, fp, landmark_types, extension, overwrite)",
"def write_to_file(self, filename, force=False):\n if osp.isfile(filename) and not force:\n warn(\" %s exists - use force=True to overwrite\" % filename)\n return None\n else:\n fbase, _ = osp.splitext(filename) \n img = nib.Nifti1Image(np.asarray(self._data), self.affine)\n nib.save(img, fbase+'.nii.gz')\n with open(fbase+'.json','w') as f:\n json.dump([(str(idx), l) for (idx,l) in enumerate(self.labels)], f)",
"def saveAsLM(self, path):\n if not path.endswith(\".lm\"):\n path += \".lm\"\n f = open(path, 'w', encoding=self.enc)\n f_lab = open(path+\".lab\", 'w', encoding=self.enc)\n f.write(\"#SpeechMark Landmark File\\n\")\n f.write(\"#SMPRODUCT: TGProcess.py\\n\")\n f.write(\"#SMVERSION: 1\\n\")\n f.write(\"#LMVERSION: 2013-03-26\\n\")\n f.write(\"#WAVEFORM NAME: \"+self.waveformName+\"\\n\")\n f.write(\"#WAVEFORM CHECKSUM: \"+self.waveformChecksum+\"\\n\")\n f.write(\"#FILE CREATED:\"+strftime(\"%m/%d/%Y %H:%M:%S\")+\"\\n\")\n f.write(\"#--------------------------------------------------------------\\n\")\n f.write(\"#\\n\")\n #condense tiers into single list\n items = [(item.mark.replace(\" \",\"_\"), \"%.3f\" % float(item.time)) for tier in self.tiers for item in tier if type(item)==Point]\n items.sort(key=lambda item: item[1])\n last_time = \"0\"\n #write items to both files\n for item in items:\n f.write(item[1]+\" \"+item[0]+\"\\n\")\n f_lab.write(last_time + \" \" + item[1] + \" \" + item[0]+\"\\n\")\n last_time = item[1]",
"def write_geojson_highlight(filename, bounds, world_bounds):\n fp = open(filename, 'w')\n js = geojson_highlight(bounds, world_bounds)\n json.dump(js, fp)\n fp.close()",
"def write(self, filename):\n self._file = HorizonFile(filename, 'wb')\n\n # If self.lines isn't set, default to []\n try:\n self._file.lines = self.lines\n except AttributeError:\n self._file.lines = []\n\n # If self.surface isn't set, default to an empty numpy array\n try:\n self._file.surface = self.surface\n except AttributeError:\n self._file.surface = np.zeros(0, dtype=self.POINT_DTYPE)\n\n self._file.writeAll()",
"def saveMap(filename, paths, images, faces, years, places):\n f = open(filename, 'w+')\n nodes = list(set(cbook.flatten(paths)))\n pathInd = {} #easier form to work with here\n for i in range(len(paths)):\n for j in paths[i]:\n if j in pathInd.keys():\n pathInd[j].append(i+1)\n else:\n pathInd[j] = [i+1]\n strs = []\n\n # Write nodes\n f.write('{ \"nodes\": [\\n')\n for node in nodes:\n imgPath = 'images/' + str(node) + '.png'\n #misc.imsave(websitePath + imgPath, images[node]) #XXX suspect don't need this anymore\n s = '{\"id\": ' + str(node) + ', \"line\": ' + str(pathInd[node])\n s += ', \"faces\": [' + ','.join([str(x) for x in np.nonzero(faces[node])[0]]) + ']'\n p = np.nonzero(places[node])[0]\n s += ', \"time\": ' + str(years[node]) + ', \"place\": ' + str(p[0] if len(p) > 0 else -1)\n s += '}'\n strs.append(s)\n f.write(',\\n'.join(strs) + '],\\n\"links\": [\\n')\n strs = []\n\n # Write links\n for i in range(len(paths)):\n p = paths[i]\n for j in range(0, len(p)-1):\n strs.append('{\"source\": ' + str(nodes.index(p[j])) + ', \"target\": ' + str(nodes.index(p[j+1])) + ', \"line\": ' + str(i+1) + '}')\n f.write(',\\n'.join(strs) + ']}')\n f.close()",
"def create_yolo_label_file(file_: tuple, current_class_id: str, yolo_dataset_dir: str):\n image_path, json_path = file_\n file_name, file_extension = os.path.splitext(os.path.basename(image_path))\n new_file_name = token_hex(10)\n image_new_path = os.path.join(yolo_dataset_folder, f'{new_file_name}{file_extension}')\n # Output file path\n output_file_name = f'{new_file_name}.txt'\n output_file_path = os.path.join(yolo_dataset_dir, output_file_name)\n output_file = open(output_file_path, 'w')\n # Get rectangles\n with open(json_path) as input_stream:\n image_metadata = json.load(input_stream)\n # Form rectangles from labeled points\n rectangles = get_rectangles(image_metadata)\n # Get image sizes\n image = cv2.imread(image_path)\n height, width = image.shape[:2]\n # Iterate throw rectangles and write each of them on a separate line\n for rectangle in rectangles:\n box = (rectangle[0][0], rectangle[1][0], rectangle[0][1], rectangle[1][1])\n yolo_bounding_box = convert_labels_to_yolo(width, height, box)\n\n output_file.write(f'{current_class_id} ' + \" \".join([str(a) for a in yolo_bounding_box]) + '\\n')\n\n if not os.path.isfile(image_new_path):\n copyfile(image_path, image_new_path)\n\n output_file.close()",
"def export_data(fileName): \r\n options = bpy.types.Scene.dmh\r\n \r\n # open file and store date\r\n f = open(fileName,'w')\r\n \r\n list_vertices = []\r\n for v in options.data[0]:\r\n list_vertices.append([v[0],v[1],v[2]]) \r\n \r\n world_pos = [options.data[2][0][3], options.data[2][1][3], options.data[2][2][3]]\r\n \r\n wireframe = [\r\n list_vertices, \r\n options.data[1], world_pos, options.data[3],\r\n options.knot_type,\r\n options.vertex_pvr,\r\n options.edge_pvr,\r\n options.hide_knots,\r\n options.knot_resolution,\r\n options.knot_radius,\r\n options.edge_resolution,\r\n options.edge_radius,\r\n options.smooth\r\n ]\r\n\r\n data = [wireframe]\r\n json.dump(data, f)",
"def pts_exporter(pts, file_handle, **kwargs):\n # Swap the x and y axis and add 1 to undo our processing\n # We are assuming (as on import) that the landmark file was created using\n # Matlab which is 1 based\n\n if len(pts.shape) == 2:\n pts = pts[:, [1, 0]] + 1\n else:\n pts = pts[:, [2, 1, 0]] + 1\n\n header = 'version: 1\\nn_points: {}\\n{{'.format(pts.shape[0])\n np.savetxt(file_handle, pts, delimiter=' ', header=header, footer='}',\n fmt='%.3f', comments='')",
"def write(self):\n \n hdulist = fits.HDUList()\n\n level0 = self.get_level0()\n hdulist.append(level0)\n \n level1 = self.get_level1()\n hdulist.append(level1)\n \n level2 = self.get_level2()\n hdulist.append(level2)\n \n level3 = self.get_level3()\n hdulist.append(level3)\n \n level4 = self.get_level4()\n hdulist.append(level4)\n \n hdulist.writeto(self.metadata_file,clobber=True)\n print('Output metadata to '+self.metadata_file)",
"def write_levels(self, node):\n cur_level=node.level\n filename=r'text.{}.txt'.format(cur_level)\n filename2=r'highlight.{}.txt'.format(cur_level)\n label=node.type\n end=str(node.stop)\n name=node.name[3::]\n if name=='' or name=='unspecified' or name=='unidentified':\n name='unclassified'\n start=node.start\n line_hl='{}\\t{}\\t{}\\n'.format(label, start, end)\n line_label='{}\\t{}\\t{}\\t{}\\n'.format(label, start, end, name)\n with open(filename, \"a\") as myfile:\n myfile.write(line_label)\n with open(filename2, \"a\") as myfile:\n myfile.write(line_hl)\n if cur_level==self.stop_rank:\n line_max='{}\\t{}\\t{}\\t{}\\n'.format(label, start, end, math.log10(node.max))\n with open('max.txt', \"a\") as myfile:\n myfile.write(line_max)\n else:\n for child in node.children:\n self.write_levels(child)",
"def write_geojson_file(self, file_path):\n with open(file_path, 'w') as f:\n f.write(format_to_geojson(self.all))",
"def write_obj(self, file: TextIO, **kwargs):\n if self.errored:\n print(\n f\"Chunk {self.name} had a parsing error. Cannot write .obj for errored chunk.\"\n )\n return\n\n if self.material_list and not kwargs.get(\"ignore_mtllib_line\"):\n file.write(f\"mtllib {self.material_list}.mtl\\n\")\n\n print(f'{self.name}: Writing chunk to {file.name}')\n file.write(f\"o {self.name}\\n\")\n self._write_vertices(file)\n self._write_texture_coordinates(file)\n self._write_triangles(file)",
"def from_shapefile(strict=True, progress=True, verbose=False, **kwargs):\n shp = get_processed_data_file(os.path.join('landmarks', 'landmarks.shp'))\n mapping = LayerMapping(Landmark, shp, landmark_mapping, transform=False)\n mapping.save(strict=strict, progress=progress, verbose=verbose, **kwargs)\n\n add_landmarks_to_parcels()",
"def _write_annotation(filename, annotation):\n _mkdir(os.path.dirname(filename))\n save_pbobject_as_json(annotation, filename)",
"def _write_gro(self, atoms, filename, iteration, replica, title, trajectory_by_state=True):\n \n # Extract coordinates to be written (comes out in nm)\n coordinates = numpy.array(self.ncfile.variables['positions'][iteration,replica,:,:])\n \n # Create file.\n #outfile = open(filename, 'w')\n \n # Write ATOM records.\n for (index, atom) in enumerate(atoms):\n #atom[\"x\"] = \"%8.3f\" % coordinates[index,0]\n #atom[\"y\"] = \"%8.3f\" % coordinates[index,1]\n #atom[\"z\"] = \"%8.3f\" % coordinates[index,2]\n #Increasing precision\n atom[\"x\"] = \"%8.4f\" % coordinates[index,0]\n atom[\"y\"] = \"%8.4f\" % coordinates[index,1]\n atom[\"z\"] = \"%8.4f\" % coordinates[index,2]\n # ResNumber ResName AtomName AtomNumber X-pos Y-pos Z-pos\n filename.write('%(Seqno)5s%(resName)5s%(atom)5s%(serial)5s %(x)8s %(y)8s %(z)8s\\n' % atom)\n \n # Close file.\n #outfile.close()\n \n return",
"def save(self):\n # convert sets to lists\n labels = {fname: list(s) for fname, s in self._labels.items()}\n\n with open(self._fname, 'w') as f:\n json.dump(labels, f, indent=4, sort_keys=True)\n f.flush()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
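For reference, the exporter in the record above emits LJSON version 2 with NaN coordinates mapped to null; a hypothetical 2-D, two-point example of the resulting structure (the labels content really comes from LandmarkGroup.tojson() and is shown empty here purely for illustration):

import json

example_ljson = {
    "version": 2,
    "labels": [],  # illustrative placeholder; real content comes from tojson()
    "landmarks": {
        "points": [[10.5, 20.25], [None, None]],  # second point was NaN -> null
    },
}
print(json.dumps(example_ljson, indent=4, sort_keys=True))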
r""" Given a file handle to write in to (which should act like a Python `file` object), write out the landmark data. No value is returned. Writes out the PTS format which is a very simple format that does not contain any semantic labels. We assume that the PTS format has been created using Matlab and so use 1based indexing and put the image xaxis as the first coordinate (which is the second axis within Menpo). | def PTSExporter(landmark_group, file_handle, **kwargs):
pts = landmark_group.lms.points
# Swap the x and y axis and add 1 to undo our processing
# We are assuming (as on import) that the landmark file was created using
# Matlab which is 1 based
pts = pts[:, [1, 0]] + 1
header = 'version: 1\nn_points: {}\n{{'.format(pts.shape[0])
np.savetxt(file_handle, pts, delimiter=' ', header=header, footer='}',
fmt='%.3f', comments='') | [
"def pts_exporter(pts, file_handle, **kwargs):\n # Swap the x and y axis and add 1 to undo our processing\n # We are assuming (as on import) that the landmark file was created using\n # Matlab which is 1 based\n\n if len(pts.shape) == 2:\n pts = pts[:, [1, 0]] + 1\n else:\n pts = pts[:, [2, 1, 0]] + 1\n\n header = 'version: 1\\nn_points: {}\\n{{'.format(pts.shape[0])\n np.savetxt(file_handle, pts, delimiter=' ', header=header, footer='}',\n fmt='%.3f', comments='')",
"def write_to_file(self, filename):\n # check to see if the map exists from instantiation\n if hasattr(self, 'map'):\n sunpy_meta = self.map.meta\n\n psihdf.wrh5_meta(filename, self.x, self.y, np.array([]),\n self.data, chd_meta=self.info, sunpy_meta=sunpy_meta)",
"def write_to_file(points):\n output = open(\"data.txt\", 'a')\n print(points, file=output)\n output.close()",
"def write(self, filename):\n self._file = HorizonFile(filename, 'wb')\n\n # If self.lines isn't set, default to []\n try:\n self._file.lines = self.lines\n except AttributeError:\n self._file.lines = []\n\n # If self.surface isn't set, default to an empty numpy array\n try:\n self._file.surface = self.surface\n except AttributeError:\n self._file.surface = np.zeros(0, dtype=self.POINT_DTYPE)\n\n self._file.writeAll()",
"def write_obj(fname, pts):\n assert pts.shape[1] == 3\n with open(fname, \"w\") as fout:\n for i, p in enumerate(pts):\n fout.write(\"v %f %f %f\\n\" % (p[0], p[1], p[2]))",
"def writePoints(filename, points, indices = True, binary = True):\n \n points = io.readPoints(points);\n #points = points[:,[1,0,2]]; # points in ClearMap (y,x,z) -> permute to (x,y,z)\n \n if binary:\n with open(filename, 'wb') as pointfile:\n if indices:\n np.array(1, dtype = np.int64).tofile(pointfile)\n else:\n np.array(0, dtype = np.int64).tofile(pointfile)\n \n num_points = np.array(len(points), dtype = np.int64);\n num_points.tofile(pointfile);\n\n points = np.asarray(points, dtype = np.double);\n points.tofile(pointfile);\n\n pointfile.close(); \n \n else:\n with open(filename, 'w') as pointfile:\n if indices:\n pointfile.write('index\\n')\n else:\n pointfile.write('point\\n')\n \n pointfile.write(str(points.shape[0]) + '\\n');\n np.savetxt(pointfile, points, delimiter = ' ', newline = '\\n', fmt = '%.5e')\n pointfile.close();\n \n return filename;",
"def write_point_sprite_to_file(file_path=\".\", file_name=\"sprite\",\n file_format=\"ascii\",\n position_array=None, intensity_array=None,\n time_step=0):\n\n print(\"* BEGIN [%s] ...\" % sys._getframe().f_code.co_name)\n start = time.clock()\n\n if(file_format == \"ascii\"):\n ps_file_path = \"%s/%s.sprite\" % (file_path, file_name)\n ps_file = open(ps_file_path, 'w')\n\n # retreive the sprite meta-data and write them to the file\n events_count, \\\n x_center, y_center, z_center, \\\n x_coi, y_coi, z_coi, \\\n width, height, depth = get_sprite_header_data(position_array)\n\n ps_file.write(\"EventsCount=%d\\n\" % events_count)\n ps_file.write(\"XCenter=%f\\n\" % x_center)\n ps_file.write(\"YCenter=%f\\n\" % y_center)\n ps_file.write(\"ZCenter=%f\\n\" % z_center)\n ps_file.write(\"XCOI=%f\\n\" % x_coi)\n ps_file.write(\"YCOI=%f\\n\" % y_coi)\n ps_file.write(\"ZCOI=%f\\n\" % z_coi)\n ps_file.write(\"AABBWidth=%f\\n\" % width)\n ps_file.write(\"AABBHeight=%f\\n\" % height)\n ps_file.write(\"AABBDepth=%f\\n\" % depth)\n ps_file.write(\"TimeStep=%f\\n\" % time_step)\n\n # iterate and fill the file with the position data from the numpy array\n number_events = 0\n if((position_array != None) and (intensity_array != None)):\n for i_position, i_intensity in zip(position_array, intensity_array):\n ps_file.write(\"[%f %f %f] [%f]\\n\" % (i_position[0],\n i_position[1],\n i_position[2],\n i_intensity))\n number_events += 1\n\n # close the position point sprite file\n ps_file.close()\n else:\n psh_file_path = \"%s/%s.psh\" % (file_path, file_name)\n ps_file_path = \"%s/%s.ps\" % (file_path, file_name)\n\n psh_file = open(psh_file_path, 'w')\n\n # retreive the sprite meta-data and write them to the file\n events_count, \\\n x_center, y_center, z_center, \\\n x_coi, y_coi, z_coi, \\\n width, height, depth = get_sprite_header_data(position_array)\n\n psh_file.write(\"EventsCount=%d\\n\" % events_count)\n psh_file.write(\"XCenter=%f\\n\" % x_center)\n psh_file.write(\"YCenter=%f\\n\" % y_center)\n psh_file.write(\"ZCenter=%f\\n\" % z_center)\n psh_file.write(\"XCOI=%f\\n\" % x_coi)\n psh_file.write(\"YCOI=%f\\n\" % y_coi)\n psh_file.write(\"ZCOI=%f\\n\" % z_coi)\n psh_file.write(\"AABBWidth=%f\\n\" % width)\n psh_file.write(\"AABBHeight=%f\\n\" % height)\n psh_file.write(\"AABBDepth=%f\\n\" % depth)\n psh_file.write(\"VSDFile=%s.psi\\n\" % file_name)\n psh_file.write(\"TimeStep=%f\\n\" % time_step)\n psh_file.close()\n\n ps_file = open(ps_file_path, 'wb')\n # iterate and fill the file with the from the numpy arrays\n number_events = 0\n if((position_array != None) and (intensity_array != None)):\n for i_position, i_intensity in zip(position_array, intensity_array):\n ps_file.write(struct.pack('f', (i_position[0])))\n ps_file.write(struct.pack('f', (i_position[1])))\n ps_file.write(struct.pack('f', (i_position[2])))\n ps_file.write(struct.pack('f', (i_intensity)))\n number_events += 1\n\n # close the position point sprite file\n ps_file.close()\n\n print(\"[%d] events have been written to %s\" % (number_events, ps_file_path))\n\n end = time.clock()\n print(\"** DONE [%s] in %f\" % (sys._getframe().f_code.co_name, end - start))",
"def Writergeo(filename, points, options, addphys):\n\tfid = open(filename,'w')\n\tcount = 1\n\tnumpoints = points.shape[0]\n\twhile count <= numpoints:\n\t\tfid.write('Point('+str(count)+') = {'+str(points[count-1,0])+', '+str(points[count-1,1])+', '+str(points[count-1,2])+'};\\n')\n\t\tcount = count + 1\n\tcount =1\n\twhile count <= numpoints:\n\t\tfid.write('Line('+str(count)+') = {')\n\t\tif count < points.shape[0]:\n\t\t\tfid.write(str(count)+', '+str(count+1)+'};\\n')\n\t\telse:\n\t\t\tfid.write(str(count)+', 1};\\n')\n\t\tcount = count + 1\n\tlines = 1\n\tfid.write('Line Loop('+str(count)+') = {')\n\twhile lines <= numpoints:\n\t\tif lines < numpoints:\n\t\t\tfid.write(str(lines)+', ')\n\t\telse:\n\t\t\tfid.write(str(lines)+'};\\n')\n\t\tlines = lines + 1\n\tnotphys = count + 1\n\tcountphys = notphys + 1\n\tfid.write('Plane Surface('+str(notphys)+') = {'+str(count)+'};\\n')\n\tcount = 1\n\twhile count <= numpoints:\n\t\tfid.write('Physical Line('+str(countphys)+') = {'+str(countphys-notphys)+'};\\n')\n\t\tcount = count + 1\n\t\tcountphys = countphys + 1\n\tfid.write('Physical Surface('+str(countphys)+') = {'+str(countphys-notphys+1)+'};\\n')\n\tfid.close()\n\treturn 0",
"def export_landmark_file(landmark_group, fp, extension=None, overwrite=False):\n from .extensions import landmark_types\n\n _export(landmark_group, fp, landmark_types, extension, overwrite)",
"def update_pointings_file(self, infile, ra, dec, prob_fov, skymap):\n \n with open(infile, 'a') as pointing:\n pointing.write(str(ra) + ' ' + str(dec)+ ' ' + str(prob_fov) + ' ' + skymap +'\\n')",
"def save(outfile: str,\n point_data: PointData) -> None:\n point = np.array(point_data.point)\n point_data = np.array(point_data.data)\n np.savez(\n outfile,\n point=point,\n point_data=point_data)",
"def save_lattice(lattice, filename):\n np.save(filename, lattice)\n print (\"SOM lattice saved at %s\" %filename)",
"def writer(output, output_name, output_data):\n\n kml = simplekml.Kml(name=output_name)\n for exif in output_data:\n if 'Latitude' in exif.keys() and 'Latitude Reference' in exif.keys() and 'Longitude Reference' in exif.keys() and 'Longitude' in exif.keys():\n\n if 'Original Date' in exif.keys():\n dt = exif['Original Date']\n else:\n dt = 'N/A'\n\n if exif['Latitude Reference'] == 'S':\n latitude = '-' + exif['Latitude']\n else:\n latitude = exif['Latitude']\n\n if exif['Longitude Reference'] == 'W':\n longitude = '-' + exif['Longitude']\n else:\n longitude = exif['Longitude']\n\n kml.newpoint(name=exif['Name'], description='Originally Created: ' + dt,\n coords=[(longitude, latitude)])\n else:\n pass\n kml.save(os.path.join(output, output_name))",
"def _write_point_cloud(filename, X):\n _mkdir(os.path.dirname(filename))\n np.savez_compressed(filename, data=X)",
"def writePLY(PC, fileName): # real signature unknown; restored from __doc__\n pass",
"def WriteXYZ(data, filename, overwrite=False):\n if overwrite == True:\n output_file = open(filename, \"w\")\n else:\n output_file = open(filename, \"a\")\n\n for frame in data:\n output_file.write(str(len(frame['elements'])) + '\\n' + frame['comment'] + '\\n')\n for element, coordinates in zip(frame['elements'], frame['coordinates']):\n output_file.write(element + \" \" + str(coordinates[0]) + \" \" + str(coordinates[1]) + \" \" + str(coordinates[2]) + \"\\n\")\n \n output_file.close()",
"def write_trk(fname, streamlines, affine=None, shape=None):\n if affine is None:\n affine = np.eye(4)\n\n zooms = np.sqrt((affine * affine).sum(0))\n streamlines = move_streamlines(streamlines, affine)\n data = ((s, None, None) for s in streamlines)\n\n voxel_order = nib.orientations.aff2axcodes(affine)\n voxel_order = \"\".join(voxel_order)\n\n hdr = nib.trackvis.empty_header()\n hdr['voxel_size'] = zooms[:3]\n hdr['voxel_order'] = voxel_order\n hdr['vox_to_ras'] = affine\n if shape is not None:\n hdr['dim'] = shape\n trackvis.write(fname, data, hdr, points_space=\"rasmm\")",
"def writeFits(sOutFileName_p, data_p,header=None):\n data_p=np.rollaxis(data_p,2,0)\n if header==None:\n afits.writeto(sOutFileName_p,data_p,clobber=True)\n else:\n hdu=afits.PrimaryHDU(data=data_p,header=header,uint=True)\n hduList=afits.HDUList([hdu])\n hduList.writeto(sOutFileName_p,clobber=True)",
"def write_patch(filename, pts, edges=None):\n if edges is None:\n edges = set()\n\n with open(filename, 'wb') as fp:\n fp.write(struct.pack('>2i', -1, len(pts)))\n for i, pt in pts:\n if i in edges:\n fp.write(struct.pack('>i3f', -i-1, *pt))\n else:\n fp.write(struct.pack('>i3f', i+1, *pt))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Overriding this to avoid having orderOfMagnitude reset elsewhere | def _set_orderOfMagnitude(self, range):
self.orderOfMagnitude = self._order_of_mag | [
"def mag_phase(self):\n\n self.magnitudes = []\n self.phases = []\n for system in self.systems:\n m, p = self.mag_phase_system(system)\n self.magnitudes.append(m)\n self.phases.append(p)",
"def _getMagnitudes(self):\r\n assert self.isTwoComponents()\r\n self._magnitudes = []\r\n realPart = []\r\n complexPartTemp = []\r\n complexPart = []\r\n for i in range(len(self._measuredRange)):\r\n if (i%2) == 1:\r\n complexPartTemp.append(self._measuredRange[i])\r\n else:\r\n realPart.append(self._measuredRange[i])\r\n for i in range(len(complexPartTemp)):\r\n complexPart.append(complex(str(complexPartTemp[i])+'j'))\r\n for i in range(len(realPart)):\r\n self._magnitudes.append(abs(realPart[i]+complexPart[i]))",
"def reset(self):\n self.direction = np.array([uniform(-1,1),uniform(-1,1)])\n self.path = []\n if self.static:\n self.speed = 0\n else:\n self.speed = uniform(self.speed_range[0],self.speed_range[1])\n # normalize direction vector to speed\n self.direction = (self.direction / np.linalg.norm(self.direction)) * self.speed",
"def _sort(self):\n self.objects = self.objects[np.lexsort((self.dx, self.dy))[::-1]]",
"def mags(self):\n return self._try_print(\"magnitude\")",
"def magnetization(self):\n return np.absolute(np.sum(self.data) / self.size ** 2)",
"def magnitude(self):\r\n\t\ttot = 0\r\n\t\tfor (account, amount, etc) in self.lines:\r\n\t\t\ttot = tot + abs(amount)\r\n\t\treturn 0.5 * tot",
"def sort(self):\n \n srt = np.argsort( self.mean_wavelength() )\n self.filters = self.filters[srt]\n self.measurement = self.measurement[srt]\n self.e_measurement = self.e_measurement[srt]\n self.s_measurement = self.s_measurement[srt]\n self.unit = self.unit[srt]\n self.bibcode = self.bibcode[srt]\n self.upperlim = self.upperlim[srt]\n self.ignore = self.ignore[srt]\n self.fnujy = self.fnujy[srt]\n self.e_fnujy = self.e_fnujy[srt]\n self.note = self.note[srt]",
"def __init__(self):\n\n self.__speed = 0.0\n self.__height = 0.0",
"def magnetization(self):\n return np.abs(np.sum(self.system) / self.size ** 2)",
"def fix(self):\n self.__Natoms=len(self.list_of_atoms)\n if self.__Natoms==0:\n return\n self.list_of_atoms.sort()\n self.xyzs=[]\n for i in xrange(self.__Natoms):\n x=self.list_of_atoms[i][1]\n y=self.list_of_atoms[i][2]\n z=self.list_of_atoms[i][3]\n self.xyzs.append(_np.array([float(x),float(y),float(z)]))\n self.xyzs=_np.array(self.xyzs)\n self.__center_of_mass()",
"def __init__(self):\n self.order = 1\n # Here is range of pixels to use in each dimension relative to ceil(u,v)\n self._duv = np.arange(-self.order, self.order, dtype=int)\n # And here are flattened arrays of u, v displacement for whole footprint\n self._du = np.ones( (2*self.order,2*self.order), dtype=int) * self._duv\n self._du = self._du.flatten()\n self._dv = np.ones( (2*self.order,2*self.order), dtype=int) * \\\n self._duv[:,np.newaxis]\n self._dv = self._dv.flatten()",
"def _init_shape_anisotropy_n(self,\n shape_ani_demag_mode,\n shape_ani_demag_n):\n self.shape_ani_demag_n = shape_ani_demag_n\n if shape_ani_demag_mode == 0:\n self.shape_ani_demag_n = np.zeros(3)\n elif shape_ani_demag_mode == 1:\n if np.sum(shape_ani_demag_n) != 1.:\n print('[warning] sum(shape_ani_demag_n) != 1,'\n f'sum: {np.sum(shape_ani_demag_n)}')\n self.shape_ani_demag_n = shape_ani_demag_n\n elif shape_ani_demag_mode == 2:\n # ferromagnetic cylinder\n # Sato, M., & Ishii, Y. (1989).\n # Simple and approximate expressions of demagnetizing\n # factors of uniformly magnetized rectangular\n # rod and cylinder.\n # Journal of Applied Physics, 66(2), 983–985.\n # https://doi.org/10.1063/1.343481\n r = np.sqrt(self.w_fl*self.l_fl)/2\n nz = 1/(2*self.t_fl/(r*np.sqrt(np.pi)) + 1)\n self.shape_ani_demag_n = np.array([\n (1-nz)/2,\n (1-nz)/2,\n nz])\n elif shape_ani_demag_mode == 3:\n # Zhang, K., et al.\n # Compact Modeling and Analysis of Voltage-Gated\n # Spin-Orbit Torque Magnetic Tunnel Junction.\n # IEEE Access, 8, 50792–50800.\n # https://doi.org/10.1109/ACCESS.2020.2980073\n self.shape_ani_demag_n = np.array([\n np.pi*self.t_fl / (4*np.sqrt(self.w_fl*self.l_fl)),\n np.pi*self.t_fl / (4*np.sqrt(self.w_fl*self.l_fl)),\n 1 - 2 * np.pi*self.t_fl/(4*np.sqrt(self.w_fl*self.l_fl))])\n else:\n print('[error] shape_ani_demag_n not supported')\n exit(1)\n self.shape_ani_demag_n = np.array(self.shape_ani_demag_n)\n print(f'\\t[debug] shape_ani mode {shape_ani_demag_mode}. '\n f'shape_ani_n: {self.shape_ani_demag_n}')",
"def __convert_data_to_magnitudes(self):\n if self.__units == \"magnitudes\":\n pass\n\n else:\n for table in self.__tables:\n m, m_err = self.__fluxes_to_magnitudes(\n table[\"flux\"], table[\"flux_err\"]\n )\n\n table.rename_column(\"flux\", \"mag\")\n table.rename_column(\"flux_err\", \"mag_err\")\n table[\"mag\"] = m\n table[\"mag_err\"] = m_err\n\n self.__units = \"magnitudes\"",
"def reset(self):\n\n super().reset()\n self.dynamics = self.set_dynamics()",
"def _resort(self):\n # type: () -> None\n self._fs_sequence = None",
"def EstimateModelOrder(self):\n # Normalize the singular values by the maximum and cut out modes\n # corresponding to singular values below a specified tolerance\n tol1 = 1.0e-2\n snorm = self.s/self.s.max()\n n_above_tol = len(self.s[snorm > tol1])\n\n # Approximate second derivative singular values using convolve as a\n # central difference operator\n w = [1.0, -1.0]\n diff = sig.convolve(snorm, w, 'valid')\n diffdiff = sig.convolve(diff, w, 'valid')\n\n # Cut out more modes depending on the approximated second derivative\n # The idea is sort of to cut at an inflection point in the singular\n # value curve or maybe where they start to bottom out\n tol2 = 1.0e-3\n n_bottom_out = 2 + len(diffdiff[diffdiff > tol2])\n\n # Estimate the number of modes (model order) to have at least two but\n # otherwise informed by the cuts made above\n self.M = min(max(2, min(n_above_tol, n_bottom_out)), self.L)\n\n # Report the model order\n if self.output_level[-1] == \"1\":\n print(\"Model order, M = \", self.M)\n np.savetxt('singular_values.dat',snorm)\n\n # Plotting to help diagnose what the cuts are doing\n if self.output_level[-2] == \"1\":\n # Plot normalized singular values and first cut-off\n plt.figure(figsize=(8, 10))\n ax = plt.gca()\n ax.scatter(np.arange(len(self.s)), snorm, s=40, c='blue', alpha=0.3)\n ax.plot(np.array([-0.2*len(self.s), 1.2*len(self.s)]),\n tol1*np.ones(2),\n color='orange', linestyle='dashed', linewidth=2,\n label='cut-off')\n ax.set_yscale('log')\n ax.set_ylim(1e-20, 1e1)\n plt.legend()\n plt.title('Normalized Singular Values', fontsize=16)\n\n # Plot approximate second derivative and second cut-off\n plt.figure(figsize=(8, 10))\n ax = plt.gca()\n ax.scatter(np.arange(len(diffdiff)), diffdiff,\n s=40, c='blue', alpha=0.3)\n ax.plot(np.array([-0.2*len(self.s), 1.2*len(self.s)]),\n tol2*np.ones(2),\n color='orange', linestyle='dashed', linewidth=2,\n label='2nd cut-off')\n ax.set_yscale('log')\n ax.set_ylim(1e-20, 1e1)\n plt.legend()\n plt.title('Approx. 2nd Derivative of Singular Values', fontsize=16)\n plt.show()\n\n return",
"def OrderOfMagnitude( number ):\n \n return math.floor( math.log( number, 10 ) )",
"def clean_orbitals(self):\n while abs(self.orbitals[-1].fill) < 0.01:\n self.orbitals = self.orbitals[:-1]",
"def drive(self, outputMagnitude: float, curve: float) -> None:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
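Illustrative sketch only, not part of the dataset record above: the usual home for this override is a matplotlib ScalarFormatter subclass that pins the axis exponent. The class name, the constructor argument feeding _order_of_mag, and the optional range parameter are assumptions, not taken from the source.

from matplotlib.ticker import ScalarFormatter

class FixedOrderFormatter(ScalarFormatter):
    def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
        self._order_of_mag = order_of_mag
        super().__init__(useOffset=useOffset, useMathText=useMathText)

    def _set_orderOfMagnitude(self, range=None):
        # Keep the exponent fixed instead of letting the base class recompute it.
        self.orderOfMagnitude = self._order_of_mag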
returns the most specialized API, with the required flavour (IVI-COM or IVI-C), or raises NotSupportedError if the flavour is not supported | def get_session(self, flavour = None):
specialized_apis_com = []
general_apis_com = []
specialized_apis_c = []
general_apis_c = []
for papi_name, papi in self.com_apis.items():
if papi_name in SPECIALIZED_APIS:
specialized_apis_com.append(papi)
else:
general_apis_com.append(papi)
for papi_name, papi in self.c_apis.items():
if papi_name in SPECIALIZED_APIS:
specialized_apis_c.append(papi)
else:
general_apis_c.append(papi)
if flavour == 'IVI-COM' or flavour == None:
for api in specialized_apis_com:
session = SESSION_FACTORY.\
CreateSession('pyivi_' + self._soft_mod.Name)
interface = SPECIALIZED_APIS[api.Name]
return (self._soft_mod.Name,
session.QueryInterface(interface),
api.Name,
'IVI-COM')
for api in general_apis_com:
session = SESSION_FACTORY.\
CreateSession('pyivi_' + self._soft_mod.Name)
if self._soft_mod.Name in SPECIALIZED_APIS:
interface = SPECIALIZED_APIS[self._soft_mod.Name]
else:
interface = IIviDriver
return (self._soft_mod.Name,
session.QueryInterface(interface),
api.Name,
'IVI-COM')
if flavour == 'IVI-C' or flavour == None:
ok = False
for api in specialized_apis_c:
return (self._soft_mod.Name,
ctypes.windll.LoadLibrary(self.module_path),
api.Name,
'IVI-C')
for api in general_apis_c:
return (self._soft_mod.Name,
ctypes.windll.LoadLibrary(self.module_path),
api.Name,
'IVI-C')
if flavour:
raise NotSupportedError('model not supported with flavour '\
+ flavour)
else:
raise NotSupportedError('no software module supports this model') | [
"def determine_os_api(some_function):\r\n @functools.wraps(some_function)\r\n def provide_os_determination_and_call(*args, **kwargs):\r\n # expand request values\r\n if 'params' in request.values:\r\n pairs = [s.split('=', 1) for s in request.values['params'].split('&')]\r\n request.values = {p[0]:p[1] for p in pairs if len(p)==2}\r\n\r\n # naive, could be improved, but sufficient\r\n url_end = request.path[-4:].lower()\r\n if \"ios\" in url_end:\r\n kwargs[\"OS_API\"] = Participant.IOS_API\r\n else:\r\n kwargs[\"OS_API\"] = Participant.ANDROID_API\r\n return some_function(*args, **kwargs)\r\n\r\n return provide_os_determination_and_call",
"def _check_method():\n version = has_IM()\n if version:\n return IMAGEMAGICK, version\n\n version = has_PIL()\n if version:\n return PIL, version\n\n return WEBPROXY, (0)",
"def _check_method():\n version = get_im_version()\n if version:\n version, legacy = version\n return IMAGEMAGICK, version, legacy\n\n version = get_pil_version()\n if version:\n return PIL, version\n\n return WEBPROXY, (0)",
"def loaded_api():\n if 'PyQt4.QtCore' in sys.modules:\n if qtapi_version() == 2:\n return QT_API_PYQT\n else:\n return QT_API_PYQTv1\n elif 'PyQt5.QtCore' in sys.modules:\n return QT_API_PYQT5\n elif 'PySide.QtCore' in sys.modules:\n return QT_API_PYSIDE\n return None",
"def determine_flavor():\n uname, machine = platform.uname()[0:5:4]\n\n print(uname)\n if uname == \"Linux\":\n if machine[-2:] == \"64\":\n return \"Linux64\"\n else:\n return \"Linux\"\n elif uname == \"Darwin\":\n if machine in (\"x86_64\", \"i686\"):\n return \"DarwinX86\"\n else:\n return \"Darwin\"\n elif uname == \"Windows\":\n return \"windows\"\n else:\n raise RuntimeError(\"Unknown flavor: (%s, %s)\" % (uname, machine))",
"def platform_detect():\n\n # TODO: Is there a better way to check if running on BBB or Pi? Relying on\n # the architecture name is brittle because new boards running armv6 or armv7\n # might come along and conflict with this simple identification scheme. One\n # option might be switching to read /proc/cpuinfo.\n plat = platform.platform()\n\n # Handle Raspberry Pi\n # Platform output on Raspbian testing/jessie ~May 2014:\n # Linux-3.10.25+-armv6l-with-debian-7.4\n if plat.lower().find('armv6l-with-debian') > -1:\n return RASPBERRY_PI\n # Handle pidora distribution.\n elif plat.lower().find('raspberry_pi') > -1:\n return RASPBERRY_PI\n # Handle arch distribution.\n elif plat.lower().find('arch-armv6l') > -1:\n return RASPBERRY_PI\n # Handle Beaglebone Black\n # Platform output on Debian ~May 2014:\n # Linux-3.8.13-bone47-armv7l-with-debian-7.4\n elif plat.lower().find('armv7l-with-debian') > -1:\n return BEAGLEBONE_BLACK\n # Handle Beaglebone Black\n # Platform output on Ubuntu ~July 2014:\n # Linux-3.8.13-bone56-armv7l-with-Ubuntu-14.04-trusty\n elif plat.lower().find('armv7l-with-ubuntu') > -1:\n return BEAGLEBONE_BLACK\n elif plat.lower().find('armv7l-with-glibc2.4') > -1:\n return BEAGLEBONE_BLACK\n else:\n return UNKNOWN",
"def loaded_api():\n if 'PyQt4.QtCore' in sys.modules:\n if qtapi_version() == 2:\n return QT_API_PYQT\n else:\n return QT_API_PYQTv1\n elif 'PySide.QtCore' in sys.modules:\n return QT_API_PYSIDE\n elif 'PyQt5.QtCore' in sys.modules:\n return QT_API_PYQT5\n elif 'PyQt6.QtCore' in sys.modules:\n return QT_API_PYQT6\n return None",
"def _get_api_class(provider):\n if provider == storage_url.ProviderPrefix.GCS:\n if (\n properties.VALUES.storage.preferred_api.Get()\n == properties.StoragePreferredApi.GRPC_WITH_JSON_FALLBACK.value\n ):\n log.debug('Using gRPC client with JSON Fallback.')\n return gcs_grpc_client.GrpcClientWithJsonFallback\n if (\n properties.VALUES.storage.gs_xml_access_key_id.Get()\n and properties.VALUES.storage.gs_xml_secret_access_key.Get()\n ):\n return gcs_xml_client.XmlClient\n return gcs_json_client.JsonClient\n elif provider == storage_url.ProviderPrefix.S3:\n # TODO(b/275749579): Change this after the refactor is done.\n return s3_xml_client.S3XmlClient\n else:\n raise errors.Error(_INVALID_PROVIDER_PREFIX_MESSAGE)",
"def backend_for_file(self, filename):\n for backend in self:\n try:\n if self[backend].supports(filename):\n return self[backend]\n except AttributeError:\n # backend doesn't define \"support\"\n pass\n return None",
"def GetApi(provider):\n if getattr(_cloud_api_thread_local_storage, provider, None) is None:\n if provider == GCS_PREFIX:\n # TODO(b/159164504): Update with implemented GCS API.\n _cloud_api_thread_local_storage.gs = CloudApi()\n elif provider == AWS_S3_PREFIX:\n # TODO(b/159164385): Update with implemented S3 API.\n _cloud_api_thread_local_storage.s3 = CloudApi()\n else:\n raise ValueError('Provider API value must be \"gs\" or \"s3\".')\n return getattr(_cloud_api_thread_local_storage, provider)",
"def select_cmake_type(flav):\n fd = cmake_flavors[flav]\n if \"cmflav\" not in fd:\n u.error(\"internal error: build flavor %s has no cmflav setting\" % flav)\n cmflav = fd[\"cmflav\"]\n if not cmflav:\n cmflav = flag_cmake_type\n return cmflav",
"def get_api_gen(self, vin):\n vehicle = self._vehicles.get(vin.upper())\n result = None\n if vehicle:\n if sc.FEATURE_G1_TELEMATICS in vehicle[sc.VEHICLE_FEATURES]:\n result = sc.FEATURE_G1_TELEMATICS\n if sc.FEATURE_G2_TELEMATICS in vehicle[sc.VEHICLE_FEATURES]:\n result = sc.FEATURE_G2_TELEMATICS\n _LOGGER.debug(\"Getting vehicle API gen %s:%s\", vin, result)\n return result",
"def test_azure_service_api_flavors_get(self):\n pass",
"def get_version(option=0):\n release_type = \"\" \n final_version = \"\"\n\n cli_output = cli(\"show version\")\n if legacy:\n result = re.search(r'system.*version\\s*(.*)\\n', cli_output[1])\n if result != None:\n return result.group(1)\n else:\n result = re.search(r'NXOS.*version\\s*(.*)\\n', cli_output)\n #Line is of type NXOS: version <version>\n if result != None and option != 1:\n return result.group(1)\n elif result != None: \n #This checks if the image if of intermediate type of CCO\n #If 'build' is present, then it is of intermediate type\n interim_result = result.group()\n if 'Feature Release' in interim_result:\n release_type = \".F\"\n elif 'Maintenance Release' in interim_result:\n release_type = \".M\" \n else:\n release_type = \"\"\n \n if 'build' in interim_result:\n # We are extracting our answer from the interim_result extracted so far\n # Whatever we were extracting till now isn't enough\n # This is an intermediate image, so our interim result is of form: nxos.9.4.1. [build 10.1.0.60.].bin\n final_version = re.search(r'build.*', interim_result)\n final_version = final_version.group()\n final_version = final_version.replace('(', '.').replace(')', '.').replace(']', '').split()[1]\n \n # Now, the form obtained if of the form 10.1.0.60, and it is a string. \n #return final_version \n else:\n #This fetches the CCO image version\n # interim_result is of form major.minor (patch version)\n final_version = interim_result.replace('(', '.').replace(')', '')\n final_version = final_version.split()[2]\n #return final_version\n \n if final_version == \"\":\n poap_log(\"Unable to get switch version\")\n else:\n final_version = final_version + release_type \n \n return final_version",
"def _get_driver(mime_src, mime_out):\n # TODO: make this configurable\n if mime_src == 'application/x-esa-envisat' and \\\n mime_out == 'application/x-netcdf':\n return \"BEAM\", \"NetCDF4-BEAM\"\n elif mime_src == 'application/x-esa-envisat' and \\\n mime_out == 'application/x-esa-envisat':\n return \"EOXS\", \"envisat\"\n\n frmreg = getFormatRegistry()\n fobj = frmreg.getFormatByMIME(mime_out)\n if fobj is None:\n raise RenderException(\"Invallid output format '%s'!\"%mime_out, \"format\")\n backend, _, driver = fobj.driver.partition(\"/\")\n return backend, driver",
"def recommended_ioapic(self):\n ret = self._get_attr(\"recommendedIOAPIC\")\n return ret",
"def pep425tags_get_supported(versions=None, supplied_platform=None):\n supported = []\n\n # Versions must be given with respect to the preference\n if versions is None:\n versions = []\n version_info = get_impl_version_info()\n major = version_info[:-1]\n # Support all previous minor Python versions.\n for minor in range(version_info[-1], -1, -1):\n versions.append(''.join(map(str, major + (minor,))))\n\n impl = get_abbr_impl()\n\n abis = []\n\n abi = get_abi_tag()\n if abi:\n abis[0:0] = [abi]\n\n abi3s = set()\n import imp\n for suffix in imp.get_suffixes():\n if suffix[0].startswith('.abi'):\n abi3s.add(suffix[0].split('.', 2)[1])\n\n abis.extend(sorted(list(abi3s)))\n\n abis.append('none')\n\n platforms = get_platforms(supplied=supplied_platform)\n\n # Current version, current API (built specifically for our Python):\n for abi in abis:\n for arch in platforms:\n supported.append(('%s%s' % (impl, versions[0]), abi, arch))\n\n # No abi / arch, but requires our implementation:\n for i, version in enumerate(versions):\n supported.append(('%s%s' % (impl, version), 'none', 'any'))\n if i == 0:\n # Tagged specifically as being cross-version compatible\n # (with just the major version specified)\n supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))\n\n # Major Python version + platform; e.g. binaries not using the Python API\n for arch in platforms:\n supported.append(('py%s' % (versions[0][0]), 'none', arch))\n\n # No abi / arch, generic Python\n for i, version in enumerate(versions):\n supported.append(('py%s' % (version,), 'none', 'any'))\n if i == 0:\n supported.append(('py%s' % (version[0]), 'none', 'any'))\n\n return supported",
"def _autodetect_backend(storage_path):\n if storage_path == '::inmem::':\n return 'inmem'\n elif storage_path.endswith('.npz'):\n return 'npz'\n elif storage_path.endswith(('.h5', '.hdf5')):\n return 'hdf5'\n if storage_path.endswith('.mat'):\n return 'mat'\n else:\n raise exceptions.AutodetectBackendError(storage_path)",
"def GetSupportedEngines():\r\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Physically queries the instrument model at the given address | def get_model_name(address):
from visa import VisaIOError
import visa
model = "no device"
try:
instr = visa.Instrument(str(address))
timeout = instr.timeout
except VisaIOError:
print("instrument at address " + str(address) + " didn't reply in "
"time...")
else:
try:
instr.timeout = 0.1
ret = instr.ask("*IDN?")
except (VisaIOError, TypeError):
print("instrument at address " + \
address + \
" didn't reply in time...")
else:
model = ret.split(",")[1]
model = model.replace(' ', '')
finally:
instr.timeout = timeout
return model | [
"def get(address_type, address):",
"def get_address(self, address: str) -> Address:",
"def test_get_xrp__ripple_address_details(self):\n pass",
"def test_list_xrp__ripple_transactions_by_address(self):\n pass",
"def impedance(address, name):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.measure_imp()",
"def query_address(self, address):\n try:\n conn = httplib.HTTPSConnection(\n self.server_url, timeout=self._timeout)\n except httplib.HTTPException as e:\n print 'Could not connect to server %s\\n%s' % (self.server_url, str(e))\n return None\n\n conn.request('GET', self.assemble_request(address))\n response = conn.getresponse()\n if response.status != httplib.OK:\n print 'Error in response when querying %s\\n%s' % (address, response.reason)\n return None\n\n data = response.read()\n res = self.parse_response(data)\n if res is None:\n print 'Did not find any matches for %s' % address\n return res",
"def get_libgen_use_phys_addr(unit_type,unit_index):\n command='ILLGTestCli -- -phys %s %s'%(unit_type,unit_index)\n out = connections.execute_mml_without_check(command)\n\n print out\n match = re.search(r\"\\bunit physical address is: \\s*(0x[0-9a-f]*)\", out, re.I)\n if match is not None:\n return match.group(1).upper()\n else:\n return 'failure'",
"def connect(self, address):\n self.address = address\n LOG.info(_(\"Vyatta vRouter REST API: \"\n \"Connecting to vRouter %s\") % address)\n self._process_model()\n self._sync_cache()",
"def request_inventory(self, address):\n self.logger.info(\"Requesting starmap from {}\".format(self.source_format(address)))\n m = Message(\"starmap_request\", data=None)\n self.message_pool.spawn(self.send_message, address, m)",
"def test_find_address(session, manuhome_id, street, city, region, country):\n if model_utils.is_legacy():\n location: Db2Location = Db2Location.find_by_manuhome_id_active(manuhome_id)\n assert location\n loc_json = location.registration_json\n assert loc_json.get('address')\n assert loc_json['address'].get('city') == city\n assert loc_json['address'].get('street') == street\n assert loc_json['address'].get('region') == region\n assert loc_json['address'].get('country') == country",
"def search_txs_by_address(self, address: str) -> List[Transaction]:",
"def test_get_address_entity(self):\n test_service.get_address_entity(self)\n\n query_string = [('',''),\n ('','')]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/{currency}/addresses/{address}/entity'.format(currency='btc', address='1Archive1n2C579dMsAu3iC6tWzuQJz8dN'),\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def fetch(address):\n return self.memory[address]",
"def get(self, address) -> Device:\n try:\n address = Address(address)\n except ValueError:\n address = X10Address(address)\n return self._devices.get(address)",
"def get_potential_matches_from_address(self, address):",
"def evaluate(p: ghidra.program.model.listing.Program, s: unicode) -> ghidra.program.model.address.Address:\n ...",
"def assemble_request(self, address):\n pass",
"def print_search_hit(address) -> None:\n if not address:\n return\n\n vmmap = pwndbg.gdblib.vmmap.find(address)\n if vmmap:\n region = os.path.basename(vmmap.objfile)\n else:\n region = \"[mapped]\"\n\n region = region.ljust(15)\n\n region = M.get(address, region)\n addr = M.get(address)\n display = pwndbg.enhance.enhance(address)\n print(region, addr, display)",
"def query_map():\n if not model_loaded:\n input(\"you must first load a map...\")\n return None\n while True:\n block_id = input(\"please enter a block id of the currently loaded model: \")\n try:\n int(block_id)\n except:\n print(\"You must enter a number...\")\n continue\n if block_id in model_loaded:\n print(\"block \" + block_id + \" info: \")\n for data in model_loaded[block_id]:\n print(\"{0}: {1}\".format(data, model_loaded[block_id][data]))\n break\n else:\n print(\"Invalid block id...\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
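Hypothetical usage sketch, not part of the dataset record: the GPIB resource string below is invented, and the legacy visa.Instrument API used by the record is assumed to be installed alongside the get_model_name function above.

model = get_model_name("GPIB0::14::INSTR")
print(model)  # manufacturer's model field from *IDN?, or "no device" if nothing replied in time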
use this decorator to register the wrapper class for an instrument type | def register_wrapper(flavour, instrument_type):
def func(cls):
WRAPPERS[flavour][instrument_type] = cls
return cls
return func | [
"def register_wrapper(cls):\n for wrapped in cls._WRAPPED:\n if wrapped in cls._WRAPPERS:\n LOGGER.warn('{} is already registered to {}.'.format(wrapped, cls._WRAPPERS[wrapped]))\n\n if LANTZ_BUILDING_DOCS:\n cls._WRAPPERS[wrapped] = type(wrapped.__name__ + 'Wrapped',\n (cls, ), {'_IS_LANTZ_WRAPPER': True})\n else:\n cls._WRAPPERS[wrapped] = type(wrapped.__name__ + 'Wrapped',\n (cls, wrapped), {'_IS_LANTZ_WRAPPER': True})\n\n return cls",
"def register_integration(\n self, key: str, type_: Union[Type[\"Integration\"], LazyLoader]\n ) -> None:\n self._integrations[key] = type_",
"def register_transport(conn_type: ConnectionType) \\\n -> Callable[[Type[Transport]], Type[Transport]]:\n # pylint: disable=unused-argument, missing-docstring\n def decorator(cls: Type[Transport]) -> Type[Transport]:\n TRANSPORT_CLASSES[conn_type] = cls\n\n @property # type: ignore\n def connection_type(self: Transport) -> ConnectionType:\n return conn_type\n\n cls.connection_type = connection_type # type: ignore\n return cls\n return decorator",
"def plugin_class(cls):\n if isinstance(cls, str):\n context = cls\n\n def wrapper(cls):\n setattr(cls, CLASS_MARKER, context)\n return cls\n return wrapper\n\n elif inspect.isclass(cls):\n setattr(cls, CLASS_MARKER, True)\n return cls",
"def register(cls):\n if not hasattr(cls, \"__fromjson__\") or not hasattr(cls, \"__tojson__\"):\n raise KeyError(\"register: registered types must have a __fromjson__ method\")\n k = clsKey(cls)\n if k in _types:\n raise Exception(\"tinyjson: mutliple attempts to register class %s\" % k)\n _types[k] = cls",
"def logging_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available logging hooks.\n\n :param type cls: logging hook class.\n\n :returns: logging hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n logging_services.register_hook(instance)\n\n return cls\n\n return decorator",
"def __new__(cls, base='Instrument', *args, **kwargs):\n addCls = {'Dummy': DummyModbus, 'Instrument': Instrument}[base]\n cls = type(cls.__name__ + '+' + addCls.__name__, (cls, addCls), {})\n\n return super(AlicatModule, cls).__new__(cls)",
"def add_wrapper(self, obj, wrapper):\n if wrapper.name not in self.codegens:\n obj['wrapper'] = self.codegens[wrapper.name] = wrapper\n obj['wrapped'] = True",
"def register_decorator(func: Callable) -> Callable:\n module_name = module.__name__ if module else func.__module__\n func_name = func.__name__\n _UNITS.setdefault(module_name, dict())[func_name] = unit\n\n return func",
"def register(dataset_name):\n\n def decorator(decorator_dataset_class, decorator_dataset_name):\n _DATASETS[decorator_dataset_name] = decorator_dataset_class\n return decorator_dataset_class\n\n return lambda dataset_class: decorator(dataset_class, dataset_name)",
"def TypeAlias(alias: ClassVar):\n def register_alias(cls: ClassVar):\n if not is_cowait_type(cls):\n raise TypeError(f'{cls} must be a valid cowait type')\n\n register_type(alias, cls)\n return cls\n\n return register_alias",
"def RegisterClass(cls, handler):\n cls.handler_cache[handler.meta.typeid] = handler",
"def register_implementation(cls, name, transformation_type):\n cls.implementations[name] = transformation_type\n transformation_type._match_node = cls",
"def Wrapper(self) -> object:",
"def define_report_type(report_type):\n\n def wrap(func):\n setattr(func, '_report_type', report_type)\n return func\n\n return wrap",
"def register_spec(cls, spec):\n spec = to_spec(spec)\n _SPEC_REGISTRY[cls] = spec\n return spec",
"def test_init(self):\n # create a mock object for wrapped class\n cls = mock.MagicMock()\n # call the method\n decorator._SingletonWrapper.__init__(self.wrapper, cls)\n # check whether the object holds wrapped class\n self.assertEqual(self.wrapper.__wrapped__, cls)",
"def wrapping_type_test(self, wrapping_type_test):\n\n self._wrapping_type_test = wrapping_type_test",
"def register_domain_type(domain_class, type_key):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
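Hypothetical usage sketch, not part of the dataset record: it assumes the decorator above is in scope together with a module-level WRAPPERS dict keyed first by flavour and then by instrument type, which is what the decorator body implies.

WRAPPERS = {'IVI-COM': {}, 'IVI-C': {}}

@register_wrapper('IVI-COM', 'IviScope')
class ComScopeWrapper:
    pass

assert WRAPPERS['IVI-COM']['IviScope'] is ComScopeWrapper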
True if C{tr} is a 4x4 homogeneous transform. | def ishomog(tr):
return tr.shape == (4,4) | [
"def is_tt_matrix(self):\n return len(self.get_raw_shape()) == 2",
"def check_conv2d_transpose(extract):\n if not ethosn_available():\n return False\n\n return _ethosn.conv2d_transpose(extract)",
"def is_identity(self):\n return self.m == Matrix4x4()",
"def is_four_bit_mode(self):\n return False",
"def HasTRI(self):\n return self.__has('TRI')",
"def _is_connect_four(self):\n\t\t# for each box of the grid\n\t\tfor i in xrange(self.height - 1, -1, -1):\n\t\t\tfor j in xrange(self.width):\n\t\t\t\tif self.board[i][j] != ' ':\n\t\t\t\t\t# check for vertical connect four\n\t\t\t\t\tif self._find_vertical_four(i, j):\n\t\t\t\t\t\treturn True\n\n\t\t\t\t\t# check for horizontal connect four\n\t\t\t\t\tif self._find_horizontal_four(i, j):\n\t\t\t\t\t\treturn True\n\n\t\t\t\t\t# check for diagonal connect four\n\t\t\t\t\tif self._find_diagonal_four(i, j):\n\t\t\t\t\t\treturn True\n\n\t\treturn False",
"def isTransform(obj):\n # Check object exists\n if not cmds.objExists(obj): return False\n\n # Check transform\n mObject = glTools.utils.base.getMObject(obj)\n if not mObject.hasFn(OpenMaya.MFn.kTransform): return False\n\n # Return result\n return True",
"def is_transition_matrix(T, tol):\n T=T.tocsr() # compressed sparse row for fast row slicing\n values=T.data # non-zero entries of T\n\n \"\"\"Check entry-wise positivity\"\"\"\n is_positive=np.allclose(values, np.abs(values), rtol=tol)\n\n \"\"\"Check row normalization\"\"\"\n is_normed=np.allclose(T.sum(axis=1), 1.0, rtol=tol)\n\n return is_positive and is_normed",
"def is_matrix(self):\r\n return self.size[0] > 1 and self.size[1] > 1",
"def IsTransformed(self, *args):\n return _Graphic3d.Graphic3d_Structure_IsTransformed(self, *args)",
"def is_transition_matrix(T, tol):\n T = T.tocsr() # compressed sparse row for fast row slicing\n values = T.data # non-zero entries of T\n\n \"\"\"Check entry-wise positivity\"\"\"\n is_positive = np.allclose(values, np.abs(values), rtol=tol)\n\n \"\"\"Check row normalization\"\"\"\n is_normed = np.allclose(T.sum(axis=1), 1.0, rtol=tol)\n\n return is_positive and is_normed",
"def find_same_transform(r,t,transforms):\n\n is_transpose = False\n tr_num = None\n for k,v in transforms.iteritems():\n if hasattr(v,'r'):\n rr = v.r\n tt = v.t\n else:\n (rr,tt) = v[2]\n is_the_same, is_transpose_flag = is_same_transform(r, t, rr, tt)\n if is_the_same:\n if is_transpose_flag:\n # when transpose is found, keep it but continue the search\n tr_num = k\n is_transpose = True\n else:\n # found non-transform match\n return k, False\n return tr_num, is_transpose",
"def isIdentity(self):\n # We are a 3x3 matrix.\n if isinstance(self, hou.Matrix3):\n # Construct a new 3x3 matrix.\n mat = hou.Matrix3()\n\n # Set it to be the identity.\n mat.setToIdentity()\n\n # Compare the two.\n return self == mat\n\n # Compare against the identity transform from hmath.\n return self == hou.hmath.identityTransform()",
"def is_matrix_triangular(matrix):\n return is_matrix_upper_triangular(matrix) or is_matrix_lower_triangular(\n matrix\n )",
"def is_plane(self) -> bool:\n return (\n self.min_x == self.max_x or\n self.min_y == self.max_y or\n self.min_z == self.max_z\n )",
"def is_su4layer(self, depth: int) -> bool:\n return self._to_su4layer[depth] >= 0",
"def is_transformation_ok(eq):\n A, B = transformation_to_pell(eq)\n u = (A*Matrix([X, Y]) + B)[0]\n v = (A*Matrix([X, Y]) + B)[1]\n simplified = simplify(Subs(eq, (x, y), (u, v)).doit())\n\n coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])\n\n for term in [X*Y, X, Y]:\n if term in coeff.keys():\n return False\n\n for term in [X**2, Y**2, Integer(1)]:\n if term not in coeff.keys():\n coeff[term] = Integer(0)\n\n if coeff[X**2] != 0:\n return isinstance(S(coeff[Y**2])/coeff[X**2], Integer) and isinstance(S(coeff[Integer(1)])/coeff[X**2], Integer)\n\n return True",
"def is_square(self):\n return self.rows == self.columns",
"def HasqTRI(self):\n return self.__has('qTRI')",
"def is_portrait(self) -> bool:\n return self.width <= self.height"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
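Minimal usage sketch, illustrative only, assuming numpy and the ishomog function above are in scope:

from numpy import eye
ishomog(eye(4))  # True: a 4x4 array has the accepted shape
ishomog(eye(3))  # False: wrong shape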
True if C{v} is an l-vector. | def isvec(v, l=3):
return v.shape == (l,1) or v.shape == (1,l) or v.shape == (l,) | [
"def isVectorType(self):\n if re.match(r'^(ivec\\d|vec\\d)$', self.__type):\n return True\n return False",
"def __is_ltr(cer_inss):\n is_ltr = False\n for cer_ins in cer_inss.get_all():\n if len(cer_ins.features) > 0:\n is_ltr = True\n break\n return is_ltr",
"def is_vector(block):\n \n return is_1d_vector(block) or is_2d_vector(block)",
"def is_vector(self) -> bool:\n return self.result_type in (servo.connectors.prometheus.ResultType.vector, servo.connectors.prometheus.ResultType.matrix)",
"def is_lcl(obj):\n return (hasattr(obj, 'lmax') and hasattr(obj, 'dl') and hasattr(obj, 'nm')\n and hasattr(obj, 'cl'))",
"def is_vector(self) -> bool:\n if self.real == 0.0 and (\n self.i != 0.0 or self.j != 0.0 or self.k != 0.0):\n return True\n\n return False",
"def has_vector_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.dtype.vector_3_float64",
"def is_unit_vector(A):\n\tif A.ndim != 1:\n\t\tprint(\"This is not a vector!\")\n\t\treturn False\n\telse:\n\t\tl2_norm = lp_norm(A, 2)\n\t\tif l2_norm == 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def is_vector(self, name):\n return not self.is_scalar(name)",
"def is_1d_vector(block):\n return all([not isinstance(e, list) for e in block])",
"def is_lin(self):\n return np.all([d.is_lin for d in self])",
"def is_vector(a):\n return len(a.shape) == 1 or a.shape[1] == 1",
"def is_linestring(self):\n return self._geography.getType() == Geography.LSVAL",
"def __isub__(self, *args):\n return _vnl_vectorPython.vnl_vectorLD___isub__(self, *args)",
"def HasVLS(self):\n return self.__has('VLS')",
"def __eq__(self, *args):\n return _vnl_vectorPython.vnl_vectorLD___eq__(self, *args)",
"def __eq__(self, *args):\n return _vnl_vectorPython.vnl_vectorSL___eq__(self, *args)",
"def is_2d_vector(block):\n return all([isinstance(r, list) for r in block]) and all([len(r) == 1 for r in block])",
"def vector_supports(self, fieldid, astype):\r\n format = self.vector_format(fieldid)\r\n if format is None: return False\r\n return format.supports(astype)",
"def __eq__(self, *args):\n return _vnl_vectorPython.vnl_vectorUL___eq__(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
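Sketch of the shapes the predicate accepts for the default length l=3 (illustrative only, assuming numpy and the isvec function above are in scope):

from numpy import array
isvec(array([1, 2, 3]))        # shape (3,)   -> True
isvec(array([[1, 2, 3]]))      # shape (1, 3) -> True
isvec(array([[1], [2], [3]]))  # shape (3, 1) -> True
isvec(array([1, 2, 3, 4]))     # length 4     -> False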
Number of columns in a matrix. | def numcols(m):
return m.shape[1]; | [
"def numCols(self) -> int:\n return self._java_matrix_wrapper.call(\"numCols\")",
"def board_n_columns(board: Board) -> int:\n return len(board[0])",
"def numColBlocks(self) -> int:\n return self._java_matrix_wrapper.call(\"numColBlocks\")",
"def size(self, matrix):\r\n return matrix.shape",
"def column_count(self):\n\t\treturn len(self.column_names)",
"def num_cols_at(self, i):\n return len(self._net_graph[i])",
"def numRows(self) -> int:\n return self._java_matrix_wrapper.call(\"numRows\")",
"def _get_numberOfColumns(self) -> \"int\" :\n return _core.TableCommandInput__get_numberOfColumns(self)",
"def columns(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixCF_columns(self)",
"def col_count(self):\n return len(self.tblGrid.gridCol_lst)",
"def columns(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixF_columns(self)",
"def colsPerBlock(self) -> int:\n return self._java_matrix_wrapper.call(\"colsPerBlock\")",
"def __len__(self):\n return len(self.__matrix)",
"def cols(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixF_cols(self)",
"def _get_number_of_columns(self, row: _Row):\n columns = len(row)\n for column in row:\n if isinstance(column, TableCell):\n columns += column.colspan - 1\n\n return columns",
"def cols(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixCF_cols(self)",
"def columns(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixD_columns(self)",
"def mat_size(self):\n\n # Length of the linear array\n l = self.size\n\n # Total number of elements in the corresponding bi-dimensional symmetric matrix\n n = int((1 + math.sqrt(1 + 8 * l)) / 2)\n\n return n",
"def cols(self):\n return len(self.text[0])",
"def get_column_count(self, sheet_name):\n return self._get_sheet(sheet_name).ncols"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
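Illustrative one-liner, not part of the record, assuming a numpy array and the numcols helper above:

from numpy import zeros
numcols(zeros((2, 5)))  # 5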
Convert a 1-dimensional argument that is either a list, array or matrix to an array. | def arg2array(arg):
if isinstance(arg, (matrix, ndarray)):
s = arg.shape;
if len(s) == 1:
return array(arg);
if min(s) == 1:
return array(arg).flatten();
elif isinstance(arg, list):
return array(arg);
elif isinstance(arg, (int, float, float32, float64)):
return array([arg]);
raise ValueError; | [
"def T_arr1d(*args):\n return _seb.T_arr1d(*args)",
"def to_array(arr):\n return arr if isinstance(arr, np.ndarray) else np.array(arr)",
"def listtoarray(self,x):\n dim=len(x)\n matrice=zeros((1,dim))\n for i in range(dim):\n matrice[0][i]=x[i]\n return matrice",
"def data_2_numpy(data):\n typeOfInput = type(data) \n \n #the data provided is already an array\n if typeOfInput==np.ndarray:\n #numpy array \n return(data)\n \n #the data provided is a list\n elif typeOfInput == list:\n # python list\n return(np.array(data))\n \n #the data provided is a real number\n else:\n #real input\n return(np.array([data]))",
"def to1d(list: list) -> np.ndarray:\n return np.fromiter(list)",
"def wrap_array(x):\n if isinstance(x, collections.Iterable):\n if isinstance(x, np.ndarray):\n return x\n else:\n return np.array(x)\n else:\n return np.array([x])",
"def from1d(array: np.ndarray) -> list:\n return array.tolist()",
"def to_array(x):\n if isinstance(x, chainer.Variable):\n x = x.array\n return x",
"def _arraytest(*args):\r\n\r\n rargs = []\r\n for a in args:\r\n if isinstance(a, (list, tuple)):\r\n rargs.append(scipy.array(a))\r\n else:\r\n rargs.append(a)\r\n if len(rargs) == 1:\r\n return rargs[0] # no unpacking if single value, return value i/o list\r\n else:\r\n return rargs",
"def _make_1_element_array(data: float):\n return numpy.array([data])",
"def optimize_list_of_ndarrays(x):\n if type(x) == np.ndarray:\n return x\n if len(x) == 0:\n return np.array([[]])\n if type(x[0]) == float or type(x[0]) == np.ndarray:\n return np.array(x)\n return x",
"def makelist(input):\n if isinstance(input, list) or isinstance(input, np.ndarray):\n output = input\n else:\n output = [input]\n return output",
"def arrayify(value):\n return value if _is_array(value) else [value]",
"def _check_to_array(x):\n if(hasattr(x, '__iter__')):\n return(np.array(x, dtype=float))\n else:\n return(np.array([float(x)]))",
"def convert_array(a):\n try:\n return tuple(convert_array(i) for i in a)\n except TypeError:\n return a",
"def _as_ndarray(value):\n # TODO(tomhennigan) Support __array_interface__ too (including for\n # _convert_numpy_inputs).\n return value.__array__()",
"def cast2matrix(x, dim):\n if type(x) == list: # cast list to numpy array and then call cas2matrix\n return cast2matrix(numpy.array(x), dim)\n\n if type(x) == numpy.ndarray:\n if len(x.shape) == 1: # vector to 1x3 matrix\n return reshape(x, (1, dim))\n if len(x.shape) == 2:\n if x.shape[1] == dim: # is n x 3\n return x\n if x.shape[0] == dim: # has to be transposed since it is 3 x n\n return x.T\n if len(x.shape) > 2:\n raise ValueError(\"Numpy ndarrays as input have to be vectors or matrices.\")",
"def to_numpy(data):\n if isinstance(data, (int, float)):\n return np.array(data)\n if isinstance(data, np.ndarray):\n return data\n if isinstance(data, torch.Tensor):\n return data.detach().cpu().numpy()\n raise TypeError(f'Not supported data type `{type(data)}` for '\n f'converting to `numpy.ndarray`!')",
"def _convert_other_val_to_array(self, other: Any) -> Any:\r\n if isinstance(other, list):\r\n return Array(other)\r\n return other",
"def list_to_array(vec, size):\n \"\"\" Provided that length(vec)/size is an integer, transforms the given list\n into a numpy array\"\"\"\n n = int(len(vec) / size)\n arr = np.reshape(np.array(vec), (n, size))\n return arr"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
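Sketch of the normalisation the helper performs, illustrative only; the bare matrix/array/float64 names inside arg2array come from the original module's star import of numpy, which is assumed here as well.

from numpy import array, matrix
arg2array([4, 5, 6]).shape            # (3,)
arg2array(matrix([[4, 5, 6]])).shape  # (3,)  - a 1xN matrix is flattened
arg2array(7.0)                        # array([7.])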
Common error handler. Display the error string, print a traceback, then raise an exception to return to the interactive prompt. | def error(s):
print('Robotics toolbox error:', s)
#traceback.print_exc();
raise ValueError | [
"def error(s):\n print s\n exit(1)",
"def displayError(err):\n print(\"\\nError: %s.\" % err)\n displayUsage()",
"def error (\n\n text,\n fatal = False\n ) :\n\n text = str( text )\n\n directory, name = os.path.split( sys.executable )\n\n identifier, extension = os.path.splitext( name )\n\n if identifier.lower() == \"python\" :\n\n if bool( fatal ) : prefix = \"fatal error - \"\n\n else : prefix = \"warning - \"\n\n text = str( text )\n\n if ( not text.endswith( \"\\n\" ) ) and ( len( text ) > 60 ) : text = text + \"\\n\"\n\n raw_input( prefix + \" \" + str( text ) + \" Press any key\" )\n\n if bool( fatal ) : sys.exit( 1 )",
"def _error(exc=None):\n if exc is None:\n exc = format_exc()\n print('* confspec:', file=stderr)\n for line in exc.split('\\n'):\n print('* ', line, file=stderr)",
"def _reportErrors(self, msg) :\n self.help()\n print msg\n print self._line(\"-\")\n if not self.inhibitExceptions :\n raise ScriptInputError, msg",
"def error(what,say):\n print 'ERROR: ', what, say",
"def error(message):\n print(message)\n exit()",
"def ShowSyntaxError(self):\n (etype, value, tb) =sys.exc_info()\n msg = ' '.join(traceback.format_exception_only(etype, value))\n self.output.write_exc(msg)",
"def error(msg,code=None,exception=None):\n if DEBUG:\n raise Exception(msg) from exception\n elif not QUIET:\n print(f\"ERROR [{EXENAME}]: {msg}\",file=sys.stderr)\n if type(code) is int:\n exit(code)",
"def raise_error(self, message):\n # On compile, exec_pos stores the index of lines being compiled\n # so the bad line number is [exec_pos+1]\n print(\"In line \" + str(self.exec_pos + 1) + \",\")\n print(message)\n raise RuntimeError",
"def error(msg):\n print(\"error: \" + str(msg))\n sys.exit(-1)",
"def doException(etype, eval, etrace):\n if hasattr(sys, 'ps1') or not sys.stderr.isatty():\n # we are in interactive mode or we don't have a tty-like\n # device, so we call the default hook\n sys.__excepthook__(etype, eval, etrace)\n else:\n import traceback, pdb\n # we are NOT in interactive mode, print the exception...\n traceback.print_exception(etype, eval, etrace, limit=2, file=sys.stdout)\n print\n # ...then start the debugger in post-mortem mode.\n pdb.pm()",
"def tik_exception_process(loc, msg):\n if loc is None:\n print(\"Error: {}\\n\".format(msg.rstrip(\"\\n\")))\n return\n print(\"\\n\".join(get_context_msg(loc.file, int(loc.column), msg)))",
"def display_exceptions(context: ExecutionContext, event: events.Finished) -> None:\n if not event.has_errors:\n return\n\n default.display_section_name(\"EXCEPTIONS\")\n for result in context.results:\n if result.has_errors:\n display_single_exception(context, result)\n if not context.show_errors_tracebacks:\n click.secho(\n \"Add this option to your command line parameters to see full tracebacks: --show-exception-tracebacks\",\n fg=\"magenta\",\n )",
"def printErr(err, str):\r\n\r\n\tprint \"Error fetching {}:\".format(str), err\r\n\tsys.exit(1)",
"def error_repr(e):\n if isinstance(e, (ScriptError, lupa.LuaSyntaxError, lupa.LuaError)):\n if isinstance(e, ScriptError):\n info = e.args[0]\n tp = info['type']\n else:\n info = parse_error_message(e.args[0])\n tp = ScriptError.SYNTAX_ERROR\n line_num = info.get('line_number', -1)\n message = info.get('error', info.get('message'))\n return \"%s [input]:%s: %s\" % (tp, line_num, message)\n elif isinstance(e, Exception):\n return repr(e)\n return ScriptError.UNKNOWN_ERROR",
"def error_print():\n print(\"ERROR: Invalid Entry!\")",
"def _handle_exec_exception(self, err):\n\n # Log the error if we are debugging\n msg = 'Error occurred while evaluating CustomResponse'\n log.warning(msg, exc_info=True)\n\n # Notify student with a student input error\n _, _, traceback_obj = sys.exc_info()\n raise ResponseError(text_type(err), traceback_obj)",
"def raise_runtime_error(self, message):\n print(\"Iceberg Runtime ERROR!\")\n print(\"In instruction number \" + str(self.exec_pos) + \",\")\n print(message)\n raise RuntimeError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
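Hypothetical call site, illustrative only: the handler prints the message and raises ValueError, which a caller can catch to stay at the interactive prompt.

try:
    error("transform must be 4x4")
except ValueError:
    pass  # back at the prompt; nothing else to clean up in this sketch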
Removes a proxy from the internal store. | def remove_proxy(self, id, proxy):
if id not in self._stores:
return
self._stores[id].difference_update({proxy, }) | [
"def _delete_proxy(self, proxy):\n print \"except, remove proxy: \", proxy \n new_set = set(self.proxy_list)\n new_set.remove(proxy)\n self.proxy_list = list(new_set)",
"def delete(self, proxy):\n self.db.delete(proxy)",
"def remove(self, proxy):\n self.SMProperty.RemoveProxy(proxy.SMProxy)\n self._UpdateProperty()",
"def _delete_proxy(self, proxy_id:str) -> dict:\r\n params = {'f': 'json',\r\n 'proxies': proxy_id}\r\n url = \"%s/sharing/rest/content/users/%s/items/%s/deleteProxies\" % (self._portal.url,\r\n self._user_id,\r\n self.id)\r\n return self._portal.con.post(url, params)",
"def del_neigh_proxy(self, ip):\n if ip in self.proxies:\n logger.info(\"Delete Neighbor Proxy {0} @ {1}\"\n .format(ip, self.iface))\n try:\n ipcmd.del_neigh_proxy(self.iface, ip)\n except ipcmd.IpCmdError:\n # Failure is normal if the proxy did not exist\n if ip not in self.proxies:\n return\n # Reload tables\n self.reload()\n if ip not in self.proxies:\n return\n # Let's try again, and failure goes up this time\n ipcmd.del_neigh_proxy(self.iface, ip)\n self.proxies.discard(ip)",
"def _del_proxy(self, handle):\n # ignore if remote already shut down:\n if self.socket.closed:\n return\n self.send_message('del_proxy', handle=handle)",
"def removeProxyListener(self, listener):\n # type: (IProxyListener) -> ()",
"def remove_proxies_for_instance(self, instance_name):\n original_set = self.proxies.copy()\n for proxy in [proxy for proxy in original_set if\n proxy.instance_name.lower() == instance_name.lower()]:\n self.proxies.remove(proxy)",
"def __del__(self):\n # Make sure that we remove observers we added\n if self.Observed:\n observed = self.Observed\n tag = self.ObserverTag\n self.Observed = None\n self.ObserverTag = -1\n observed.RemoveObserver(tag)\n if _pyproxies and self.SMProxy and (self.SMProxy, self.Port) in _pyproxies:\n del _pyproxies[(self.SMProxy, self.Port)]",
"def delete_ndp_proxy(self, ndp_proxy, ignore_missing=True):\n self._delete(\n _ndp_proxy.NDPProxy, ndp_proxy, ignore_missing=ignore_missing\n )",
"def UnRegister(proxy, **extraArgs):\n if \"registrationGroup\" in extraArgs:\n registrationGroup = extraArgs[\"registrationGroup\"]\n else:\n registrationGroup = __determineGroup(proxy)\n\n if \"registrationName\" in extraArgs:\n registrationName = extraArgs[\"registrationName\"]\n else:\n registrationName = __getName(proxy, registrationGroup)\n\n if registrationGroup and registrationName:\n pxm = ProxyManager()\n pxm.UnRegisterProxy(registrationGroup, registrationName, proxy)\n else:\n raise RuntimeError (\"UnRegistration error.\")\n return (registrationGroup, registrationName)",
"def uninstall_squid_proxy():\n sudo('service squid3 stop')\n with settings(warn_only=True):\n sudo('apt-get -y purge squid3')\n puts(green('Proxy service removed from ' + str(env.host)))\n puts(blue('**Please remove {}:{} from PROXY_LIST used for cheffing.**'.format(env.host, '3128')))",
"def _unregister_module_proxy(name):\n\t_module_proxies_lock.acquire()\n\ttry:\n\t\tproxy_list = _module_proxies.get(name)\n\t\tif proxy_list is not None:\n\t\t\t# First delete this name from the dict so that\n\t\t\t# if this same thread reenters below, it won't\n\t\t\t# enter this path again.\n\t\t\tdel _module_proxies[name]\n\t\t\tfor proxy in proxy_list:\n\t\t\t\tobject.__getattribute__(proxy, '_get_target')()\n\n\t\t\tmodules = sys.modules\n\t\t\tfor name, proxy_list in list(_module_proxies.items()):\n\t\t\t\tif name not in modules:\n\t\t\t\t\tcontinue\n\t\t\t\t# First delete this name from the dict so that\n\t\t\t\t# if this same thread reenters below, it won't\n\t\t\t\t# enter this path again.\n\t\t\t\tdel _module_proxies[name]\n\t\t\t\tfor proxy in proxy_list:\n\t\t\t\t\tobject.__getattribute__(proxy, '_get_target')()\n\tfinally:\n\t\t_module_proxies_lock.release()",
"def delete(self):\n if self._store:\n self._store.delete(self.key)",
"def apiproxy_unpublish(self, apiproxy_unpublish):\n\n self._apiproxy_unpublish = apiproxy_unpublish",
"def remove(self):\n self.connections.remove(self)",
"def remove_repo(self, repo=None, url=None):\n self.contents()\n\n if repo:\n repouri = repo\n if url:\n repouri = TransportRepoURI(url)\n else:\n raise ValueError(\"Must supply either a repo or a uri.\")\n\n if repouri.key() in self.__cache:\n del self.__cache[repouri.key()]",
"def test_delete_cloud_proxy(self):\n pass",
"def remove_from_cache(self, obj):\r\n nm = self.client._resolve_name(obj)\r\n self._object_cache.pop(nm, None)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
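Self-contained sketch, not from the source, of the dict-of-sets layout that remove_proxy assumes; the SimpleNamespace stand-in and the proxy strings are invented for illustration.

from types import SimpleNamespace
pool = SimpleNamespace(_stores={"eu": {"10.0.0.1:3128", "10.0.0.2:3128"}})
remove_proxy(pool, "eu", "10.0.0.1:3128")
print(pool._stores["eu"])  # {'10.0.0.2:3128'}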
Updates the store with the given proxies. This clears the store of preexisting proxies and adds the new ones. | def update_store(self, id, proxies):
if id not in self._stores:
return
store = self._stores[id]
with self._lock:
store.clear()
if proxies:
store.update(proxies) | [
"def set_proxies(self, proxies):\n if proxies:\n protocols = [\"http\", \"https\", \"ftp\", \"socks\"]\n for protocol in protocols:\n entry_id = protocol + \"_proxy_entry\"\n entry_widget = self.ui.get_object(entry_id)\n port_id = protocol + \"_proxy_port\"\n port_widget = self.ui.get_object(port_id)\n\n try:\n proxy = proxies[protocol]\n proxy = proxy.replace('https://', '')\n proxy = proxy.replace('http://', '')\n\n host = proxy.split(':')[0]\n port = proxy.split(':')[1]\n\n entry_widget.set_text(host)\n port_widget.set_text(port)\n except (IndexError, KeyError) as err:\n pass",
"def reload(self):\n listings = self.getAllListings()\n for listing in listings:\n self.onlineStoreDatabase.addListing(itemID=listing['name'], storeID=listing['storeID'], price=listing['price'])\n\n orders = self.getAllOrders()\n for order in orders:\n self.onlineStoreDatabase.addOrder(order)",
"def update_all_proxy_attrs(self):\n for proxy in self.proxies:\n self.set_proxy_attrs(proxy)",
"def save_proxies(self) -> None:\n self.sort_proxies()\n directories_to_delete = (\n \"proxies\",\n \"proxies_anonymous\",\n \"proxies_geolocation\",\n \"proxies_geolocation_anonymous\",\n )\n for directory in directories_to_delete:\n try:\n rmtree(directory)\n except FileNotFoundError:\n pass\n directories_to_create = (\n directories_to_delete\n if self.MMDB\n else (\"proxies\", \"proxies_anonymous\")\n )\n for directory in directories_to_create:\n mkdir(directory)\n\n # proxies and proxies_anonymous folders\n for proto, proxies in self.proxies.items():\n path = f\"proxies/{proto}.txt\"\n path_anonymous = f\"proxies_anonymous/{proto}.txt\"\n for proxy, exit_node in proxies.items():\n self.append_to_file(path, proxy)\n if exit_node != proxy.split(\":\")[0]:\n self.append_to_file(path_anonymous, proxy)\n\n # proxies_geolocation and proxies_geolocation_anonymous folders\n if self.MMDB:\n with open_database(self.MMDB) as reader:\n for proto, proxies in self.proxies.items():\n path = f\"proxies_geolocation/{proto}.txt\"\n path_anonymous = (\n f\"proxies_geolocation_anonymous/{proto}.txt\"\n )\n for proxy, exit_node in proxies.items():\n line = proxy + self.get_geolocation(exit_node, reader) # type: ignore\n self.append_to_file(path, line)\n if exit_node != proxy.split(\":\")[0]:\n self.append_to_file(path_anonymous, line)",
"def add_proxy(self,\n proxy):\n if proxy in self.proxies:\n self.proxies.remove(proxy) # Update token, etc.\n self.proxies.add(proxy)",
"def update_squid_proxy():\n proxy_hosts = checkproxies()\n puts(green('Updating the proxy service config file /etc/squid3/squid.conf for'))\n puts(green('proxy_hosts = ' + str(proxy_hosts)))\n for host in proxy_hosts:\n env.host_string = host\n with hide('running', 'stdout', 'stderr'):\n hostname = run('hostname')\n puts(green('Updting proxy config on ' + hostname))\n sudo('service squid3 stop')\n put('config/etc_squid3_squid.conf', '/etc/squid3/squid.conf', use_sudo=True)\n sudo('service squid3 start')\n puts(green('All proxy servers updated successfully.'))",
"def remove_proxy(self, id, proxy):\n if id not in self._stores:\n return\n\n self._stores[id].difference_update({proxy, })",
"def save(self, proxy_list, proxy_type='socks5', file_name=\"data/proxies.db\"):\n\n with open(file_name, 'a') as f:\n for proxy in proxy_list:\n f.write(Proxy.get_data() + '\\n')",
"def update_proxy_pool(self):\n proxy_list = []\n try:\n resp = requests.get(self.url)\n except ConnectionError as ce:\n print(ce)\n return(1)\n soup = bs(resp.text, \"html.parser\")\n proxy_table = soup.find_all(id='proxylisttable')\n for tr in proxy_table[0].find_all('tbody')[0].find_all('tr'):\n td = tr.find_all('td')\n proxy_list.append({\n 'ip': td[0].text,\n 'port': td[1].text,\n 'anonymity': td[4].text.upper(),\n 'https': td[6].text\n })\n self._data_frame = pd.DataFrame(proxy_list)",
"def populate_stores(self, stores):\n for self_store, other_store in zip(self.stores, stores):\n if self_store is not None:\n self_store.store(other_store.val)",
"def __init__(self, proxies=None):\n self._address_pool = set() if proxies is None else set(proxies)\n self._address_pool_cycle = cycle(self._address_pool)\n self.__load_proxies_from_file()",
"def clear_managers():\n for manager in proxies.values():\n manager.close()\n proxies.clear()",
"def _update_http_proxy_nodes(self):\n new_http_proxy_nodes = self.deployment_state_manager.get_active_node_ids()\n new_http_proxy_nodes = (\n new_http_proxy_nodes - self.cluster_node_info_cache.get_draining_node_ids()\n )\n new_http_proxy_nodes.add(self._controller_node_id)\n self._http_proxy_nodes = new_http_proxy_nodes",
"def __load_proxies_from_file(self):\n proxies_from_file = set()\n try:\n with open(\"http_handler/proxy_data.csv\", 'r', encoding='utf-8') as fd:\n for line in fd:\n line = line.split(' ')\n proxies_from_file.add(line[:-1][0])\n except BaseException as e:\n logs.save_log(\"Exception: failed to load proxies at __load_proxies_from_file method, Error: {}\".format(e))\n print(str(e))\n return\n finally:\n self._address_pool |= proxies_from_file\n self.__update_cycle()",
"def populateIdentities(self):\n self.populate_proxies(self.proxyFilePath)\n self.populate_user_agents()",
"def _delete_proxy(self, proxy):\n print \"except, remove proxy: \", proxy \n new_set = set(self.proxy_list)\n new_set.remove(proxy)\n self.proxy_list = list(new_set)",
"def sort_proxies(self) -> None:\n prox = [\n (\n proto,\n [\n (proxy, exit_node)\n for proxy, exit_node in proxies.items()\n if exit_node\n ],\n )\n for proto, proxies in self.proxies.items()\n ]\n self.proxies = {\n proto: dict(sorted(proxies, key=self._get_sorting_key))\n for proto, proxies in prox\n }",
"def load(self, file_name=\"data/proxies.db\", proxy_list=None, proxy_type=None):\n\n if not proxy_list:\n self.proxy_list = []\n\n with open(file_name, 'r') as f:\n for line in f.readline():\n (ip, port, proxy_type) = line.split()\n proxy = Proxy(ip, port, proxy_type)\n self.proxy_list.append(proxy)\n\n return proxy_list",
"def test_update_cloud_proxy(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
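A minimal, runnable sketch of the pattern in the `update_store` record above: a per-id set of proxies guarded by a lock, cleared and repopulated inside one critical section. The `ProxyStoreHolder` class, the `add_store` helper, and the sample proxy strings are illustrative assumptions, not part of the original record.

```python
import threading

class ProxyStoreHolder:
    def __init__(self):
        self._stores = {}            # id -> set of "host:port" strings (assumed shape)
        self._lock = threading.Lock()

    def add_store(self, store_id):
        self._stores.setdefault(store_id, set())

    def update_store(self, store_id, proxies):
        if store_id not in self._stores:
            return
        store = self._stores[store_id]
        with self._lock:
            store.clear()            # drop the pre-existing proxies first
            if proxies:
                store.update(proxies)

holder = ProxyStoreHolder()
holder.add_store("eu-west")
holder.update_store("eu-west", ["10.0.0.1:8080", "10.0.0.2:3128"])
print(sorted(holder._stores["eu-west"]))   # ['10.0.0.1:8080', '10.0.0.2:3128']
```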
reads the image from the given path and returns a PIL image object | def read_image(self, path: str) -> Image:
raise NotImplementedError | [
"def __open_image(path) -> PILImage:\n try:\n return pilImage.open(path)\n except IOError as ex:\n LOGGER.critical('Failed to open image file at %s: %s' % (path, str(ex)))\n raise",
"def load_img(path):\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. '\n 'The use of `load_img` requires PIL.')\n with open(path, 'rb') as f:\n img = pil_image.open(io.BytesIO(f.read()))\n if img.mode not in ('L', 'I;16', 'I'):\n img = img.convert('L')\n return img",
"def img_read(img_path):\n img_path, img_type = check_img_path(img_path)\n if img_type == 'nii':\n img = load_nii(img_path, compressed=False)\n elif img_type == 'nii.gz':\n img = load_nii(img_path, compressed=True)\n else:\n img = load_gii(img_path)\n return img",
"def get_img_obj(file_path):\n try:\n img = Image.open(file_path)\n except IOError as ioe:\n print ioe, \", skipping:\", file_path\n return False\n if img.format:\n return img\n return False",
"def load_image(file):\n return Image.open(os.path.abspath(file))",
"def open_image(filename):\n return Image.open(filename)",
"def load(image_path, access='random'):\n\n return pyvips.Image.new_from_file(image_path, access=access)",
"def read_image(self,filename:str):\n image_path = os.path.join(os.getcwd(),self.images_folder, filename)\n image = Image.open(image_path)\n return image",
"def get_image():\r\n\r\n file = choose_file()\r\n \r\n if file == \"\":\r\n sys.exit(\"File Open cancelled, exiting program\")\r\n img = load_image(file)\r\n\r\n return img",
"def load_image(self, idx):\n\n path = self.__image_folder / self.imgs[idx][\"file_name\"]\n return Image.open(path)",
"def read_image(image_path):\n return cv2.imread(image_path)",
"def get_image(self, name, pil=False):\n image = Image.open(BytesIO(self.get_file(name).read()))\n if pil:\n return image\n return to_tensor(image)",
"def create_pil_image(filepath):\n\n try:\n image = Image.open(filepath)\n except FileNotFoundError as e:\n print(e, f\"{filepath} is not a valid path to an image\")\n\n return image",
"def open_file_as_pil_image(source_file):\n return Image.open(source_file)",
"def sample_loader(sample_path: str) -> Image:\n image = Image.open(sample_path).convert(\"RGB\")\n return image",
"def getImage(source_path):\n filename = source_path.split('/')[-1]\n current_path = TRAINING_IMAGES_DIR + filename\n image = cv2.imread(current_path)\n return image",
"def getImageObject(self, path):\n #return self.b.ImageObject(path)",
"def get_image():\n\n # Pop up a dialogue box to select a file\n\n file = choose_file()\n \n # Exit the program if the Cancel button is clicked.\n if file == \"\":\n sys.exit(\"File Open cancelled, exiting program\")\n \n # Open the file containing the image and load it\n img = load_image(file)\n \n return img",
"def load() -> Image:\r\n image = load_image(choose_file())\r\n show(image)\r\n return image"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
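The `read_image` record above is only an abstract hook that raises `NotImplementedError`. Below is one way it might be implemented with Pillow; the class name and the RGB conversion are assumptions added for illustration.

```python
from PIL import Image

class PILImageReader:
    """Hypothetical concrete reader for the abstract read_image above."""

    def read_image(self, path: str) -> Image.Image:
        with Image.open(path) as img:
            # convert() forces the lazy loader to read the pixel data
            # before the underlying file handle is closed.
            return img.convert("RGB")

# usage (assumes an image file exists at this path):
# photo = PILImageReader().read_image("example.jpg")
```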
return JSON of all albums | def get(self):
return getAllAlbums() | [
"def albums():\n albums = app.config[\"albums\"]\n # TODO complete (return albums.get_albums() in JSON format)\n return json.dumps(albums.get_albums())",
"def vk_get_album_list(request):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), request.path))\n vk_token,vk_user = get_vk_cookies(request)\n method_name = 'photos.getAlbums'\n parameters = {\n 'owner_id': vk_user,\n 'need_covers': 1,\n 'need_system': 1,\n }\n content = vk_method(method_name,vk_token,parameters)\n\n albums = content['items']\n for album in albums:\n album['created'] = psql_time(album.get('created')) if isinstance(album.get('created'),int) else None\n album['updated'] = psql_time(album.get('updated')) if isinstance(album.get('updated'),int) else None\n\n return render(request,'vk_get_album_list.html',{\n # 'content': content,\n 'albums': content['items'],\n # 'album': album,\n # 'tags': tags,\n # 'resume': resume\n })",
"def albums(self):\n\n c.artist = request.GET.get('artist', u'')\n c.album = request.GET.get('album', u'')\n\n try:\n self.m = g.p.connect()\n except (NoMPDConnection, ConnectionClosed):\n return render('/null.html')\n c.albums = self.m.albums(c.artist)\n\n aa = AlbumArt()\n c.album_imgs = aa.artist_art(c.artist)\n random.shuffle(c.album_imgs)\n return render('/albums.html')",
"def get_artist_album_list_json(artist_name):\n \n album_list_json = []\n \n try:\n results = sp.artist_albums(artist_id_dictionary[artist_name], album_type='album')\n except:\n logging.error(\" Bad request getting album list for \" + artist_name)\n return None\n \n album_list_json.extend(results['items'])\n \n while results['next']:\n results = sp.next(results)\n album_list_json.extend(results['items'])\n \n return album_list_json",
"def photosGetAlbums(self, callback):\n self.photosGetAlbums(None, None, callback)",
"def get(self, album_id):\n return jsonify(getAlbumData(album_id))",
"def get_albums(self):\n soup = self.get_page(self.img_url)\n blks = soup.find_all(\"ul\", class_='large-image-blocks')\n if self.path:\n base_path = os.path.join(self.path, \"images\")\n else:\n base_path = os.path.join(self.username, \"images\")\n\n for blk in blks:\n for tag in blk.find_all(\"a\"):\n link = tag.attrs[\"href\"]\n if link.rfind(\"album\") == -1:\n break\n name_tag = tag.find(\"p\", class_=\"name\")\n album_name = name_tag.decode_contents().encode('utf8', 'ignore')\n album_name = ''.join(c for c in str(album_name)\n if c in valid_chars)\n path = os.path.join(base_path, album_name)\n self.download_images(link, path)\n if self.stoprequest.isSet():\n return",
"def get_albums(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('albums', search, start,\r\n max_items)",
"def getAlbums(self):\n\t\tbasketPage = self.request(site_prefix + 'basket.shtml')\n\n\t\tp = linksParser()\n\t\tp.feed(basketPage)\n\t\tp.close()\n\n\t\talbums = []\n\t\tfor link,desc in p.links.items():\n\t\t\tm = self.albumRe.match(link)\n\t\t\tif m:\n\t\t\t\tnew = dict()\n\t\t\t\tnew['url'] = site_prefix + \"downloads_iframe.shtml?\" + m.group(1)\n\t\t\t\tnew['artist'] = desc[1][0].strip()\n\t\t\t\tnew['title'] = \"\".join(desc[1][1:]).strip()\n\t\t\t\tnew['tracks'] = []\n\t\t\t\talbums.append(new)\n\n\t\treturn albums",
"def albums(self, albums, market=None):\n\n tlist = [self._get_id(\"album\", a) for a in albums]\n if market is not None:\n return self._get(\"albums/?ids=\" + \",\".join(tlist) + '&market=' + market)\n else:\n return self._get(\"albums/?ids=\" + \",\".join(tlist))",
"def get_album(self, album_id):\n track = []\n img = None\n\n for i in self.__albums:\n for t_id, info in self.__tracks.items():\n if i[\"id\"] == t_id and t_id == album_id:\n img = i[\"img\"]\n\n for a, b in info.items():\n track.append({\n \"name\": a,\n \"length\": b\n })\n return {\"album_id\": album_id, \"img\": img, \"track\": track}",
"def photosGetAlbums(self, uid, aids, callback):\n j = Json().put(u\"uid\", uid).put(u\"aids\", aids)\n self.callMethodRetList(u\"photos.getAlbums\", j.getJavaScriptObject(), Album.__class__, callback)",
"def display_album(id):\n\n with urllib.request.urlopen(\"https://jsonplaceholder.typicode.com/photos?albumId=\" + str(id)) as url:\n data = json.loads(url.read().decode())\n\n if not data:\n print(f'There is no data corresponding to that id: {id}')\n\n item_list = []\n for element in data:\n if element['id']:\n item_list.append('['+ str(element['id']) + ']' + ' '+ element['title'])\n\n return item_list",
"def __load_albums(self, albums_file):\n with open(albums_file, \"r\") as h:\n for line in h:\n album_id, artist, album_name, album_img = line.strip().split(\"\\t\")\n self.__albums.append({\n \"id\": album_id,\n \"artist\": artist,\n \"album_name\": album_name,\n \"img\": album_img\n })\n h.close()\n #print(self.__albums)",
"def listFotoJSON():\r\n fotos = session.query(Foto).all()\r\n return jsonify(fotos=[i.serialize for i in fotos])",
"def get_artist_albums(self, artist_id): # TODO initialize and return a list of Album objects\n return self.__get_data(self.url.artists_albums_url().format(id=str(artist_id)))",
"def get_everything(all_albums_data, all_tracks_data):\n\n get_all_albums(all_albums_data)\n\n if all_tracks_data:\n # Gets artist artwork\n if all_tracks_data[0]['artist_avatar']:\n url = 'https://f4.bcbits.com/img/' + all_tracks_data[0]['artist_avatar'] + '_10.jpg'\n artist_cover = requests.get(url).content\n else:\n artist_cover = None\n\n get_all_tracks(all_tracks_data[0]['artist'], 'Tracks', all_tracks_data, album_release=None,\n album_cover=artist_cover, artist_cover=artist_cover)",
"def get_all_albums(all_albums_data, artist_cover=None):\n for album_data in all_albums_data:\n get_specific_album(album_data, artist_cover)",
"def get_artist_singles_list_json(artist_name):\n \n single_list_json = []\n \n try:\n results = sp.artist_albums(artist_id_dictionary[artist_name], album_type='single')\n except:\n logging.error(\" Bad request getting singles list for \" + artist_name)\n return None\n \n single_list_json.extend(results['items'])\n \n while results['next']:\n results = sp.next(results)\n single_list_json.extend(results['items'])\n \n return single_list_json"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
return JSON of album with album_id | def get(self, album_id):
return jsonify(getAlbumData(album_id)) | [
"def get_album(self, album_id):\n track = []\n img = None\n\n for i in self.__albums:\n for t_id, info in self.__tracks.items():\n if i[\"id\"] == t_id and t_id == album_id:\n img = i[\"img\"]\n\n for a, b in info.items():\n track.append({\n \"name\": a,\n \"length\": b\n })\n return {\"album_id\": album_id, \"img\": img, \"track\": track}",
"def albums():\n albums = app.config[\"albums\"]\n # TODO complete (return albums.get_albums() in JSON format)\n return json.dumps(albums.get_albums())",
"def fetchAlbumInfo(album_id):\n url = 'https://api.spotify.com/v1/albums/' + album_id\n req = requests.get(url)\n assert req.ok, 'n/a'\n data = req.json()\n album_info = {}\n assert data.get('name'), 'n/a'\n album_info['artist_id'] = data['artists'][0]['id']\n album_info['album_id'] = album_id\n album_info['name'] = data['name']\n album_info['year'] = data['release_date'][0:4]\n album_info['popularity'] = data['popularity']\n return album_info",
"def fetchAlbumInfo(album_id):\n url = \"https://api.spotify.com/v1/albums/\" + album_id\n req = requests.get(url)\n if req.ok == False:\n return 'Error: bad Spotify API URL or similar error'\n data = req.json()\n album_info = {}\n #print data[u'artists']\n album_info['artist_id'] = data[u'artists'][0][u'id']\n album_info['album_id'] = data[u'id']\n album_info['name'] = data[u'name']\n release_date = data[u'release_date']\n year = int(release_date[0:4])\n album_info['year'] = year\n album_info['popularity'] = data[u'popularity']\n \n return album_info",
"def album(self, album_id, market=None):\n\n trid = self._get_id(\"album\", album_id)\n if market is not None:\n return self._get(\"albums/\" + trid + '?market=' + market)\n else:\n return self._get(\"albums/\" + trid)",
"def fetchAlbumInfo(album_id):\n url_base = \"https://api.spotify.com/v1/albums/\" + album_id\n url = url_base\n req = requests.get(url)\n data = req.json()\n info={}\n info[\"artist_id\"]=data[\"artists\"][0][\"id\"]\n info[\"album_id\"]=album_id #string\n info[\"name\"]=data[\"name\"] #string\n info[\"release_date\"]=data[\"release_date\"] [:4]\n info[\"popularity\"]=data[\"popularity\"] #int\n return info\n #pass\"\"\"",
"def display_album(id):\n\n with urllib.request.urlopen(\"https://jsonplaceholder.typicode.com/photos?albumId=\" + str(id)) as url:\n data = json.loads(url.read().decode())\n\n if not data:\n print(f'There is no data corresponding to that id: {id}')\n\n item_list = []\n for element in data:\n if element['id']:\n item_list.append('['+ str(element['id']) + ']' + ' '+ element['title'])\n\n return item_list",
"def album(self):\r\n return self.content.get('album')",
"def get(self): \n return getAllAlbums()",
"def _fetch_album(self, gn_id, is_last_album=True):\n self.__log.call(gn_id, is_last_album=is_last_album)\n\n gn_queries = self._prepare_gn_queries(self.ALBUM_FETCH_XML)\n gn_queries.find(\"QUERY/GN_ID\").text = gn_id\n\n gn_responses = self._get_response(\n gn_queries, http_keep_alive=is_last_album)\n gn_album = gn_responses.find(\"RESPONSE/ALBUM\")\n\n self.__log.return_(gn_album)\n return gn_album",
"def get_artist_album_list_json(artist_name):\n \n album_list_json = []\n \n try:\n results = sp.artist_albums(artist_id_dictionary[artist_name], album_type='album')\n except:\n logging.error(\" Bad request getting album list for \" + artist_name)\n return None\n \n album_list_json.extend(results['items'])\n \n while results['next']:\n results = sp.next(results)\n album_list_json.extend(results['items'])\n \n return album_list_json",
"def get_album_from_context(self, context):\n album_id = id_from_uri(context[\"uri\"])\n result = self.get_api_v1(\"albums/{}\".format(album_id))\n return Album(result or {})",
"def _add_album_metadata(self, spotify_album):\r\n album = SpotifyAlbum(spotify_album.spotify_uri)\r\n params = {'uri': spotify_album.spotify_uri}\r\n res = requests.get(self.api_lookup_url, params=params)\r\n data = res.json()\r\n\r\n if 'album' in data:\r\n album.title = data['album']['name']\r\n album.artist_uri = data['album']['artist-id']\r\n\r\n return album",
"def vk_get_album_list(request):\n if not request.user.is_superuser:\n return redirect('%s?next=%s' % (reverse('dc_parse:admin_auth'), request.path))\n vk_token,vk_user = get_vk_cookies(request)\n method_name = 'photos.getAlbums'\n parameters = {\n 'owner_id': vk_user,\n 'need_covers': 1,\n 'need_system': 1,\n }\n content = vk_method(method_name,vk_token,parameters)\n\n albums = content['items']\n for album in albums:\n album['created'] = psql_time(album.get('created')) if isinstance(album.get('created'),int) else None\n album['updated'] = psql_time(album.get('updated')) if isinstance(album.get('updated'),int) else None\n\n return render(request,'vk_get_album_list.html',{\n # 'content': content,\n 'albums': content['items'],\n # 'album': album,\n # 'tags': tags,\n # 'resume': resume\n })",
"def albums_show(album_id):\n album = albums.find_one({'_id': ObjectId(album_id)})\n # album_comments = comments.find({'album_id': ObjectId(album_id)})\n return render_template('albums_show.html', album=album)",
"def get_album_art(track_id):\n track_result = spotify.track(track_id)\n imageurl = track_result['album']['images'][1]['url']\n return imageurl\n\n return songseries",
"def fetchAlbumIds(artist_id):\n url_base = \"https://api.spotify.com/v1/artists/\" + artist_id\n url_album = \"/albums?album_type=album\"\n url_market = \"&market=US\"\n url = url_base + url_album + url_market\n req = requests.get(url)\n data = req.json()\n album = data['items'][0]['id']\n return album",
"def album(self):\n return self._album_name",
"def albums(self):\n\n c.artist = request.GET.get('artist', u'')\n c.album = request.GET.get('album', u'')\n\n try:\n self.m = g.p.connect()\n except (NoMPDConnection, ConnectionClosed):\n return render('/null.html')\n c.albums = self.m.albums(c.artist)\n\n aa = AlbumArt()\n c.album_imgs = aa.artist_art(c.artist)\n random.shuffle(c.album_imgs)\n return render('/albums.html')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
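The two album records above are thin handlers that delegate to `getAllAlbums()` and `getAlbumData(album_id)`, which are not shown. A self-contained Flask sketch of the same two endpoints follows, with an in-memory list standing in for the real data layer; the routes, field names, and sample data are assumptions.

```python
from flask import Flask, jsonify

app = Flask(__name__)
ALBUMS = [
    {"id": 1, "artist": "Example Artist", "title": "First Album"},
    {"id": 2, "artist": "Example Artist", "title": "Second Album"},
]

@app.route("/albums")
def all_albums():
    # JSON of all albums
    return jsonify(ALBUMS)

@app.route("/albums/<int:album_id>")
def album_by_id(album_id):
    # JSON of the album with the given id, or a 404 payload
    album = next((a for a in ALBUMS if a["id"] == album_id), None)
    if album is None:
        return jsonify({"error": "album not found"}), 404
    return jsonify(album)
```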
Samples individuals from each replicate population, and stores the genotypes of that sample in the database. | def sampleIndividuals(pop, param):
(ssize, mutation, popsize, sim_id, num_loci) = param
popID = pop.dvars().rep
gen = pop.dvars().gen
sample = drawRandomSample(pop, sizes=ssize)
samplelist = []
for idx in range(ssize):
genotype_list = list(sample.individual(idx).genotype())
indiv = dict(id=idx, genotype=genotype_list)
samplelist.append(indiv)
_storeIndividualSample(popID,num_loci,ssize,gen,mutation,popsize,sim_id,samplelist)
return True | [
"def mutate_population(self):\n for invidual in self.population:\n if(np.random.uniform(0, 1) < self.mutation_chance):\n invidual.mutate_random_gene(self.mutation_scale)\n self.rate_population()",
"def generate_random_population(self):\n for i in range(POPULATION_SIZE):\n Chromosome = individual()\n Chromosome.generate()\n self.populationlist.append(Chromosome)",
"def create_population(self):\n global maxid\n self.population= []\n #.....0th individual is the initial guess if there is\n ind= Individual(0,self.ngene,self.murate,self.func,self.args)\n genes=[]\n for ig in range(self.ngene):\n g= Gene(self.nbitlen,self.vars[ig]\n ,min=self.vranges[ig,0],max=self.vranges[ig,1])\n genes.append(g)\n ind.set_genes(genes)\n self.population.append(ind)\n #.....other individuals whose genes are randomly distributed\n for i in range(self.nindv-1):\n ind= Individual(i+1,self.ngene,self.murate,self.func,self.args)\n maxid= i+1\n genes= []\n for ig in range(self.ngene):\n g= Gene(self.nbitlen,self.vars[ig]\n ,min=self.vranges[ig,0],max=self.vranges[ig,1])\n #.....randomize by mutating with high rate\n g.mutate(0.25)\n genes.append(g)\n ind.set_genes(genes)\n self.population.append(ind)",
"def stored_populations(env):\n gens_group = env.data_file[gens_group_key]\n return [\n gen for gen in stored_generations(env)\n if 'individuals' in gens_group[gen]]",
"def load_genotypes(self):\n genotypes = read_range(self.pgen_file, 0, self.num_variants-1, sample_subset=self.sample_idxs)\n return pd.DataFrame(genotypes, index=self.variant_ids, columns=self.sample_ids)",
"def create_population(self):\r\n self.generation = 0\r\n for genome_num in range(self.pop_size):\r\n genome = Genome()\r\n genome.mutate()\r\n self.genomes.append(genome)\r\n txt = self.data_loc(self.generation, genome_num)\r\n savetxt(txt, genome.node_net, fmt=\"%f\")",
"def appendPopulation(self, population):\n for index in range(population.size()):\n gene = population.getGene(index)\n fitness = population.getFitness(index)\n self.appendGene(gene, fitness)",
"def populate(self):\n self.population = []\n \n self.population = np.random.multivariate_normal(self.mu, self.S, size = self.n)",
"def populations(tsv_filename, data_source_name):\n print(\"Importing populations from '{}'\".format(tsv_filename))\n data_source = DataSource.query.filter_by(name=data_source_name).one()\n print(\"Using data source: {}\".format(data_source))\n with open(tsv_filename) as file:\n csvreader = csv.DictReader(file, delimiter='\\t')\n for row in csvreader:\n print(row)\n if 'super_population' in row:\n super_population_code = row['super_population']\n elif 'geographic_region' in row:\n super_population_code = row['geographic_region']\n sp = SuperPopulation.query.filter_by(\n code=super_population_code).one()\n p, created = get_one_or_create(\n db.session, Population, code=row['code'], name=row['name'],\n description=row['description'],\n latitude=float(row['latitude']),\n longitude=float(row['longitude']))\n if created:\n p.data_source = data_source\n p.super_populations.append(sp)\n db.session.add(p)\n db.session.commit()",
"def set_up_patient_population(self):\n\n # Instantise patients\n for key, value in self.patient_data.iterrows():\n\n # Set up patient details in dictionary (to be passed to Patient class)\n patient_dict = dict()\n patient_dict['patient_id'] = value['Patient ID']\n patient_dict['location'] = value['Postcode sector']\n patient_dict['dialysis_type'] = value['Patient type']\n patient_dict['first_day'] = value['first_day']\n \n patient_dict['will_be_infected'] = (\n True if self._params.will_be_infected_rand.sample() < \n self._params.total_proportion_people_infected else False)\n\n patient_dict['time_to_infection'] = (\n self._params.time_to_infection.sample() if \n patient_dict['will_be_infected'] else 99999)\n\n patient_dict['time_positive'] = self._params.time_positive.sample()\n patient_dict['status'] = value['COVID status']\n \n # Turn all current suspected into negatives (assume will be treated in side rooms)\n if patient_dict['status'] == 'suspected':\n patient_dict['status'] = 'negative'\n \n # Set default unit location\n patient_dict['default_unit_location'] = value['Site Postcode']\n \n # Get subunits for default unit\n if patient_dict['default_unit_location'] == 'HOME':\n patient_dict['default_unit'] = ['HOME']\n else:\n master_unit = value['Site']\n subunits = list(self._units.unit_info[\n self._units.unit_info['unit']==master_unit].index)\n patient_dict['default_unit'] = subunits\n \n # Set inpatient-related parameters\n patient_dict['require_inpatient'] = (\n True if self._params.requiring_inpatient_random.sample() < \n self._params.proportion_pos_requiring_inpatient else False)\n if patient_dict['require_inpatient']:\n # Over-write pos LoS (as used for outpatient care)\n patient_dict['time_positive'] = self._params.time_pos_before_inpatient.sample()\n patient_dict['inpatient_los'] = self._params.time_inpatient.sample()\n else:\n patient_dict['inpatient_los'] = 0\n\n # Create patient and add to patient population\n patient = Patient(\n self._env, patient_dict, self.allocate, self._params, self.pop, self._units)\n self.pop.patients[value['Patient ID']] = patient\n \n # Allocate patient to unit\n self.allocate.load_patient(patient)\n \n # Add default travel time to Population list\n self.pop.default_travel_times.append(patient.default_time)\n \n # Start patient virus progression\n self._env.process(patient.patient_virus_progress())",
"def populate_sample_info(project):\n\tprint 'populating sample info'\n\tsamples = mock_sample_get_method_1()\n\tfor s in samples:\n\t\tp = BaseSample()\n\t\tp.name = s\n\t\tp.project = project\n\t\tp.save()",
"def add_population(self, population):",
"def gen_population(self, size):\n population = []\n for i in range(size):\n population.append(self.gen_candidate(self.target))\n return(population)",
"def generation_initial_population():\n pass",
"def record_snapshots(self, population):\n if pan.skip(pan.SNAPSHOT_RATE_) or len(population) == 0:\n return\n\n # genotypes\n df_gen = pd.DataFrame(np.array(population.genomes.reshape(len(population), -1)))\n df_gen.reset_index(drop=True, inplace=True)\n df_gen.columns = [str(c) for c in df_gen.columns]\n df_gen.to_feather(self.paths[\"snapshots_genotypes\"] / f\"{pan.stage}.feather\")\n\n # phenotypes\n df_phe = pd.DataFrame(np.array(population.phenotypes))\n df_phe.reset_index(drop=True, inplace=True)\n df_phe.columns = [str(c) for c in df_phe.columns]\n df_phe.to_feather(self.paths[\"snapshots_phenotypes\"] / f\"{pan.stage}.feather\")\n\n # demography\n dem_attrs = [\"ages\", \"births\", \"birthdays\"]\n demo = {attr: getattr(population, attr) for attr in dem_attrs}\n df_dem = pd.DataFrame(demo, columns=dem_attrs)\n df_dem.reset_index(drop=True, inplace=True)\n df_dem.to_feather(self.paths[\"snapshots_demography\"] / f\"{pan.stage}.feather\")",
"def populate_generation(vars):\n number_of_processors = int(vars[\"number_of_processors\"])\n\n # Determine which generation it is and how many mutations\n # to make\n\n num_mutations = vars[\"number_of_mutants\"]\n\n # Get the Source compound list. This list is the full population from\n # either the previous generations or if its Generation 1 than the its the\n # entire User specified Source compound list If either has a SMILES that\n # does not sanitize in RDKit it will be excluded and a printout of its\n # Name and SMILES string will be printed.\n\n # Total Population size of this generation\n total_num_desired_new_ligands = num_mutations\n\n print(\"MAKE MUTATIONS\")\n # Making Mutations\n\n # Package user vars specifying the Reaction library to use for mutation\n rxn_library_variables = [\n vars[\"rxn_library\"],\n vars[\"rxn_library_file\"],\n vars[\"function_group_library\"],\n vars[\"complementary_mol_directory\"],\n ]\n\n # List of SMILES from mutation\n new_mutation_smiles_list = []\n\n seed_list = get_complete_list_prev_gen_or_source_compounds(vars)\n # Save seed list\n save_ligand_list(\n vars[\"output_directory\"],\n seed_list,\n \"Seed_List\",\n )\n\n seed_list_mutations = copy.deepcopy(seed_list)\n\n # Make all the required ligands by mutations\n while len(new_mutation_smiles_list) < num_mutations:\n sys.stdout.flush()\n\n num_mutants_to_make = num_mutations - len(new_mutation_smiles_list)\n\n # Make all mutants\n new_mutants = Mutation.make_mutants(\n vars,\n 1,\n number_of_processors,\n num_mutants_to_make,\n seed_list_mutations,\n new_mutation_smiles_list,\n rxn_library_variables,\n )\n if new_mutants is None:\n # try once more\n new_mutants = Mutation.make_mutants(\n vars,\n 1,\n number_of_processors,\n num_mutants_to_make,\n seed_list_mutations,\n new_mutation_smiles_list,\n rxn_library_variables,\n )\n\n if new_mutants is None:\n break\n\n # Remove Nones:\n new_mutants = [x for x in new_mutants if x is not None]\n\n for i in new_mutants:\n new_mutation_smiles_list.append(i)\n if len(new_mutation_smiles_list) == num_mutations:\n break\n sys.stdout.flush()\n\n # save new_mutation_smiles_list\n save_ligand_list(\n vars[\"output_directory\"],\n new_mutation_smiles_list,\n \"Chosen_Mutants\",\n )\n\n if (\n new_mutation_smiles_list is None\n or len(new_mutation_smiles_list) < num_mutations\n ):\n print(\"\")\n print(\"\")\n print(\"We needed to make {} ligands through Mutation\".format(num_mutations))\n print(\n \"We only made {} ligands through Mutation\".format(\n len(new_mutation_smiles_list)\n )\n )\n print(\"\")\n print(\"\")\n raise Exception(\"Mutation failed to make enough new ligands.\")\n\n print(\"FINISHED MAKING MUTATIONS\")\n\n sys.stdout.flush()\n\n\n # make a list of all the ligands from mutations\n new_generation_smiles_list = []\n full_generation_smiles_list = []\n for i in new_mutation_smiles_list:\n new_generation_smiles_list.append(i)\n full_generation_smiles_list.append(i)\n\n if len(full_generation_smiles_list) < total_num_desired_new_ligands:\n print(\"We needed \", total_num_desired_new_ligands)\n print(\"We made \", len(full_generation_smiles_list))\n print(\n \"population failed to make enough mutants... 
\\\n Errors could include not enough diversity, too few seeds to \\\n the generation, or all of the seed lack functional groups \\\n for performing reactions\"\n )\n return None, None, None\n\n # Save the Full Generation\n smiles_to_convert_file, new_gen_folder_path = save_generation_smi(\n vars[\"output_directory\"],\n new_generation_smiles_list,\n \"New_SMILES\",\n )\n\n sys.stdout.flush()\n\n # CONVERT SMILES TO .sdf USING GYPSUM and convert .sdf to .pdb with rdkit\n # This will output sdf files into a folder. The .smi.0.sdf file is not a\n # valid mol, but all the others will be valid the 1st Smiles in the\n # original .smi file is saved as .smi.1.sdf and 2nd file is saved as\n # .smi.2.sdf\n if vars[\"convert_to_3D\"] is True:\n conversion_to_3d.convert_to_3d(vars, smiles_to_convert_file, new_gen_folder_path)\n get_list_of_3D_SMILES(vars, new_generation_smiles_list)\n\n sys.stdout.flush()\n\n return smiles_to_convert_file, full_generation_smiles_list",
"def initialize_population(self):\n # Convert number of genes to the largest integer under the binary\n # representation with that number of bits.\n # Do this here instead of in self.__init__() because this is specific to\n # the representation, and this method should be overridden when\n # subclassing\n self._indiv_size = pow(2, self._num_genes) - 1\n pop = [self._random.randint(0, self._indiv_size) for _ in\n range(self._pop_size)]\n self._pop = self._rank_pop(pop)",
"def _sample_applicants(self, rng):\n env_params = self.initial_params\n original_test_scores = []\n group_ids = []\n mixture_ids = rng.multinomial(env_params.num_applicants,\n env_params.feature_params.mix_weight)\n for group_id, count in enumerate(mixture_ids):\n group_ids.extend([group_id] * count)\n original_test_scores.extend(\n np.clip(\n rng.normal(env_params.feature_params.mu[group_id],\n env_params.feature_params.sigma[group_id], count),\n env_params.score_params.min, env_params.score_params.max))\n return original_test_scores, group_ids",
"def get_recording_populations(self):\n results = []\n with self.transaction() as cursor:\n for row in cursor.execute(\n \"\"\"\n SELECT label\n FROM population\n \"\"\"):\n results.append(str(row[\"label\"], 'utf-8'))\n return results"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
getstudic loads the student.json file into a dictionary | def getstudic():
global studentdicsingleton
if studentdicsingleton == None:
try:
studentdicsingleton = json.load(open("student.json","r"))
except Exception as e:
            # TODO: exception handling in plExecutor
            studentdicsingleton = dict() # returns an empty dict
return studentdicsingleton | [
"def readstu(self) -> None:\n path :str = os.path.join(self.directory_path,\"students.txt\")\n for cwid, name, major in file_reader(path, 3, sep='\\t',header=True): \n b: Student = Student(cwid,name,major)\n self.studict[cwid]=b",
"def load_json(filename):\n with open(filename, encoding='utf-8') as f:\n students = json.load(f)\n return students",
"def student(self,path):\n try:\n sfile = open(path, 'r')\n except FileNotFoundError:\n logging.exception('There is an error with opening the student file in this directory')\n else:\n if sfile.readlines() == ['\\n']:\n print('This file is an empty!')\n else:\n sfile.seek(0)\n for lines in sfile:\n studentid, studentname, studentmajor = lines.strip().split('\\t')\n self.studentdict[studentid] = Student(studentid,studentname,studentmajor)",
"def get_students():\n try:\n with open(STUDENTS_FILE, \"r\") as fp:\n return json.load(fp)\n except FileNotFoundError:\n # Returns an empty list if the file does not exist\n return list()",
"def load_student():\n tablename = 'student'\n data = jsonify_seed_data(tablename)\n\n for item in data[tablename]:\n new_item = Student(\n name_first=item['name_first'],\n name_last=item['name_last'],\n rank_stripes=item['rank_stripes'],\n rank_type=item['rank_type'],\n program=item['program'],\n )\n db.session.add(new_item)\n db.session.commit()",
"def _get_students(self,path):\n try:\n for cwid, name, major in file_reading_gen(path, 3, sep=\"\\t\",header=False):\n self._students[cwid] = Student(cwid,name,major)\n except FileNotFoundError as fnfe:\n print(fnfe)\n except ValueError as ve:\n print(ve)",
"def read_json_files():\n\n jsons = dict()\n with open('json_files/config.json') as file:\n data_conf = json.load(file)\n jsons['base_url'] = data_conf['base_url']\n jsons['implicit_wait'] = data_conf['implicit_wait']\n jsons['os'] = data_conf['os']\n jsons['is_headless'] = (data_conf['headless'] == 'True')\n\n with open('json_files/state.json') as file:\n data_states = json.load(file)\n jsons['list_states'] = data_states['states']\n\n with open('json_files/district.json') as file:\n jsons['dict_districts'] = json.load(file)\n\n with open('json_files/sub_district.json') as file:\n jsons['dict_sub_districts'] = json.load(file)\n\n with open('json_files/gram_panchayat.json') as file:\n jsons['dict_gram_panchayats'] = json.load(file)\n\n with open('json_files/village.json') as file:\n jsons['dict_villages'] = json.load(file)\n\n return jsons",
"def save_students(students):\n data = {student[\"id\"]: student for student in students}\n with open(STUDENTS_FILE, \"w\") as fp:\n json.dump(data, fp)",
"def __init__(self, file_name):\n StudentRepo.__init__(self)\n self.file_name = file_name\n file = open(self.file_name, \"r\")\n for obj in json.load(file):\n self.add_student(Student(obj[\"sid\"], obj[\"name\"]))\n file.close()",
"def __load_data(self):\n # Load students\n print(\"Loading students.tsv ...\")\n with open(\"students.tsv\", \"r\") as f:\n for line in f:\n student_no, name = line.strip().split(\"\\t\")\n self.__students[student_no] = name\n\n # Load courses\n print(\"Loading courses.tsv ...\")\n with open(\"courses.tsv\", \"r\") as f:\n for line in f:\n courses_no, name = line.strip().split(\"\\t\")\n self.__courses[courses_no] = name\n # Load grades\n print(\"Loading grades.tsv ...\")\n #with open(\"grades.tsv\", \"r\") as f:\n # for line in f:\n # semesters_no, courses_no = line.strip().split(\"\\t\")\n # self.__semesters[semesters_no] = name",
"def load_dictionary(self):\n with open(self.default_dict_path) as file_object:\n self.dictionary = json.load(file_object)",
"def load_student_data():\n display.add_message(\"Downloading student data...\", color=display.YELLOW)\n osis_data = send_request(\"/importcsv\", data={})\n handle_response(osis_data, out=\"STUDENTS.csv\", save=True)\n with open(\"STUDENTS.csv\", \"r\") as csvfile:\n next(csvfile)\n csv_reader = csv.reader(csvfile)\n for row in csv_reader:\n _id = row[0]\n name = row[1]\n STUDENT_DATA[name] = int(_id)",
"def loadjson(self):\n\n\t\tif os.path.isfile(self.filename):\n\t\t\twith open(self.filename, 'r+') as outfile:\n\t\t\t \tdata = json.load(outfile)\n\t\t\t \tself.cache.update(data)\n\t\telse:\n\t\t\tprint \"File %s created\" % (self.filename)",
"def json_loads(self, data_file):\n module_logger.info('------ Load student responses from JSON file ::: {}'.format(data_file))\n\n del self.__feature_names__\n del self.__student_responses__\n try:\n with open(data_file.replace('.csv', '.json'), mode='rb') as jfile:\n dat = json.load(jfile)\n self.__feature_names__ = dat[0]\n self.__student_responses__ = dat[1]\n except:\n module_logger.exception('****** Failed loading student responses from JSON file')\n self.__feature_names__ = None\n self.__student_responses__ = None",
"def _santas_from_file(self):\n with open(self._path_to_santas, \"r\") as fp:\n self._santas = json.load(fp)",
"def load_JSON(self):\n try:\n with open(self.source_path, 'r') as to_read:\n self.source_dictionary = json.load(to_read)\n except IOError:\n print (\"Cannot find source file\")",
"def readgra(self) -> None:\n path :str = os.path.join(self.directory_path,\"grades.txt\")\n for stucwid, coursename, grade, instcwid in file_reader(path, 4, sep='\\t',header=True): \n if stucwid not in self.studict.keys():\n print(f\" There is no Student with CWID: {stucwid}\")\n continue\n if instcwid not in self.instdict.keys():\n print(f\" There is no Instructor with CWID: {instcwid}\")\n continue\n self.studict[stucwid].set_courses(coursename,grade)\n self.instdict[instcwid].set_courses(coursename)",
"def readins(self) -> None:\n path :str = os.path.join(self.directory_path,\"instructors.txt\")\n for cwid, name, department in file_reader(path, 3, sep='\\t',header=True): \n b: Instructor = Instructor(cwid,name,department)\n self.instdict[cwid]=b",
"def _read_stats_file(path):\n with open(path, \"r\") as f:\n dct = json.load(f)\n dct = {int(k): v for k, v in dct.items()}\n return dct"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
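The `getstudic` record above lazily caches student.json in a module-level variable and falls back to an empty dict on any failure. The sketch below shows the same pattern with explicit file handling; the function and variable names are illustrative, not the original ones.

```python
import json

_student_cache = None

def get_student_dict(path="student.json"):
    """Lazily load and cache the student JSON file (illustrative helper)."""
    global _student_cache
    if _student_cache is None:
        try:
            with open(path, "r") as fh:
                _student_cache = json.load(fh)
        except (OSError, json.JSONDecodeError):
            _student_cache = {}   # same fallback as the record: an empty dict
    return _student_cache
```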
Initializes data and establishes a connection with the server, where the payload's properties are registered, updated and tracked. | def __init__(self):
self.files_handler = None
self.client = BaseClient()
# connects to server and starts session
self.client.connect(SERVER_IP, PORT)
self.session = self.client.get_session()
# sends payload's properties
self.session.send_text(JSON_TEXT)
        # receives payload's status
response = self.session.receive().get_data()
print (response)
response = json.loads(response)
is_active = response['active']
if is_active:
# continues active payload
self.handle_active_payload(response)
else:
self.handle_inactive_payload(response) | [
"def _initialize_observing(self):\n self.build_client_snapshot()\n self.build_shared_snapshot()\n self.load_local_dir_state()\n self.create_observer()\n self.observer.start()\n self.sync_with_server()",
"def __init__(self, store):\n self.data = dict()\n self.store = store\n self.__load()",
"def initializeData(self):\n EntityBase.initializeData(self)",
"def setup(self):\n # Listen for all updates\n self._init_webhooks()",
"def setup(self):\n self.server._clients[self.client_address] = self\n self.server._callback(TCPServerEvent.ClientConnected, self)\n self.queue = queue.Queue()\n self.log = logging.getLogger('Request')",
"def init_datastores(self):\n self.data = DatastoreLegacy(self.id_)\n self.data.subscribe(self.data_change)\n self.class_data = DatastoreLegacy(type(self).__name__)\n self.class_data.subscribe(self.class_data_change)",
"def __init__(self):\n self.config = ConfigUtil.ConfigUtil('../../../data/ConnectedDevicesConfig.props')\n self.config.loadConfig()\n print('Configuration data...\\n' + str(self.config)) \n print('============= Setting Done! =============')\n self.host = self.config.getProperty(ConfigConst.COAP_GATEWAY_SECTION, ConfigConst.DEFAULT_HOST )\n self.port = int(self.config.getProperty(ConfigConst.COAP_GATEWAY_SECTION, ConfigConst.DEFAULT_COAP_PORT))\n self.serverAddr = (self.host, self.port)\n print('URL(IP): ' + str(self.serverAddr))\n self.url = \"coap://\" + self.host + \":\" + str(self.port) + \"/temp\"",
"def connection_setup(self):\n\n self.logger.debug(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr,\n HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n\n # create the poller\n if self.poller is None:\n self.poller = self.dispatcher.get_poll()\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, self.dispatcher.EV_FD_IN, self.host_management_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, self.dispatcher.EV_FD_IN, self.host_management_cb)",
"def setUp(self):\n with open('Tests/socket_data.json') as f:\n sd = f.read()\n self.sd = sd\n\n # conn = create_connection('wss://qa.sockets.stackexchange.com')\n # conn.send('1-questions-newest-python')\n # self.conn = conn",
"def __init__(self):\n self._events = self._create_event_objects()",
"def initialize(self, store):\r\n assert isinstance(store, stores.BaseStore)\r\n self.messages = Queue()\r\n self.store = store\r\n self.store.register(self)",
"async def init(self):\n pass",
"def __init__(self, server, jsonconn):\n ##Reference to server\n self.server = server\n ##JSON connections\n self.jsonconnections = jsonconn\n ##ongoing queries\n self.__q = {}\n\n server.register_event_handler(jsoncomm.message.name, self)\n server.register_event_handler(queryresponse.name, self)",
"def setup(self):\n self.news_api = NewsApiClient(api_key=news_api_key)",
"def __init__(self,\n *,\n runtime_data: List['StateStoreResponse'] = None) -> None:\n self.runtime_data = runtime_data",
"def _connect(self):\n\n wrapper_headers, wrapper_body = self._create_wrapper_request()\n\n self.wrapper_user = self._get_wrapper_user(wrapper_headers)\n self.wrapper_key = self._get_wrapper_key(wrapper_body)\n\n self.websocket = self._get_websocket()\n\n return self.init()",
"def initialize_data(self):\n self.ref_data = {}\n self.cur_data = {}",
"def __client(self):\n\t\tself.__set_event_callbacks()\n\t\tself.__connect_with_credentials()\n\t\tself.__subscribe()",
"def setUp(self):\n self.host = os.getenv('RIAK_HOST', 'localhost')\n self.sink = ReplSink(host=self.host, port=8098, queue='q1_ttaaefs')\n self.test_data = b'{\"test\":\"data\"}'\n self.http = urllib3.HTTPConnectionPool(host=self.host, port=8098, retries=False)\n\n empty = False\n while not empty:\n rec = self.sink.fetch()\n empty = rec.empty"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
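The constructor in the record above depends on a project-specific `BaseClient`, so it cannot run in isolation. The sketch below reproduces only the handshake shape with the standard library: send the payload's properties as JSON, read one JSON response, and branch on its "active" flag. The host, port, and newline-delimited framing are assumptions, not the original protocol.

```python
import json
import socket

def handshake(host, port, properties):
    """Send properties as JSON and return (is_active, full_response)."""
    with socket.create_connection((host, port)) as sock:
        sock.sendall((json.dumps(properties) + "\n").encode("utf-8"))
        response = json.loads(sock.makefile("r", encoding="utf-8").readline())
    return bool(response.get("active")), response

# usage (requires a server speaking this assumed framing):
# active, status = handshake("127.0.0.1", 9000, {"id": "payload-1"})
```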
Notifies the server when a new file has been encrypted | def notify_file_encryption(self, _file):
self.session.send_text('{{"action": {}, "file":"{}"}}'.format(FILE_ENCRYPTED, _file)) | [
"def handler(event, context):\n print('encrypting file')\n\n # Get file key from the event\n s3_key = event['Records'][0]['s3']['object']['key']\n\n # get string contents of file\n file_string = aws.get_file_as_string(s3_key)\n\n # encrypt\n encrypted_string = encrypter.encrypt(file_string, 12345678)\n\n # turn encrypted string into a file obj\n fileobj = io.BytesIO(encrypted_string.encode(\"utf-8\"))\n\n # create key for encrypted file\n key = 'processed/' + s3_key.split('/')[1]\n\n # upload\n aws.upload_fileobj(fileobj, key)",
"def encryptFile(self):\n user_key = str(self.input_password.get())\n if user_key == '':\n messagebox.showerror('Error', 'Password can not be empty')\n return\n input_filename = filedialog.askopenfilename()\n output_filename = input_filename + '.encrypted'\n fd_input = open(input_filename, \"rb\")\n fd_output = open(output_filename, \"wb\")\n # TODO: IV <- get with good entropy\n iv = time.time_ns()\n gamma = get_sha512(user_key)\n encrypt_file(fd_input, fd_output, user_key, iv) # TODO: process return code\n fd_input.close()\n fd_output.close()\n messagebox.showinfo('Information', 'Encryption completed')",
"def encryptFile(filename, key):\n with open(filename, \"rb\") as pfile:\n pdata = pfile.read()\n key = sha256(key)\n key = base64.urlsafe_b64encode(key)\n f = Fernet(key)\n cdata = f.encrypt(pdata)\n cdata = base64.urlsafe_b64decode(cdata)\n cfilename = filename+\".mfp\"\n with open(cfilename, \"wb\") as cfile:\n cfile.write(cdata)\n print(\"[i] File encrypted to new file: {}\".format(cfilename))\n\n return",
"def encrypter(img_file, image_opener, message):\n\n image = image_opener(img_file)\n\n if len(message) == 0:\n raise ValueError('Message is empty')\n\n new_image = image.copy()\n enc = encrypt_jpg(new_image, message)\n enc()\n\n # This var below can be edited to change the name of the encrypted file\n new_image_name = \"secret.png\"\n new_image.save(new_image_name, str(new_image_name.split(\".\")[1].upper()))\n print(\"Done\")",
"def encrypt_file(ctx, filename, key):\n\n click.echo(f\"Encrypting {filename}...\")\n\n # opening the key\n with open(key, 'rb') as file_key:\n key = file_key.read()\n\n # using the generated key\n fernet = Fernet(key)\n\n # opening the original file to encrypt\n with open(filename, 'rb') as file:\n original = file.read()\n\n # encrypting the file\n encrypted = fernet.encrypt(original)\n\n # opening the file in write mode and\n # writing the encrypted data\n with open(filename, 'wb') as encrypted_file:\n encrypted_file.write(encrypted)\n\n click.echo(f\"{filename} encrypted successfully!\")",
"def on_modified(self, e):\n logger.info('Modify event on file: {}'.format(e.src_path))\n new_md5 = self.hash_file(e.src_path)\n rel_path = self.relativize_path(e.src_path)\n data = {\n 'filepath': rel_path,\n 'md5': new_md5\n }\n if self._is_shared_file(rel_path):\n # if it has modified a file tracked by shared snapshot, then force the re-download of it\n try:\n self.shared_snapshot.pop(rel_path)\n except KeyError:\n pass\n else:\n # Send data to connection manager dispatcher and check return value.\n # If all go right update client_snapshot and local_dir_state\n response = self.conn_mng.dispatch_request('modify', data)\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n self.client_snapshot[rel_path] = [event_timestamp, new_md5]\n self.update_local_dir_state(event_timestamp)\n logger.debug('Modify event completed.')\n else:\n self.stop(1, response['content'])",
"def fileCreated(self, the_file, ctx=None):\n pass",
"def encrypt_file(fileName, password):\n #Import os.path for checking if a file exists later\n from os import path\n #If the file the user wish to encrypt does not exist then raise an IO error\n if not path.isfile(fileName):\n raise IOError('Failed to find file with name: ' + fileName)\n #Read all text from the file\n with open(fileName, 'r') as f:\n fileText = f.read()\n #Encrypt all the text with a password\n encryptedText = str(encrypt(fileText, password))\n #Generate encrypted file's file name\n encryptedFileName = fileName.split('.')[0] + '.mycrypto'\n index = 2\n while path.isfile(encryptedFileName):\n #If a file with the same name already exists then change the name of the file\n encryptedFileName = fileName.split('.')[0] + '(' + str(index) + ')' + '.mycrypto'\n index += 1\n del index\n #write the new file\n with open(encryptedFileName, 'w') as file:\n file.write(encryptedText)",
"def create_file(self):\n\n if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:\n raise errors.AnsibleError(CRYPTO_UPGRADE)\n\n if os.path.isfile(self.filename):\n raise errors.AnsibleError(\"%s exists, please use 'edit' instead\" % self.filename)\n\n # Let the user specify contents and save file\n self._edit_file_helper(cipher=self.cipher_name)",
"def encr_fil(filname, pwd, openssl, ofpath):\n call([openssl, \"aes-256-cbc\", \"-a\", \"-salt\", \"-in\", filname, \"-out\", ofpath, \"-pass\", \"pass:\"+pwd])",
"def test_file_encryption(self):\n\n given = b\"Hello, World!\"\n expected = b\"Hello, World!\"\n\n input_file = NamedTemporaryFile()\n output_file = NamedTemporaryFile()\n decrypted_file = NamedTemporaryFile()\n\n input_file.write(given)\n\n encryptor = FileEncryption()\n encryptor.set_key(self.encryption_key)\n encryptor.set_iv(self.iv_key)\n\n input_file.seek(0)\n encryptor.encrypt_file(input_file, output_file)\n\n self.assertTrue(os.path.exists(input_file.name))\n self.assertTrue(os.path.exists(output_file.name))\n\n output_file.seek(0)\n\n encryptor.decrypt_file(output_file, decrypted_file)\n\n decrypted_file.seek(0)\n\n self.assertEqual(expected, decrypted_file.read())",
"def end_of_data(self, version, serial, nonce, refresh, retry, expire):\n\n # Run the base method\n super(ClientChannel, self).end_of_data(version, serial, nonce, refresh, retry, expire)\n\n # Set up our new output directory\n dn = \"%s.%s\" % (self.args.bgpsec_key_directory, rpki.rtr.channels.Timestamp.now())\n ln = \"%s.%s\" % (self.args.bgpsec_key_directory, \".tmp\")\n if os.path.exists(ln):\n os.unlink(ln)\n os.makedirs(dn)\n\n # Write all the keys\n for asn, gski, key in self.sql.execute(\"SELECT asn, ski, key FROM routerkey\"):\n with open(find_free_name(\"%s/%s.%s.%%d.key\" % (dn, asn, gski)), \"wb\") as f:\n f.write(key.decode(\"base64\"))\n\n # Install the new directory\n os.symlink(os.path.basename(dn), ln)\n os.rename(ln, self.args.bgpsec_key_directory)\n\n # Clean up old output directories\n pattern = \".[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T[0-9][0-9]:[0-9][0-9]:[0-9][0-9]Z\"\n for gn in glob.iglob(self.args.bgpsec_key_directory + pattern):\n if gn != dn:\n shutil.rmtree(gn)",
"def encrypt_file(filename, public_key):\n f = Fernet(public_key)\n with open(filename, \"rb\") as file:\n # read all file data\n file_data = file.read()\n # encrypt data\n encrypted_data = f.encrypt(file_data)\n # write the encrypted file\n with open(filename, \"wb\") as file:\n file.write(encrypted_data)",
"def files_changed(self):\n # TODO: implement",
"def fileModified(self, the_file, ctx=None):\n pass",
"def encryptToFile(self, file_data, filename, key):\n f = Fernet(key)\n # encrypt data\n if type(file_data) != bytes:\n file_data = bytes(file_data)\n encrypted_data = f.encrypt(file_data)\n # write the encrypted file\n with open(filename, \"wb\") as file:\n file.write(encrypted_data)",
"def apply_cipher(func):\n text = args.in_file.read()\n changed_text = func(text)\n args.out_file.write(changed_text)",
"def encrypt_file(file, delete=False):\n key = Fernet.generate_key()\n fer = Fernet(key)\n\n with open(file, 'rb') as f:\n encrypted_file = fer.encrypt(f.read())\n\n with open(file + '.enc', 'wb') as f:\n f.write(encrypted_file)\n\n if delete:\n os.remove(file)\n\n return key",
"def advapi32_WriteEncryptedFileRaw(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pfImportCallback\", \"pvCallbackContext\", \"pvContext\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
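The notification in the record above builds its JSON by string formatting, which breaks if the file path contains quotes or backslashes. A hedged alternative using `json.dumps` is shown below; the `FILE_ENCRYPTED` value is a placeholder, since the real action code is defined elsewhere and not part of the record.

```python
import json

FILE_ENCRYPTED = 2   # placeholder: the real action code is defined elsewhere

def encryption_notice(path):
    # json.dumps escapes quotes and backslashes in the path for us
    return json.dumps({"action": FILE_ENCRYPTED, "file": path})

print(encryption_notice('C:\\Users\\alice\\my "final" report.docx'))
```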
Unpack a bytearray from chunks of n bytes | def unpack(chunks, size):
bts = bytearray()
for c in chunks:
b = []
for _ in range(size):
b.append(c & 0xff)
c = c >> 8
# bts.extend(filter(lambda a: a != 0, reversed(b)))
bts.extend(reversed(b))
return bts | [
"def read_bytes(iterable, n=0):\n\n iterator = iter(iterable)\n value = bytearray()\n \n for i in range(n):\n \n nextByte = next(iterator)\n \n if isinstance(nextByte, int):\n value.append(nextByte)\n elif isinstance(nextByte, bytes):\n value += next(iterator)\n \n return value",
"def bytes_to_blocks(n: int, body: bytes) -> List[Tuple[int, ...]]:\n\n # Honestly I don't fully understand why this works. Seems like functional\n # magic to me.\n # See:\n # https://stackoverflow.com/questions/9475241/split-string-every-nth-character#comment75857079_9475538\n #\n # Also, should I really be filling values with 0 if there's a remainder????\n return list(zip_longest(*[iter(body)] * n, fillvalue=0))",
"def split_byte_string(s, n):\n while len(s) > n:\n k = n\n yield s[:k]\n s = s[k:]\n yield s",
"def read_n_bytes(s, n):\n bytes_read = 0\n _buffer = []\n while bytes_read < n:\n data = s.recv(n - bytes_read)\n if data == b'':\n break\n\n bytes_read += len(data)\n _buffer.append(data)\n\n result = b''.join(_buffer)\n if len(result) != n:\n log.warning(\"expected {} bytes but read {}\".format(n, len(result)))\n\n return b''.join(_buffer)",
"def read_bytes(stream, n):\n return stream.read(n)",
"def read_n_bytes(resp, sock, n, deadline):\n buf = b''\n while len(buf) < n:\n buf += read_by_deadline(resp, sock, deadline, n - len(buf))\n return buf",
"def read_in_chunks(buffer, chunk_size=1024 * 1024.0 * 4):\n\n while True:\n data = buffer.read1(chunk_size)\n if not data:\n break\n yield data",
"def defragment(self, chunks: list) -> np.ndarray:\n array_bytes = b\"\".join(c[1] for c in chunks)\n array = pickle.loads(array_bytes)\n return array",
"def read_n_bytes(self, n):\n #\n #\n #\n #\n return self.bus.readfrom(self.bus_addr, n)",
"def chunks_by_element(arr, n):\n return [arr[i:i+n] for i in range(0, len(arr), n)]",
"def split_bigfile(iterable, n):\n iterable = iter(iterable)\n while True:\n try:\n yield chain([next(iterable)], islice(iterable, n-1))\n except StopIteration:\n return",
"def frame_split_size(frames, n=BIG_BYTES_SHARD_SIZE):\n if not frames:\n return frames\n\n if max(map(len, frames)) <= n:\n return frames\n\n out = []\n for frame in frames:\n if len(frame) > n:\n if isinstance(frame, bytes):\n frame = memoryview(frame)\n for i in range(0, len(frame), n):\n out.append(frame[i: i + n])\n else:\n out.append(frame)\n return out",
"def unpackb(packed, **kwargs):\n unpacker = Unpacker(None, **kwargs)\n unpacker.feed(packed)\n try:\n ret = unpacker._unpack()\n except OutOfData:\n raise UnpackValueError(\"Data is not enough.\")\n if unpacker._got_extradata():\n raise ExtraData(ret, unpacker._get_extradata())\n return ret",
"def readbytes(inp):\n buf = array(\"B\")\n done = False\n while not done:\n temp = inp.read(BLOCK_SIZE)\n buf.fromstring(temp)\n done = len(temp) < BLOCK_SIZE\n return buf",
"def split_cipher(ciphertext, n):\n blocks = [[] for _ in range(n)]\n for i, byte in enumerate(ciphertext):\n blocks[i%n].append(byte)\n return [bytes(b) for b in blocks]",
"def convert_byte_array_to_list(source_bytes):\n cumulated_length = 0\n byte_sum = 0\n lengths_lst = []\n byte_lst = []\n for byte in source_bytes:\n byte_lst += [byte]\n byte_sum += byte\n cumulated_length += 1\n if cumulated_length > 0x7F:\n cumulated_length = 1\n lengths_lst += [0x00]\n lengths_lst += [cumulated_length]\n byte_sum += cumulated_length\n return byte_lst, lengths_lst, byte_sum",
"def extract_chunk_data(data: bytes) -> list:\n chunks = []\n\n for x, z in tqdm.tqdm(\n itertools.product(range(REGION_WIDTH_CHUNKS), range(REGION_WIDTH_CHUNKS)),\n desc=\"extracting region data\",\n total=REGION_TOTAL_CHUNKS,\n ):\n location_data_start = 4 * (\n (x % REGION_WIDTH_CHUNKS) + (z % REGION_WIDTH_CHUNKS) * REGION_WIDTH_CHUNKS\n )\n location_data_end = location_data_start + 4\n location_data = data[location_data_start:location_data_end]\n offset = int_from_bytes(location_data[0:3])\n sector_count = location_data[3]\n\n timestamp_data_start = location_data_start + 4096\n timestamp_data_end = timestamp_data_start + 4\n timestamp = int_from_bytes(data[timestamp_data_start:timestamp_data_end])\n\n chunk = Chunk(x, z, offset, sector_count, timestamp)\n if not chunk.empty:\n chunk_data_start = offset * 4096\n chunk_data_end = chunk_data_start + sector_count * 4096\n chunk_data = data[chunk_data_start:chunk_data_end]\n nbt_data = extract_nbt_data(chunk_data)\n chunk.nbt_data = nbt_data\n chunks.append(chunk)\n return chunks",
"def chunks(data_list, chunk_size):\n data_info, frequency, bits = data_list\n\n some_data_list = []\n for i in range(0, len(data_info), chunk_size):\n some_data_list.append(data_info[i:i+chunk_size])\n return some_data_list",
"def packints_decode(\n data: bytes,\n /,\n dtype: numpy.dtype | str,\n bitspersample: int,\n runlen: int = 0,\n *,\n out=None,\n) -> numpy.ndarray:\n if bitspersample == 1: # bitarray\n data_array = numpy.frombuffer(data, '|B')\n data_array = numpy.unpackbits(data_array)\n if runlen % 8:\n data_array = data_array.reshape(-1, runlen + (8 - runlen % 8))\n data_array = data_array[:, :runlen].reshape(-1)\n return data_array.astype(dtype)\n if bitspersample in (8, 16, 32, 64):\n return numpy.frombuffer(data, dtype)\n raise NotImplementedError(\n f'packints_decode of {bitspersample}-bit integers '\n \"requires the 'imagecodecs' package\"\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a column definition, return the bigquery SchemaField object | def build_schema_field(column):
mode = column.get('mode', 'NULLABLE')
if column['type'] != "RECORD":
return bigquery.schema.SchemaField(column['name'], column['type'], mode)
fields = set([build_schema_field(field) for field in column['fields']])
return bigquery.schema.SchemaField(column['name'], column['type'], mode, fields=fields) | [
"def column_to_bq_schema(self) -> SchemaField:\n kwargs = {}\n if len(self.fields) > 0:\n fields = [field.column_to_bq_schema() for field in self.fields]\n kwargs = {\"fields\": fields}\n\n return SchemaField(self.name, self.dtype, self.mode, **kwargs)",
"def get_column_def(self):\n return '{} {}'.format(self.db_field_name, self.db_type)",
"def build_structfield_for_column(column_name: str) -> StructField:\n\n # Note: if this function starts being called from code running on Spark workers, it might be better to create the\n # set of keys in the caller (or its caller, etc) rather than at the module level, so that it doesn't need to be\n # serialized to the workers. Trade-off between unnecessary re-computation of unchanging result within each executor\n # and unnecessary transfer from the driver of an object that can be easily derived from other transferred data.\n col_name_standardized = column_name.strip().upper()\n if col_name_standardized in desired_column_initial_names:\n (final_name, data_type) = attributes_for_col[col_name_standardized]\n else:\n final_name = column_name\n data_type = StringType\n # note: the () after data_type is needed to actually instantiate the class\n return StructField(final_name, data_type())",
"def generate_bq_schema(dataframe, default_type='STRING'):\n\n type_mapping = {\n 'i': 'INTEGER',\n 'b': 'BOOLEAN',\n 'f': 'FLOAT',\n 'O': 'STRING',\n 'S': 'STRING',\n 'U': 'STRING',\n 'M': 'TIMESTAMP'\n }\n\n fields = []\n for column_name, dtype in dataframe.dtypes.iteritems():\n fields.append({'name': column_name,\n 'type': type_mapping.get(dtype.kind, default_type)})\n\n return {'fields': fields}",
"def schematizeColumn (self, columnName, dataType, isNullable, extra):\n\n column = ColumnSchema (columnName, dataType, isNullable, extra)\n return column",
"def parse_field_table_column(coldef):\n parts = [p.strip() for p in coldef.split(\";\")]\n if len(parts) == 1:\n return {\"identifier\": coldef, \"type\": \"string\"}\n else:\n result = {\"identifier\": parts[0], \"type\": parts[1]}\n if result[\"type\"] == \"select\":\n result[\"options\"] = parts[2].split(\"|\")\n return result",
"def get_schema_field(self, *, term: str, is_source: bool = True) -> FieldModel:\n schema = self.schema_destination\n alt_schema = self.schema_source\n if is_source:\n schema = self.schema_source\n alt_schema = self.schema_destination\n field = schema.fields.get(name=term)\n if not field:\n field = alt_schema.fields.get(name=term)\n field = schema.fields.get(name=field.name)\n if not field:\n s = \"destination\"\n if is_source:\n s = \"source\"\n raise ValueError(f\"Field name is not recognised from the {s} schema fields ({term}).\")\n return field",
"def to_schema_field(field_class):\n class DynamicJSONField(field_class):\n def prepare_value(self, value):\n \"\"\"\n Use the raw field data in the JSON field.\n \"\"\"\n if value is None:\n return value\n return super().prepare_value(getattr(value, '_data', value))\n\n return DynamicJSONField",
"def columnFactory(column):\n def factory(context, field):\n return column\n return factory",
"async def _next_column_definition(self):\n defn = await self._next_packet(packet.ColumnDefinition)\n\n if defn.type == FIELD_TYPE.JSON:\n encoding = self.encoding\n elif defn.type in FIELD_TYPE.TEXT_TYPES:\n if defn.character_set == FIELD_TYPE.ENCODING_BINARY:\n encoding = None\n else:\n encoding = self.encoding\n else:\n encoding = \"ascii\"\n converter = serializer.from_mysql.get(defn.type)\n\n def convert(encoding, converter):\n def _convert(value):\n if encoding:\n value = value.decode(encoding)\n if converter:\n value = converter(value)\n return value\n return _convert\n\n defn.convert = convert(encoding, converter)\n return defn",
"def path_col(field: BaseField) -> Column:\n return field.COL",
"def struct_field(field: BaseField) -> StructField:\n return field._spark_struct_field # pylint: disable=protected-access",
"def field(self):\n\n _field = self.model._meta.fields.get(self.field_name, None)\n\n if isinstance(self._accessor, six.text_type):\n spec = self._accessor\n if spec[0] == ':':\n key_paths = spec[1:].split('.')\n # can be used to access nested JSONField\n for p in key_paths:\n try:\n p = int(p)\n except ValueError:\n pass\n _field = _field[p]\n elif callable(self._accessor):\n _field = self._accessor(_field)\n\n ctx = self.model._meta.database.get_sql_context()\n if self.field_type:\n _field = _field.cast(self.field_type().ddl_datatype(ctx).sql)\n\n return _field",
"def for_model_column(model_column: Column, **kwargs) -> 'W2Field':\n t = model_column.type\n f = model_column.name\n default = model_column.info.get('w2', {})\n if type(t) is Integer:\n w2field = W2Integer(f, **default)\n elif type(t) is String:\n default.setdefault('size', min(t.length * 4, 150))\n w2field = W2String(f, **default)\n elif type(t) is DateTime:\n w2field = W2DateTime(f, **default)\n else:\n w2field = W2Field(f, **default)\n w2field.set_options(**kwargs)\n return w2field",
"def _construct_schema(uuid):\n catalog_url = '{0}/api/catalog/v1?ids={1}'.format(URI, uuid)\n response = urllib.request.urlopen(catalog_url, context=context)\n catalog_data = json.load(response)[\"results\"][0][\"resource\"]\n\n schema = []\n for i in range(0, len(catalog_data[\"columns_field_name\"])):\n name = catalog_data[\"columns_field_name\"][i]\n field_type = _encode_datatype(catalog_data[\"columns_datatype\"][i])\n description = catalog_data[\"columns_description\"][i]\n schema.append(bigquery.SchemaField(name, field_type, mode='NULLABLE', description=description))\n\n return schema",
"def _read_column(self, column: Mapping, schema: str) -> MetabaseColumn:\n\n column_name = column.get(\"name\", \"\").upper().strip('\"')\n column_description = column.get(\"description\")\n\n metabase_column = MetabaseColumn(\n name=column_name,\n description=column_description,\n **self.read_meta_fields(column, METABASE_COLUMN_META_FIELDS),\n )\n\n fk_target_table = None\n fk_target_field = None\n\n for test in column.get(\"tests\") or []:\n if isinstance(test, dict):\n if \"relationships\" in test:\n relationships = test[\"relationships\"]\n fk_target_table = self.parse_ref(relationships[\"to\"])\n if not fk_target_table:\n logger().warning(\n \"Could not resolve foreign key target table for column %s\",\n metabase_column.name,\n )\n continue\n fk_target_field = relationships[\"field\"]\n\n self.set_column_foreign_key(\n column=column,\n metabase_column=metabase_column,\n table=fk_target_table,\n field=fk_target_field,\n schema=schema,\n )\n\n return metabase_column",
"def column_instance_factory(config):\n try:\n config.name\n config.desc\n config.type_name\n config.append\n config.reduce\n config.__str__\n return config.duplicate()\n except AttributeError:\n raise TypeError('Expected config to be instance of pyqp.column.Column, got %s' % \\\n type(config))",
"def _FieldRef(column):\n return 'f{column}'.format(column=column)",
"def GetEntryFromSchema(field_name, schema):\n\n def FindEntryFromSchema(field_name, schema):\n for entry in schema:\n if entry['name'] == field_name:\n return entry\n return None\n\n all_fields = field_name.split('.')\n for i in range(len(all_fields) - 1):\n entry = FindEntryFromSchema(all_fields[i], schema)\n if not entry or 'fields' not in entry:\n return None\n schema = entry['fields']\n entry = FindEntryFromSchema(all_fields[-1], schema)\n if not entry or 'fields' in entry:\n return None\n return entry"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a document and a set of column definitions, return the dictionary representing the row to upload to bigquery that contains only the fields matching the column definitions | def get_row(document, columns):
row_to_upload = {}
for column in columns:
try:
mongo_field = column.get('mongo_field')
if not mongo_field:
mongo_field = column.get('name')
if column['type'] != "RECORD":
if mongo_field == '_id':
# Mongo default document ObjectID, treat it specially
row_to_upload[column['name']] = str(document['_id'])
else:
if document is None:
document = {} # Just treat it as an empty dictionary for now
mongo_fields = mongo_field.split('.')
result = document
for field in mongo_fields:
result = result.get(field, {})
if result == {}:
# We ended up with a nothing, return null
result = None
row_to_upload[column['name']] = result
if column.get('mode', 'NULLABLE') == 'REPEATED' and result is None:
row_to_upload[column['name']] = []
else:
if column.get('mode', 'NULLABLE') == 'REPEATED':
row_to_upload[column['name']] = [get_row(doc, column['fields']) for doc in document.get(mongo_field, [])]
else:
row_to_upload[column['name']] = get_row(document.get(mongo_field, {}), column['fields'])
except KeyError:
if column.get('mode', 'NULLABLE') == 'REPEATED':
row_to_upload[column['name']] = []
else:
row_to_upload[column['name']] = None # if the document doesn't have the field, we just send a null instead
return row_to_upload | [
"def columns(self, column_list: list[str], documents: list[Document], ):\n filtered_list = []\n for document in documents:\n filtered_dict = {k: v for k, v in document.items() if k in column_list}\n filtered_list.append(Document(filtered_dict, document.doc_id))\n return filtered_list",
"def construct_schema(collection):\n columns_dict = {}\n columns = []\n for row in collection.find():\n for field in row.keys():\n field_type = get_type(field, row[field])\n if field not in columns_dict.keys():\n columns_dict[field] = field_type\n else:\n union_type = unify_types(columns_dict[field], field_type)\n columns_dict[field] = union_type\n for field in sorted(columns_dict.keys()):\n # We sort the keys to make the constructed schema look nice\n # Possible failure modes up until this point:\n # Field is entirely empty arrays, type is undefined\n # Field is entirely empty objects\n # Field is invalid\n columns_dict[field] = remove_invalid_fields(columns_dict[field])\n if (columns_dict[field].get('type', 'INVALID') != 'INVALID' and\n not (columns_dict[field]['type'] == 'RECORD' and columns_dict[field]['fields'] == [])):\n columns.append(columns_dict[field])\n return columns",
"def filterKeys(document):\n return {key: document[key] for key in FIELDS_TO_KEEP}",
"def apply_doc_values(cls, mapping):\n def apply_doc_values(field_def):\n if field_def.get('type', 'object') in ('nested', 'object'):\n for _, nested_def in six.iteritems(field_def['properties']):\n apply_doc_values(nested_def)\n else:\n if 'doc_values' not in field_def:\n if field_def['type'] in DOC_VALUE_TYPES:\n field_def['doc_values'] = True\n elif (field_def['type'] == 'string' and\n field_def.get('index', '') == 'not_analyzed'):\n field_def['doc_values'] = True\n\n for _, multidef in six.iteritems(field_def.get('fields', {})):\n apply_doc_values(multidef)\n\n # Check dynamic templates\n dynamic_templates = mapping.get('dynamic_templates', {})\n for dyn_field, dyn_mapping in six.iteritems(dynamic_templates):\n for field, definition in six.iteritems(dyn_mapping['mapping']):\n apply_doc_values(definition)\n\n for field, definition in six.iteritems(mapping['properties']):\n apply_doc_values(definition)",
"def _extract_table_schema(\n key: str,\n cols: List,\n sheet_name: str = \"acceptance\",\n sheet_index: int = 1\n) -> dict:\n schema_df = gs.load_gs_to_dataframe(\n key=key,\n usecols=cols,\n skiprows=1,\n nrows=1,\n sheet_name=sheet_name,\n sheet_index=sheet_index,\n evaluate_formulas=True\n )\n schema_df.columns = [\n x.split(\".\")[0] for x in schema_df.columns\n ]\n schema_dict = dict(\n zip(\n list(schema_df.columns),\n list(schema_df.iloc[0, :])\n )\n )\n return schema_dict",
"def _DeserializeFields(self, def_rows):\n field_id_to_name = {\n field_id: field\n for field_id, _pid, _rank, field, _doc in def_rows}\n field_name_to_id = {\n field.lower(): field_id\n for field_id, field in field_id_to_name.iteritems()}\n\n return field_id_to_name, field_name_to_id",
"def _dataset(dataset, rows):\n try:\n return {\"fields\": dataset.in_fields}\n except AttributeError:\n if len(rows) > 0:\n return {'fields': Flatline.infer_fields(rows[0])}\n return None",
"def _column_fields_to_columns(fields, organization):\n\n def select_col_obj(column_name, table_name, organization_column):\n if organization_column:\n return [organization_column]\n else:\n # Try for \"global\" column definitions, e.g. BEDES. - Note the BEDES are not\n # loaded into the database as of 9/8/2016 so not sure if this code is ever\n # exercised\n obj = Column.objects.filter(organization=None, column_name=column_name).first()\n\n if obj:\n # create organization mapped column\n obj.pk = None\n obj.id = None\n obj.organization = organization\n obj.save()\n\n return [obj]\n else:\n if table_name:\n obj, _ = Column.objects.get_or_create(\n organization=organization,\n column_name=column_name,\n table_name=table_name,\n is_extra_data=is_extra_data,\n )\n return [obj]\n else:\n obj, _ = Column.objects.get_or_create(\n organization=organization,\n column_name=column_name,\n is_extra_data=is_extra_data,\n )\n return [obj]\n\n return True\n\n md = MappingData()\n\n # Container to store the dicts with the Column object\n new_data = []\n\n for field in fields:\n new_field = field\n\n # find the mapping data column (i.e. the database fields) that match, if it exists\n # then set the extra data flag to true\n db_field = md.find_column(field['to_table_name'], field['to_field'])\n is_extra_data = False if db_field else True # yes i am a db column, thus I am not extra_data\n\n # find the to_column\n to_org_col = Column.objects.filter(organization=organization,\n column_name=field['to_field'],\n table_name=field['to_table_name'],\n is_extra_data=is_extra_data).first()\n from_org_col = Column.objects.filter(organization=organization,\n column_name=field['from_field'],\n is_extra_data=is_extra_data).first()\n\n new_field['to_column_object'] = select_col_obj(\n field['to_field'],\n field['to_table_name'],\n to_org_col\n )\n new_field['from_column_object'] = select_col_obj(\n field['from_field'],\n \"\",\n from_org_col)\n\n new_data.append(new_field)\n\n return new_data",
"def _create_db_columns_def_(self):\r\n\r\n columns = {}\r\n first_dict = self.new_data[0]\r\n\r\n for key, value in first_dict.items():\r\n columns.update({key: None})\r\n\r\n for key, value in first_dict.items():\r\n if key == 'IpAddress':\r\n columns[key] = 'TEXT PRIMARY KEY'\r\n elif isinstance(value, str):\r\n columns[key] = \"TEXT\"\r\n elif isinstance(value, float):\r\n columns[key] = \"REAL\"\r\n else:\r\n columns[key] = \"TEXT\"\r\n\r\n return columns",
"def db_fields(self, obj = None):\n\n if obj is None: obj = self\n\n db_fields = self.api.db.fields(self.table_name)\n return dict ( [ (key,value) for (key,value) in obj.items()\n if key in db_fields and\n Row.is_writable(key,value,self.fields) ] )",
"def map_mongo_to_sql_common(sample: SampleDoc) -> Dict[str, Any]:\n return {\n # hexadecimal string representation of BSON ObjectId. Do ObjectId(hex_string) to turn it back\n MLWH_MONGODB_ID: str(sample.get(FIELD_MONGODB_ID)),\n MLWH_ROOT_SAMPLE_ID: sample.get(FIELD_ROOT_SAMPLE_ID),\n MLWH_RNA_ID: sample.get(FIELD_RNA_ID),\n MLWH_PLATE_BARCODE: sample.get(FIELD_PLATE_BARCODE),\n MLWH_COORDINATE: unpad_coordinate(sample.get(FIELD_COORDINATE)),\n MLWH_RESULT: sample.get(FIELD_RESULT),\n MLWH_DATE_TESTED: sample.get(FIELD_DATE_TESTED),\n MLWH_SOURCE: sample.get(FIELD_SOURCE),\n MLWH_LAB_ID: sample.get(FIELD_LAB_ID),\n # channel fields\n MLWH_CH1_TARGET: sample.get(FIELD_CH1_TARGET),\n MLWH_CH1_RESULT: sample.get(FIELD_CH1_RESULT),\n MLWH_CH1_CQ: parse_decimal128(sample.get(FIELD_CH1_CQ)),\n MLWH_CH2_TARGET: sample.get(FIELD_CH2_TARGET),\n MLWH_CH2_RESULT: sample.get(FIELD_CH2_RESULT),\n MLWH_CH2_CQ: parse_decimal128(sample.get(FIELD_CH2_CQ)),\n MLWH_CH3_TARGET: sample.get(FIELD_CH3_TARGET),\n MLWH_CH3_RESULT: sample.get(FIELD_CH3_RESULT),\n MLWH_CH3_CQ: parse_decimal128(sample.get(FIELD_CH3_CQ)),\n MLWH_CH4_TARGET: sample.get(FIELD_CH4_TARGET),\n MLWH_CH4_RESULT: sample.get(FIELD_CH4_RESULT),\n MLWH_CH4_CQ: parse_decimal128(sample.get(FIELD_CH4_CQ)),\n # filtered positive fields\n MLWH_FILTERED_POSITIVE: sample.get(FIELD_FILTERED_POSITIVE),\n MLWH_FILTERED_POSITIVE_VERSION: sample.get(FIELD_FILTERED_POSITIVE_VERSION),\n MLWH_FILTERED_POSITIVE_TIMESTAMP: sample.get(FIELD_FILTERED_POSITIVE_TIMESTAMP),\n # UUID fields\n MLWH_LH_SAMPLE_UUID: sample.get(FIELD_LH_SAMPLE_UUID),\n MLWH_LH_SOURCE_PLATE_UUID: sample.get(FIELD_LH_SOURCE_PLATE_UUID),\n }",
"def _defs_sql_to_json(rows):\n # type: (List[Tuple[int, str, str]]) -> List[Dict[str, Union[int, str]]]\n return [{'id': row[0], 'name': row[1], 'path': row[2]} for row in rows]",
"def get_fields(filedata) -> dict[str, list[str]]:\n dbf = DBF(\"\", ignore_missing_memofile=True, filedata=filedata)\n table_ids = {}\n table_cols = {}\n\n for r in dbf:\n if r.get(\"OBJECTTYPE\", None) == \"Table\":\n tname = r[\"OBJECTNAME\"]\n tid = r[\"OBJECTID\"]\n\n if tid not in table_ids:\n table_ids[tid] = tname\n\n elif r.get(\"OBJECTTYPE\", None) == \"Field\":\n tid = r[\"PARENTID\"]\n colname = r[\"OBJECTNAME\"]\n\n if tid in table_cols:\n table_cols[tid].append(colname)\n else:\n table_cols[tid] = [colname]\n\n tables = {}\n\n for tid, tname in table_ids.items():\n if tid in table_cols:\n tables[tname] = table_cols[tid]\n else:\n logger.warning(f\"Missing cols on {tname}\")\n\n return tables",
"def column_reflection_fallback(self):\n sql = sa.select([sa.text(\"*\")]).select_from(self._table)\n col_names = self.engine.execute(sql).keys()\n col_dict = [{'name': col_name} for col_name in col_names]\n return col_dict",
"def map_mongo_sample_to_mysql(doc: SampleDoc, copy_date: bool = False) -> Dict[str, Any]:\n value = map_mongo_to_sql_common(doc)\n\n if copy_date:\n value[MLWH_CREATED_AT] = doc[FIELD_CREATED_AT]\n value[MLWH_UPDATED_AT] = doc[FIELD_UPDATED_AT]\n else:\n dt = datetime.utcnow()\n\n value[MLWH_CREATED_AT] = dt\n value[MLWH_UPDATED_AT] = dt\n\n return value",
"def row_to_record(row, field_mappings):\n row = dict(row)\n result = {}\n for airtable_name, query_names in field_mappings.items():\n query_names = query_names or []\n for query_name in query_names:\n value = row.get(query_name)\n if value is not None:\n if isinstance(value, decimal.Decimal):\n value = float(value)\n elif isinstance(value, datetime.datetime):\n value = value.strftime(\"%m/%d/%Y %H:%M:%S\")\n elif isinstance(value, datetime.date):\n value = value.isoformat(\"%m/%d/%Y\")\n elif (\n isinstance(value, str)\n and value.strip()\n and query_name.endswith(\"_list\")\n ):\n value = [item.strip() for item in value.split(\"|\")]\n result[airtable_name] = value\n break\n return result",
"def save_column_mappings(request):\n body = json.loads(request.body)\n import_file = ImportFile.objects.get(pk=body.get('import_file_id'))\n organization = import_file.import_record.super_organization\n mappings = body.get('mappings', [])\n for mapping in mappings:\n dest_field, raw_field = mapping\n if dest_field == '':\n dest_field = None\n\n dest_cols = _column_fields_to_columns(dest_field, organization)\n raw_cols = _column_fields_to_columns(raw_field, organization)\n try:\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n except ColumnMapping.MultipleObjectsReturned:\n # handle the special edge-case where remove dupes doesn't get\n # called by ``get_or_create``\n ColumnMapping.objects.filter(\n super_organization=organization,\n column_raw__in=raw_cols,\n ).delete()\n column_mapping, created = ColumnMapping.objects.get_or_create(\n super_organization=organization,\n column_raw__in=raw_cols,\n )\n\n # Clear out the column_raw and column mapped relationships.\n column_mapping.column_raw.clear()\n column_mapping.column_mapped.clear()\n\n # Add all that we got back from the interface back in the M2M rel.\n [column_mapping.column_raw.add(raw_col) for raw_col in raw_cols]\n if dest_cols is not None:\n [\n column_mapping.column_mapped.add(dest_col)\n for dest_col in dest_cols\n ]\n\n column_mapping.user = request.user\n column_mapping.save()\n\n return {'status': 'success'}",
"def _schema_sql_to_bq_compatibility(\n schema_dict: dict\n) -> dict:\n for k, v in schema_dict.items():\n if v == \"INTEGER\":\n schema_dict[k] = \"INT64\"\n elif v == \"FLOAT\":\n schema_dict[k] = \"FLOAT64\"\n\n return schema_dict",
"def extract_column_names(self) -> Dict[str, Tuple[str, str]]:\n fields = []\n for field in self.properties.keys():\n if not is_airbyte_column(field):\n fields.append(field)\n result = {}\n field_names = set()\n for field in fields:\n field_name = self.name_transformer.normalize_column_name(field, in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True)\n if field_name_lookup in field_names:\n # TODO handle column name duplicates or collisions deterministically in this stream\n for i in range(1, 1000):\n field_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=True)\n if field_name_lookup not in field_names:\n break\n field_names.add(field_name_lookup)\n result[field] = (field_name, jinja_name)\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given the set of rows to upload, upload them to the specified bigquery table | def upload_rows(bigquery_client, bigquery_table, rows_to_upload, collection_to_watch, operation_type):
for row in rows_to_upload:
row['time_archived'] = time.time()
row['operationType'] = operation_type
errors = bigquery_client.create_rows(bigquery_table, rows_to_upload, skip_invalid_rows=True)
if errors:
for i in xrange(len(errors)):
# Add the actual failing document to the error so we can debug this at all
errors[i]['document'] = rows_to_upload[errors[i]['index']]
logging.error("Row insert failed: Updating table " + collection_to_watch['bigquery_table'] + " failed: " + str(errors[i]))
return errors | [
"def upload_bq(bq_project, bq_dataset, table_name,gsc_schemas,bq_tmp_file,cl,bq_dataset_location,bq_check,bq_alert_empty,\n bq_alert_callback,script_file):\n\n\n # create the configuration for an upload job\n final_table_name = u\"%s.%s.%s\" % (bq_project, bq_dataset, table_name)\n jc = bigquery.LoadJobConfig()\n jc.schema = gsc_schemas\n jc.source_format = bigquery.SourceFormat.CSV\n jc.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n\n # create a job to upload the rows\n with open(bq_tmp_file, \"rb\") as f:\n\n jb = cl.load_table_from_file(f, final_table_name, location=bq_dataset_location, job_config=jc)\n\n try:\n # upload the rows\n rs = jb.result()\n print(\"Table uploaded to BQ \\n\")\n # check if the table was created successfully\n if bq_check == True:\n if not cl.get_table(final_table_name):\n if bq_alert_empty == True:\n bq_alert_callback(script_file, u\"[bq] table '%s' was not created\" % final_table_name)\n except Exception as e:\n logging.error(f\"Could not upload the table to BQ: {e}\")\n\n print(u\"ERROR: %s\" % table_name)\n\n if jb.errors:\n for i in jb.errors:\n print(u\"ERROR: %s\" % i[\"message\"])\n else:\n print(e)\n\n f.close()",
"def upload_table(self, table):\n self.open_con(fetchall=False)\n \n # download and save to temporary CSV\n self.cur.execute(\"SELECT * FROM %s\" % (table))\n tmp_file = \"/tmp/%s.csv\" % (table)\n with open(tmp_file, 'w') as out:\n csv_out = csv.writer(out, quotechar='\"', escapechar='\\\\',\n doublequote=True, quoting=csv.QUOTE_MINIMAL,\n lineterminator='\\n')\n for row in self.cur:\n csv_out.writerow(row)\n \n # upload to BQ\n file_size = os.stat(tmp_file).st_size\n file_size = float(file_size) / 1000000 # MB\n \n bq_table = \"%s.%s\" % (self.conf.get('big_query', 'db'),\n self.conf.get('big_query', table + '_table'))\n \n msg = \"Uploading %.1f MB to table '%s' on Big Query...\" \\\n % (file_size, bq_table)\n self.logger.info(msg)\n \n bq_cmd = (\"bq load --replace --source_format=CSV \"\n \"%s %s bigquery_schema/%s\")\n bq_cmd = bq_cmd % (bq_table, tmp_file, table)\n \n process = subprocess.Popen(bq_cmd.split(), stdout=subprocess.PIPE)\n \n self.logger.info(\" Done.\")\n \n self.close_con()",
"def _SendToBigQuery(table, row_dict):\n client = bigquery.Client()\n\n # Attempt the initial row insertion.\n try:\n dataset_ref = client.dataset(constants.BIGQUERY_DATASET)\n table_ref = dataset_ref.table(table.name)\n schema = table.schema\n row_id = table.CreateUniqueId(**row_dict)\n errors = client.insert_rows(\n table_ref, [row_dict], selected_fields=schema, row_ids=[row_id])\n\n # If we get a 404, ensure the dataset and table exist, then try again.\n except exceptions.NotFound:\n\n # See if the destination dataset exists.\n try:\n client.get_dataset(dataset_ref)\n logging.info('Dataset \"%s\" exists', constants.BIGQUERY_DATASET)\n\n # If it doesn't, then try to create it. We're probably racing against other\n # rows, so just ignore 409s.\n except exceptions.NotFound:\n logging.info('Creating dataset \"%s\"', constants.BIGQUERY_DATASET)\n try:\n client.create_dataset(bigquery.Dataset(dataset_ref))\n except exceptions.Conflict:\n logging.info(\n 'Dataset \"%s\" was already created', constants.BIGQUERY_DATASET)\n else:\n logging.info('Dataset \"%s\" created', constants.BIGQUERY_DATASET)\n\n # See if the destination table exists.\n try:\n client.get_table(table_ref)\n logging.info('Table \"%s\" exists', table.name)\n\n # If it doesn't, then try to create it. We're probably racing against other\n # rows, so just ignore 409s.\n except exceptions.NotFound:\n logging.info('Creating table \"%s\"', table.name)\n try:\n client.create_table(bigquery.Table(table_ref, schema=schema))\n except exceptions.Conflict:\n logging.info('Table \"%s\" has already been created', table.name)\n else:\n logging.info('Table \"%s\" successfully created', table.name)\n\n # Attempt the row insertion again. Apparently insertion 404s are cached\n # until the table creation fully propagates, so attempt the insertion a few\n # times with increasing delays before giving up and letting the taskqueue\n # retry it.\n for mins in xrange(1, 6):\n logging.info(\n 'Waiting %dm for table \"%s\" to be ready', mins, table.name)\n _Sleep(mins)\n try:\n errors = client.insert_rows(\n table_ref, [row_dict], selected_fields=schema, row_ids=[row_id])\n except exceptions.NotFound:\n logging.info('Table \"%s\" is still not ready', table.name)\n else:\n break\n\n # If the client returns errors, raise a StreamingFailureError.\n if errors:\n error_str = ', '.join(str(e) for e in errors)\n msg = 'The BigQuery client returned errors: %s' % error_str\n logging.error(msg)\n raise StreamingFailureError(msg)\n\n logging.info('Successfully streamed row to \"%s\" table', table.name)",
"def write_to_bigquery(rows_to_insert):\n logging.debug(\"write_to_bigquery\")\n client = bigquery.Client()\n\n table_id = f'{config.PROJECT_ID}.{config.BIGQUERY_DATASET}.{config.BIGQUERY_TABLE}'\n errors = client.insert_rows_json(table_id, rows_to_insert)\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"Encountered errors while inserting rows: {}\".format(errors))",
"def bq_data_insert(bq_client, project_id, dataset, table, tweets):\n try:\n table_ref = bigquery.TableReference.from_string(\n table_id='{}.{}'.format(dataset, table),\n default_project=project_id\n )\n\n logging.info(f'BigQuery: Inserting {len(tweets)} records')\n # Try the insertion.\n response = bq_client.insert_rows_json(\n table=table_ref,\n json_rows=tweets,\n ignore_unknown_values=True,\n skip_invalid_rows=True\n )\n\n if response != []:\n logging.info(f'BigQuery: Insert Error - {response}')\n\n return response\n except Exception as e1:\n logging.error(f'BigQuery: General Error - {e1}')",
"def load_files(gs_file_names: List[str]) -> None:\n\n partition_sql = get_partition_sql_from_file_names(gs_file_names)\n gs_file_names_string = \",\".join([ f\"'{f}'\" for f in gs_file_names])\n sql = f\"\"\"\n SELECT 1 FROM data_test.users \n WHERE {partition_sql} and _FILE_NAME in ({gs_file_names_string});\n \"\"\"\n\n # when we gets triggered, the file is uploaded but it will take some time \n # to show up in external_table, so we loop/wait for 3 minutes (18 round sleep 10s)\n loop_cnt = 0\n while (loop_cnt < 18):\n time.sleep(10)\n results = client.query(sql)\n print(f\"resuls count: {len(list(results))}\")\n if len(list(results)) > 0:\n loop_cnt = 1000\n loop_cnt += 1\n if loop_cnt < 1000: # we timed out \n print(\"timed out, the external table doesn't have the new uploaded data in GCS.\")\n return\n sql = f\"\"\"\n SELECT * FROM data_test.bq_users \n WHERE {partition_sql} and gcs_file_name in ({gs_file_names_string});\n \"\"\"\n print(sql)\n results = client.query(sql)\n print(list(results))\n if len(list(results)) > 0:\n sql = f\"\"\"\n DELETE FROM data_test.bq_users \n WHERE {partition_sql} and gcs_file_name in ({gs_file_names_string});\n \"\"\"\n print(sql)\n results = client.query(sql)\n\n sql = f\"\"\"\n INSERT INTO data_test.bq_users\n SELECT *, _FILE_NAME as gcs_file_name FROM data_test.users\n WHERE {partition_sql} and _FILE_NAME in ({gs_file_names_string});\n \"\"\"\n print(sql)\n query_job = client.query(sql)\n results = query_job.result()\n print(results)",
"def upload_to_db(self, conn, table, asin) -> NoReturn:\n pass",
"def transferTables(syn,sourceProjId, uploadProjId, extId_Str = ''):\n\n # List of tables sorted by activity as defined in groupTableActivity, which is based on get_tables from\n # synapsebridgehelper.tableHelpers\n tables_list = groupTableActivity(syn,sourceProjId,extId_Str)\n all_tables = groupTableActivity(syn,sourceProjId)\n\n # Iterate over each activity in tables_list\n for activity_ in tables_list:\n print(activity_)\n \n # list of all table ids corresponding to that activity \n activityTableIds = tables_list[activity_]\n result = tableWithFileIds(syn,table_id = activityTableIds[0], extIdStr = extId_Str)\n df_main = result['df']\n cols = result['cols']\n \n # appending the rest of the sorted tables corresponding to that activity if they exist\n for table_index in range(1, len(activityTableIds)):\n result = tableWithFileIds(syn,table_id = activityTableIds[table_index], extIdStr = extId_Str)\n df = result['df']\n cols = result['cols']\n df_main = df_main.append(df)\n \n # Correcting the order of the columns while uploading\n df_main = df_main[df_main.columns]\n \n # Updaing schema and uploading\n schema = synapseclient.Schema(name=activity_ +' extIdStr_' + extId_Str, columns=cols, parent=uploadProjId)\n table = synapseclient.Table(schema, df_main)\n table = syn.store(table)\n table = syn.setProvenance(table.schema.id , activity = synapseclient.activity.Activity(used = all_tables[activity_]))",
"def send_tabular_data_for_ingestion(\n self,\n rows: iter,\n column_names: list,\n destination_table: Optional[str],\n method: Optional[str],\n target: Optional[str],\n collection_id: Optional[str] = None,\n ):\n pass",
"def upload_table(_table, _name):\n\n Gaia.upload_table(upload_resource=_table, table_name=_name, table_description=\"For the thesis! Hallelujah!\")\n table_id = \"user_\" + username + \".\" + _name\n\n return table_id",
"def write_to_bigquery(df, table_name, dataset_name):\n client = bigquery.Client()\n dataset_id = f\"{client.project}.{dataset_name}\"\n\n # Saving the data to BigQuery\n df.write.format(\"bigquery\").option(\"table\", f\"{dataset_id}.{table_name}\").save()\n\n print(f\"Table {table_name} successfully written to BigQuery\")",
"def _write_to_bigquery(df, table_name):\n\n dataframe = df\n\n client = bigquery.Client(project=BQ_LTV_GCP_PROJECT)\n\n job_config = bigquery.LoadJobConfig()\n job_config.write_disposition = \"WRITE_TRUNCATE\"\n job_config.schema = hook_get_bq_schema()\n\n job = client.load_table_from_dataframe(\n dataframe, table_name, job_config=job_config)\n job.result()\n\n table = client.get_table(table_name)\n print(\"Loaded {} rows and {} columns to {}\".format(table.num_rows,\n len(table.schema),\n table_name))",
"def put_many(self, rows):\n self.flush()\n return self._client.put_many(self._full_name, rows)",
"def BatchInsert(self, table, rowlist):\n columns = rowlist[0].keys()\n sql = \"INSERT INTO {table} ({columns}) VALUES\\n\".format(table=table, columns=', '.join(columns))\n rindex = 0\n sqlvals = dict()\n values = ''\n for row in rowlist:\n rowvalues = list()\n for column in columns:\n rowvalues.append(row[column])\n rowvalues = tuple(rowvalues)\n # update the row index\n rowref = \"r{}\".format(rindex)\n rindex += 1\n # update the sql string; on first time don't add a comma\n if values:\n values += \",\\n\"\n values += \"%({thisrow})s\".format(thisrow=rowref)\n #----- \n sqlvals[rowref] = rowvalues\n #execute\n self.query(sql + values, sqlvals)\n self.connection.commit()",
"def send_tabular_data_for_ingestion(\n self,\n rows: iter,\n column_names: list,\n destination_table: Optional[str],\n method: Optional[str],\n target: Optional[str] = None,\n collection_id: Optional[str] = None,\n ):\n if len(column_names) == 0 and destination_table is None:\n errors.log_and_throw(\n ResolvableBy.USER_ERROR,\n log,\n what_happened=\"Failed to ingest tabular data\",\n why_it_happened=\"Either column names or destination table must be specified.\"\n \"Without at least one of those we cannot determine how data should be ingested and we abort.\",\n consequences=\"The data will not be ingested and the current call will fail with an exception.\",\n countermeasures=\"Pass column names or destination table as argument.\",\n )\n\n if not ingester_utils.is_iterable(rows):\n errors.log_and_throw(\n ResolvableBy.USER_ERROR,\n log,\n what_happened=\"Cannot ingest tabular data\",\n why_it_happened=f\"The rows argument must be an iterable but it was type: {type(rows)}\",\n consequences=\"The data will not be ingested and current call will fail with an exception.\",\n countermeasures=\"Make sure rows is proper iterator object \",\n )\n\n if not isinstance(column_names, Iterable):\n errors.log_and_throw(\n ResolvableBy.USER_ERROR,\n log,\n what_happened=\"Cannot ingest tabular data\",\n why_it_happened=f\"The column_names argument must be a List (or iterable) but it was: {type(rows)}\",\n consequences=\"The data will not be ingested and current call will fail with an exception.\",\n countermeasures=\"Make sure column_names is proper List object \",\n )\n\n log.info(\n \"Posting for ingestion data for table {table} with columns {columns} against endpoint {endpoint}\".format(\n table=destination_table, columns=column_names, endpoint=target\n )\n )\n\n if collection_id is None:\n collection_id = \"{data_job_name}|{execution_id}\".format(\n data_job_name=self._data_job_name, execution_id=self._op_id\n )\n\n # fetch data in chunks to prevent running out of memory\n for page_number, page in enumerate(ingester_utils.get_page_generator(rows)):\n ingester_utils.validate_column_count(page, column_names)\n converted_rows = ingester_utils.convert_table(page, column_names)\n log.debug(\n \"Posting page {number} with {size} rows for ingestion.\".format(\n number=page_number, size=len(converted_rows)\n )\n )\n for row in converted_rows:\n self.__verify_payload_format(payload_dict=row)\n self._send(\n payload_dict=row,\n destination_table=destination_table,\n method=method,\n target=target,\n collection_id=collection_id,\n )",
"def streaming_data_into_a_table(self, json_data: list) -> None:\n table_ref = self._client.dataset(self._dataset).table(self._table)\n table = self._client.get_table(table_ref)\n\n errors = self._client.insert_rows_json(table, json_data)\n\n assert errors == [], errors",
"def insert_rows(self, rows: list, table: object) -> int:\n raise NotImplementedError",
"def _upload_to_postgres(task_id: str,\n engine: sa.engine.base.Engine,\n table_name: str,\n schema: str,\n **kwargs) -> None:\n ti = kwargs['ti']\n logging.info(f'task_id: {task_id}')\n df = ti.xcom_pull(task_ids=task_id)\n logging.info(f'type(df): {type(df)}')\n logging.info(f'df.shape: {df.shape}')\n logging.info(f'df.info(): {df.info()}')\n\n\n # Check for geopandas dataframe\n if 'LGA_2016_AUST' in task_id:\n df.to_postgis(con=engine,\n name=table_name,\n schema=schema,\n if_exists='replace',\n index=False)\n else:\n df.to_sql(con=engine,\n name=table_name,\n schema=schema,\n if_exists='replace',\n index=False)",
"def load_table(bigquery, project_id, dataset_id, table_name, source_schema,\n source_path, num_retries=5):\n\n # Generate a unique job_id so retries\n # don't accidentally duplicate query\n job_data = {\n 'jobReference': {\n 'projectId': project_id,\n 'job_id': str(uuid.uuid4())\n },\n 'configuration': {\n 'load': {\n 'sourceUris': [source_path],\n 'schema': {\n 'fields': source_schema\n },\n 'destinationTable': {\n 'projectId': project_id,\n 'datasetId': dataset_id,\n 'tableId': table_name\n }\n }\n }\n }\n\n return bigquery.jobs().insert(\n projectId=project_id,\n body=job_data).execute(num_retries=num_retries)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a Mongo collection, determine the schema that allows the most data to be uploaded to BigQuery. Returns the schema; always sets _id as the primary key | def construct_schema(collection):
columns_dict = {}
columns = []
for row in collection.find():
for field in row.keys():
field_type = get_type(field, row[field])
if field not in columns_dict.keys():
columns_dict[field] = field_type
else:
union_type = unify_types(columns_dict[field], field_type)
columns_dict[field] = union_type
for field in sorted(columns_dict.keys()):
# We sort the keys to make the constructed schema look nice
# Possible failure modes up until this point:
# Field is entirely empty arrays, type is undefined
# Field is entirely empty objects
# Field is invalid
columns_dict[field] = remove_invalid_fields(columns_dict[field])
if (columns_dict[field].get('type', 'INVALID') != 'INVALID' and
not (columns_dict[field]['type'] == 'RECORD' and columns_dict[field]['fields'] == [])):
columns.append(columns_dict[field])
return columns | [
"def get_schema_for_doc(doc_id, path_only=False):\n (coll_name, _) = doc_id.split(\"/\")\n return get_schema(\"collection\", coll_name, path_only)",
"def __init__(self, name, data=None, schema=None, **kwargs):\n self._name = name\n self._kwargs = kwargs\n conn = self._get_connection()\n has = conn.has_collection(self._name)\n if has:\n resp = conn.describe_collection(self._name)\n server_schema = CollectionSchema.construct_from_dict(resp)\n if schema is None:\n self._schema = server_schema\n if data is not None:\n self.insert(data=data)\n else:\n if len(schema.fields) != len(resp[\"fields\"]):\n raise Exception(\"The collection already exist, but the schema is not the same as the passed in.\")\n for schema_field in schema.fields:\n same_field = False\n for field in resp[\"fields\"]:\n if field[\"name\"] == schema_field.name and field[\"type\"] == schema_field.dtype:\n # and field[\"is_primary_key\"] == schema_field.is_primary:\n same_field = True\n if not same_field:\n raise Exception(\n \"The collection already exist, but the schema is not the same as the passed in.\")\n self._schema = schema\n if data is not None:\n self.insert(data=data)\n\n else:\n if schema is None:\n if data is None:\n raise Exception(\"Collection missing schema.\")\n else:\n if isinstance(data, pandas.DataFrame):\n # TODO(czs007): construct schema by DataFrame\n pass\n else:\n raise Exception(\"Data of not pandas.DataFrame type should be passed into the schema.\")\n else:\n # create collection schema must be dict\n if isinstance(schema, CollectionSchema):\n conn.create_collection(self._name, fields=schema.to_dict())\n self._schema = schema\n if isinstance(data, pandas.DataFrame):\n # TODO(czs007): insert data by DataFrame\n pass\n else:\n self.insert(data=data)\n else:\n raise Exception(\"schema type must be schema.CollectionSchema.\")",
"def find_collection(self, ctype):\n if not ctype in self.cols.keys():\n self.cols[ctype] = collection.Collection(self.db, ctype)\n return self.cols[ctype]",
"def get_dataset_schema(dataset):\n return dataset.table_meta[SINGLE_TABLE]",
"def get_collection_id(self):\n if self.__collection_id is None:\n attrs = self.get_handler().ncattrs()\n if 'DSD_entry_id' in attrs:\n self.__collection_id = self.get_handler().DSD_entry_id\n elif 'id' in attrs:\n self.__collection_id = self.get_handler().id\n else:\n pass\n return self.__collection_id",
"def collection_id(self):\n if self._collection_id is not None:\n return self._collection_id\n col_filename = os.path.join(self.dir, 'collection_id.txt')\n\n def creator():\n return '%06i' % (int(self.timer() * 100) % (10 ** 6))\n\n self._collection_id = read_unique(col_filename, creator)\n return self._collection_id",
"def inputSchemaType(self):\n return self.sourceType + \"_schema\"",
"def get_collection_id(vrs_client):\n res = vrs_client.list_collections()\n if res['face_collections']:\n face_collection = res['face_collections'][0]\n else:\n res = vrs_client.create_collection()\n face_collection = res\n return face_collection['collection_id']",
"def _get_schema(name):\n item = datalab.utils.commands.get_notebook_item(name)\n if not item:\n item = _get_table(name)\n\n if isinstance(item, datalab.bigquery.Schema):\n return item\n if hasattr(item, 'schema') and isinstance(item.schema, datalab.bigquery._schema.Schema):\n return item.schema\n return None",
"def _construct_schema(uuid):\n catalog_url = '{0}/api/catalog/v1?ids={1}'.format(URI, uuid)\n response = urllib.request.urlopen(catalog_url, context=context)\n catalog_data = json.load(response)[\"results\"][0][\"resource\"]\n\n schema = []\n for i in range(0, len(catalog_data[\"columns_field_name\"])):\n name = catalog_data[\"columns_field_name\"][i]\n field_type = _encode_datatype(catalog_data[\"columns_datatype\"][i])\n description = catalog_data[\"columns_description\"][i]\n schema.append(bigquery.SchemaField(name, field_type, mode='NULLABLE', description=description))\n\n return schema",
"def get_collection(self, collection_name) -> MongoCollection:\n return MongoCollection(self._quasar_database[collection_name])",
"def _rebuild_internal_schema(self):\n\t\tself.columns = OrderedDict()\n\t\tself.primary_cgroup = None\n\n\t\tfor cgroup, schema in self._cgroups.iteritems():\n\t\t\tfor colname, dtype in schema['columns']:\n\t\t\t\tassert colname not in self.columns\n\t\t\t\tself.columns[colname] = ColumnType()\n\t\t\t\tself.columns[colname].name = colname\n\t\t\t\tself.columns[colname].dtype = np.dtype(dtype)\n\t\t\t\tself.columns[colname].cgroup = cgroup\n\n\t\t\tif self.primary_cgroup is None and not self._is_pseudotablet(cgroup):\n\t\t\t\tself.primary_cgroup = cgroup\n\t\t\t\tif 'primary_key' in schema:\n\t\t\t\t\tself.primary_key = self.columns[schema['primary_key']]\n\t\t\t\tif 'temporal_key' in schema:\n\t\t\t\t\tself.temporal_key = self.columns[schema['temporal_key']]\n\t\t\t\tif 'spatial_keys' in schema:\n\t\t\t\t\t(lon, lat) = schema['spatial_keys']\n\t\t\t\t\tself.spatial_keys = (self.columns[lon], self.columns[lat])\n\t\t\telse:\n\t\t\t\t# If any of these are defined, they must be defined in the\n\t\t\t\t# primary cgroup\n\t\t\t\tassert 'primary_key' not in schema\n\t\t\t\tassert 'spatial_keys' not in schema\n\t\t\t\tassert 'temporak_key' not in schema\n\n\t\t\tif 'blobs' in schema:\n\t\t\t\tfor colname in schema['blobs']:\n\t\t\t\t\tassert self.columns[colname].dtype.base == np.int64, \"Data structure error: blob reference columns must be of int64 type\"\n\t\t\t\t\tself.columns[colname].is_blob = True",
"def _backcompute_schema(self, cursor):\n raw_stats_types = self.connection.tables()\n if not raw_stats_types:\n raise weewx.UninitializedDatabase(\"Uninitialized stats database\")\n # Some stats database have schemas for heatdeg and cooldeg (even though\n # they are not used) due to an earlier bug. Filter them out. Also,\n # filter out the metadata table. In case the same database is being used\n # for the archive data, filter out the 'archive' database.\n stats_types = [s for s in raw_stats_types if s not in ['heatdeg','cooldeg','metadata', 'archive']]\n stats_schema = []\n for stat_type in stats_types:\n ncol = len(self.connection.columnsOf(stat_type))\n stats_schema.append((stat_type, 'REAL' if ncol==7 else 'VECTOR'))\n return stats_schema",
"def __init__(self,url,collectionName):\n self.fullUrl = url + collectionName + '/schema'\n self.field = {}",
"def get_collection_id(self): # real signature unknown; restored from __doc__\n return \"\"",
"def get_schema_name(self):\n obj = self._get_db_obj_query().first()\n return obj.schema_name if obj else None",
"def coll_type(coll, type_id):\n return RecordField.load(coll, field_id, altscope=\"all\")",
"def _get_collection_name(cls):\n return cls._meta.get(\"collection\", None)",
"def get_collection_by_id(collection_id):\n\n return Collection.query.filter(Collection.collection_id == collection_id).first()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For a record definition, remove all the invalid fields; otherwise return the field unchanged | def remove_invalid_fields(field):
if field.get('type', 'INVALID') == 'RECORD':
field['fields'] = [remove_invalid_fields(subfield) for subfield in field['fields'] if subfield.get('type', 'INVALID') != 'INVALID']
field['fields'] = [subfield for subfield in field['fields'] if subfield['type'] != 'RECORD' or subfield.get('fields', []) != []]
return field | [
"def clean_rec (self, r):\n\t\t# new rec to store info in & assoc errors\n\t\tnew_rec = {}\n\t\terrors = []\n\n\t\tself.pre_process (r, new_rec)\n\n\t\tfor cr in self.rules:\n\t\t\t# get values from the source fields\n\t\t\tvals = cr.get_src_vals_from_rec (r)\n\n\t\t\t# abort all vals are in cleaner or rule nulls\n\t\t\tif all_vals_within (vals, self.null_vals):\n\t\t\t\tcontinue\n\n\t\t\tif cr.are_vals_skippable (vals):\n\t\t\t\tcontinue\n\n\t\t\t# otherwise process through san chain\n\t\t\ttry:\n\t\t\t\tout_vals = cr.process_vals (vals)\n\t\t\t\t# if we make it here, store it in new rec\n\t\t\t\tdst_flds = cr.dst_flds\n\t\t\t\tfor df in dst_flds:\n\t\t\t\t\tnew_rec[df] = out_vals\n\t\t\texcept Exception as err:\n\t\t\t\tvalidation_msg = 'Validation failed: %s' % err\n\t\t\t\terrors.append ({\n\t\t\t\t\t\t'error': err,\n\t\t\t\t\t\t'values': list (vals),\n\t\t\t\t\t\t'fields': cr.get_src_flds_str(r),\n\t\t\t\t\t}\n\t\t\t\t)\n\n\t\tself.post_process (r, new_rec)\n\n\t\t## Postconditions & return:\n\t\treturn new_rec, errors",
"def _clean_fields(self, omit_fields, sparse_fields, next_level_omits):\n sparse = len(sparse_fields) > 0\n to_remove = []\n\n if not sparse and len(omit_fields) == 0:\n return\n\n for field_name in self.fields:\n is_present = self._should_field_exist(\n field_name, omit_fields, sparse_fields, next_level_omits\n )\n\n if not is_present:\n to_remove.append(field_name)\n\n for remove_field in to_remove:\n self.fields.pop(remove_field)",
"def remove_none_fields(self, data, **kwargs):\n return remove_none_entries(data)",
"def validate533(self,marc_record):\n all533s = marc_record.get_fields('533')\n for field in all533s:\n marc_record.remove_field(field)\n field.delete_subfield('n')\n marc_record.add_field(field)\n return marc_record",
"def removeField(field):",
"def clean_records(self, records_presentation_format):\n errors = []\n\n # Singletons\n if self.type in (\n \"CNAME\",\n \"DNAME\",\n ):\n if len(records_presentation_format) > 1:\n errors.append(f\"{self.type} RRset cannot have multiple records.\")\n\n # Non-apex\n if self.type in (\n \"CNAME\",\n \"DS\",\n ):\n if self.subname == \"\":\n errors.append(f\"{self.type} RRset cannot have empty subname.\")\n\n if self.type in (\"DNSKEY\",):\n if self.subname != \"\":\n errors.append(f\"{self.type} RRset must have empty subname.\")\n\n def _error_msg(record, detail):\n return f\"Record content of {self.type} {self.name} invalid: '{record}': {detail}\"\n\n records_canonical_format = set()\n for r in records_presentation_format:\n try:\n r_canonical_format = RR.canonical_presentation_format(r, self.type)\n except ValueError as ex:\n errors.append(_error_msg(r, str(ex)))\n else:\n if r_canonical_format in records_canonical_format:\n errors.append(\n _error_msg(\n r,\n f\"Duplicate record content: this is identical to \"\n f\"'{r_canonical_format}'\",\n )\n )\n else:\n records_canonical_format.add(r_canonical_format)\n\n if any(errors):\n raise ValidationError(errors)\n\n return records_canonical_format",
"def force_clean(self, caller=True):\n if caller:\n self.to_graph_objs() # TODO add error handling here!\n for entry in self:\n entry.force_clean(caller=False)\n del_indicies = [index for index, item in enumerate(self)\n if len(item) == 0]\n del_ct = 0\n for index in del_indicies:\n del self[index - del_ct]\n del_ct += 1",
"def remove_internal_attributes(field_details: Field) -> None:\n field_details.pop('node_name', None)\n field_details.pop('intermediate', None)",
"def _clean_fields(self):\n fields = self.fields\n self.fields = dict((k, v) for k, v in fields.items() if not k in self.field_form)\n super(MergingFormMixin, self)._clean_fields()\n self.fields = fields",
"def validate830(self,marc_record):\n return self.__remove_field__(marc_record=marc_record,\n\t\t\t tag='830')",
"def _clear_changed_fields(self):\n ReferenceField = _import_class(\"ReferenceField\")\n GenericReferenceField = _import_class(\"GenericReferenceField\")\n\n for changed in self._get_changed_fields():\n parts = changed.split(\".\")\n data = self\n for part in parts:\n if isinstance(data, list):\n try:\n data = data[int(part)]\n except IndexError:\n data = None\n elif isinstance(data, dict):\n data = data.get(part, None)\n else:\n field_name = data._reverse_db_field_map.get(part, part)\n data = getattr(data, field_name, None)\n\n if not isinstance(data, LazyReference) and hasattr(\n data, \"_changed_fields\"\n ):\n if getattr(data, \"_is_document\", False):\n continue\n\n data._changed_fields = []\n elif isinstance(data, (list, tuple, dict)):\n if hasattr(data, \"field\") and isinstance(\n data.field, (ReferenceField, GenericReferenceField)\n ):\n continue\n BaseDocument._nestable_types_clear_changed_fields(data)\n\n self._changed_fields = []",
"def __record(self):\r\n f = self.__getFileObj(self.dbf)\r\n recFmt = self.__recordFmt()\r\n recordContents = unpack(recFmt[0], f.read(recFmt[1]))\r\n if recordContents[0] != ' ':\r\n # deleted record\r\n return None\r\n record = []\r\n for (name, typ, size, deci), value in zip(self.fields,\r\n recordContents):\r\n if name == 'DeletionFlag':\r\n continue\r\n elif not value.strip():\r\n record.append(value)\r\n continue\r\n elif typ == \"N\":\r\n value = value.replace('\\0', '').strip()\r\n if value == '':\r\n value = 0\r\n elif deci:\r\n value = float(value)\r\n else:\r\n value = int(value)\r\n elif typ == 'D':\r\n try:\r\n y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])\r\n value = [y, m, d]\r\n except:\r\n value = value.strip()\r\n elif typ == 'L':\r\n value = (value in 'YyTt' and 'T') or \\\r\n (value in 'NnFf' and 'F') or '?'\r\n else:\r\n value = value.strip()\r\n record.append(value)\r\n return record",
"def force_clean(self, caller=True): # TODO: can't make call to super...\n obj_key = NAME_TO_KEY[self.__class__.__name__]\n if caller:\n self.to_graph_objs(caller=False)\n del_keys = [key for key in self\n if str(key) not in INFO[obj_key]['keymeta']]\n for key in del_keys:\n if (key[:5] == 'xaxis') or (key[:5] == 'yaxis'):\n try:\n test_if_int = int(key[5:])\n except ValueError:\n del self[key]\n else:\n del self[key]\n keys = list(self.keys())\n for key in keys:\n try:\n self[key].force_clean(caller=False) # TODO error handling??\n except AttributeError:\n pass\n if isinstance(self[key], (dict, list)):\n if len(self[key]) == 0:\n del self[key] # clears empty collections!\n elif self[key] is None:\n del self[key]",
"def clean(self, df):\n df = df.drop(self.__preprocessor.get_non_redundant_entity_attributes(), axis=1)\n df = df.drop(self.__preprocessor.get_redundant_entity_attributes(), axis=1)\n return df",
"def remove_invalid(self):\n for index, row in enumerate(self.raw_data):\n if len(row) != len(self.raw_data[0]):\n self.invalid_rows.append([index + 1, row])\n else:\n self.valid_rows.append(row)",
"def pop_fields(self):\n fields = self.remove_fields if self.remove else self.selection_fields\n if not self.multiple:\n self.process_fields_removal(self.data, fields)\n else:\n for values in self.data:\n self.process_fields_removal(values, fields)",
"def __init__(self):\n for field in self.get_fields():\n setattr(self, field, None)",
"def clear_fields(self) -> None:\n self._fields.clear()",
"def removeField(self, field):\n while field in self.titleLine:\n self.titleLine.remove(field)\n for lineData in self.outputLines:\n while field in lineData:\n lineData.remove(field)\n self.outputLines = [line for line in self.outputLines if line]\n # if len(self.lineList) == 0:\n # self.lineList.append([''])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a Mongo field name, produce one that BigQuery accepts | def get_bq_name(mongo_field):
return ''.join([ch for ch in mongo_field if ch.isalnum() or ch == '_']) | [
"def get_bigquery_sanitized_field_name(field_name):\n # type: (str) -> str\n assert field_name # field_name must not be empty by this stage.\n if not re.match('[a-zA-Z]', field_name[0]):\n field_name = _FALLBACK_FIELD_NAME_PREFIX + field_name\n return re.sub('[^a-zA-Z0-9_]', '_', field_name)",
"def _get_field(self, field_name):\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the 'extra__kubernetes__' prefix \"\n f\"when using this method.\"\n )\n if field_name in self.conn_extras:\n return self.conn_extras[field_name] or None\n prefixed_name = f\"extra__kubernetes__{field_name}\"\n return self.conn_extras.get(prefixed_name) or None",
"def test_build_queries_for_int_field_with_query_param_name(self):\n request = self.request_factory.get('/api/test/?foo-gte=10')\n q = self.query_utils.build_queries_for_int_field(request, 'myfield',\n 'foo')\n\n self.assertQEqual(q, Q(myfield__gte=10))",
"def map_mongo_to_sql_common(sample: SampleDoc) -> Dict[str, Any]:\n return {\n # hexadecimal string representation of BSON ObjectId. Do ObjectId(hex_string) to turn it back\n MLWH_MONGODB_ID: str(sample.get(FIELD_MONGODB_ID)),\n MLWH_ROOT_SAMPLE_ID: sample.get(FIELD_ROOT_SAMPLE_ID),\n MLWH_RNA_ID: sample.get(FIELD_RNA_ID),\n MLWH_PLATE_BARCODE: sample.get(FIELD_PLATE_BARCODE),\n MLWH_COORDINATE: unpad_coordinate(sample.get(FIELD_COORDINATE)),\n MLWH_RESULT: sample.get(FIELD_RESULT),\n MLWH_DATE_TESTED: sample.get(FIELD_DATE_TESTED),\n MLWH_SOURCE: sample.get(FIELD_SOURCE),\n MLWH_LAB_ID: sample.get(FIELD_LAB_ID),\n # channel fields\n MLWH_CH1_TARGET: sample.get(FIELD_CH1_TARGET),\n MLWH_CH1_RESULT: sample.get(FIELD_CH1_RESULT),\n MLWH_CH1_CQ: parse_decimal128(sample.get(FIELD_CH1_CQ)),\n MLWH_CH2_TARGET: sample.get(FIELD_CH2_TARGET),\n MLWH_CH2_RESULT: sample.get(FIELD_CH2_RESULT),\n MLWH_CH2_CQ: parse_decimal128(sample.get(FIELD_CH2_CQ)),\n MLWH_CH3_TARGET: sample.get(FIELD_CH3_TARGET),\n MLWH_CH3_RESULT: sample.get(FIELD_CH3_RESULT),\n MLWH_CH3_CQ: parse_decimal128(sample.get(FIELD_CH3_CQ)),\n MLWH_CH4_TARGET: sample.get(FIELD_CH4_TARGET),\n MLWH_CH4_RESULT: sample.get(FIELD_CH4_RESULT),\n MLWH_CH4_CQ: parse_decimal128(sample.get(FIELD_CH4_CQ)),\n # filtered positive fields\n MLWH_FILTERED_POSITIVE: sample.get(FIELD_FILTERED_POSITIVE),\n MLWH_FILTERED_POSITIVE_VERSION: sample.get(FIELD_FILTERED_POSITIVE_VERSION),\n MLWH_FILTERED_POSITIVE_TIMESTAMP: sample.get(FIELD_FILTERED_POSITIVE_TIMESTAMP),\n # UUID fields\n MLWH_LH_SAMPLE_UUID: sample.get(FIELD_LH_SAMPLE_UUID),\n MLWH_LH_SOURCE_PLATE_UUID: sample.get(FIELD_LH_SOURCE_PLATE_UUID),\n }",
"def test_field_name_matching_query_builder_alias_not_allowed(testdir: Testdir) -> None:\n schema = (\n testdir.SCHEMA_HEADER\n + '''\n model User {{\n id String @id\n order_by String\n }}\n '''\n )\n with pytest.raises(subprocess.CalledProcessError) as exc:\n testdir.generate(schema=schema)\n\n assert (\n 'Field name \"order_by\" shadows an internal keyword; '\n 'use a different field name with \\'@map(\"order_by\")\\''\n ) in str(exc.value.output, 'utf-8')",
"def test_building_a_single_word_mongo_index_query(self):\n self.test_function_input = \"thisisaword\"\n self.expected_function_output = {'word':'thisisaword'}\n self.result = database.build_mongo_index_query(input=self.test_function_input)\n self.assertEqual(self.result, self.expected_function_output), \"Searching for one word is not building the proper mongo index query\"",
"def test_update_dictfield( ):\n\tclass TestA(Document):\n\t\tdata = DictField( )\n\n\tassert Q( { 'data__123': 'test' } ).toMongo( TestA, forUpdate=True ) == { 'data.123': 'test' }\n\n\t# children of a dictfield shouldn't be motified\n\tfieldName = 'data__123'\n\tvalue = {\"XXX\": \"YYY\"}\n\tassert Q( { fieldName: value } ).toMongo( TestA, forUpdate=True )[fieldName.replace('__', '.')] \\\n\t\t== value\n\tvalue = ['test']\n\tassert Q( { fieldName: value } ).toMongo( TestA, forUpdate=True )[fieldName.replace('__', '.')] \\\n\t\t== value",
"def gen_fake(self, field_name, fake):\r\n ...",
"def _get_filter_field_name(self, field_name):\n return field_name.rstrip('_ids') + 's'",
"def test_hstore_unique_rename_field():\n\n test = migrations.rename_field(\n HStoreField(uniqueness=[\"beer\", \"cookies\"]),\n [\"RENAME TO\", \"CREATE INDEX\", \"DROP INDEX\"],\n )\n\n with test as calls:\n assert len(calls.get(\"RENAME TO\", [])) == 2\n assert len(calls.get(\"CREATE UNIQUE\", [])) == 0\n assert len(calls.get(\"DROP INDEX\", [])) == 0",
"def SoDB_createGlobalField(name: 'SbName', type: 'SoType') -> \"SoField *\":\n return _coin.SoDB_createGlobalField(name, type)",
"def normalize_col_name(self, col_name, used_column_names, is_relation):\n field_params = {}\n field_notes = []\n\n new_name = clean_utf8(col_name)\n new_name = col_name.lower()\n if new_name != col_name:\n field_notes.append('Field name made lowercase.')\n\n if is_relation:\n if new_name.endswith('_id'):\n new_name = new_name[:-3]\n else:\n field_params['db_column'] = col_name\n\n new_name, num_repl = re.subn(r'\\W', '_', new_name)\n if num_repl > 0:\n field_notes.append('Field renamed to remove unsuitable characters.')\n\n if new_name.find('__') >= 0:\n while new_name.find('__') >= 0:\n new_name = new_name.replace('__', '_')\n if col_name.lower().find('__') >= 0:\n # Only add the comment if the double underscore was in the original name\n field_notes.append(\"Field renamed because it contained more than one '_' in a row.\")\n\n if new_name.startswith('_'):\n new_name = 'field%s' % new_name\n field_notes.append(\"Field renamed because it started with '_'.\")\n\n if new_name.endswith('_'):\n new_name = '%sfield' % new_name\n field_notes.append(\"Field renamed because it ended with '_'.\")\n\n if keyword.iskeyword(new_name):\n new_name += '_field'\n field_notes.append('Field renamed because it was a Python reserved word.')\n\n if new_name[0].isdigit():\n new_name = 'number_%s' % new_name\n field_notes.append(\"Field renamed because it wasn't a valid Python identifier.\")\n\n if new_name in used_column_names:\n num = 0\n while '%s_%d' % (new_name, num) in used_column_names:\n num += 1\n new_name = '%s_%d' % (new_name, num)\n field_notes.append('Field renamed because of name conflict.')\n\n if col_name != new_name and field_notes:\n field_params['db_column'] = col_name\n\n return new_name, field_params, field_notes",
"def __init__(self, field_name, using=None):\n self.redis = using or connection()\n self.field_name = field_name",
"def _get_field_by_name(model, field):\n field_dict = {x.name: x for x in model._meta.get_fields()} # noqa\n return field_dict[field]",
"def getFieldShortName(field_name):\n return field_name.replace('request.','') \\\n .replace('response.','') \\\n .replace('context.system.','') \\\n .replace('skills.','') \\\n .replace('main skill.','') \\\n .replace('user_defined.','') \\\n .replace('context.','')",
"def enforce_field_name_rules(field_name):\n # check for empty string\n if field_name == \"\":\n new_field_name = \"invalid_field\"\n else:\n new_field_name = field_name\n\n # replace spaces with underscores\n new_field_name = new_field_name.replace(' ', '_')\n\n # replace invalid characters\n new_field_name = re.sub('[^a-zA-Z0-9_]', '', new_field_name)\n\n # grab leading underscores\n underscore_search = re.compile('^_*')\n underscores = underscore_search.search(new_field_name).group()\n\n # remove leading underscores\n new_field_name = re.sub('^_*', '', new_field_name)\n\n # remove leading non-letters\n new_field_name = re.sub('^[^a-zA-Z]*', '', new_field_name)\n\n # add underscores back\n new_field_name = underscores + new_field_name\n\n # limit to 255 characters\n new_field_name = new_field_name[:255]\n\n return new_field_name",
"def name(field: BaseField) -> str:\n return field.NAME",
"def regex_query(field_name, value):\n value = re.escape(value)\n return {'regexp': {field_name: '.*%s.*' % value}}",
"def __setattr__(self, name, val):\n if name in self._meta.fields:\n f = self._meta.fields[name]\n val = f.to_search_value(val)\n super(DocumentModel, self).__setattr__(name, val)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
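The get_bq_name document above is a one-liner, so a usage sketch is enough to show its effect; the sample field name below is an illustrative assumption, not taken from the record.

def get_bq_name(mongo_field):
    # Keep only the characters BigQuery column names accept: alphanumerics and '_'.
    return ''.join([ch for ch in mongo_field if ch.isalnum() or ch == '_'])

print(get_bq_name("user.profile-v2"))  # prints "userprofilev2"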
Return a single die roll of an n_faces die. | def roll_die(n_faces: int = 6):
return int(random.random() * n_faces) + 1 | [
"def roll_dice(number_of_faces:int, repetitions:int):\r\n pass",
"def roll_dice():\r\n die1 = random.randrange(1, 7)\r\n die2 = random.randrange(1, 7)\r\n return (die1, die2) # pack die face values into a tuple\r",
"def roll_die(num_sides):\r\n result = random.randrange(0, num_sides) + 1\r\n return result",
"def roll_die(self) -> None:\n self.face_value = random.randint(1, self.number_of_sides)",
"def ex1():\n total_roll = 10000\n num_faces = [0, 0, 0, 0, 0, 0]\n\n for _ in range(total_roll):\n n = random.randint(0, 5)\n num_faces[n] = num_faces[n] + 1\n\n return num_faces",
"def roll_dice():\n dice1 = random.randrange(1, 7)\n dice2 = random.randrange(1, 7)\n return (dice1, dice2) # pack dice face values into a tuple",
"def roll_dice(num_rolls, dice=six_sided_dice, who='Boss Hogg'):\n roll_total = 0\n got_a_one = False\n assert type(num_rolls) == int, 'num_rolls must be an integer.'\n assert num_rolls > 0, 'Must roll at least once.'\n\n for x in range (0,num_rolls):\n a = dice()\n if commentary:\n announce(a,who)\n if a == 1:\n got_a_one = True\n else:\n roll_total += a\n\n if got_a_one:\n return 1\n else:\n return roll_total",
"def throw(dice = 1):\n\n sum = 0\n for n in range (dice):\n sum = sum + roll()\n return sum",
"def roll_die(dice_amount, dice_range):\n list_range = range(1, dice_range+1)\n dice_counter = 0\n output = 0\n while dice_counter < dice_amount:\n output += random.choice(list_range)\n dice_counter += 1\n return output",
"def test_roll(N=6000):\n dieList = []\n for i in range (user_die):\n dieList.append(i+1)\n \n count = [0] * (user_die + 1) \n\n for i in range(N):\n j = roll(user_die)\n assert j in dieList\n count [j] = count[j] + 1\n\n for i in range(user_die + 1):\n print(i, count[i])",
"def roll(self, count):\n numhits = 0\n diedist = [0, 0, 0, 0, 0, 0]\n\n for d in range(count):\n rol = random.randint(1, self._sides)\n diedist[rol - 1] += 1\n if rol >= self.tohit: # self._tohit:\n numhits += 1\n # print(str(diedist))\n return(numhits * 1.00 / (count * 1.00))",
"def fours(dice):\n return dice_counts(dice)[4] * 4",
"def rollSeveral(self, amount, mode, glitchRule):\r\n if amount <= 0:\r\n raise ValueError(\"Invalid amount of dice.\")\r\n if mode == \"chance\":\r\n raise TypeError(\"Function rollSeveral() doesn't handle chance die rolls.\")\r\n _glitchCount = 0\r\n _hits = 0\r\n for _i in range(amount):\r\n _singleDieHits, _singleDieGlitch = self.rollOne(mode, glitchRule)\r\n _hits += _singleDieHits #add hit(s) from individual die to roll total\r\n if _singleDieGlitch:\r\n _glitchCount += 1 #count 1's for glitch checking\r\n if glitchRule and _glitchCount >= amount / 2:\r\n _glitch = True\r\n else:\r\n _glitch = False\r\n return (_hits, _glitch)",
"def approximate_roll(m, n):\n if m < 1 or n < 2:\n return 0\n\n if m == 1:\n return randint(1, n)\n # Properties of a single n-faced die\n mean = (n + 1) / 2.0\n variance = (n * n - 1) / 12.0\n # Properties of the distribution\n mu = m * mean\n sigma = math.sqrt(m * variance)\n\n v = int(gauss(mu, sigma))\n v = clamp(v, m, m * n)\n return v",
"def roll_dice(self):\n\n dice = [random.choice(range(-1, 2)) for _ in range(4)]\n fate_dice_roll = [FATE_DICE[str(d)] for d in dice]\n return {\n 'dice': dice,\n 'fate_dice_roll': fate_dice_roll,\n 'fate_roll_string': ''.join(fate_dice_roll),\n 'rolled': sum(dice)\n }",
"def roll_dice(num, sides):\n return [random.randint(1, sides) for _ in range(num)]",
"def sixes(dice):\n return dice_counts(dice)[6] * 6",
"def biased_die_roll(die_state, choice):\n p = list()\n die_total = sum(die_state)\n for i in die_state:\n p.append(i*1.0/die_total)\n return choice(a=die_state, p=p)",
"def fives(dice):\n return dice_counts(dice)[5] * 5"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
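A quick way to sanity-check the roll_die document above is to tally a few thousand rolls and confirm each face comes up roughly equally often. Only the standard-library random module the snippet already depends on is assumed; the 10,000-roll count is arbitrary.

import random

def roll_die(n_faces: int = 6):
    # Map a uniform float in [0, 1) onto the integer faces 1..n_faces.
    return int(random.random() * n_faces) + 1

counts = {face: 0 for face in range(1, 7)}
for _ in range(10_000):
    counts[roll_die()] += 1
print(counts)  # each face should land near 1/6 of the 10,000 rolls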
Given sequences to count and samples to try, return probabilities of simulated outcomes. | def calculate_probabilities(
sequences_of_note: Tuple[List[int]],
n_samples: int
):
seq_len = len(sequences_of_note[0])
if not all([
len(seq) == seq_len for seq in sequences_of_note
]):
raise RuntimeError(
'sequences_of_note must all be the same length'
)
def handle_buffer(new_val, buffer, seq_len):
buffer.pop(0)
buffer.append(new_val)
return buffer
def get_key_name(seq_list):
return ', '.join([str(x) for x in seq_list])
def initialize_active_variables(sequences_of_note, seq_len):
buffer = []
for i in range(seq_len):
buffer.append(roll_die())
counts = {get_key_name(key): 0 for key in sequences_of_note}
counts['the_rest'] = 0
return buffer, counts
buffer, counts = initialize_active_variables(sequences_of_note, seq_len)
for i in range(seq_len, n_samples+1):
buffer = handle_buffer(roll_die(), buffer, seq_len)
if buffer in sequences_of_note:
counts[get_key_name(buffer)] += 1
else:
counts['the_rest'] += 1
return {key: (counts[key] / n_samples) for key in counts} | [
"def test_probability_by_state_sequence(self):\n observations = [0,1,1]\n probabilities = Algs.analysis_of_state_sequences(self.model3, observations)\n total_probability = sum(prob for sequence, prob in probabilities)\n self.assertAlmostEquals(total_probability,\n Algs.probability_of_observations(self.model3, observations))",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n probs = []\n for state, action in zip(states, actions):\n probs.append(1 if self.sample_action(state) == action else 0)\n return np.array(probs)",
"def chances(x):\n import scipy as sp\n x=sp.array(x) # if given x is list rather than array\n nt=sp.size(x) # number of trials\n# if nt > 15: # may not be needed in python (was for idl version)\n# x=x[:15]\n# nt=15\n ns=nt+1 # number of possible numbers of total successes\n s=sp.zeros(ns)\n # one place for each possible number of total successes\n p=binaries(long(2)**nt)\n p=p[1:,1:] # all possible permutations of outcomes\n c=p*x+1-p-(1-p)*x # array of chances for each member of p\n ap=sp.sum(p,axis=1)\n # unordered, unsummed list of possible total number of successes\n ac=sp.sum(c,axis=1)/sp.sum(c)\n # corresponding array of unordered, unsummed chances\n for i in range(ns): s[i]=sp.sum(ac*sp.where(ap==i,1,0))\n # range(ns) is index\n #of labels covering all possible numbers of total successes\n # now generate p an array describing all possible detailed outcomes\n return s",
"def prob_estimation(n):\n truecount = 0\n for i in range(n):\n test = gen_rand_23()\n if has_duplicates(test):\n truecount += 1\n return truecount",
"def probs_test(self):\n\t\talignment = '0-0 1-1 2-2 4-3 3-4'\n\t\tsentence = 'a b c d e'\n\t\tlabels = dict(zip([(i,i+1) for i in xrange(5)] + [(0,5),(1,5),(0,3),(3,5),(2,5),(0,2),(1,3)],['0','1','2','4','3','A','B','C','D','E','F','G']))\n\t\ta = Alignments(alignment,sentence)\n\t\tHAT_dict = a.HAT_dict(labels)\n\t\tprobs = {}\n\t\th = HATGrammar(HAT_dict, 'A')\n\t\th.probmass('A', probs = probs)\n\t\tassert probs == {('B', 'G', 'D'): 1, ('A', 'F', 'E'): 1, ('3', 'e'): 1, ('2', 'c'): 1, ('0',): 1, ('2',): 1, ('A', '0', 'B'): 2, ('4',): 1, ('A',): 5, ('C',): 2, ('1', 'b'): 1, ('E',): 1, ('G',): 1, ('E', '2', 'D'): 1, ('C', 'F', '2'): 1, ('B', '1', 'E'): 1, ('C', '0', 'G'): 1, ('1',): 1, ('G', '1', '2'): 1, ('3',): 1, ('F', '0', '1'): 1, ('D', '4', '3'): 1, ('0', 'a'): 1, ('B',): 2, ('D',): 1, ('4', 'd'): 1, ('A', 'C', 'D'): 2, ('F',): 1}\n\t\treturn True",
"def estimated_sequence_probability(list_of_syllables, unigram_dict, bigram_dict):\n \n # set probability to 1 initially\n p = 1.\n\n # loop over sequence indices\n for syll_idx in range(len(list_of_syllables) - 1):\n # form bigram from subsequent syllables\n bigram = (list_of_syllables[syll_idx], list_of_syllables[syll_idx + 1])\n \n # multiply previous probability with probability of this bigram\n p = p * estimated_bigram_probability(bigram, unigram_dict, bigram_dict)\n\n # return the estimated probability of the entire sequence\n return p",
"def chance_of_occurrence(events):\n universe_of_events = len(events)\n frequencies_of_occurrence = {}\n for event in events:\n if event in frequencies_of_occurrence.keys():\n frequencies_of_occurrence[event] += 1\n else:\n frequencies_of_occurrence[event] = 1\n probabilities = {}\n for event in frequencies_of_occurrence.keys():\n probabilities[event] = frequencies_of_occurrence[event]*(1/universe_of_events)\n return probabilities",
"def probability(self):\r\n batcoins = arrayofcoins(1000) #code can be messed with to make a custom value for x in arrayofcoins(x), but theoretical probability calculation must also be changed\r\n for m in range(0,self.trials):\r\n if batcoins.successnumber() == 1:\r\n self.successruns = self.successruns + 1\r\n return float(self.successruns) / self.trials",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n probs = 0.5 * np.ones(len(states))\n return probs",
"def probability(outcomes):\n chances = {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.15, 5: 0.05, 6: 0.2}\n result = 1\n for outcome in outcomes:\n result *= chances[outcome]\n return result",
"def Prob(self, sequence):\n \n # This special case is handled by a separate function\n if ( self.order == 1):\n return self.ProbMono(sequence)\n \n # We strictly enforce the sequence length because it must match\n # the size of the probability tensors\n if ( len(sequence) != self.seq_length ):\n print(\"ERROR: sequence not the right length!\", file=sys.stderr)\n exit(1)\n \n # Calculate the first factor in the multiplication\n otups = self.OTup(sequence, 0, self.order-1)\n if (self.Ps[otups] == 0.0):\n prob = 0.25\n else:\n prob = self.Ps[otups]\n \n # Loop through the rest of the multiplication\n for n in range(self.order-1, self.seq_length):\n otupl = self.OTup(sequence, n+1-self.order, self.order)\n otups = self.OTup(sequence, n+1-self.order, self.order-1)\n \n # Depending on how the probability tensors were generated,\n # some oligonucleotides may be too rare to have an estimated\n # probability that is non-zero. In this case we take this\n # as a failure to measure the correct probability, and we\n # assume a uniform distribution.\n # This is probably not optimal, and could potentially be\n # improved.\n if (self.Pl[otupl] == 0.0 or self.Ps[otups] == 0.0):\n #print(\"WARNING: Zero probability encountered!\", file=sys.stderr)\n pfac = 0.25\n \n # If all is well, multiply our probability with the next factor.\n else:\n pfac = self.Pl[otupl]/self.Ps[otups]\n prob *= pfac\n return prob",
"def seqProcessing(sp,sample_keys,mlineage,size_par,mean_depth,purity):\n all_cur_id = []\n all_mut_id = []\n for key in sample_keys:\n smuts = list(sp[key].neutral + sp[key].advant)\n all_cur_id += smuts\n sample_size = 10000 ## the number of cells for sequencing analysis\n sample_id = random.sample(all_cur_id,sample_size)\n id_count = Counter(sample_id)\n for x in id_count.keys():\n xlineage = traceLineage(mlineage,x)\n all_mut_id += xlineage*id_count[x]\n mut_count = Counter(all_mut_id)\n prob_par=size_par*1.0/(size_par+mean_depth)\n sampleAF = {}\n for x in mut_count.keys():\n true_af = mut_count[x]*0.5*purity/sample_size\n if true_af > 0.005:\n site_depth = np.random.negative_binomial(size_par,prob_par)\n if site_depth >= 10:\n var_reads = np.random.binomial(site_depth,true_af)\n seq_af = var_reads*1.0/site_depth\n if var_reads >= 3:\n sampleAF[str(x)] = (site_depth,seq_af)\n #sampleAF[str(x)] = seq_af\n return sampleAF",
"def num_sequences_sampled(self) -> int:\n return self._num_sequences_sampled",
"def next_token_log_probabilities_batched(\n self, prefixes: List[List[int]]) -> np.ndarray:\n old_sleep_millis = self.sleep_millis\n self.sleep_millis = 0.0\n results = np.array(\n [self.next_token_log_probabilities(prefix) for prefix in prefixes])\n self.sleep_millis = old_sleep_millis\n self.batch_probability_count += 1\n time.sleep(len(prefixes) * self.batch_sleep_millis / 1000.0)\n return results",
"def rv_outcomes(match_counts, win_prob):\n p_outcome = wins_to_outcomes(win_prob)\n return tfp.distributions.Multinomial(match_counts, probs=p_outcome)",
"def get_low_probabilty_sequences(self, project_sequences, sequence_len):\r\n scoring_dict = {}\r\n for line in project_sequences:\r\n method_name = line.split(self.config.METHOD_TOKEN_SPLITTER)[self.config.METHOD_INDEX]\r\n tokens = line.split(self.config.METHOD_TOKEN_SPLITTER)[self.config.TOKEN_INDEX].split(\r\n self.config.TOKEN_SPLITTER)\r\n token_sequence_chunks = list(self.chunks(tokens, sequence_len))\r\n for chunk in token_sequence_chunks:\r\n score = self.score_sequence(chunk)\r\n scoring_dict[method_name + str(random())] = (score, chunk)\r\n return heapq.nsmallest(self.config.REPORTING_SIZE, scoring_dict.items(), key=itemgetter(1))",
"def test_toProbs(self):\n c = Counts([1,2,3,4,2,2,2,2,0.2,0.4,0.6,0.8,1,0,0,0], RnaPairs)\n p = c.toProbs()\n assert isinstance(p, Probs)\n self.assertEqual(p, Probs([0.1,0.2,0.3,0.4,0.25,0.25,0.25,0.25, \\\n 0.1,0.2,0.3,0.4,1.0,0.0,0.0,0.0], RnaPairs))\n self.assertEqual(p['U','U'], 0.1)\n self.assertEqual(p['G','U'], 1.0)\n self.assertEqual(p['G','G'], 0.0)",
"def probabilities(n):\n prob2 = np.array([1., 1.])\n it2 = [1, 2]\n\n if n == 2:\n p = 0.5*np.array(prob2)\n expect = np.dot(p, it2)\n return p, it2, expect\n\n if n > 2:\n prob = prob2\n it = it2\n k = 2\n while k < n:\n prob = np.concatenate([prob, prob / k])\n it = np.concatenate([it, it + np.ones_like(it)])\n k += 1\n # print(k)\n return (1/n) * np.array(prob), it",
"def find_p_value_for_activation(events, number_of_rois,\n number_of_active_rois,\n number_of_permutations,\n number_of_original_activations):\n number_of_activations = np.zeros((number_of_permutations))\n for i in range(number_of_permutations):\n partial_events_mat = randomize_partial_events_mat(events, number_of_rois)\n number_of_activations[i] = count_number_of_activations(partial_events_mat, number_of_active_rois)\n p_value = np.sum(number_of_activations < number_of_original_activations)/ float(number_of_permutations)\n\n return p_value, number_of_activations"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
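calculate_probabilities above leans on roll_die from the previous record; rather than repeat the whole function, the self-contained sketch below restates the same sliding-window tally. It is not the document's code, and the two target sequences and the 100,000-sample size are made-up inputs.

import random

def roll_die(n_faces: int = 6):
    return int(random.random() * n_faces) + 1

# Slide a window of length seq_len over the rolls and tally each target sequence.
targets = ([1, 2, 3], [6, 6, 6])
n_samples, seq_len = 100_000, 3
rolls = [roll_die() for _ in range(n_samples)]
counts = {", ".join(map(str, t)): 0 for t in targets}
counts["the_rest"] = 0
for i in range(seq_len - 1, n_samples):
    window = rolls[i - seq_len + 1:i + 1]
    if window in targets:
        counts[", ".join(map(str, window))] += 1
    else:
        counts["the_rest"] += 1
print({key: count / n_samples for key, count in counts.items()})  # values sum to roughly 1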
Looks at a status, compares it to a cache of known URLs and text-only Tweets. Returns either the status, or None if the status has been seen before. | def consider_status(status, cache, cache_length=604800, expand_fn=None):
logger = logging.getLogger("twitterdedupe.consider_status")
if expand_fn is None:
expand_fn = lengthen_url
if len(status.entities['urls']) == 0:
# Hey there's only text here
key = str(hash("%s.%s" % (status.user.screen_name, status.text)))
if cache.get(key) is None:
logger.info("CACHE.MISS: %s.%s - %s" % (
status.user.screen_name,
status.id,
status.text
))
cache.set(key, 1, cache_length)
return status
else:
# WE GOT LINKSIGN!!!
url = status.entities['urls'][0]['expanded_url']
expanded_url = expand_fn(url)
key = expanded_url
if cache.get(key) is None:
logger.info("CACHE.MISS: %s.%s - %s" % (status.user.screen_name,
status.id, expanded_url))
cache.set(key, 1, cache_length)
return status
return None | [
"def fetch(account):\n \n import twitterapi\n import rfc822\n import datetime\n items_existing = 0\n items_created = 0\n twitterapi = twitterapi.Api()\n \n # get the latest tweet we already have\n if TwitterStatus.objects.count() > 0:\n latest_id = TwitterStatus.objects.latest().twitter_id\n tweets = twitterapi.GetUserTimeline(\n user=account.username,\n since_id=latest_id)\n else:\n tweets = twitterapi.GetUserTimeline(user=account.username)\n \n for status in tweets:\n try:\n tweetdate = datetime.datetime(*rfc822.parsedate(status.created_at)[:6])\n except: # TODO: more specific exception handling?\n continue\n \n entry, created = TwitterStatus.objects.get_or_create(\n account = account,\n twitter_id = status.id,\n published = tweetdate)\n if created:\n entry.title = status.text\n entry.link = u'http://twitter.com/%s/%s' % (account.username, status.id)\n entry.save()\n items_created += 1\n else:\n items_existing += 1\n \n return (items_created, items_existing)",
"def get_status_of_url(self, long_url: str):\n document = self.db.unsafe_links.find_one({'long_url': long_url})\n if document is None:\n return None\n return document['status']",
"def fetch_tweets(self, id_list):\n if len(id_list) <= 100:\n return self.api.statuses_lookup(id_list)\n return None",
"def get_github_status(api_url=None):\n r = requests.get(api_url or github_status_api_url)\n api = r.json()\n r = requests.get(api['status_url'])\n status = r.json()\n return status['status']",
"def post_single(self, text, **kwargs):\n if len(text) == 0:\n logger.error(\"Empty tweet?\")\n return None\n msg_log.warning(text)\n if self.readonly:\n return None\n if 'reply_to_status' in kwargs:\n orig_tweet = kwargs.pop('reply_to_status')\n if orig_tweet:\n kwargs['in_reply_to_status_id'] = orig_tweet.id\n kwargs['auto_populate_reply_metadata'] = True\n while True: # catches rate limit\n try:\n new_tweet = self.api.update_status(text, **kwargs)\n return new_tweet\n except tweepy.TweepError as twerror:\n if twerror.api_code is None:\n logger.critical(\"Unknown error while tweeting: %s\", twerror.reason)\n return None\n if twerror.api_code == 185: # status update limit (tweeted too much)\n logger.error(\"Tweeted too much, waiting 1 Minute before trying again\")\n time.sleep(60)\n continue\n if twerror.api_code == 385:\n logger.critical(\"Error 385: Tried to reply to deleted or invisible tweet %s\",\n kwargs.get('in_reply_to_status_id', 'N/A'))\n elif twerror.api_code != 187: # duplicate tweet\n logger.critical(\"Error %s tweeting: %s\", twerror.api_code, twerror.reason)\n return None",
"def fetch_status():\n return json.loads(requests.get('http://omegle.com/status').text)",
"def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')",
"def get_tweet(self, tweet_id):\n return self.api.get_status(tweet_id)",
"def get_status_by_status_id(status_id):\n params = dict()\n params[\"id\"] = status_id\n status_json = ch.get_json_data(cc.status_base_url, params=params, post_data=None)\n if ch.check_json_format(status_json) is False:\n logging.error(\"Error to json String: [%s...]\", status_json[0:10])\n return None\n raw_status = json.loads(status_json)\n status = format_status(raw_status)\n return status",
"def get_statuses(self, content):\n re_has_templates = \"\\{\\{[aA][fF][cC] submission\\s*(\\}\\}|\\||/)\"\n re_template = \"\\{\\{[aA][fF][cC] submission\\s*(.*?)\\}\\}\"\n re_remove_embed = \"(\\{\\{[aA][fF][cC] submission\\s*(.*?))\\{\\{(.*?)\\}\\}(.*?)\\}\\}\"\n valid = [\"R\", \"H\", \"P\", \"T\", \"D\"]\n subtemps = {\n \"/reviewing\": \"R\",\n \"/onhold\": \"H\",\n \"/pending\": \"P\",\n \"/draft\": \"T\",\n \"/declined\": \"D\"\n }\n statuses = []\n\n while re.search(re_has_templates, content):\n status = \"P\"\n match = re.search(re_template, content, re.S)\n if not match:\n return statuses\n temp = match.group(1)\n limit = 0\n while \"{{\" in temp and limit < 50:\n content = re.sub(re_remove_embed, \"\\\\1\\\\4}}\", content, 1, re.S)\n match = re.search(re_template, content, re.S)\n temp = match.group(1)\n limit += 1\n params = temp.split(\"|\")\n try:\n subtemp, params = params[0].strip(), params[1:]\n except IndexError:\n status = \"P\"\n params = []\n else:\n if subtemp:\n status = subtemps.get(subtemp)\n params = []\n for param in params:\n param = param.strip().upper()\n if \"=\" in param:\n key, value = param.split(\"=\", 1)\n if key.strip() == \"1\":\n status = value if value in valid else \"P\"\n break\n else:\n status = param if param in valid else \"P\"\n break\n statuses.append(status)\n content = re.sub(re_template, \"\", content, 1, re.S)\n\n return statuses",
"def status(self, result, config=None):\r\n return result['status']",
"def cached_nag_status(status_file = app.config['STATUS_FILE'], level = STATE_CRITICAL):\n status = cache.get('nag-status-%s' % level)\n if status is None:\n status = get_nag_status(status_file, level)\n cache.set('nag-status-%s' % level, status, timeout=10)\n return status",
"def check_cache(self, state):\n for entry in self.__cache:\n if state == entry:\n return entry\n self.__cache.append(state)\n return state",
"def get_tweet(self, value):\n while True:\n t = self._checkout_tweet(value)\n if t is not None:\n return t\n # Otherwise, try and get one\n self._get_tweet_from_api()",
"def get_with_status(self, status: TaskStatus, offset=0, limit=None, query=None):\n self.update_from_file(self.storage_path)\n filter_function = {\n TaskStatus.COMPLETED: lambda x: x.completed(),\n TaskStatus.UNCOMPLETED: lambda x: not x.completed(),\n TaskStatus.PLANNED: lambda x: not x.is_finished() and not x.is_running(),\n TaskStatus.RUNNING: lambda x: x.is_running(),\n TaskStatus.FINISHED: lambda x: x.is_finished(),\n }[status]\n result = list(filter(filter_function, self.tasks))\n return DataManager.__paginate_and_search__(result, offset, limit, query, status)",
"def status(self) -> Optional[pulumi.Input['SharedPrivateLinkResourceStatus']]:\n return pulumi.get(self, \"status\")",
"def getRT(statuses):\n\n retweets_dup = [\n (status['retweet_count'], \n status['retweeted_status']['user']['screen_name'], \n status['text'])\n for status in statuses \n if status.has_key('retweeted_status')]\n return list(set(retweets_dup))",
"def import_status(self):\n result = self.__get_object('imports', None, None)\n if not 'status' in result:\n self.log.error(\"Unable to find 'status' key in result: %s\" % (result))\n return None \n elif not result['status'] in ['ready', 'queued', 'processing', 'succeeded', 'failed' ]:\n self.log.error(\"Unexpected status '%s' for import status. Check API and update library. Result = %s\" % (status, result))\n return None\n return result",
"def get_status(self):\n self.doGet(STATUS_API, DEFAULT_HEADERS)\n self.parse_response_as_json()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
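consider_status only needs a cache object exposing get(key) and set(key, value, timeout); a dict-backed stand-in such as the sketch below (the class name DictCache is made up, and the timeout is ignored) is enough to exercise the miss/hit paths locally.

class DictCache:
    """Minimal stand-in for the get/set cache interface consider_status uses."""

    def __init__(self):
        self._store = {}

    def get(self, key):
        # Return None on a miss, which is what consider_status checks for.
        return self._store.get(key)

    def set(self, key, value, timeout=None):
        # A real cache would expire the entry after `timeout` seconds; ignored here.
        self._store[key] = value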
Tests the load ratings function | def testloadratings(ratingstablename, filepath, openconnection, rowsininpfile):
MyAssignment.loadratings(ratingstablename, filepath, openconnection)
# Test 1: Count the number of rows inserted
with openconnection.cursor() as cur:
cur.execute('SELECT COUNT(*) from {0}'.format(RATINGS_TABLE))
count = int(cur.fetchone()[0])
if count != rowsininpfile:
raise Exception(
'Expected {0} rows, but {1} rows in \'{2}\' table'.format(rowsininpfile, count, RATINGS_TABLE)) | [
"def test_get_ratings(self):\n self.base_method()\n response = self.client.get(self.url, format='json')\n assert response.status_code == 200",
"def test_import(self):\n path = os.path.dirname('')\n vote_history = os.path.abspath(\n 'import_ratings/tests/test_data/criticker_rankings.xml')\n\n ratings_list = parse_criticker_votes(vote_history)\n self.assertEquals(len(ratings_list), 10)\n\n save_ratings_in_db(self.u1, ratings_list, ImportRatings.CRITICKER, \n overwrite=True)\n\n all_ratings = ImportRatings.objects.all()\n self.assertEquals(len(all_ratings), 1)\n\n \"\"\"\n Gets the import records stored in ImportRatings table and\n imports them into single Rating records\n \"\"\"\n\n import_ratings()\n\n ratingsLogs = ImportRatingsLog.objects.all()\n self.assertEquals(len(ratingsLogs), 1)\n\n ratings = Rating.objects.all()\n self.assertEquals(len(ratings), 10)",
"def load_ratings():\n \n ratings_file = open(\"seed_data/u.data\")\n for line in ratings_file:\n rating_info = line.rstrip().split(\"\\t\")\n rating = Ratings(user_id=rating_info[0], movie_id=rating_info[1], movie_score=rating_info[2])\n db.session.add(rating)\n \n # print \"The load_ratings for loop took\", time.time() - start, \"ms to run\" \n\n db.session.commit()",
"def _load_ratings(self):\n\t\tfield_name_1 = 'short_question_{0}'.format(self.lang)\n\t\tfield_name_2 = 'long_question_{0}'.format(self.lang)\n\t\tquery = \"\"\"\n\t\t\tSELECT {0}, {1}, month, numerical_answer, count\n\t\t\tFROM ratings\n\t\t\tWHERE course_code = %s\n\t\t\tORDER BY {0};\n\t\t\"\"\".format(field_name_1, field_name_2)\n\t\tresults = query_mysql(query, (self.course_code,))\n\t\tresults = pd.DataFrame(results, columns=['short_question', 'long_question', 'month', 'average', 'count'])\n\t\t# Return False if course has received no feedback\n\t\tself.data = False if results.empty else results",
"def load_ratings(ratings_file):\n if not isfile(ratings_file):\n print(\"File %s does not exist.\" % ratings_file)\n sys.exit(1)\n f = open(ratings_file, 'r')\n ratings = np.loadtxt(ratings_file, dtype=int, delimiter='::')\n f.close()\n if not ratings.any():\n print(\"No ratings provided.\")\n sys.exit(1)\n else:\n return ratings",
"def test_get_average_rating_that_doestnot_exist(self):\n url = reverse('ratings', kwargs={\"article_id\": 4})\n response = self.client.get(url,\n HTTP_AUTHORIZATION=self.joel_auth_header1,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_calculate_rating_correct():\n\n game = games.get(\"Flingler\")\n rating = ratings.calculate(game)\n\n assert 3.0 == rating",
"def test_reload_review(self):\n self.reload_helper(\"Review\")",
"def test_game_add_rating():\n\n game = games.get(\"Flingler\")\n\n rating = ratings.get(1)\n rating2 = ratings.get(2)\n rating3 = rating.get(3)\n\n game.ratings.append(rating)\n game.ratings.append(rating2)\n game.ratings.append(rating3)\n\n assert rating in game.ratings\n assert rating3 in game.ratings",
"def test_get_article_average(self):\n url = reverse('ratings', kwargs={\"article_id\": 1})\n response = self.client.get(url,\n HTTP_AUTHORIZATION=self.joel_auth_header1,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_car_includes_rating(self):\n self.__add_test_rating(car=self.test_car, value=3)\n self.__add_test_rating(car=self.test_car, value=4)\n\n response = self.client.get(f'/api/cars/')\n data = response.data['results']\n\n rated_car = [serialized_car for serialized_car in data\n if serialized_car['id'] == self.test_car.id][0]\n\n self.assertEqual(rated_car['rating'], '3.50')",
"def test_rating_get_incorrect():\n\n assert_raises(exceptions.NonExistentRating, ratings.get, 200)",
"def test_get_rate(self):\n self.base_post_rate()\n response = self.client.get(self.rate_article_url, format='json')\n assert response.status_code == 200\n assert response.data[\"user\"] == \"asheuh\"\n assert response.data[\"rate\"] == 5\n assert response.data[\"comment\"] == \"I like this article\"",
"def test_average_no_review(self):\n restaurant = Restaurant.objects.get(name=\"Unknown Restaurant\")\n self.assertEqual(restaurant.averageRating(), 0)",
"def test_retrieve_recipies(self):\n sample_recipe(user = self.user)\n sample_recipe(user = self.user)\n\n res = self.client.get(RECIPES_URL)\n\n recipes = Recipe.objects.all().order_by('id')\n serializer = RecipeSerializer(recipes, many = True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_load_readings(self):\n self.assertTrue(len(self.temperature_reading_manager1.get_all_readings())>0)",
"def test_query_by_rating(self):\n suppliers = self._create_suppliers(5)\n rating_limit = suppliers[0].rating\n rating_suppliers = [supplier for supplier in suppliers if supplier.rating > rating_limit]\n resp = self.app.get(\"/suppliers\", query_string=\"rating={}\".format(rating_limit))\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), len(rating_suppliers))\n # check the data just to be sure\n for supplier in data:\n self.assertGreater(supplier['rating'], rating_limit)",
"def test_mean_surviver_resources(self):\n\n survivers = Surviver.objects.all()\n food, water, medication, ammunition = calc_mean_surviver_resources(survivers)\n\n self.assertEqual(food, 2)\n self.assertEqual(water, 9)\n self.assertEqual(medication, 8)\n self.assertEqual(ammunition, 4)",
"def test_vote_rating(self):\n\n Vote.objects.create(type=True, user=self.user, tip=self.tip) #Up vote by user\n\n self.assertTrue(self.tip.get_rating == {'positive':1, 'negative':0})\n self.assertTrue(self.tip.vote_set.count() == 1)\n\n Vote.objects.create(type=True, user=self.user1, tip=self.tip) #Up vote by user1\n\n self.assertTrue(self.tip.get_rating == {'positive':2, 'negative':0})\n self.assertTrue(self.tip.vote_set.count() == 2)\n\n Vote.objects.create(type=False, user=self.user2, tip=self.tip) #Down vote by user2\n\n self.assertTrue(self.tip.get_rating == {'positive':2, 'negative':1}) # rating should be 1\n self.assertTrue(self.tip.vote_set.count() == 3) # vote count 3"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
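The heart of testloadratings is a single COUNT(*) assertion. The sketch below rehearses the same check against an in-memory sqlite3 table instead of the Postgres connection the test expects; the table name and contents are made up.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE ratings (userid INTEGER, movieid INTEGER, rating REAL)")
conn.executemany("INSERT INTO ratings VALUES (?, ?, ?)",
                 [(1, 10, 4.0), (2, 11, 3.5)])
count = conn.execute("SELECT COUNT(*) FROM ratings").fetchone()[0]
if count != 2:
    raise Exception("Expected 2 rows, but {0} rows in 'ratings' table".format(count))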
Tests the round robin partitioning for Completeness, Disjointness and Reconstruction | def testroundrobinpartition(ratingstablename, numberofpartitions, openconnection, robinpartitiontableprefix,
partitionstartindex):
try:
MyAssignment.roundrobinpartition(ratingstablename, numberofpartitions, openconnection)
except Exception:
# ignore any exceptions raised by function
pass
testrangeandrobinpartitioning(numberofpartitions, openconnection, robinpartitiontableprefix, partitionstartindex) | [
"def test_move_partition_rg_imbalanced(self):\n assert not self.move_partition_valid(0, 1, 3)",
"def test_random_partition(self):\n stirling = spn.utils.Stirling()\n for num_subsets in range(1, len(TestPartition.test_set) + 1):\n # Run test for various num_subsets\n with self.subTest(num_subsets=num_subsets):\n possible_partitions = TestPartition.possible_partitions[num_subsets - 1]\n counts = [0 for p in possible_partitions]\n # Sample many times\n num_tests = 10000\n for _ in range(num_tests):\n out = spn.utils.random_partition(TestPartition.test_set,\n num_subsets, stirling)\n i = possible_partitions.index(out)\n counts[i] += 1\n # Check if counts are uniform\n expected = num_tests / len(possible_partitions)\n for c in counts:\n self.assertGreater(c, 0.8 * expected)\n self.assertLess(c, 1.2 * expected)",
"def test_move_partition_move_rg(self):\n assert self.move_partition_valid(5, 1, 3)",
"def test_partition_weights(self):\n assert self.state.partition_weights == (2, 3, 4, 5, 6, 7, 8)",
"def testAnalyaticalPartitionIsCorrect(self):\n # Here we enumerate a set of positive rational numbers n/d alongside\n # numerically approximated values of Z(n / d) up to 10 digits of precision,\n # stored as (n, d, Z(n/d)). This was generated with an external mathematica\n # script.\n ground_truth_rational_partitions = (\n (1, 7, 4.080330073), (1, 6, 4.038544331), (1, 5, 3.984791180),\n (1, 4, 3.912448576), (1, 3, 3.808203509), (2, 5, 3.735479786),\n (3, 7, 3.706553276), (1, 2, 3.638993131), (3, 5, 3.553489270),\n (2, 3, 3.501024540), (3, 4, 3.439385624), (4, 5, 3.404121259),\n (1, 1, 3.272306973), (6, 5, 3.149249092), (5, 4, 3.119044506),\n (4, 3, 3.068687433), (7, 5, 3.028084866), (3, 2, 2.965924889),\n (8, 5, 2.901059987), (5, 3, 2.855391798), (7, 4, 2.794052016),\n (7, 3, 2.260434598), (5, 2, 2.218882601), (8, 3, 2.190349858),\n (3, 1, 2.153202857), (4, 1, 2.101960916), (7, 2, 2.121140098),\n (5, 1, 2.080000512), (9, 2, 2.089161164), (6, 1, 2.067751267),\n (7, 1, 2.059929623), (8, 1, 2.054500222), (10, 3, 2.129863884),\n (11, 3, 2.113763384), (13, 3, 2.092928254), (14, 3, 2.085788350),\n (16, 3, 2.075212740), (11, 2, 2.073116001), (17, 3, 2.071185791),\n (13, 2, 2.063452243), (15, 2, 2.056990258)) # pyformat: disable\n for numer, denom, z_true in ground_truth_rational_partitions:\n z = distribution.analytical_base_partition_function(numer, denom)\n self.assertAllClose(z, z_true, atol=1e-9, rtol=1e-9)",
"def test_random_partitions_by_enumeration(self):\n self.run_test_random_partitions(spn.utils.random_partitions_by_enumeration,\n balanced=False)\n self.run_test_random_partitions(spn.utils.random_partitions_by_enumeration,\n balanced=True)",
"def test_partitions(self):\n assert self.state.partitions == (\n self.ct.partitions[('T0', 0)],\n self.ct.partitions[('T0', 1)],\n self.ct.partitions[('T1', 0)],\n self.ct.partitions[('T1', 1)],\n self.ct.partitions[('T2', 0)],\n self.ct.partitions[('T3', 0)],\n self.ct.partitions[('T3', 1)],\n )",
"def test_move_partition_valid(self):\n assert self.move_partition_valid(0, 1, 4)",
"def test_random_partition_args(self):\n # input_set\n with self.assertRaises(TypeError):\n spn.utils.random_partition(1, 1)\n with self.assertRaises(ValueError):\n spn.utils.random_partition([], 1)\n # num_subsets\n with self.assertRaises(ValueError):\n spn.utils.random_partition([1], 0)\n with self.assertRaises(ValueError):\n spn.utils.random_partition([1], 2)\n # stirling\n with self.assertRaises(TypeError):\n spn.utils.random_partition([1], 1, stirling=list())\n # rnd\n with self.assertRaises(TypeError):\n spn.utils.random_partition([1], 1, rnd=list())",
"def check_partition(game, player):\n\n exists = False\n rows = ()\n columns = ()\n \n own_loc = ()\n own_side = 0\n \n opp_loc = ()\n opp_side = 0\n \n # First check row partitions\n\n own_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(game.get_opponent(player))\n for i in range(2, 4):\n for j in range(0, 7):\n if game.move_is_legal((i, j)) or game.move_is_legal((i+1, j)):\n break\n elif j == 6:\n own_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(game.get_opponent(player))\n # players cant be inside the partition\n print(game.to_string())\n if own_loc[0] != i and own_loc[0] != i + 1 and opp_loc[0] != i and opp_loc[0] != i + 1:\n exists = True\n print(game.to_string())\n if exists:\n rows = (i, i+1)\n break\n \n # If a partition exists, see if players are on opposite sides (-1 is top, +1 is bottom)\n if exists:\n own_loc = game.get_player_location(player)\n if own_loc[0] <= rows[0]:\n own_side = -1\n else:\n own_side = 1\n\n opp_loc = game.get_player_location(game.get_opponent(player))\n if opp_loc[0] <= rows[0]:\n opp_side = -1\n else:\n opp_side = 1\n\n # If players are on opposite sides, we approximate that the winner is on the larger side\n # NOTE: A more accurate (but more costly) estimate would be to count open moves available\n # on each side.\n if own_side != opp_side:\n if rows[0] < 3 and opp_side == -1:\n return float(\"inf\")\n else:\n return float(\"-inf\")\n\n own_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(game.get_opponent(player))\n for j in range(2, 4):\n for i in range(0, 7):\n if game.move_is_legal((i, j)) or game.move_is_legal((i, j + 1)):\n break\n elif i == 6:\n own_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(game.get_opponent(player))\n # players cant be inside the partition\n print(game.to_string())\n if own_loc[1] != j and own_loc[1] != j + 1 and opp_loc[1] != j and opp_loc[1] != i + j:\n exists = True\n print(game.to_string())\n if exists:\n columns = (j, j + 1)\n break\n\n return 0",
"def test_is_partition_2(set_of_sets, alphabet):\n assert not is_partition(set_of_sets, alphabet)",
"def test_grid_init_not_decomposition_dependent(rank: int):\n nx_tile, ny_tile, nz = 48, 48, 5\n metric_terms_1by1 = MetricTerms(\n quantity_factory=get_quantity_factory(\n layout=(1, 1), nx_tile=nx_tile, ny_tile=ny_tile, nz=nz\n ),\n communicator=get_cube_comm(rank=0, layout=(1, 1)),\n )\n metric_terms_3by3 = MetricTerms(\n quantity_factory=get_quantity_factory(\n layout=(3, 3), nx_tile=nx_tile, ny_tile=ny_tile, nz=nz\n ),\n communicator=get_cube_comm(rank=rank, layout=(3, 3)),\n )\n partitioner = pace.util.TilePartitioner(layout=(3, 3))\n assert allclose(metric_terms_1by1.grid, metric_terms_3by3.grid, partitioner, rank)\n assert allclose(metric_terms_1by1.agrid, metric_terms_3by3.agrid, partitioner, rank)\n assert allclose(metric_terms_1by1.area, metric_terms_3by3.area, partitioner, rank)\n assert allclose(metric_terms_1by1.dx, metric_terms_3by3.dx, partitioner, rank)\n assert allclose(metric_terms_1by1.dy, metric_terms_3by3.dy, partitioner, rank)\n assert allclose(metric_terms_1by1.dxa, metric_terms_3by3.dxa, partitioner, rank)\n assert allclose(metric_terms_1by1.dya, metric_terms_3by3.dya, partitioner, rank)\n assert allclose(\n metric_terms_1by1.cos_sg1, metric_terms_3by3.cos_sg1, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.cos_sg2, metric_terms_3by3.cos_sg2, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.cos_sg3, metric_terms_3by3.cos_sg3, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.cos_sg4, metric_terms_3by3.cos_sg4, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.sin_sg1, metric_terms_3by3.sin_sg1, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.sin_sg2, metric_terms_3by3.sin_sg2, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.sin_sg3, metric_terms_3by3.sin_sg3, partitioner, rank\n )\n assert allclose(\n metric_terms_1by1.sin_sg4, metric_terms_3by3.sin_sg4, partitioner, rank\n )\n assert allclose(metric_terms_1by1.rarea, metric_terms_3by3.rarea, partitioner, rank)\n assert allclose(metric_terms_1by1.rdx, metric_terms_3by3.rdx, partitioner, rank)\n assert allclose(metric_terms_1by1.rdy, metric_terms_3by3.rdy, partitioner, rank)",
"def test_partition_sizes(self):\n assert self.state.partition_sizes == (3, 4, 5, 6, 7, 8, 9)",
"def test_random_partitions_by_sampling(self):\n self.run_test_random_partitions(spn.utils.random_partitions_by_sampling,\n balanced=False)\n self.run_test_random_partitions(spn.utils.random_partitions_by_sampling,\n balanced=True)",
"def test_partition2(self):\n\n inputShape = (40, 32, 2)\n inputLayer = NxInputLayer(inputShape)\n flattenLayer = NxFlatten()(inputLayer.input)\n hiddenLayer = NxDense(100, validatePartitions=True)\n outputLayer = NxDense(10, validatePartitions=True)\n model = NxModel(inputLayer.input,\n outputLayer(hiddenLayer(flattenLayer)))\n\n model.partition()\n\n model.clearTemp()",
"def test_is_partition_1(set_of_sets, alphabet):\n assert is_partition(set_of_sets, alphabet)",
"def test_partition1(self):\n\n inputShape = (5, 4)\n inputLayer = NxInputLayer(inputShape)\n outputLayer = NxConv1D(2, 3, validatePartitions=True)\n model = NxModel(inputLayer.input, outputLayer(inputLayer.input))\n\n model.partition()\n\n model.clearTemp()",
"def test_partition1(self):\n\n inputShape = (3, 3, 2)\n inputLayer = NxInputLayer(inputShape)\n flattenLayer = NxFlatten()(inputLayer.input)\n outputLayer = NxDense(10, validatePartitions=True)\n model = NxModel(inputLayer.input, outputLayer(flattenLayer))\n\n model.partition()\n\n model.clearTemp()",
"def test_split_grid(self):\n split_and_check(self.seds_trim_fname_cache, 4) # an edge case\n split_and_check(self.seds_trim_fname_cache, 3) # an odd numer\n split_and_check(self.seds_trim_fname_cache, 1) # an even number"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
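The test above delegates the actual work to MyAssignment.roundrobinpartition, which is not shown here. As a point of reference only, the pure-Python sketch below illustrates the row-cycling rule that makes the completeness, disjointness and reconstruction checks pass.

def round_robin(rows, num_partitions):
    # Row i goes to partition i % num_partitions, so the union of all partitions
    # reconstructs the input and no row lands in two partitions.
    partitions = [[] for _ in range(num_partitions)]
    for i, row in enumerate(rows):
        partitions[i % num_partitions].append(row)
    return partitions

print(round_robin(list(range(7)), 3))  # [[0, 3, 6], [1, 4], [2, 5]]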
Tests the roundrobin insert function by checking whether the tuple is inserted in the expected table you provide | def testroundrobininsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):
try:
MyAssignment.roundrobininsert(ratingstablename, userid, itemid, rating, openconnection)
except Exception:
# ignore any exceptions raised by function
pass
if not testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid):
raise Exception(
'Round robin insert failed! Couldnt find ({0}, {1}, {2}) tuple in {3} table'.format(userid, itemid, rating,
expectedtablename)) | [
"def testrangeinsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):\n try:\n MyAssignment.rangeinsert(ratingstablename, userid, itemid, rating, openconnection)\n except Exception:\n # ignore any exceptions raised by function\n pass\n if not testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid):\n raise Exception(\n 'Range insert failed! Couldnt find ({0}, {1}, {2}) tuple in {3} table'.format(userid, itemid, rating,\n expectedtablename))",
"def test_insert(self, record):",
"def multiple_insert(self, tablename, values, seqname=None, _test=False): \r\n if not values:\r\n return []\r\n \r\n if not self.supports_multiple_insert:\r\n out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]\r\n if seqname is False:\r\n return None\r\n else:\r\n return out\r\n \r\n keys = values[0].keys()\r\n #@@ make sure all keys are valid\r\n\r\n # make sure all rows have same keys.\r\n for v in values:\r\n if v.keys() != keys:\r\n raise ValueError, 'Bad data'\r\n\r\n sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))\r\n\r\n for i, row in enumerate(values):\r\n if i != 0:\r\n sql_query.append(\", \")\r\n SQLQuery.join([SQLParam(row[k]) for k in keys], sep=\", \", target=sql_query, prefix=\"(\", suffix=\")\")\r\n\r\n if _test: return sql_query\r\n\r\n db_cursor = self._db_cursor()\r\n if seqname is not False: \r\n sql_query = self._process_insert_query(sql_query, tablename, seqname)\r\n\r\n if isinstance(sql_query, tuple):\r\n # for some databases, a separate query has to be made to find \r\n # the id of the inserted row.\r\n q1, q2 = sql_query\r\n self._db_execute(db_cursor, q1)\r\n self._db_execute(db_cursor, q2)\r\n else:\r\n self._db_execute(db_cursor, sql_query)\r\n\r\n try: \r\n out = db_cursor.fetchone()[0]\r\n out = range(out-len(values)+1, out+1) \r\n except Exception: \r\n out = None\r\n\r\n if not self.ctx.transactions: \r\n self.ctx.commit()\r\n return out",
"def test_query(self):\n # want to check 1) length of result and 2) that all values in result \n # are in the generator, although it would be pretty hard for them not\n # to be\n width = True #we'll only do one here since it really doesn't matter\n gen = self.db.init_insert(101, 101, width, True)\n compareresult = self.gen_to_list(gen)\n self.sequential_inserter(width)\n \n records = 10\n streams = 10\n result = self.db.query(records, streams, True)\n self.assertEqual(len(result), records*streams)\n for x in result:\n self.assert_(x in compareresult)\n \n print(\"test_query passed\")",
"def fast_populate_inserts(self, cnx, src_table):\n\t\traise NotImplemented",
"def test_insert_backup_data(self):\n self._db.insert_backup_data(\"test.csv\")\n df = pd.read_csv(\"test.csv\")\n tables = json.loads(self._db.get_database_info())\n for table, columns in db_connection.Database.get_columns().items():\n #check that each table has the corresponding records in csv\n for _, row in df[columns].iterrows():\n for record in tables[table]:\n #find matching row in table\n if row[\"uuid\"] in record:\n #check rest of fields in row match\n assert TestDBConnection.check_row_equality(\n list(record), list(row))",
"def insert_rows(self, rows: list, table: object) -> int:\n raise NotImplementedError",
"def test_run_statement_w_homogeneous_insert_statements(self):\n from google.cloud.spanner_dbapi.checksum import ResultsChecksum\n from google.cloud.spanner_dbapi.cursor import Statement\n from google.rpc.status_pb2 import Status\n from google.rpc.code_pb2 import OK\n\n sql = \"INSERT INTO T (f1, f2) VALUES (%s, %s), (%s, %s)\"\n params = [\"a\", \"b\", \"c\", \"d\"]\n param_types = {\"f1\": str, \"f2\": str}\n\n connection = self._make_connection()\n transaction = mock.MagicMock()\n connection.transaction_checkout = mock.Mock(return_value=transaction)\n transaction.batch_update = mock.Mock(return_value=(Status(code=OK), 1))\n statement = Statement(sql, params, param_types, ResultsChecksum())\n\n connection.run_statement(statement, retried=True)\n\n self.assertEqual(len(connection._statements), 0)",
"def test_insert_empty(self):\n value = None\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertFalse(result)",
"def multiple_insert(self, tablename, values, seqname=None, _test=False):\n if not values:\n return []\n\n if not self.supports_multiple_insert:\n out = [self.insert(\n tablename, seqname=seqname,\n _test=_test, **v) for v in values]\n if seqname is False:\n return None\n else:\n return out\n\n keys = values[0].keys()\n #@@ make sure all keys are valid\n\n for v in values:\n if v.keys() != keys:\n raise ValueError, 'Not all rows have the same keys'\n\n sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' %\n (tablename, ', '.join(keys)))\n\n for i, row in enumerate(values):\n if i != 0:\n sql_query.append(\", \")\n SQLQuery.join([SQLParam(row[k]) for k in keys],\n sep=\", \",\n target=sql_query,\n prefix=\"(\",\n suffix=\")\")\n\n if _test: return sql_query\n\n db_cursor = self._db_cursor()\n if seqname is not False:\n sql_query = self._process_insert_query(sql_query, tablename,\n seqname)\n\n if isinstance(sql_query, tuple):\n # for some databases, a separate query has to be made to find \n # the id of the inserted row.\n q1, q2 = sql_query\n self._db_execute(db_cursor, q1)\n self._db_execute(db_cursor, q2)\n else:\n self._db_execute(db_cursor, sql_query)\n\n try:\n out = db_cursor.fetchone()[0]\n out = range(out - len(values) + 1, out + 1)\n except Exception:\n out = None\n\n if not self.ctx.transactions:\n self.ctx.commit()\n return out",
"def test_insert_rows(employees):\n df = insrow(employees, pos=1, values=['Paula', 23, 35])\n assert list(df.index) == [0, -1, 1, 2, 3, 4, 5, 6]\n df = insrow(employees, values=[['Paula', 23, 35], ['James', '34', 45.5]])\n assert list(df.index) == [0, 1, 2, 3, 4, 5, 6, -1, -1]\n # Error case\n with pytest.raises(ValueError):\n insrow(employees, pos=1, values=['Paula', 35])\n with pytest.raises(ValueError):\n insrow(employees, pos=1, values=[['Paula', 32, 35], ['James', '34']])",
"def test_inserts(self, values):\n s = Store()\n\n def _tx():\n d = {}\n for e, t, k, v in values:\n LookupEntry.set(s, e, t, k, v)\n d[(_lower(e), _lower(t), _lower(k))] = v\n self.assertThat(LookupEntry.get(s, e, t, k), Equals(v))\n for (e, t, k), v in d.iteritems():\n self.assertThat(LookupEntry.get(s, e, t, k), Equals(v))\n s.transact(_tx)",
"def generate_data(conn, schema):\n one_thous = sample(range(1000), 1000)\n ten_thous = sample(range(10000), 10000)\n hun_thous = sample(range(100000), 100000) \n million = sample(range(1000000), 1000000)\n for uniq2 in range(0, 100000):\n mil_vals = build_vals_tup(uniq2, million)\n insert_tuple(conn, vals=mil_vals, schema=schema, table='MILKTUP1')\n if uniq2 < 1000:\n one_vals = build_vals_tup(uniq2, one_thous)\n ten_vals = build_vals_tup(uniq2, ten_thous)\n hun_vals = build_vals_tup(uniq2, hun_thous)\n insert_tuple(conn, vals=hun_vals, schema=schema, table='HUNDREDKTUP1')\n insert_tuple(conn, vals=one_vals, schema=schema, table='ONEKTUP')\n insert_tuple(conn, vals=ten_vals, schema=schema, table='TENKTUP1')\n insert_tuple(conn, vals=ten_vals, schema=schema, table='TENKTUP2')\n elif uniq2 < 10000:\n ten_vals = build_vals_tup(uniq2, ten_thous)\n hun_vals = build_vals_tup(uniq2, hun_thous)\n insert_tuple(conn, vals=hun_vals, schema=schema, table='HUNDREDKTUP1')\n insert_tuple(conn, vals=ten_vals, schema=schema, table='TENKTUP1')\n insert_tuple(conn, vals=ten_vals, schema=schema, table='TENKTUP2')\n elif uniq2 < 100000:\n hun_vals = build_vals_tup(uniq2, hun_thous)\n insert_tuple(conn, vals=hun_vals, schema=schema, table='HUNDREDKTUP1')",
"def insert_many(self, table, **values):\n if len(values) < 1:\n # TODO: raise exception here instead of just returning\n return \n if len(values) == 1:\n self.insert(values[0])\n return\n placeholder = \",\".join([\"?\" for _ in values[0]])\n print(f\"INSERT INTO {table} VALUES {placeholder} {values}\")\n self.__cursor.executemany(f\"INSERT INTO {table} VALUES ({placeholder})\", values)\n self.__connection.commit()",
"def test_run_statement_w_heterogenous_insert_statements(self):\n from google.cloud.spanner_dbapi.checksum import ResultsChecksum\n from google.cloud.spanner_dbapi.cursor import Statement\n from google.rpc.status_pb2 import Status\n from google.rpc.code_pb2 import OK\n\n sql = \"INSERT INTO T (f1, f2) VALUES (1, 2)\"\n params = None\n param_types = None\n\n connection = self._make_connection()\n transaction = mock.MagicMock()\n connection.transaction_checkout = mock.Mock(return_value=transaction)\n transaction.batch_update = mock.Mock(return_value=(Status(code=OK), 1))\n statement = Statement(sql, params, param_types, ResultsChecksum())\n\n connection.run_statement(statement, retried=True)\n\n self.assertEqual(len(connection._statements), 0)",
"def do_multi_value_insert(conn, should_commit, tuples):\n\n part = \"\"\n for tup in tuples:\n part += \"({},{},{}),\".format(*tup)\n\n part = part[:-1] # chop last character (the trailing comma)\n insert_statement = \"INSERT INTO bob (id, fname, lname) VALUES \" + part\n\n try:\n cur = conn.cursor()\n cur.execute(insert_statement)\n if should_commit:\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return False\n\n return True",
"def insert(self, sql):",
"def test_insert_sensor_data(self):\n with open(\"test-sensor-data.json\", \"r\") as f:\n sensor_data = list(json.load(f).get(\"payload_fields\").values())\n self._db.insert_sensor_data(sensor_data)\n data_points = [\n sensor_data[i:i + 4] for i in range(0, len(sensor_data), 4)\n ]\n db_records = json.loads(self._db.query_air_pollution_data())\n for data_point in data_points:\n assert any([\n TestDBConnection.check_row_equality(data_point, record)\n for record in db_records\n ]) #check if any record matches data-point",
"def insert_record(self,list_holding_record):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
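As with the partitioning test, the insert logic lives in MyAssignment and is not shown. The sketch below only illustrates the usual partition-selection rule a round-robin insert follows; the function name and the 0-based fragment numbering are assumptions, not part of the assignment spec.

def target_partition(total_rows_so_far, num_partitions):
    # The (n+1)-th inserted row goes to fragment n % num_partitions.
    return total_rows_so_far % num_partitions

print(target_partition(10, 3))  # the 11th row would land in fragment 1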
Tests the range insert function by checking whether the tuple is inserted in the expected table you provide | def testrangeinsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):
try:
MyAssignment.rangeinsert(ratingstablename, userid, itemid, rating, openconnection)
except Exception:
# ignore any exceptions raised by function
pass
if not testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid):
raise Exception(
'Range insert failed! Couldnt find ({0}, {1}, {2}) tuple in {3} table'.format(userid, itemid, rating,
expectedtablename)) | [
"def test_insert_number_range(self):\n value = 122\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertFalse(result)",
"def test_insert_rows(employees):\n df = insrow(employees, pos=1, values=['Paula', 23, 35])\n assert list(df.index) == [0, -1, 1, 2, 3, 4, 5, 6]\n df = insrow(employees, values=[['Paula', 23, 35], ['James', '34', 45.5]])\n assert list(df.index) == [0, 1, 2, 3, 4, 5, 6, -1, -1]\n # Error case\n with pytest.raises(ValueError):\n insrow(employees, pos=1, values=['Paula', 35])\n with pytest.raises(ValueError):\n insrow(employees, pos=1, values=[['Paula', 32, 35], ['James', '34']])",
"def insert_rows(self, rows: list, table: object) -> int:\n raise NotImplementedError",
"def test_insert_empty(self):\n value = None\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertFalse(result)",
"def test_insert_number(self):\n value = 12\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertTrue(result)",
"def test_insert(self, record):",
"def test_query(self):\n # want to check 1) length of result and 2) that all values in result \n # are in the generator, although it would be pretty hard for them not\n # to be\n width = True #we'll only do one here since it really doesn't matter\n gen = self.db.init_insert(101, 101, width, True)\n compareresult = self.gen_to_list(gen)\n self.sequential_inserter(width)\n \n records = 10\n streams = 10\n result = self.db.query(records, streams, True)\n self.assertEqual(len(result), records*streams)\n for x in result:\n self.assert_(x in compareresult)\n \n print(\"test_query passed\")",
"def test_insert_dict(self):\n value = {\"value1\": 56}\n temperature = temperature_record.TempTracer()\n result = temperature.insert(value)\n self.assertFalse(result)",
"def testroundrobininsert(ratingstablename, userid, itemid, rating, openconnection, expectedtablename):\n try:\n MyAssignment.roundrobininsert(ratingstablename, userid, itemid, rating, openconnection)\n except Exception:\n # ignore any exceptions raised by function\n pass\n if not testrangerobininsert(expectedtablename, itemid, openconnection, rating, userid):\n raise Exception(\n 'Round robin insert failed! Couldnt find ({0}, {1}, {2}) tuple in {3} table'.format(userid, itemid, rating,\n expectedtablename))",
"def test_insert_invalid_index() -> None:\n ll = setup_linked_list(['cat', 'dog', 'emu', 'fox'])\n try:\n ll.insert(99, 'wombat')\n assert False\n except IndexError:\n assert True\n except:\n assert False\n try:\n ll.insert(-2, 'wombat')\n assert False\n except IndexError:\n assert True\n except:\n assert False",
"def tuple_insert(tup,val,ind, perform_checks=False):\n if perform_checks:\n L = len(tup)\n if ind>L:\n raise ValueError('Index out of bound for tuple of length ' + str(L))\n else:\n pass\n \n return tup[ :ind] + (val ,) + tup[ind: ]",
"def test_insert_sensor_data(self):\n with open(\"test-sensor-data.json\", \"r\") as f:\n sensor_data = list(json.load(f).get(\"payload_fields\").values())\n self._db.insert_sensor_data(sensor_data)\n data_points = [\n sensor_data[i:i + 4] for i in range(0, len(sensor_data), 4)\n ]\n db_records = json.loads(self._db.query_air_pollution_data())\n for data_point in data_points:\n assert any([\n TestDBConnection.check_row_equality(data_point, record)\n for record in db_records\n ]) #check if any record matches data-point",
"def test_column_outofrange(self):\n self.st.append( (0,4) )\n self.o.state = self.st\n self.assertTrue(self.o.timer == 0, \"timer is wrong\")\n self.assertTrue(self.o.state == (), \"state is wrong\")\n self.assertEqual(self.o.board.count(0), self.o.nbl*self.o.nbc,\n \"board is wrong\")",
"def hasValidRange(*args, **kwargs):\n \n pass",
"def test_too_large(self):\r\n value = 'x' * 1000\r\n self.cursor.execute(\"create table t1(s varchar(800))\")\r\n def test():\r\n self.cursor.execute(\"insert into t1 values (?)\", value)\r\n self.assertRaises(pyodbc.DataError, test)",
"def test_assertIsBetween_numbers_true(self):\n self.assertIsBetween(5,3,7)",
"def insert():\n try:\n if tab[\"tbheaders\"] == None:\n raise Exception(\"tbheaders not set\")\n except Exception:\n print t['sel_st']\n return\n # TODO, optimize for many inserts\n # rows = [ ('itm', itm, 'itm'),\n # ('itm', itm, 'itm'),\n # ('itm', itm, 'itm') ]\n # c.executemany('insert into table values (?,?,?,?,?)', rows )\n # connection.commit()\n\n # ouch the pain. Why did i do that like that.\n ret = [ tuple( cli.show(sep( t['provide'], \n \"format: (X; Y; 3; Z),\", \n tab[\"tbheaders\"])).split(\";\") \n ) \n ]\n prep = lambda: \"\".join( [ \"?,\" for x in tab[\"tbheaders\"].split(\",\") ] )[:-1]\n for item in ret:\n sql = \"INSERT INTO %s %s VALUES(%s)\" \\\n % ( tab[\"tbname\"], tab['tbheaders'], prep() )\n try:\n tab[\"cur\"].execute(sql, item)\n tab[\"conn\"].commit()\n except sqlite3.ProgrammingError:\n print t['notallowed']",
"def test_insert(self, index):\n builtin = [0, 1, 2, {\"a\": 1}]\n ds = DatasetList(builtin)\n\n builtin.insert(index, 10)\n ds.insert(index, 10)\n\n assert ds == builtin",
"def valid_cursor_position(self):\n buffer = self.view.buffer\n insert = buffer.get_insert()\n insert_iter = buffer.get_iter_at_mark(insert)\n begin, end = self.stack[-1].bounds\n begin_iter = buffer.get_iter_at_mark(begin)\n end_iter = buffer.get_iter_at_mark(end)\n \n return insert_iter.in_range(begin_iter, end_iter)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is supposed to merge all strings in the list and return the merged string | def mergeList(l):
res = ""
for x in l:
res += x
return res | [
"def concat_list(str_list):\r\n new_string = '' #this empty string will fill up with\r\n # strings\r\n for component in str_list:\r\n new_string += component + ' '#a new component is added to\r\n #the string every time.\r\n return(new_string[:-1]) #deletes the space at the end\r",
"def join_strings(word_list):\n \n # initialize a blank string for the join_strings variable\n\n join_strings = ''\n\n # add each word in a string to the join_strings variable\n\n for word in word_list:\n join_strings = join_strings + word\n\n # if there is a blank input list, the string remains empty\n\n if word_list == []:\n join_strings = ''\n\n return join_strings",
"def join(word_list: List[str]) -> str: # Problem 2\n word = \"\".join(word_list)\n return word",
"def combine_str(string_no1, string_no2):\n str_list = []\n \n for i in range(len(string_no1 + string_no2)):\n\n if i <= (len(string_no1)-1):\n str_list.append(string_no1[i])\n\n if i <= (len(string_no2)-1):\n str_list.append(string_no2[i])\n\n return (\"\".join(str_list)) # makes the list(str_list) a string ",
"def list_to_string(input_list, seperator):\n output = input_list[0]\n for item in input_list[1:]:\n output = string_concatenator(output, item, seperator)\n return output",
"def join_strings_with_comma(list_of_words):\n \n # join a list of strings together with a comma as separator, beginning with \n # the first input word and continuing sequentially\n\n strings_with_comma = list_of_words[0]\n \n # set condition for a list with only one element\n if len(list_of_words) == 1:\n strings_with_comma = list_of_words[0]\n\n # iterate through the list of words and add the next word in the list to the\n # string \n else:\n for word in list_of_words[1:]:\n strings_with_comma = \"{}, {}\".format(strings_with_comma, word) \n\n return strings_with_comma",
"def english_join(lst):\n return _join_list(lst, oxford=True)",
"def merge_strings(string_1, string_2, separator=\"|\"):\n\n if string_1 is None:\n string_1 = \"\"\n\n if string_2 is None:\n string_2 = \"\"\n\n # converting the strings to lists\n list_1 = string_1.split(separator)\n list_2 = string_2.split(separator)\n\n list_1 = list(filter(lambda item: item != '-', list_1))\n list_2 = list(filter(lambda item: item != '-', list_2))\n\n # adding the lists\n sum_lists = list_1 + list_2\n\n # 1) converting the lists to sets 2) removing empty elements\n # 3) getting the union of these sets 4) converting back to list\n merged_lists = list(set(filter(None, sum_lists)))\n\n # joining the list to a string\n result_string = separator.join(merged_lists)\n\n return result_string",
"def mergeStringLists(firstList: list, secondList: list) -> list:\n\n\t\t\tif not secondList: return firstList\t#nothing to add\n\t\t\tif not firstList: return [i for i in secondList]\t#return copy of secondList\n\n\t\t\tfor curString in secondList:\n\t\t\t\tif not curString in firstList:\n\t\t\t\t\twarnOnCloseMatch(curString, firstList)\n\t\t\t\t\tfirstList.append(curString)\n\t\t\treturn firstList",
"def combine_lines(lines_list):\n\n # Store the result string.\n result_string = \"\"\n for line in lines_list:\n result_string += (line + \"\\n\")\n\n # Leaves an additional newline at the end which is the same as in the original files.\n return result_string",
"def join(self, iterable):\r\n result = ANSIString('')\r\n last_item = None\r\n for item in iterable:\r\n if last_item is not None:\r\n result += self\r\n result += item\r\n last_item = item\r\n return result",
"def join_strings_with_comma(list_of_words):\n# I had the same problem as the first join exercise.\n is_joined = []\n for item in list_of_words:\n split = list_of_words.split(\",\")\n is_joined = list_of_words.append(split)\n return is_joined",
"def mergesort(file_list: List[str]) -> List[str]:\n pass",
"def concatenate_all_text(data_list):\n all_text = []\n for index, value in enumerate(data_list):\n all_text.extend(value['text'])\n \n return all_text",
"def StringConverter(org_list, seperator=''):\n return seperator.join(org_list)",
"def join_english(items: Iterable[Any], conj=\" and \"):\n items_list = list(items)\n if len(items_list) > 1:\n return \", \".join(str(v) for v in items_list[:-1]) + conj + str(items_list[-1])\n else:\n return \", \".join(str(v) for v in items_list)",
"def joinlist(j, mylist):\n gp = j.join(map(str, mylist))\n\n return gp",
"def merge(s1: str, s2: str) -> str:\n if len(s1) == 0 or len(s2) == 0:\n return s2 if len(s1) == 0 else s1\n else:\n if s1[0] < s2[0]:\n return s1[0] + merge(s1[1:], s2)\n else:\n return s2[0] + merge(s1, s2[1:])",
"def mix_lists(list1, list2):\n\n #Find out which list is shorter and store that length as n\n n = min(len(list1), len(list2))\n\n master_string = \"\"\n\n for i in range(n): \n master_string += list1[i] + \" \"\n master_string += list2[i] + \" \"\n\n #Disregard other tweets if one list is longer than the other so that one user \n #doesn't take over the markov tweet.\n\n return master_string"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
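The mergeList row above concatenates a list of strings with an accumulation loop. A short usage sketch, not part of the dataset, assuming mergeList is in scope as defined in that row and using a made-up parts list:

```python
# Illustrative input only; not taken from the dataset.
parts = ["hidden", " ", "markov", " ", "model"]

assert mergeList(parts) == "hidden markov model"
# The accumulation loop produces the same result as the idiomatic one-liner:
assert mergeList(parts) == "".join(parts)
```

For long lists, str.join is generally preferred over repeated += because it builds the result in a single pass.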
Initializes starting values for the initial state distribution pi and the state transition matrix A. A and pi are initialized with random starting values which satisfy the summation and nonnegativity constraints. | def initialize(n_states, x):
seed = 5340
np.random.seed(seed)
pi = np.random.random(n_states)
A = np.random.random([n_states, n_states])
    # We use softmax to satisfy the summation constraints. Since the random
# values are small and similar in magnitude, the resulting values are close
# to a uniform distribution with small jitter
pi = softmax(pi)
A = softmax(A, axis=-1)
# Gaussian Observation model parameters
    # We use k-means clustering to initialize the parameters.
x_cat = np.concatenate(x, axis=0)
kmeans = KMeans(n_clusters=n_states, random_state=seed).fit(x_cat[:, None])
mu = kmeans.cluster_centers_[:, 0]
std = np.array([np.std(x_cat[kmeans.labels_ == l]) for l in range(n_states)])
phi = {'mu': mu, 'sigma': std}
return pi, A, phi | [
"def _initialize_state_vector(self):\n np.random.seed(self.seed)\n self.initial_state = [0.0] * self.num_state_variables",
"def rand_init_state(self):\n state = np.random.random((self.lattice, self.lattice))\n state[state >= 0.5] = 1\n state[state < 0.5] = -1\n return state",
"def get_initial_distribution(self):\n initDistVec = np.zeros(self.n_states)\n initDistVec[0] = 1.0\n return initDistVec",
"def __init__(self):\n self.action_space = tuple([(pick_up,drop) for pick_up in (1,2,3,4,5) for drop in (1,2,3,4,5) if pick_up!=drop])\n self.state_space = [(loc, time, day) for loc in np.arange(1,m+1) for time in range(t) for day in range(d)]\n self.state_init = random.choice(self.state_space)\n self.state_input = (np.arange(1,m+1) , np.arange(0,t) , np.arange(0,d))\n # Start the first round\n self.reset()",
"def initialize(self):\n F = len(self.inputs[0])\n min_val = np.min(self.inputs)\n max_val = np.max(self.inputs)\n \n np.random.seed(1)\n if self.init=='random':\n # create 3D array storing initial models\n self.M = np.random.uniform(min_val, max_val, size=(self.J*self.K, F))\n self.M = np.array(self.M)",
"def initial_state(TT, RR, C, QQ):\n\n Ns = TT.shape[0]\n e = np.linalg.eigvals(TT)\n\n if all(np.abs(e) < 1.0):\n x_0 = np.linalg.inv(np.eye(Ns) - TT) @ C\n P_0 = solve_discrete_lyapunov(TT, RR @ QQ @ RR.T)\n else:\n x_0 = C\n P_0 = 1e+6 * np.eye(Ns)\n\n return x_0, P_0",
"def __init__(self):\n self.action_space = [(0, 0)] + list(permutations([i for i in range(m)], 2))\n self.action_space = [list(i) for i in self.action_space]\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()",
"def init_input(self):\n m1 = np.diagflat([-1] * (self.n - 1), -1)\n m2 = np.diagflat([-1] * (self.n - 1), 1)\n m3 = np.diagflat([self.gamma] * self.n)\n self.A = np.matrix((m1 + m2 + m3).astype(np.double))\n\n self.b = np.matrix(\n np.full((self.n, 1), self.gamma - 2).astype(np.double)\n )\n self.b[0] = self.gamma - 1\n self.b[self.n - 1] = self.gamma - 1\n\n self.x0 = np.matrix(\n np.full((self.n, 1), 0).astype(np.double)\n )",
"def set_infinite_temperature_state( self ):\n if self.pure:\n raise RuntimeError(\"Cannot set pure state to infinite temperature - Forgot to indicate non-pure?\")\n return -1\n\n # Select the identity matrix on every site\n state = np.zeros( self.L )\n\n # Reset B's and Lambda's\n self.B = {}\n self.Lambda = {}\n self.Chi = {}\n for s in np.arange(self.L):\n self.B[s] = np.zeros( (self.d[s], self.D[s-1], self.D[s]), dtype=np.complex128 )\n self.Lambda[s] = np.zeros( self.D[s] )\n self.Chi[s] = self.D[s]\n\n # Set the i'th B to have s particles\n for i,s in enumerate(state):\n self.B[i] = np.zeros( (self.d[s],1,1) )\n self.B[i][s] = 1\n self.Chi[i] = 1\n self.Lambda[i] = np.array([1])",
"def test_initial_state():\n h0 = hamiltonians.X(3)\n h1 = hamiltonians.TFIM(3)\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=1e-2)\n target_psi = np.ones(8) / np.sqrt(8)\n init_psi = adev.get_initial_state()\n assert_states_equal(init_psi, target_psi)",
"def init_matrix(num_states, num_obs):\n A = np.random.uniform(size=(num_states, num_states))\n for row in A:\n row /= np.sum(row)\n \n O = np.random.uniform(size=(num_states, num_obs))\n for row in O:\n row /= np.sum(row)\n \n # make sure that there are no zero values in the initialization\n if np.count_nonzero(A) != num_states**2 or np.count_nonzero(O) != num_states * num_obs:\n print \"Initialize again\"\n init_matrix(num_states, num_obs)\n else:\n return A, O",
"def initialize(self):\n self.lattice = 2 * np.random.randint(2, size=(self.N, self.N)) - 1",
"def _initialize_ancillae(self, ancillae_state):\n # the number of system qubits should have already been extracted and\n # stored in `num_system_qubits`\n num_ancillae = self.num_qubits - self.num_system_qubits\n if ancillae_state is not None:\n self.ancillae_state = ancillae_state\n else:\n state = qutip.tensor([qutip.basis(2, 0)\n for _ in range(num_ancillae)])\n self.ancillae_state = state",
"def initialize_priors(self):\n\n variable = np.array(self.defaults.variable)\n variable = self.defaults.variable\n if np.array(variable).dtype != object:\n variable = np.atleast_2d(variable)\n\n n = len(variable[0])\n\n if isinstance(self.mu_0, (int, float)):\n self.mu_prior = np.full((n, 1),self.mu_0)\n else:\n if len(self.mu_0) != n:\n raise FunctionError(\"Length of mu_0 ({}) does not match number of predictors ({})\".\n format(len(self.mu_0), n))\n self.mu_prior = np.array(self.mu_0).reshape(len(self._mu_0),1)\n\n if isinstance(self.sigma_0, (int, float)):\n Lambda_0 = (1 / (self.sigma_0 ** 2)) * np.eye(n)\n else:\n if len(self.sigma_0) != n:\n raise FunctionError(\"Length of sigma_0 ({}) does not match number of predictors ({})\".\n format(len(self.sigma_0), n))\n Lambda_0 = (1 / (np.array(self.sigma_0) ** 2)) * np.eye(n)\n self.Lambda_prior = Lambda_0\n\n # before we see any data, the posterior is the prior\n self.mu_n = self.mu_prior\n self.Lambda_n = self.Lambda_prior\n self.gamma_shape_n = self.gamma_shape_0\n self.gamma_size_n = self.gamma_size_0",
"def init_random(self, N: int):\r\n self.taskNames = [f\"Task {i}\" for i in range(1, N+1)]\r\n self.workerNames = [f\"Worker {i}\" for i in range(1, N+1)]\r\n self.N = N\r\n self.G = np.matrix(np.random.randint(1,N,(N,N)),dtype=float)\r\n self.G0 = np.copy(self.G)",
"def _initialize_parameters(state_machine, n_features):\n return np.zeros((state_machine.n_states \n + state_machine.n_transitions,\n n_features))",
"def _init_boids_state(self) -> np.ndarray:\n dims, n_boids, n_attrs = (self.env_dims.size, \n self.num_boids, len(Boids.Attr))\n max_vel, max_acc = self.max_vel, self.max_acc\n state = np.zeros([dims, n_boids, n_attrs], dtype=\"float\")\n\n upper = self.origin + self.env_dims//2\n lower = self.origin - self.env_dims//2\n for idx, (high, low) in enumerate(zip(upper, lower)):\n state[idx, :, Boids.Attr.LOC] = np.random.randint(low=low, high=high,\n size=n_boids)\n state[:, :, Boids.Attr.VEL] = np.random.uniform(low=-max_vel, high=max_vel,\n size=(dims, n_boids))\n state[:, :, Boids.Attr.ACC] = np.random.uniform(low=-max_acc, high=max_acc,\n size=(dims, n_boids))\n return state",
"def __init__(self, randomize_center=True, random_state=None):\n\n if random_state is None:\n random_state = numpy.random.RandomState()\n\n self.random_state = random_state\n self.randomize_center = randomize_center",
"def init_params_random(self) -> None:\n prec_m = Gamma(self.prec_alpha_prior,\n self.prec_beta_prior)\n self.precs = prec_m.sample()\n\n means_m = MultivariateNormal(loc=self.means_prior,\n precision_matrix=(self.n0 *\n self.prec_alpha_prior /\n self.prec_beta_prior\n ).diag())\n self.means = means_m.sample()",
"def __init__(self, schedule_timesteps: int, final_p: float, initial_p=1.0):\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
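The initialize row above notes that a row-wise softmax is used to satisfy the summation constraints on pi and A. A minimal sketch of that normalization step, not part of the dataset, assuming the softmax in use behaves like scipy.special.softmax:

```python
import numpy as np
from scipy.special import softmax

np.random.seed(0)
A_raw = np.random.random((3, 3))   # unconstrained random starting values
A = softmax(A_raw, axis=-1)        # normalize each row independently

print(np.allclose(A.sum(axis=-1), 1.0))  # True: each row sums to 1 (summation constraint)
print(bool((A > 0).all()))               # True: all entries are positive (non-negativity)
```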
Fit HMM parameters to observed data using the Baum-Welch algorithm | def fit_hmm(x_list, n_states):
# We randomly initialize pi and A, and use k-means to initialize phi
# Please do NOT change the initialization function since that will affect
# grading
pi, A, phi = initialize(n_states, x_list)
""" YOUR CODE HERE
Populate the values of pi, A, phi with the correct values.
"""
threshold = 1e-4
old = HmmModel(pi, A, phi)
gamma_list, xi_list = e_step(x_list, pi, A, phi)
pi, A, phi = m_step(x_list, gamma_list, xi_list)
new = HmmModel(pi, A, phi)
while new - old > threshold:
#print(new-old)
old = new
gamma_list, xi_list = e_step(x_list, pi, A, phi)
pi, A, phi = m_step(x_list, gamma_list, xi_list)
new = HmmModel(pi, A, phi)
return new.pi, new.A, new.phi | [
"def fit_predict(self,dataSet):\n\t\tnp.random.seed()\n\t\tn = dataSet.shape[0] # total number of data\n\n\t\tHMM_data = pp.scale(dataSet[[\"Var\",\"Gradient\"]])\n\n\t\tif self.nc == 3:\n\t\t\t# initialize transition matrix\n\t\t\ttransmat = np.zeros((3, 3))\n\t\t\ttransmat[0, 1] = 3.0/n\n\t\t\ttransmat[0, 0] = 1.0 - transmat[0, 1]\n\t\t\ttransmat[1, 2] = 3.0/n\n\t\t\ttransmat[1, 1] = 1.0 - transmat[1, 2]\n\t\t\ttransmat[2, 2] = 1.0\n\n\t\t\t# Force the first point is in state 0\n\t\t\tstartprob = np.array([1, 0, 0])\n\t\t\t\n\t\t\t# The state mean of variable \n\t\t\tstate_means = np.zeros((3, 2))\n\t\t\tstate_means[0, 0] = np.percentile(HMM_data[:10, 0],50)\n\t\t\tstate_means[2, 0] = np.percentile(HMM_data[-10:, 0],50)\n\t\t\tstate_means[1, 0] = (state_means[0, 0] + state_means[1, 0]) / 2.0\n\n\t\t\t# The state mean of power\n\t\t\tstate_means[0, 1] = np.percentile(HMM_data[:10, 1],50)\n\t\t\tstate_means[2, 1] = np.percentile(HMM_data[-10:, 1],50)\n\t\t\tstate_means[1, 1] = np.percentile(HMM_data[:, 1],90) / 2.0\n\n\t\telse:\n\t\t\traise ValueError(\"not implemented nc!=3\")\n\t\n\t\t\n\t\tmodel = hmm.GaussianHMM(\n\t\t\tn_components=self.nc,\n\t\t\tcovariance_type=\"diag\", # diagnoal matrix\n\t\t\tn_iter=2000,\n\t\t\tparams='cmt',\n\t\t\tinit_params='c',\n\t\t\tverbose=False)\n\n\t\tmodel.transmat_ = transmat\n\t\tmodel.means_ = state_means\n\t\tmodel.startprob_ = startprob\n\n\t\tmodel.fit(HMM_data)\n\n\t\thidden_states = model.predict(HMM_data)\n\t\th = np.array(hidden_states)\n\t\tdiff = h[1:] - h[:-1]\n\t\tchangePoint = np.argwhere(diff != 0).flatten()\n\n\t\tfeature_index = [dataSet.Gradient.argmin()] # add thermocline using gradient\n\t\n\t\tfor point in changePoint:\n\t\t\tfeature_index.append(point)\n\n\t\treturn feature_index # trm index, LEP index, UEP index",
"def model_training(train_data, tags):\n start=[]\n obs = []\n for i in range(len(train_data)):\n start.append(train_data[i].tags[0])\n obs+= train_data[i].words\n obs=list(set(obs))\n A=np.zeros([len(tags),len(tags)])\n B=np.zeros([len(tags),len(obs)])\n for i in range(len(train_data)):\n words_ = train_data[i].words\n tags_ = train_data[i].tags\n for j in range(len(tags_)):\n if j < len(tags_)-1:\n A[tags.index(tags_[j])][tags.index(tags_[j+1])] += 1\n B[tags.index(tags_[j])][obs.index(words_[j])] +=1\n A_d = np.sum(A,axis=1,keepdims=True)\n A_d[A_d==0] = 1\n B_d = np.sum(B,axis=1,keepdims=True)\n B_d[B_d==0] = 1\n A = A/A_d \n B = B/B_d\n \n count=Counter(start)\n pi=np.zeros(len(tags))\n state_dict={}\n for i in range(len(tags)):\n if tags[i] in count:\n pi[i]=count[tags[i]]/len(start)\n \n obs_dict={k: v for v, k in enumerate(obs)}\n state_dict={k: v for v, k in enumerate(tags)}\n model = HMM(pi, A, B, obs_dict, state_dict)\n ###################################################\n # Edit here\n ###################################################\n return model",
"def fit(self) -> None:\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `sigma_ols`.\n self.sigma_ols = self._compute_sigma_ols()\n\n mu_prior = np.zeros((self.m, self.N))\n for i in range(self.m):\n mu_prior[i, self.p * i] = 1\n mu_prior = mu_prior.flatten()\n\n v_prior = self._construct_v_prior()\n\n Z_sig_Z_sum = 0\n Z_sig_y_sum = 0\n\n for t in range(self.p, self.T):\n Z_t = self._construct_Zt(\n self.X, self.Y, t\n ) # shape: m x [m * (m * p + r + 1)]\n\n z_sum_term = (\n Z_t.T @ inv(self.sigma_ols)\n ) @ Z_t # shape: [m * (m * p + r + 1)] x [m * (m * p + r + 1)]\n y_sum_term = (Z_t.T @ inv(self.sigma_ols)) @ self.Y[\n :, t\n ] # shape: [m * (m * p + r + 1)] x 1\n\n assert (\n self.num_mu_coefficients,\n self.num_mu_coefficients,\n ) == z_sum_term.shape, f\"Expected {(self.num_mu_coefficients, self.num_mu_coefficients)}, got {z_sum_term.shape}\"\n assert (\n self.num_mu_coefficients,\n ) == y_sum_term.shape, (\n f\"Expected {(self.num_mu_coefficients,)}, got {y_sum_term.shape}\"\n )\n\n Z_sig_Z_sum += z_sum_term\n Z_sig_y_sum += y_sum_term\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `v_posterior`.\n self.v_posterior = inv(\n inv(v_prior) + Z_sig_Z_sum\n ) # shape: [m * (m * p + r + 1)] x [m * (m * p + r + 1)]\n assert (\n self.num_mu_coefficients,\n self.num_mu_coefficients,\n ) == self.v_posterior.shape, f\"Expected {(self.num_mu_coefficients, self.num_mu_coefficients)}, got {self.v_posterior.shape}\"\n\n # pyre-fixme[16]: `BayesianVAR` has no attribute `mu_posterior`.\n self.mu_posterior = self.v_posterior @ (\n inv(v_prior) @ mu_prior + Z_sig_y_sum\n ) # shape: [m * (m * p + r + 1)] x 1\n assert (\n self.num_mu_coefficients,\n ) == self.mu_posterior.shape, (\n f\"Expected {(self.num_mu_coefficients,)}, got {self.mu_posterior.shape}\"\n )\n # pyre-fixme[16]: `BayesianVAR` has no attribute `resid`.\n self.resid = self._get_training_residuals()\n self.fitted = True",
"def fit(self, X):",
"def HMM(X,K,loo_idx,song_idx,song_bounds):\n \n w = 6\n srm_k = 45\n nPerm = 1000\n within_across = np.zeros(nPerm+1)\n run1 = [X[i] for i in np.arange(0, int(len(X)/2))]\n run2 = [X[i] for i in np.arange(int(len(X)/2), len(X))]\n print('Building Model')\n srm = SRM(n_iter=10, features=srm_k) \n print('Training Model')\n srm.fit(run1)\n print('Testing Model')\n shared_data = srm.transform(run2)\n shared_data = stats.zscore(np.dstack(shared_data),axis=1,ddof=1)\n others = np.mean(shared_data[:,:,np.arange(shared_data.shape[-1]) != loo_idx],axis=2)\n loo = shared_data[:,song_bounds[song_idx]:song_bounds[song_idx + 1],loo_idx] \n nTR = loo.shape[1]\n\n # Fit to all but one subject\n ev = brainiak.eventseg.event.EventSegment(K)\n ev.fit(others[:,song_bounds[song_idx]:song_bounds[song_idx + 1]].T)\n events = np.argmax(ev.segments_[0],axis=1)\n\n # Compute correlations separated by w in time\n corrs = np.zeros(nTR-w)\n for t in range(nTR-w):\n corrs[t] = pearsonr(loo[:,t],loo[:,t+w])[0]\n \n # Compute within vs across boundary correlations, for real and permuted bounds\n for p in range(nPerm+1):\n within = corrs[events[:-w] == events[w:]].mean()\n across = corrs[events[:-w] != events[w:]].mean()\n within_across[p] = within - across\n \n np.random.seed(p)\n events = np.zeros(nTR, dtype=np.int)\n events[np.random.choice(nTR,K-1,replace=False)] = 1\n events = np.cumsum(events)\n\n return within_across",
"def fit_naive_bayes_model(matrix, labels):\n\n # *** START CODE HERE ***\n ###################\n ix_0 = np.isin(labels, [0])\n ix_1 = np.isin(labels, [1])\n ham_data = matrix[ix_0]\n spam_data = matrix[ix_1]\n vocab_size = matrix.shape[1]\n words_in_y_0 = np.sum(ham_data) + vocab_size\n words_in_y_1 = np.sum(spam_data) + vocab_size\n y_probs_0 = ham_data.shape[0] / (ham_data.shape[0] + spam_data.shape[0])\n y_probs_1 = spam_data.shape[0] / (ham_data.shape[0] + spam_data.shape[0])\n temp = np.sum(ham_data, axis=0)\n temp = np.sum(ham_data, axis=0) + np.ones(ham_data.shape[1])\n temp=temp/words_in_y_0\n word_probs_0=temp\n temp = np.sum(spam_data,axis=0)+np.ones(spam_data.shape[1])\n temp=temp/words_in_y_1\n word_probs_1=temp\n y_probs_0=y_probs_0\n y_probs_1=y_probs_1\n word_probs_0=np.log(word_probs_0)\n word_probs_1=np.log(word_probs_1)\n return((y_probs_0,y_probs_1,word_probs_0,word_probs_1))\n # *** END CODE HERE ***",
"def check_hmc_lr_fit(X, y):\n num_burnin = 100\n start = time.time() \n params = Params(X, y, sigma=0.01, num_leaps=10, step_size=0.01)\n sample_thetas = hmc_lr(params, 1000)\n #import pdb;pdb.set_trace()\n thetas_mean = np.mean(sample_thetas[:, num_burnin:], axis=1)\n end = time.time()\n print \"params for lr with hmc: %s\" % thetas_mean\n print \"cost time %f seconds\" % (end - start)\n \n logp = bayes_neglogloss(thetas_mean.reshape(3, 1), params)\n print \"logloss=%f\" % logp",
"def fit(self, X, Xerr):\n \n if type(X) == pd.core.frame.DataFrame:\n if type(X.columns) == pd.indexes.base.Index:\n self.labels = np.array(X.columns)\n X = X.values\n \n if self.method=='astroML':\n self.GMM.n_components=self.n_components\n self.GMM.n_iter=self.n_iter\n self.GMM.fit(X, Xerr)\n \n self.V=self.GMM.V\n self.mu=self.GMM.mu\n self.weights=self.GMM.alpha\n \n if self.method=='Bovy':\n \"\"\"\n Bovy extreme_deconvolution only imports if the method is\n 'Bovy' (this is because installation is somewhat more\n complicated than astroML, and we don't want it to be\n required)\n \n As with the astroML method, initialize with a few steps of\n the scikit-learn GMM\n \"\"\"\n from extreme_deconvolution import extreme_deconvolution\\\n as bovyXD\n \n tmp_gmm = skl_GMM(self.n_components, max_iter=10,\n covariance_type='full',\n random_state=self.random_state)\n tmp_gmm.fit(X)\n self.mu = tmp_gmm.means_\n self.weights = tmp_gmm.weights_\n self.V = tmp_gmm.covariances_\n \n logl=bovyXD(X,Xerr,self.weights,self.mu,self.V,\n tol=self.tol,maxiter=self.n_iter,w=self.w)\n self.GMM.V = self.V\n self.GMM.mu = self.mu\n self.GMM.alpha = self.weights\n \n return self",
"def _fit_model(self):\n # Determine location parameter from data\n floc = self._determine_loc()\n\n # Fit Weibull to data\n c, loc, scale = self.model.fit(self.ratio, self.c_guess, floc=floc)\n\n # Make Weibull-fitted cdf ratio\n self.fitted_ratio = self.model.pdf(self.bins, c, loc, scale)\n \n self.fitted_pars = {'c': c, 'loc': loc, 'scale': scale}\n self.pars = self.fitted_pars",
"def fit(self, x_train, w_train):\n # Store examples.\n self.x_train = x_train\n self.w_train = w_train\n\n # Estimate a prior probabilities p(wi) for each class wi.\n self.p_w = DataLoader.compute_a_priori(w_train)\n self.num_classes = len(self.p_w)\n\n return self",
"def build_hmm(training_data, unique_tags, unique_words, order, use_smoothing):\n init_distribution = compute_initial_distribution(training_data, order)\n num_of_tokens, count_tag_word, count_tags, count_tag2, count_tag3 = compute_counts(training_data, order)\n emis_prob = compute_emission_probabilities(unique_words, unique_tags, count_tag_word, count_tags)\n\n # parameters: result_matrix, order, smoothing, count_tags, num_tokens, tag_sequence1, tag_sequence2=None\n transit_prob = compute_transition_matrix(order, use_smoothing, count_tags, num_of_tokens, count_tag2, count_tag3)\n\n # parameters: order, initial_distribution, emission_matrix, transition_matrix\n hmm = HMM(order, init_distribution, emis_prob, transit_prob)\n return hmm",
"def hmm_viterbi(self):\n char_list = list(TRAIN_LETTERS) # Converting tag_set to a list to have indexes to refer\n rows = len(char_list)\n cols = len(self.test_letters)\n vit_matrix = [[None] * cols for i in range(rows)]\n\n # Storing a tuple in each cell (index of the previous cell, probability of the current cell)\n for col_index in range(len(self.test_letters)):\n curr_emission_probs = self.get_emission_probs(col_index)\n\n for row_index, curr_char in enumerate(char_list):\n # Computing the probabilities for the first column\n if col_index == 0:\n init_prob = self.init_prob[curr_char] if curr_char in self.init_prob else max_val\n vit_matrix[row_index][col_index] = (-1, curr_emission_probs[curr_char] + init_prob)\n # Computing the probabilities of the other columns\n else:\n best_prob_tuple = (-1, 200000000.0)\n for prev_row_index, prev_char in enumerate(char_list):\n prev_prob = vit_matrix[prev_row_index][col_index - 1][1]\n curr_prob = prev_prob + self.trans_prob[prev_char][curr_char] + curr_emission_probs[curr_char]\n if curr_prob < best_prob_tuple[1]:\n best_prob_tuple = (prev_row_index, curr_prob)\n vit_matrix[row_index][col_index] = (best_prob_tuple[0], best_prob_tuple[1])\n\n # Backtracking to fetch the best path\n # Finding the cell with the max probability from the last column\n (max_index, max_prob) = (-1, max_val)\n for row in range(rows):\n curr_prob = vit_matrix[row][cols - 1][1]\n if curr_prob < max_prob:\n (max_index, max_prob) = (row, curr_prob)\n\n output_list = list() # List to store the output tags\n # Adding the best path to output list\n for col in range(cols - 1, 0, -1):\n output_list.insert(0, char_list[max_index])\n max_index = vit_matrix[max_index][col][0]\n output_list.insert(0, char_list[max_index])\n print 'HMM MAP:', ''.join(output_list)",
"def _weibull_fit(self):\n if not self._is_random:\n curr_avg = np.mean(self._relmat, axis=1)\n x_val = np.array(range(0, len(curr_avg)))\n curr_model = Model(weibull_model)\n curr_model.set_param_hint('c', value = 1, min = 0, max = np.inf)\n curr_model.set_param_hint('a', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('b', value=1, min=0, max=np.inf)\n pars = curr_model.make_params()\n result = curr_model.fit(curr_avg,\n k = x_val,\n params=pars,method=\"leastsq\")\n pred_x = np.array(range(0, self._pred_x))\n self._pred_y = result.eval(params=result.params, k=pred_x)\n else:\n for i in range(0, self._shuffle_time):\n curr_idx = np.random.choice(self._rnum, self._csys, replace=False)\n curr_relmat = self._relmat[:, curr_idx]\n curr_avg = np.mean(curr_relmat, axis=1)\n x_val = np.array(range(0, len(curr_avg))) + 1\n curr_model = Model(weibull_model)\n curr_model.set_param_hint('c', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('a', value=1, min=0, max=np.inf)\n curr_model.set_param_hint('b', value=1, min=0, max=np.inf)\n pars = curr_model.make_params()\n result = curr_model.fit(curr_avg,\n k=x_val,\n params=pars, method=\"leastsq\")\n pred_x = np.array(range(0, self._pred_x))\n self._pred_y += result.eval(params=result.params, k=pred_x)\n self._pred_y /= self._shuffle_time",
"def _fit(self, X, y, w):\n pass",
"def fitDataBinomial(data, theta_init=[.1, 1]):\n\n theta = fmin(calculateWeibullNegLLBinomial, theta_init, args=(data,))\n return(theta)",
"def test_exponential_growth_model_bayesian_d_optimal_design(self):\n num_design_pts = 50\n optimal_design_samples = {10:([0.182],[1.0]),40:([0.048,0.354],[0.981,0.019]),50:([0.038,0.318],[0.973,0.027]),100:([0.019,0.215],[.962,0.038]),200:([0.010,0.134],[0.959,0.041]),300:([0.006,0.084,0.236],[0.957,0.037,0.006]),3000:([0.0006,0.009,0.055,1.000],[0.951,0.039,0.006,0.004])}\n lb2=1\n for ub2 in optimal_design_samples.keys():\n # assuming middle of parameter domain is used to find local design\n design_samples = np.linspace(0,1,num_design_pts)\n design_samples = np.sort(np.unique(np.concatenate(\n [design_samples,optimal_design_samples[ub2][0]])))\n noise_multiplier = None\n\n local_design_factors = \\\n lambda p,x: exponential_growth_model_grad_parameters(p,x).T\n xx2,ww2 = pya.gauss_jacobi_pts_wts_1D(40,0,0)\n xx2 = (xx2+1)/2*(ub2-lb2)+lb2 # transform from [-1,1] to [lb2,ub2]\n parameter_samples = xx2[np.newaxis,:]\n \n opt_problem = AlphabetOptimalDesign(\n 'D',local_design_factors,opts=None)\n \n mu,res = opt_problem.solve_nonlinear_bayesian(\n parameter_samples,design_samples[np.newaxis,:],\n sample_weights=ww2,options={'iprint': 0, 'ftol':1e-14,'disp':True,'tol':1e-12},return_full=True)\n I = np.where(mu>1e-5)[0]\n J =np.nonzero(design_samples==np.array(optimal_design_samples[ub2][0])[:,None])[1]\n mu_paper = np.zeros(design_samples.shape[0]);\n mu_paper[J] = optimal_design_samples[ub2][1]\n # published designs are not optimal for larger values of ub2\n if I.shape==J.shape and np.allclose(I,J):\n assert np.allclose(\n mu[I],optimal_design_samples[ub2][1],rtol=3e-2)\n assert (res.obj_fun(mu)<=res.obj_fun(mu_paper)+1e-6)",
"def band_fit_M1(band_definition,results_data,verbose=False):\n\n # helper functions\n def f_moment(K,J):\n \"\"\" Generate coefficient matrix row for moments.\n \"\"\"\n f0 = J\n f1 = K/(J+1)\n f2 = (-1)**(J-0.5) / (2*math.sqrt(2)) * (2*J+1)/(J+1)\n if (K == 0):\n # case K = 0\n coefficients = [f0]\n elif (K == 1/2):\n # case K = 1/2\n coefficients = [f0,f1,f2]\n else:\n # case K generic\n coefficients = [f0,f1]\n\n return coefficients\n def f_trans(K,J):\n \"\"\" Generate coefficient matrix row for transitions.\n \"\"\"\n f0 = 0\n f1 = -math.sqrt(3/(4*math.pi)) * math.sqrt((J**2-K**2)/J)\n f2 = (-1)**(J-0.5) / math.sqrt(2) * f1\n if (K == 0):\n # case K = 0\n coefficients = [f0]\n elif (K == 1/2):\n # case K = 1/2\n coefficients = [f0,f1,f2]\n else:\n # case K generic\n coefficients = [f0,f1]\n\n return coefficients\n\n # setup\n K = band_definition.K\n\n # accumulate moment entries\n A_moment = []\n b_moment = []\n for J in band_definition.J_list_M1_moment:\n A_moment.append(f_moment(K,J))\n b_moment.append(results_data.moments[(band_definition.members[J],\"M1\")])\n\n # accumulate transition entries\n A_trans = []\n b_trans = []\n for J in band_definition.J_list_M1_trans:\n A_trans.append(f_trans(K,J))\n Ji = J\n Jf = J - 1\n M = band_definition.M[Ji]\n values = np.array(results_data.get_rme(band_definition.members[Jf],band_definition.members[Ji],\"M1\",M))\n values *= band_definition.signs[(Ji,M)]*band_definition.signs[(Jf,M)]\n b_trans.append(values)\n\n # combine moment and transition arrays\n A = np.array(A_moment+A_trans,float)\n b = np.array(b_moment+b_trans,float)\n if (verbose):\n print(\"J_list_M1_moment:\",band_definition.J_list_M1_moment)\n print(\"J_list_M1_trans:\",band_definition.J_list_M1_trans)\n print(\"Coefficient matrix\")\n print(A)\n print(\"Ordinate matrix\")\n print(b)\n\n # trap blatantly insufficient system\n if ( not (\n ((K==0) and (len(b)>=1))\n or\n ((K==1/2) and (len(b)>=3))\n or\n ((K>1/2) and (len(b)>=2))\n ) ):\n parameters = np.nan*np.ones((3,4))\n return parameters\n\n # solve system\n parameters = np.linalg.lstsq(A,b,rcond=None)[0]\n\n # upgrade parameter matrix to three rows\n # zero pads for parameter a2 if not already present\n if (parameters.shape == (1,4)):\n parameters = np.append(parameters,[[0,0,0,0],[0,0,0,0]],axis=0)\n elif (parameters.shape == (2,4)):\n parameters = np.append(parameters,[[0,0,0,0]],axis=0)\n if (verbose):\n print(\"Parameter matrix\")\n print(parameters)\n\n return parameters",
"def sample_model_bpinns(models, data, model_loss, num_samples=10, num_steps_per_sample=10, step_size=0.1, burn=0, inv_mass=None, jitter=None, normalizing_const=1., softabs_const=None, explicit_binding_const=100, fixed_point_threshold=1e-5, fixed_point_max_iterations=1000, jitter_max_tries=10, sampler=hamiltorch.Sampler.HMC, integrator=hamiltorch.Integrator.IMPLICIT, metric=hamiltorch.Metric.HESSIAN, debug=False, tau_priors=None, tau_likes=None, store_on_GPU = True, desired_accept_rate=0.8, device = 'cpu', n_params_single = None, pde = False, pinns = False, epochs = 10000, params_init_val = None):\n\n if n_params_single is not None:\n params_init = torch.ones([n_params_single]).to(device)\n else:\n params_init = torch.Tensor([]).to(device)\n\n for model in models:\n params_init_net = hamiltorch.util.flatten(model).to(device).clone()\n params_init = torch.cat((params_init,params_init_net))\n\n # params_init = torch.randn_like(params_init)\n if params_init_val is not None:\n params_init = params_init_val\n\n print('Parameter size: ', params_init.shape[0])\n\n log_prob_func = define_model_log_prob_bpinns(models, model_loss, data, tau_priors, tau_likes, n_params_single = n_params_single, pde = pde)\n\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n start_time = time.time()\n if pinns:\n params = params_init.clone().detach().requires_grad_()\n optimizer = torch.optim.Adam([params], lr=step_size)\n for epoch in range(epochs):\n optimizer.zero_grad()\n loss = log_prob_func(params)\n loss = -loss\n loss.backward()\n optimizer.step()\n\n if epoch%100==0:\n print('Epoch: %d, loss: %.6f, time: %.2f' % (epoch, loss.detach().item(), time.time() - start_time))\n\n if not store_on_GPU:\n ret_params = [params.clone().detach().cpu()]\n else:\n ret_params = [params.clone()]\n\n return list(map(lambda t: t.detach(), ret_params))\n\n else:\n return hamiltorch.sample(log_prob_func, params_init, num_samples=num_samples, num_steps_per_sample=num_steps_per_sample, step_size=step_size, burn=burn, jitter=jitter, inv_mass=inv_mass, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, fixed_point_threshold=fixed_point_threshold, fixed_point_max_iterations=fixed_point_max_iterations, jitter_max_tries=jitter_max_tries, sampler=sampler, integrator=integrator, metric=metric, debug=debug, desired_accept_rate=desired_accept_rate, store_on_GPU = store_on_GPU)",
"def __init__(self, model, data, **kwargs):\n\n if not isinstance(model, BayesianModel):\n raise NotImplementedError(\"Maximum Likelihood Estimate is only implemented for BayesianModel\")\n\n super(MaximumLikelihoodEstimator, self).__init__(model, data, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
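The fit_hmm row above follows the standard Baum-Welch (EM) pattern: alternate E- and M-steps until the improvement between successive models (the new - old comparison, implemented by HmmModel, which is not shown in the row) drops below a threshold. A generic, hedged sketch of that loop, where e_step, m_step, and score are placeholder callables standing in for the row's helpers:

```python
def em_fit(params, e_step, m_step, score, threshold=1e-4, max_iter=500):
    """Generic EM loop; e_step/m_step/score are assumptions, not the dataset's code."""
    prev = score(params)
    for _ in range(max_iter):
        expectations = e_step(params)   # E-step: posterior expectations under current params
        params = m_step(expectations)   # M-step: re-estimate params from those expectations
        curr = score(params)
        if curr - prev <= threshold:    # stop once the gain in the fit score is negligible
            break
        prev = curr
    return params
```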
Returns the value inside a nested data structure located at a period-delimited path. When traversing a list, as long as that list contains objects of type dict, items in that list will have their "name" and "type" values tested against the current key in the path. | def dict_get_path(data, path, default=None):
keys = path.split(".")
for k in keys:
if type(data) == list:
found = False
for item in data:
name = item.get("name", item.get("type"))
if name == k:
found = True
data = item
break
if not found:
return default
elif type(data) == dict:
if k in data:
data = data[k]
else:
return default
else:
return default
return data | [
"def get_value_by_path(data, path):\n\n if not isinstance(data, dict) or path == '':\n return None\n\n value_keys = path.split('.')\n result = data\n\n for key in value_keys:\n if key in result.keys():\n result = result[key]\n else:\n result = None\n break\n\n return result",
"def get_deep_item(d, k, sep='.'):\n if not isinstance(k, basestring):\n raise KeyError('expected string, got {0}: {1}'.format(type(k).__name__, k))\n val = d\n # recursively look for dictionary values, then\n # return the last value\n for key in k.split(sep):\n if key and isinstance(val, Mapping) and key in val:\n val = val.__getitem__(key)\n else:\n raise KeyError(k)\n return val",
"def get_values_by_path(data, path: list) -> list:\n if len(path) == 0:\n return [data]\n elif isinstance(data, (list, tuple)):\n # Concatenate the results returned by each item in the list.\n return list(itertools.chain.from_iterable(get_values_by_path(item, path) for item in data))\n elif isinstance(data, dict):\n # Descend further down the tree.\n if path[0] in data:\n return get_values_by_path(data[path[0]], path[1:])\n else:\n return []\n else:\n # The path has not been resolved and we've reached a\n # data type we can't key into, so return no results.\n return []",
"def test_second_level_retrieval(nested_dict):\n\n l = ['first', 'second']\n\n val = get_nested_value(d=nested_dict, keys=l)\n\n assert val == {'third': {'fourth': 'leaf', 'another': 'label'} }",
"def getPathValues(d, path):\n pos = path.find('.')\n currentpath = path[0:pos] if pos > 0 else path\n nextpath = path[pos+1:] if pos > 0 else None\n lbracket = path.find('[')\n itemnum= None\n if lbracket >= 0 and (pos < 0 or lbracket < pos):\n rbracket = path.find(']')\n itemnum = int(path[lbracket + 1:rbracket])\n currentpath = path[0:lbracket]\n # keep the bracket for the next recurive depth\n nextpath = path[lbracket:] if lbracket > 0 else nextpath\n if type(d) is list:\n result = []\n if itemnum is not None:\n result.extend(getPathValues(d[itemnum], nextpath))\n else:\n for item in d:\n #still on the current path node\n result.extend(getPathValues(item, path))\n return result\n if pos < 0:\n if currentpath == '*':\n result = []\n for k, v in d.iteritems():\n result.append(v)\n return result\n return [d[currentpath]] if currentpath in d and d[currentpath] else []\n else:\n if currentpath == '*':\n result = []\n for k,v in d.iteritems():\n result.extend(getPathValues(v, nextpath))\n return result\n return getPathValues(d[currentpath], nextpath) if currentpath in d else []",
"def traverse(self, path):\n obj = self.root\n path = [_ for _ in path.split('/') if _]\n for name in path:\n try:\n obj = obj[name]\n except AttributeError:\n msg = u'Object at %s does not exist.' % '/'.join(path)\n raise ValueError(msg)\n return obj",
"def traverse(self, path, tdata = False):\n p = self.data if tdata else self.files['/']\n if tdata:\n for i in path.split('/') :\n p = p[i] if len(i) > 0 else p\n else:\n for i in path.split('/') :\n p = p['files'][i] if len(i) > 0 else p\n\t#print (p)\n return p",
"def find_json_value(json_blob, path):\n value = None\n\n node = json_blob\n key = path\n\n while node is not None and key is not None:\n if isinstance(key, int) and isinstance(node, list):\n try:\n value = node[key]\n except IndexError:\n value = None\n break\n elif isinstance(key, str) and '.' not in key:\n value = node.get(key, None)\n break\n else:\n # traverse to next level\n level_key, key = key.split('.', 1)\n\n try:\n key = int(key)\n except ValueError:\n pass\n\n try:\n # check if key is actually a list index\n index = int(level_key)\n node = node[index]\n except (\n ValueError,\n KeyError,\n ) as e:\n # key is just a str\n node = node.get(level_key, None)\n\n return value",
"def _get_values_by_path(self, doc, path):\n vals = [doc]\n for key in path.split('.'):\n # print('getting values by path %s from %s' % (key, vals))\n new_vals = []\n for val in vals:\n key = key.replace('[]', '') # Remove [] syntax in other to get values by key\n if val is None or key not in val:\n continue\n val = val[key]\n if val is None:\n continue\n elif isinstance(val, list):\n for v in val:\n new_vals.append(v)\n else:\n new_vals.append(val)\n vals = new_vals\n #\n return vals",
"def test_read_nested_val(self):\n sample_json = {'level1': {'level2': {'level3': {'int': 42}}}}\n self.assertEqual(\n chrome_defaults.get_json_field(\n sample_json, 'level1.level2.level3.int'),\n 42)",
"def get_value_from_str_dotted_key(d, dotted_key):\n keys = dotted_key.split('.')\n temp = copy.deepcopy(d)\n try:\n for key in keys:\n temp = temp[key]\n return temp\n except KeyError:\n return None",
"def process_path_value(self, dirpath, filename, key_path, val, must_exist, can_have_subdict):\n if isinstance(val, str):\n return self.relative_path(dirpath, filename, key_path, val, must_exist)\n elif isinstance(val, list):\n vals = []\n for entry in val:\n if can_have_subdict and isinstance(entry, dict):\n for subkey, subval in entry.items():\n vals.append({subkey: self.relative_path(dirpath, filename, key_path, subval, must_exist)})\n else:\n vals.append(self.relative_path(dirpath, filename, key_path, entry, must_exist))\n return vals",
"def get_val_in_dict_dotted(field: str, dicto: Dict[str, Any]) -> Any:\n try:\n if \".\" not in field: # simple field; ex: \"logical_name\", \"sha512\"\n return dicto[field] # possible KeyError/TypeError\n\n # compound field; ex: \"checksum.sha512\"\n parent, child = field.split(\".\", maxsplit=1) # ex: \"checksum\" & \"sha512\"\n\n # ex: is \"sha512\" in \"checksum\"'s dict?\n # possible KeyError/TypeError\n return get_val_in_dict_dotted(child, dicto[parent])\n\n except (KeyError, TypeError) as e:\n raise DottedKeyError() from e",
"def __getitem__(self, name_or_path):\n if isinstance(name_or_path, (str, Feature)):\n return dict.__getitem__(self, name_or_path)\n elif isinstance(name_or_path, tuple):\n try:\n val = self\n for fid in name_or_path:\n if not isinstance(val, FeatStruct):\n raise KeyError # path contains base value\n val = val[fid]\n return val\n except (KeyError, IndexError) as e:\n raise KeyError(name_or_path) from e\n else:\n raise TypeError(self._INDEX_ERROR % name_or_path)",
"def GetValueAtFieldPath(protobuf, fieldpath, log):\n fieldname = fieldpath.rsplit(\".\", 1)[-1]\n pelt = path_utils.PathElement.FromText(fieldname)\n\n field_owner = _NavigateToField(protobuf, fieldpath, log)\n if pelt.is_repeated():\n return getattr(field_owner, pelt.name)[pelt.index]\n else:\n return getattr(field_owner, pelt.name)",
"def _retrieve_value(self, data, value):\n logging.info('Getting value for {}'.format(value))\n retrieve_data = []\n m_data = DotMap(data)\n if value.startswith('set('):\n retrieve_data = value[4:-1].split(\",\")\n elif value.startswith('values('): # TODO: nested values e.g. values(values(ids))\n search_value = re.search('{}(.*){}'.format('\\(', '\\)'), value).group(1)\n unique_list = search_value.split('.')\n m_data_cp = m_data.copy()\n for attr in unique_list:\n m_data_cp = getattr(m_data_cp, attr)\n retrieve_data = list(m_data_cp.values())\n elif ':' in value:\n obj_ref = getattr(m_data, value.split(':')[0])\n if obj_ref:\n included = value.split(':')[1]\n included = '/' + included.replace('.', '/')\n ref_data = self.wsClient.get_objects2({'objects': [{'ref': obj_ref,\n 'included': [included]}]})['data'][0]['data']\n m_ref_data = DotMap(ref_data)\n if ref_data:\n if '*' not in included:\n for key in included.split('/')[1:]:\n m_ref_data = getattr(m_ref_data, key)\n else:\n keys = included.split('/')[1:]\n m_ref_data = [x.get(keys[2]) for x in ref_data.get(keys[0])] # TODO: only works for 2 level nested data like '/features/[*]/id'\n\n retrieve_data = list(m_ref_data)\n else:\n unique_list = value.split('.')\n m_data_cp = m_data.copy()\n for attr in unique_list:\n m_data_cp = getattr(m_data_cp, attr)\n retrieve_data = list(m_data_cp)\n\n logging.info('Retrieved value (first 20):\\n{}\\n'.format(retrieve_data[:20]))\n\n return retrieve_data",
"def valueForKeyPath(d, key, separator=\".\", default=None):\n return valueForKeyList(d, key.split(separator), default)",
"def get(self, path, default = None, deep = False):\n\n try:\n pos = str(path).index('[');\n except ValueError:\n pos = False;\n if ( not deep or False is pos) :\n if path in self._parameters:\n return self._parameters[path];\n else:\n return default;\n\n\n root = str(path)[0:pos];\n if not root in self._parameters :\n return default;\n\n\n value = self._parameters[root];\n currentKey = None;\n i = pos - 1;\n for char in range(len(path)):\n i += 1;\n if ('[' == char) :\n if (None is not currentKey) :\n raise InvalidArgumentException(\n 'Malformed path. Unexpected \"[\" at position {0}.'\n ''.format(str(i))\n );\n\n\n currentKey = '';\n elif (']' == char) :\n if (None is currentKey) :\n raise InvalidArgumentException(\n 'Malformed path. Unexpected \"]\" at position {0}.'\n ''.format(str(i))\n );\n\n if isinstance(value, list):\n value = Array.toDict(value, True);\n if not isinstance(value, dict) or currentKey not in value :\n return default;\n\n\n value = value[currentKey];\n currentKey = None;\n else :\n if (None is currentKey) :\n raise InvalidArgumentException(\n 'Malformed path. Unexpected \"{0}\" at position {1}.'\n ''.format(char, str(i))\n );\n\n\n currentKey += char;\n\n\n\n if (None is not currentKey) :\n raise InvalidArgumentException(\n 'Malformed path. Path must end with \"]\".'\n );\n\n\n return value;",
"def get_subdict(D, path_vec):\n if path_vec:\n try:\n return get_subdict(D[path_vec[0]], path_vec[1:])\n except:\n print(f'problem accessing subpath {path_vec} of dictionary in get_subdict')\n else:\n return D"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
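A usage sketch for the dict_get_path row above, not part of the dataset. The nested config, key names, and paths are invented for illustration, and dict_get_path is assumed to be in scope as defined in that row:

```python
config = {
    "servers": [
        {"name": "web", "port": 80},
        {"type": "db", "port": 5432},
    ]
}

print(dict_get_path(config, "servers.web.port"))        # 80    (list item matched on its "name")
print(dict_get_path(config, "servers.db.port"))         # 5432  (falls back to matching "type")
print(dict_get_path(config, "servers.cache.port", -1))  # -1    (default returned when nothing matches)
```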
Measure CPU consumption of the firefox-bin process | def measure_cpu(url):
print (url)
os.system('echo %s >> ./cpu-usage' % url)
cpu_list = []
# Loop
for i in range(90):
cpu = os.popen("top -n 1 | grep /usr/lib/chromium/chrome | head -1 | awk '{print $8;}'").read().strip()
time.sleep(1)
cpu_list.append(cpu)
os.system('echo %s >> ./cpu-usage' % cpu_list)
# os.system("ps -eo pcpu,pid,user,args | grep 'firefox-bin' | sort -k1 -r -n | head -1 | awk '{print $1;}'")
# os.system("ps -eo pcpu,pid,user,args | sort -k1 -r -n | head -1 | awk '{print $1;}'") | [
"def monitor_cpu():\n return psutil.cpu_percent(interval=0.4)",
"def monitor_cpu(self) -> None:\n last_write = 0\n _ = psutil.cpu_percent()\n _ = self.process.cpu_percent()\n system_usage = list()\n process_usage = list()\n\n process_cpu_times = self.process.cpu_times()\n process_times = dict()\n for field in process_cpu_times._fields:\n if not field.startswith(\"children_\"):\n process_times[field] = list()\n\n cpu_infos = {\n \"system_usage\": system_usage,\n \"process_usage\": process_usage,\n \"process_times\": process_times,\n \"time_step\": self.time_step\n }\n\n while not self.stop_event.is_set():\n time.sleep(self.time_step)\n system_usage.append(psutil.cpu_percent())\n process_usage.append(self.process.cpu_percent())\n process_cpu_times = self.process.cpu_times()\n\n for k in process_times.keys():\n process_times[k].append(getattr(process_cpu_times, k))\n\n if time.time() >= last_write + self.write_interval:\n self._log_to_file(cpu_infos)\n last_write = time.time()\n self._log_to_file(cpu_infos)",
"def CpuUsageTimer(self):\n (new_used, new_total) = self._ParseProcStat()\n total = new_total - self.cpu_total\n used = new_used - self.cpu_used\n if total == 0:\n self.cpu_usage = 0.0\n else:\n self.cpu_usage = (used / total) * 100.0\n self.cpu_total = new_total\n self.cpu_used = new_used",
"def _monitor(self):\n # Ubuntu systems typically mount cpuacct cgroup in /sys/fs/cgroup/cpu,cpuacct,\n # but this can vary by OS distribution.\n all_cgroups = subprocess.check_output(\n \"findmnt -n -o TARGET -t cgroup --source cgroup\".split(), universal_newlines=True\n ).split(\"\\n\")\n cpuacct_root = [c for c in all_cgroups if \"cpuacct\" in c][0]\n memory_root = [c for c in all_cgroups if \"memory\" in c][0]\n logging.info(\"Using cgroups: cpuacct %s, memory %s\", cpuacct_root, memory_root)\n self.min_memory_usage_gb = None\n self.max_memory_usage_gb = None\n\n with open(self.output_path, \"w\") as output:\n while self.keep_monitoring:\n # Use a single timestamp for a given round of monitoring.\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n for c in self.containers:\n cpu = self._metrics_from_stat_file(cpuacct_root, c, \"cpuacct.stat\")\n memory = self._metrics_from_stat_file(memory_root, c, \"memory.stat\")\n if cpu:\n output.write(\"%s %s cpu %s\\n\" % (now, c.id, cpu))\n if memory:\n output.write(\"%s %s memory %s\\n\" % (now, c.id, memory))\n output.flush()\n\n # Machine-wide memory usage\n m = used_memory()\n if self.min_memory_usage_gb is None:\n self.min_memory_usage_gb, self.max_memory_usage_gb = m, m\n else:\n self.min_memory_usage_gb = min(self.min_memory_usage_gb, m)\n self.max_memory_usage_gb = max(self.max_memory_usage_gb, m)\n time.sleep(self.frequency_seconds)",
"def get_cpu_usage():\n return psutil.cpu_percent()",
"def cpu_percent():\n return psutil.cpu_percent()",
"def displayCpuReport(self):\n cpuReport = self.getTopCpu()\n self.pprint.white('\\tTop CPU Consuming Processes : ')\n self.displayReport(cpuReport)\n print('')",
"def CPUStats(self):\r\n\t\t# From <http://ubuntuforums.org/showthread.php?t=148781>\r\n\t\ttime_list = cat(\"/proc/stat\").split(\"\\n\")[0].split(\" \")[2:6]\r\n\t\tres = map(int, time_list)\r\n\t\tself.LAST_CPU_STAT = res\r\n\t\treturn res",
"def cpu_info():\n cpu_info = subprocess.Popen(['lscpu'], stdout=PIPE)\n std_out = cpu_info.communicate()[0]\n return std_out.decode('utf-8')",
"def getHostCpuCount():\n return _j.getHostCpuCount()",
"def test_cpu():\n console = Console(_abs_path('nestest.nes'))\n ms6502 = console.cpu\n\n benchmark = open(_abs_path('benchmark.txt'))\n ms6502.memory.write(0x0180, 0x33)\n ms6502.memory.write(0x017F, 0x69)\n #ms6502.memory.write(0xA9A9, 0xA9)\n\n ms6502.memory.write(0x4004, 0xFF)\n ms6502.memory.write(0x4005, 0xFF)\n ms6502.memory.write(0x4006, 0xFF)\n ms6502.memory.write(0x4007, 0xFF)\n ms6502.memory.write(0x4015, 0xFF)\n ms6502.pc = 0xC000\n total_cycles = 0\n nb_fails = 0\n while(True):\n debug_data = ms6502.step(debug=True)\n string = debug_data['PC'] + \" \"\n string += debug_data['opcode'] + \" \"\n for arg in debug_data['args']:\n string += arg + \" \"\n string = string.ljust(15)\n string += debug_data['mneumonic']\n string = string.ljust(48)\n string += debug_data['A'] + \" \" + debug_data['X'] + \" \" + debug_data['Y'] + \" \" + debug_data['P'] + \" \"\n string += debug_data['SP'] + \" \"\n cyc_string = str(total_cycles % 341).rjust(3)\n string += \"CYC:{}\".format(cyc_string)\n total_cycles += 3*int(debug_data['cycles'])\n\n benchmark_line = benchmark.readline()[:81]\n if(string != benchmark_line):\n print(\"CPU: \" + string)\n print(\"Expected: \" + benchmark_line)\n nb_fails += 1\n if nb_fails == 1:\n exit(0)\n else:\n print(string)",
"def clocku():\r\n return resource.getrusage(resource.RUSAGE_SELF)[0]",
"def clock():\n\n u,s = resource.getrusage(resource.RUSAGE_SELF)[:2] \n return u+s",
"def clocks():\r\n return resource.getrusage(resource.RUSAGE_SELF)[1]",
"def perf(cmd=['sleep','1'], rep=1):\n command = ['perf', 'stat', '-x', '^','-r', str(rep)]\n command.extend(cmd)\n\n result = subprocess.run(command, stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)\n \n if result.returncode != 0:\n raise Exception(\"Running benchmark failed as perf returned a \"\n \"non-zero return code. Here are some hints \"\n \"from perf's stderr: {}\".format(result.stderr))\n \n try:\n return float(result.stderr.split(b'^')[0].decode(\"utf-8\"))/1000\n except:\n raise Exception(\"Running benchmark failed for \"\n \"some reason. Here are some hints \"\n \"from perf's stderr: {}\".format(result.stderr))",
"def overhead():\n pass\n # Running OOMMF through oommfc.\n #system = oc.examples.macrospin()\n #td = oc.TimeDriver()\n #oommfc_start = time.time()\n #td.drive(system, t=1e-12, n=1, overwrite=True)\n #oommfc_stop = time.time()\n #oommfc_time = oommfc_stop - oommfc_start\n\n # Running OOMMF directly.\n #oommf_runner = get_oommf_runner()\n #mifpath = os.path.realpath(os.path.join('example-macrospin', 'drive-0',\n # 'example-macrospin.mif'))\n #oommf_start = time.time()\n #oommf_runner.call(mifpath)\n #oommf_stop = time.time()\n #oommf_time = oommf_stop - oommf_start\n #shutil.rmtree('example-macrospin')\n\n #return oommfc_time - oommf_time",
"def get_host_cpu_time(self):\n with open('/proc/stat', 'r') as f:\n values = [float(x) for x in f.readline().split()[1:8]]\n return sum(values), sum(values[0:3])",
"def monitor_ram():\n memory_dict = dict(psutil.virtual_memory()._asdict())\n return memory_dict[\"free\"] / memory_dict[\"total\"] * 100.0",
"def system_load():\n cat = subprocess.Popen(['cat', '/proc/loadavg'], stdout=PIPE)\n std_out = cat.communicate()[0]\n std_out = std_out.decode('utf-8')\n return std_out.rstrip()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
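The measure_cpu row above samples CPU usage by shelling out to top once per second. As a hedged alternative sketch, not the dataset's method, the same sampling can be done with psutil; the process name, sample count, and interval mirror the row but are adjustable assumptions:

```python
import time
import psutil

def sample_cpu(process_name="chromium", samples=90, interval=1.0):
    """Collect one aggregate CPU-percent reading per interval for matching processes."""
    procs = [p for p in psutil.process_iter(["name"]) if process_name in (p.info["name"] or "")]
    readings = []
    for _ in range(samples):
        total = 0.0
        for p in procs:
            try:
                # First call per process returns 0.0; later calls report usage since the previous call.
                total += p.cpu_percent(interval=None)
            except psutil.NoSuchProcess:
                pass  # process exited while sampling
        readings.append(total)
        time.sleep(interval)
    return readings
```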
Recover the original data for each point of the sequence. | def recover_original_data(idx_seq, dataset):
recover = dataset[idx_seq]
return recover | [
"def reset(self):\r\n self.array=self.original\r\n self.original=list(self.original)\r\n return self.array",
"def test_prep_recover_data(self):\n u = np.random.rand(12, 17, 73, 144)\n up, uinfo = prep_data(u, 'tzyx')\n ur = recover_data(up, uinfo)\n err = error(u, ur)\n assert_almost_equal(err, 0.)",
"def _undo_trajectory(self):\n for t in self._traj:\n self._mask[t] = 0",
"def _undo_trajectory(self):\n for t in self._traj:\n self._mask.__setitem__(t, 0)",
"def clean_data(self):\r\n self.all_data.drop(len(self.all_data) - 1, inplace = True)",
"def data_reset(self):\n # ic()\n self.arches.clear()\n self.arch_ids.clear()\n self.data_1d.clear()\n self.data_2d.clear()\n self.new_scan = True",
"def unbind_data(self):\n self.trajectory_df = pd.DataFrame()",
"def swap(self, signal):\n to_save = np.empty(self.n_ex, self.data.dtype)\n for elo, ehi, lo, hi in self.offset_iter():\n to_save[elo:ehi] = signal[lo:hi]\n signal[lo:hi] = self.data[elo:ehi]\n self.data[:] = to_save",
"def deal_datas(self):\n if self.need_previous:\n data = self.get()\n while data is not None:\n out_data = self.deal_a_data(data)\n if out_data is not None:\n self.put(out_data)\n data = self.get()\n else:\n pass",
"def deep_prior_restore_image(corrupted_image):\n im_shape = corrupted_image.shape\n base_image = np.random.random(size=(1,) + im_shape) * 2 - 1\n base_image = np.expand_dims(base_image, 3)\n corrupted_img_batch = np.expand_dims(corrupted_image, 0)\n corrupted_img_batch = np.expand_dims(corrupted_img_batch, 3)\n\n FIT_PARAMS = {\n 'x': base_image,\n 'y': corrupted_img_batch,\n 'epochs': 180,\n 'batch_size': 1,\n 'verbose': 0\n }\n autoencoder = build_decoder()\n autoencoder.fit(**FIT_PARAMS)\n return deprocess(autoencoder.predict(base_image)[0])",
"def recompute(self):\n if not self.samples:\n print('Error: Data has not been loaded yet!')\n else:\n for sample in self.samples:\n ret, normed_time_series = cyclic_analysis(sample['TimeSeries'], p=1, normalize=self.norm,\n trend_removal=self.trend_removal)\n lm, phases, perm, sorted_lm, eigenvalues = ret\n cm = np.corrcoef(normed_time_series)\n (_, n) = lm.shape\n sample['SLM'] = sorted_lm\n sample['ULM'] = lm\n sample['Eigenvalues'] = eigenvalues\n sample['Phases'] = phases\n sample['Permutation'] = perm\n sample['CM'] = cm\n sample['NormedTS'] = normed_time_series\n sample['FlatULM'] = lm[np.triu_indices(n, 1)]\n sample['FlatSLM'] = sorted_lm[np.triu_indices(n, 1)]\n sample['FlatCM'] = cm[np.triu_indices(n, 1)]\n\n self.reset()",
"def reset(self):\n self.csr.data[:] = 0",
"def _corrupt_input(self, data):\n corruption_ratio = np.round(\n self.corr_frac * data.shape[1]).astype(np.int)\n\n if self.corr_type == 'none':\n return np.copy(data)\n\n if self.corr_frac > 0.0:\n if self.corr_type == 'masking':\n return utilities.masking_noise(\n data, self.tf_session, self.corr_frac)\n\n elif self.corr_type == 'salt_and_pepper':\n return utilities.salt_and_pepper_noise(data, corruption_ratio)\n else:\n return np.copy(data)",
"def data_expl2(self):\r\n data = self.data_train.copy()\r\n data1 = self.labels_train\r\n \r\n data[len(data.columns)+1] = data1.iloc[:,0][0:len(data)]\r\n \r\n #Drop n/a correlations\r\n corr = data.corr()\r\n corr = pd.DataFrame(corr)\r\n corr = corr.dropna(how = 'all')\r\n corr = corr.dropna(axis = 1, how = 'all')\r\n \r\n corr_miss = corr.isnull().sum()/len(corr)\r\n corr_miss = corr_miss[corr_miss > 0]\r\n corr_miss.sort_values(inplace=True)\r\n \r\n self.corr = corr\r\n self.corr_miss = corr_miss\r\n \r\n #Highest and lowest correlated parameters\r\n self.label_high = corr.iloc[:,-1].sort_values(ascending=False)[:30]\r\n self.label_low = corr.iloc[:,-1].sort_values(ascending=False)[-10:]",
"def __fall_back(self, line):\n line.allx = line.prev_x\n line.ally = line.prev_y\n line.detected = False\n line.position = line.prev_position\n line.detection_counter += 1",
"def _did_reset(self):\n # skip frames and seed the random number generator\n seed = random.randint(0, 255), random.randint(0, 255)\n for _ in range(14):\n self.ram[0x0017:0x0019] = seed\n self._frame_advance(0)",
"def restore(self):\n\n # Restore the sets\n try:\n self.mr.master_atoms_mapped.discard(self.mr.last_mapped[1])\n self.mr.sub_atoms_mapped.discard(self.mr.last_mapped[0])\n self.mr.atom_mapping.discard(self.mr.last_mapped)\n except IndexError:\n # happens if there was no last added atom\n pass\n # Reset the last mapped\n try:\n self.mr.last_mapped = self.mapping_stack.pop()\n except IndexError:\n # Happens if there is no backup\n pass",
"def transform(self, epochs):\n if len(self.n_interpolate_) == 0:\n raise ValueError('Please run autoreject.fit() method first')\n\n _check_data(epochs, picks=self.picks, verbose=self.verbose)\n old_picks = self.local_reject_.picks\n self.local_reject_.picks = self.picks\n\n epochs_clean = self.local_reject_.transform(epochs)\n\n self.local_reject_.picks = old_picks\n return epochs_clean",
"def reset(self) -> None:\n self.noise_prev = self.initial_noise if self.initial_noise is not None else np.zeros_like(self._size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
van der Pol equation | def vanDerPol():
pars = {'eps': 1.0, 'a': 0.5}
dsargs = {
'name': 'vanDerPol',
'pars': pars,
'varspecs': {
'x': '(y - (x * x * x / 3 - x)) / eps',
'y': 'a - x',
},
'ics': {
'x': pars['a'],
'y': pars['a'] - pow(pars['a'], 3) / 3,
},
'algparams': {'max_step': 1e-2, 'max_pts': 30000}
}
# TODO: add expected result
return dsargs, None | [
"def _lorentz(x,p,w):\n return 1./(1.+((p-x)/(w/2.))**2)",
"def vinvpol(x, t, p):\n\t\n u, v, w, q = x\n mu1, a1, b1, c1, e1, mu2, a2, b2, c2, e2 = p\n\n #The velocity function v = d(u,v,w,q)/dt:\n vel = [2*mu1*u + 2*a1*u**2 + 2*b1*u*v + c1*w,\n \t 2*mu2*v + 2*a2*u*v + 2*b2*v**2 + c2*w,\n \t(2*mu1+mu2)*w + (2*a1+a2)*u*w + (2*b1+b2)*v*w + 4*c1*u*v+2*c2*u**2-e2*q,\n \t(2*mu1 + mu2)*q + (2*a1 + a2)*u*q + (2*b1 + b2)*v*q + e2*w]\n\n return vel",
"def lj_p(r_a):\r\n \r\n func = ((r_a)**(-12)-(r_a)**(-6))\r\n \r\n return func",
"def ode(u: float) -> float:\n return u ** 2",
"def _derivadot(self, a):\n #verified correct by putting 5 different a's into mathematica and comparing.\n numerator = - (self._Om) + 2 * (1 - self._Om) * (a ** 3)\n denominator = 2 * np.sqrt((a ** 3) * (self._Om) + (1 - self._Om) * (a ** 6))\n return numerator/denominator",
"def parabolaconstant(self):\n if self.g1 and self.g2 and self.curvecheck:\n return ((self.g2-self.g1)/(2*self.curvecheck()))",
"def lorentzian(params, x):\n return params[0] + params[1] / ((x - params[2]) ** 2 + (0.5 * params[3]) ** 2)",
"def gOfPhi(p):\r\n return 1 / math.sqrt(1 + 3 * p **2 / math.pi **2)",
"def reactor_pressure_deriv(self):\n deriv = np.zeros((2, 5 + self.num_vars, self.num_nw_vars))\n # derivatives for pressure oxygen inlet\n deriv[0, 1, 1] = -1\n deriv[0, 4, 1] = 1\n # derivatives for pressure hydrogen inlet\n deriv[1, 2, 1] = -1\n deriv[1, 4, 1] = 1\n\n return deriv",
"def roexpr(g, p, r):\n W = (1 - r) * (1 + p * g) * np.exp(-1 * p * g) + r\n return W",
"def F(self,t,z,p):\n return 0.*z",
"def evaluate_lambda_poly(x, ps):\r\n if not isinstance(x, np.ndarray):\r\n x = np.array(x)\r\n result = np.ones(len(x))\r\n for p in ps:\r\n result = result * (x - p)\r\n return result.astype(np.float)",
"def equation_p(self):\n\t\treturn f\"{self.a}x + {self.b}y + {self.c}z − {self.d} = 0\"",
"def RV_star(dp):\n from lib.utils import typetest\n import numpy as np\n p=phase(dp)\n K=paramget('K',dp)\n typetest('K',K,float)\n rv=K*np.sin(2.0*np.pi*p) * (-1.0)\n return(rv)",
"def E(self,t,z,p,v):\n return z",
"def degenerate_triangle_equation(self, u, v, w):\n return self.lam(u,v) - self.lam(u,w) - self.lam(w,v)",
"def dzdypartial(self, x, y, rang, murvir):\n\n return 10**(2*y)*rang**2 / ((murvir + rang*10**y) *\n (murvir + rang*10**y) * np.log(1. + 10**y*rang/murvir)\\\n - rang*10**y)\\\n - 10**(2*y) / ((1. + 10**y)*((1. + 10**y)*np.log(1. + 10**y)\\\n - 10**y))",
"def ECG(self, LoP):\r\n \r\n volt = self.V.reshape(self.L,self.L)\r\n \r\n numerator = (((self.x[1:, 1:] - LoP[0]) * (volt[1:,1:] - volt[1:, :-1])) - \r\n ((self.y[1:, 1:] - LoP[1]) * (volt[1:,1:] - volt[:-1, 1:])))\r\n \r\n denominator = (((self.x[1:, 1:] - LoP[0])**2) +\r\n ((self.y[1:, 1:] - LoP[1])**2))**(3./2)\r\n \r\n values = float(numerator)/denominator\r\n ECG_value1 = sum(values.flatten())\r\n \r\n return ECG_value1",
"def polyMod(p, d):\n\tq, r = polyDivMod(p, d) # q is just a dummy\n\treturn r"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that resets the user's saved file | def reset_user_saved_file():
with open("./assets/vegan_cosmetics_saved.txt", "w") as file:
file.write("") | [
"def reset(self):\n\t\tf = open(self.file_path, \"w+\")\n\t\tf.close()",
"def reset(self):\n if self.path.exists():\n warn_msg =(f\"WARNING:\\n \"\n f\"This will overwrite file at {self.path}...\\n \"\n f\"Type 'DELETE' to continute.\")\n confirmation = input(warn_msg)\n\n if confirmation != \"DELETE\":\n print(\"File not reset.\")\n return # Exit without overwritting file\n\n else:\n print(f\"File at {self.path} was reset.\")\n\n self._init_new_file()",
"def reset():\n session_functions.reset() # Reset the session and session folder\n session_functions.init() # Initialize the new session\n return redirect(url_for('upload'))",
"def resetAccounts():\n \n if debug: print(\"resetAccounts\")\n\n if path.exists(accountsFile): remove(accountsFile)\n with open(accountsFile, 'w') as fp:\n fp.write(\"name,sent\\n\")\n for i in [x for j, x in enumerate(range(18)) if j != 4]:\n if i == 0:\n fp.write(\"edersemelhante@gmail.com,\")\n else:\n fp.write(\"edersemelhante\" + str(i) + \"@gmail.com,\")\n fp.write(\"0\\n\")\n fp.close()\n\n return True",
"def deleteUser(self):\r\n os.system(\"attrib -h -s -r \" + tempfile.gettempdir() + r\"\\temp.temp\")\r\n os.system(\"del \" + tempfile.gettempdir() + r\"\\temp.temp\")",
"def resetAutoSave(self):\n self.autoSaveTimer.stop()\n minutes = globalref.genOptions.getValue('AutoSaveMinutes')\n if minutes and self.modified:\n self.autoSaveTimer.start(60000 * minutes)\n else:\n self.deleteAutoSaveFile()",
"def reset_user(self):\n self.user_model = None",
"def reset(\n self,\n username: Optional[str],\n password: Optional[str],\n ) -> None:\n self.password_mgr.rb_user = username\n self.password_mgr.rb_pass = password\n self.used = False",
"def save( self ):\n if self._userSettings:\n\n from ToolBOSCore.Packages.CopyrightHeader import getCopyrightHeader\n\n content = getCopyrightHeader( 'python', 'User preferences' )\n\n for key, value in sorted( self._userSettings.items() ):\n value = repr( value ) # write object as Python code\n\n content += '%s = %s\\n\\n' % ( key, value )\n\n content += '\\n# EOF\\n'\n\n logging.debug( 'writing %s', self._userFile )\n FastScript.setFileContent( self._userFile, content )\n\n else:\n # delete entire file if there are no settings left\n\n logging.debug( 'deleting empty configfile' )\n FastScript.remove( self._userFile )",
"def reset_current_input():\n\n file_name = '%s/%s/%s' % (sublime.packages_path(),\n \"TextTransmute\",\n \"Data.sublime-project\")\n with open(file_name, \"w\"):\n pass",
"def reset_user_status(self, username):\n if not username in self._users.keys():\n return \n self._users[username].reset_status()",
"def update_accounts(self):\r\n pickle.dump(self.accounts, open(\"accounts.p\", \"wb\")) #updates the pickle file with any new info recently added after logoff\r",
"def __save(self):\n\n write_file(path.join(path_wallet, self.__user + '_wallet.txt'),\n self.__user + '\\n'\n + self.__keys_filename)",
"def clearRecentFiles(self): \n self._recentFiles = []\n self._saveIni()\n self.updateMenu()",
"def delete_user():\n os.remove(_user_path())",
"def clear(self):\n self.file.seek(0)\n self.file.truncate()\n self.file.close()\n self.open()",
"def deleteAutoSaveFile(self):\n filePath = self.filePath + '~'\n if self.filePath and os.path.exists(filePath):\n try:\n os.remove(filePath)\n except OSError:\n QtGui.QMessageBox.warning(self.activeWindow, 'TreeLine',\n _('Error - could not delete backup file {}').\n format(filePath))",
"def clear_student_state(self, *args, **kwargs):\n # pylint: disable=unused-argument\n student_id = kwargs['user_id']\n for submission in submissions_api.get_submissions(\n self.get_student_item_dict(student_id)\n ):\n submission_file_sha1 = submission['answer'].get('sha1')\n submission_filename = submission['answer'].get('filename')\n submission_file_path = self.file_storage_path(submission_file_sha1, submission_filename)\n if default_storage.exists(submission_file_path):\n default_storage.delete(submission_file_path)\n submissions_api.reset_score(\n student_id,\n self.block_course_id,\n self.block_id,\n clear_state=True\n )",
"def __save_current_user_data(self):\n np.save(self.file_names['data'], self.data)\n np.save(self.file_names['users_list'], self.users_list)\n np.save(self.file_names['user_indexes'], self.user_indexes)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that resets the user's favorite list | def reset_user_fav_list():
user_fav_list = []
return user_fav_list | [
"def rm_favourite():\n\n user_id = request.args['user_id']\n photo_id = request.args['photo_id']\n\n remove_favourite(user_id, photo_id)\n\n flash(\"Picture was deleted from your favourites!\")\n return redirect(url_for(\"favourites\"))",
"def cleanTurnOffAllFavoriteChannels(self):\n self.rc.sendKeys(['KEY_BACK'])\n time.sleep(2)\n self.rc.sendKeys(['KEY_BACK'])\n time.sleep(2)\n self.rc.sendKeys(['KEY_TV'])\n time.sleep(2)\n self.rc.sendKeys([\"KEY_MENU\"])\n self.actionSelect(Menu.myAccount)\n self.actionSelect(Menu.mySettings)\n self.actionSelect(Menu.myChannels)\n time.sleep(1)\n self.rc.sendKeys([\"KEY_BACK\"])\n time.sleep(5)\n if not (self.findInPage(Description.favoriteZeroChannels)):\n self.rc.sendKeys([\"KEY_OK\"])\n self.turnOffFavouriteChannels(byNumber=True)\n self.logger.info(\" >> Turn off all favorite channels\")\n else:\n self.logger.info(\" >> All favorite channels are turn off\")\n self.rc.sendKeys(['KEY_BACK'])\n time.sleep(2)\n self.rc.sendKeys(['KEY_BACK'])\n time.sleep(2)\n self.rc.sendKeys(['KEY_TV'])\n time.sleep(2)\n return True",
"def favorites_menu(self):\n\n show_favorites = self.database.display_favorites()\n favorites_menu = Menu('FAVORITES MENU', self.about_favorites_display, show_favorites)\n favorites_menu.clear_screen()\n favorites_menu.display()\n while True:\n erase_favorites_prompt = input(\"- enter 'e' to erase your saved products;\\n\"\n \"- enter '0' to navigate app\\n\").lower()\n if erase_favorites_prompt == '0':\n favorites_menu.menu_navigation()\n elif erase_favorites_prompt == 'e':\n while True:\n erase_favorites_confirmation = input(\"- You are about to erase your saved products\\n\"\n \"- Enter 'y' to confirm\\n\"\n \"- Enter 'n' to return to favorites\\n\").lower()\n if erase_favorites_confirmation == 'y':\n self.database.erase_favorites()\n print(\"< Favorites > erased, resest completed\")\n return\n elif erase_favorites_confirmation == 'n':\n print(\" reset cancelled... returning to << FAVORITES MENU >>\")\n return self.favorites_menu()\n else:\n print('INVALID INPUT')\n pass\n else:\n print(\"INVALID INPUT\\n\")\n pass",
"def view_favorites():\n\n favorite_items = User.objects(id = session['user']['id']).get().favorites_list\n \n items = []\n\n for i in range(0, len(favorite_items)):\n\n item = Item.objects(id = favorite_items[i]).first()\n items.append(item)\n \n return render_template(\"user/favorites_list.html\", items = items)",
"def set_favorite(self):\n\n app = App.get_running_app()\n if not app.database_scanning:\n if self.target != 'Favorite':\n app = App.get_running_app()\n app.Tag.toggle(self.fullpath, 'favorite')\n photo_info = app.Photo.exist(self.fullpath)\n self.photos[self.current_photo_index()] = photo_info\n self.update_tags()\n self.refresh_all()\n self.viewer.favorite = self.favorite",
"def test_manage_remove_favorite(self):\n\n service.manage_add_or_remove_favorite(\n self.mock_product2, self.mock_user)\n for value in self.mock_product.favorites.values():\n self.assertEqual(value, None)",
"def manage_add_or_remove_favorite(self, product, user):\n\n if product.favorites.filter(id=user.id).exists():\n product.favorites.remove(user.id)\n else:\n product.favorites.add(user.id)",
"def delete_favourite():\n if request.method == \"POST\":\n user_id = mongo.db.users.find_one({\"username\": session[\"user\"]})[\"_id\"]\n favourite = request.form.get(\"wine_id\")\n\n mongo.db.users.update({\"_id\": ObjectId(user_id)}, {\"$pull\":\n {'favourites': {\"wine_id\": favourite}}})\n\n flash(\"Wine has now been removed from your favourites\")\n return redirect(url_for('profile'))",
"def cmd_reset(self, event):\n usr = event[\"sender\"]\n if not self.todos.has(usr):\n return \"You need to start a todo list first. type !todo new\"\n self.todos.set(usr, [])\n return \"Your todo list has been reset\"",
"def reset_favorite_groups(self, **kwargs):\n\n response = self._requester.request(\n \"DELETE\", \"users/self/favorites/groups\", _kwargs=combine_kwargs(**kwargs)\n )\n return response.json().get(\"message\") == \"OK\"",
"def remove_all_friends(self): \r\n self.setOfFriends.clear()",
"def delete_favourite_users(self, data: dict):\n with self.connection.cursor() as cur:\n query = sql.SQL(\"DELETE FROM favourite_users WHERE chat_id = {0} AND unique_id IN ({1})\").format(\n sql.Literal(data['chat_id']),\n sql.SQL(',').join(data['unique_id']))\n cur.execute(query)\n self.connection.commit()",
"def manage_sort_out_user_favorite_products(self, products, user):\n\n for product in products:\n if product.favorites.filter(id=user.id).exists():\n product.is_fav = True\n else:\n product.is_fav = False\n return products",
"def remove_favorite(self, ticker):\n company_obj = NasdaqCompanies.objects.get(symbol=ticker)\n fav_list = self.favorites.split(',')\n fav_list.remove(str(company_obj.companyid))\n if len(fav_list) == 0:\n self.favorites = None\n else:\n self.favorites = ','.join(fav_list)\n self.save()",
"def favorites():\n\n # Select the current user's favorited fish\n rows = db.execute(\"SELECT fishname FROM favorites WHERE user_id = :user_id\",\n user_id=session[\"user_id\"])\n\n # If GET, show the users favotited fish\n if request.method == \"GET\":\n\n return render_template(\"favorites.html\", rows=rows)\n\n # If POST, render the selected fish's HTML page\n else:\n\n fish = request.form.get(\"fishname\")\n return render_template(\"fish.html\", fish=fish, verify=1)",
"def syncPublicFavorites(self, username):\n nsid = self.user2nsid(username)\n favList, created = FavoriteList.objects.get_or_create( \\\n\t owner = username, defaults = {'sync_date': datetime.now()})\n\n result = self.flickr.favorites_getPublicList(user_id=nsid, per_page=500)\n page_count = int(result.photos[0]['pages'])\n for page in range(1, page_count+1):\n photo_list = self._syncPhotoXMLList(result.photos[0].photo)\n for photo in photo_list:\n favList.photos.add(photo)\n\t\tif page == 1:\n\t\t favList.primary = photo\n\t\t favList.save()\n result = self.flickr.favorites_getPublicList(user_id=nsid,\n per_page=500, page=page+1)",
"def remove_favorite(self):\n if request.method == 'POST':\n try:\n userID = get_jwt_identity()\n info = request.json\n id_post = info[\"post_id\"]\n\n favorite = User_has_Post_as_favorite.query.filter_by(user_id=userID, post_id=id_post).first()\n\n if not favorite:\n return Response(dumps({\"message\": \"IT IS NOT FAVORITE\"}), status=422, mimetype=\"application/json\")\n \n db.session.delete(favorite)\n db.session.commit()\n\n return Response(dumps({\"message\": \"SUCCESS\"}), status=200, mimetype=\"application/json\")\n\n except HTTPException as e:\n return Response(dumps({\"message\": str(e)}), status=500, mimetype=\"application/json\")\n\n return Response(dumps({\"message\": \"NOT POST\"}), status=403, mimetype=\"application/json\")",
"def favourite(self, favourite):\n if self.local_vars_configuration.client_side_validation and favourite is None: # noqa: E501\n raise ValueError(\"Invalid value for `favourite`, must not be `None`\") # noqa: E501\n\n self._favourite = favourite",
"def me_favsr_view(request):\n sr_id_li = [x[3:] for x in request.POST.keys() if x.startswith('id_')]\n\n if len(sr_id_li) > settings.SR_FAVS_COUNT_MAX:\n sr_id_li = sr_id_li[:settings.SR_FAVS_COUNT_MAX] # limit nr of favs\n messages.warning(request, 'There is a maximum of {} favorites '\n 'subreddits.'.format(settings.SR_FAVS_COUNT_MAX))\n\n sr_li = Subscribed.objects.filter(user=request.user)\n sr_li.update(is_favorite=False) # delete all favorites of user\n sr_li = Subscribed.objects.filter(user=request.user, sr__in=sr_id_li)\n\n if sr_li:\n sr_li.update(is_favorite=True) # and set favorite on the subset\n # messages.success(request, 'Favorite subreddits updated.')\n\n return redirect(reverse('me_page'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that takes user input on whether or not they want to order anything from our store; if they do, it brings in the search products function and allows them to order. If they don't, it shows their saved products, and if they choose to quit, the program exits. It also covers the edge case where they enter invalid input: it asks them the question again. | def user_input(user_fav_list = []):
order_now = input(dedent(
'''
Would you like to see anything from our store catalog (y/n) Or would you like to quit (q)?
'''))
if order_now == 'y':
search_product(user_fav_list)
elif order_now == 'n':
grab_saved_product()
elif order_now == 'q':
print("*" * 100)
print("Thank you for shopping here!")
print("*" * 100)
sys.exit()
else:
print("Please re-enter with (y) or (n)")
user_input() | [
"def search_product(user_fav_list=[]):\n print(dedent(\n '''\n These are the categories and individual products available:\n\n Eye Vegan Products: mascara, eye shadow, liner\n Lip Vegan Products: lip products, liner, pencil\n Face Vegan Products: cream, moisturizer, bronzer, foundation, blush, primer\n Nail Vegan Products: nail\n\n Please type in either category or product\n\n '''\n ))\n search_word = input(dedent(\n '''\n What would you like to view? Quit with (q)\n '''\n ))\n \n if search_word =='q':\n print(\"*\" * 100)\n print(\"Thank you for shopping here!\")\n print(\"*\" * 100)\n sys.exit()\n\n search_word = search_word.lower()\n\n find_search_product(search_word,user_fav_list)",
"def orderinput():\n while True:\n addmore = \"** Anything else? **\"\n answer = input()\n answer = answer.lower()\n if answer in appetizers:\n appetizers[answer] += 1\n finaloutput.append(answer)\n print(\n f\"** {appetizers[answer]} order of {answer} have been added to your meal**\"\n )\n print(addmore)\n if answer in entrees:\n entrees[answer] += 1\n finaloutput.append(answer)\n print(\n f\"** {entrees[answer]} order of {answer} have been added to your meal**\"\n )\n print(addmore)\n if answer in desserts:\n desserts[answer] += 1\n finaloutput.append(answer)\n print(\n f\"** {desserts[answer]} order of {answer} have been added to your meal**\"\n )\n print(addmore)\n if answer in drinks:\n drinks[answer] += 1\n finaloutput.append(answer)\n print(\n f\"** {drinks[answer]} order of {answer} have been added to your meal**\"\n )\n print(addmore)\n if answer == \"quit\":\n print(f\"The Items you Ordered are: {', '.join(finaloutput).capitalize()}\")\n print(f\"Thank you for ordering! Bye-Bye!\")\n sys.exit()",
"def prompt_for_product():\n while True:\n print()\n print(\"Select a product:\")\n print()\n n = 1\n for code,description,desired_number in datastorage.products():\n print(\" {}. {} - {}\".format(n, code, description))\n n = n + 1\n\n s = input(\"> \").strip()\n if s == \"\": return None\n\n try:\n n = int(s)\n except ValueError:\n n = -1\n\n if n < 1 or n > len(datastorage.products()):\n print(\"Invalid option: {}\".format(s))\n continue\n\n product_code = datastorage.products()[n-1][0]\n return product_code",
"def sell_product(self):\r\n\r\n option = 'y'\r\n while option == 'y':\r\n print 'Enter product code'\r\n code = raw_input()\r\n product = self.store.get_product(code)\r\n exists = self.store.product_exist(code)\r\n cant = 0\r\n cash = 0\r\n if product is not None and exists is True:\r\n print 'Product \\t Price'\r\n print '{0} \\t {1}'.format(product[1], product[0])\r\n print 'Please enter quantity'\r\n while cant == 0:\r\n try:\r\n cant = int(raw_input())\r\n except ValueError:\r\n print 'Please enter a valid quantity'\r\n continue\r\n\r\n if self.store.delete_product(code, cant):\r\n self.sales.add_sale(code, product[0], product[1], cant)\r\n cost = cant*product[0]\r\n print 'That will be ${0}.'.format(round(cost, 3))\r\n print 'Please enter the cash'\r\n while cash == 0:\r\n try:\r\n cash = float(raw_input())\r\n except ValueError:\r\n print 'Please enter a valid ammount'\r\n change = cash - cost\r\n if change < 0:\r\n print'Please enter a valid ammount'\r\n else:\r\n print 'Your change is $ {0}'.format(round(change, 3))\r\n else:\r\n print 'Wrong product code'\r\n print 'Do you want to sell another product? (y/n)'\r\n option = raw_input()",
"def buy(self):\n buy_request = str(input(\"\\nWhat do you want to buy? \"\n \"1 - espresso, \"\n \"2 - latte, \"\n \"3 - cappuccino:, \"\n \"back - to main menu:\\n\"))\n\n if buy_request == '1':\n self.espresso()\n elif buy_request == '2':\n self.latte()\n elif buy_request == '3':\n self.cappuccino()\n elif buy_request == \"back\":\n pass",
"def is_product_in_list(grocery_list):\r\n print(\"check if you have a product in your list.\")\r\n product = input(\"pick a product: \")\r\n if product in grocery_list:\r\n print(product + \" is on your list\")\r\n elif product not in grocery_list:\r\n print(product + \" is NOT on your list\")",
"def display_library_menu(self):\n user_input = None\n while user_input != 7:\n print(\"\\nWelcome to the Library!\")\n print(\"-----------------------\")\n print(\"1. Display all items\")\n print(\"2. Check Out an item\")\n print(\"3. Return an item\")\n print(\"4. Find an item\")\n print(\"5. Add an item\")\n print(\"6. Remove an item\")\n print(\"7. Quit\")\n string_input = input(\"Please enter your choice (1-7)\")\n\n # handle user pressing only enter in menu\n if string_input == '':\n continue\n\n user_input = int(string_input)\n\n if user_input == 1:\n self.display_available_items()\n user_input = input(\"Press Enter to continue\")\n elif user_input == 2:\n call_number = input(\"Enter the call number of the item\"\n \" you wish to check out.\")\n self.check_out(call_number)\n elif user_input == 3:\n call_number = input(\"Enter the call number of the item\"\n \" you wish to return.\")\n self.return_item(call_number)\n elif user_input == 4:\n input_title = input(\"Enter the title of the item:\")\n found_titles = self._catalogue.find_items(input_title)\n print(\"We found the following:\")\n if len(found_titles) > 0:\n for title in found_titles:\n print(title)\n else:\n print(\"Sorry! We found nothing with that title\")\n\n elif user_input == 5:\n self._catalogue.add_item()\n\n elif user_input == 6:\n call_number = input(\"Enter the call number of the item\")\n self._catalogue.remove_item(call_number)\n\n elif user_input == 7:\n pass\n else:\n print(\"Could not process the input. Please enter a\"\n \" number from 1 - 7.\")\n\n print(\"Thank you for visiting the Library.\")",
"def inputPurchase():\n purchase = input(\"Please input purchase variables: \")\n return purchase",
"def search_product():\n\n total_rows = Product.select().count()\n try:\n product_id = int(input(f'Input product id (hint: between 1 and {total_rows}:) '))\n product = Product.get_by_id(product_id)\n\n print(\"\"\"\n Your search result is:\n \"\"\")\n print(f'NAME --------------|{product.product_name}')\n print(f'PRICE -------------|${(product.product_price / 100)}')\n print(f'QTY ---------------|{product.product_quantity}')\n print(f'DATE UPDATED ------|{product.date_updated}')\n except ValueError:\n print(f'Please enter a number value from 1 to {total_rows}')\n except Product.DoesNotExist:\n print(f\"\"\"The product does not exist.\n{product_id} is not within 1 to {total_rows}\"\"\")",
"def shop_menu(self):\n self.screen.clear()\n self.screen.border(0)\n\n row = 3\n # load_product_library and for each available product index, get_value to print the name and price.\n product_list = print_menu(read_product_from_db, self.screen, row)\n row += len(product_list)\n\n # are you logged in or not?\n row += 2\n if self.current_user is not None:\n # view your cart or note that it's empty.\n self.screen.addstr(row, 22, \"Press the number of the item you'd like to add to your cart.\")\n row += 1\n self.screen.addstr(row, 22, \"Or press'c' to check out, 'l' to clear cart, b' to go back, 'x' to exit.\")\n row += 2\n self.view_cart(row)\n\n try:\n next_step = bytes.decode(self.screen.getstr(15, 40, 60))\n row += 1\n if next_step == \"x\": # Exit.\n self.quit_menu(self.shop_menu)\n\n elif next_step == \"b\": # Go back.\n self.logged_in_menu()\n\n elif next_step == \"c\": # Check Out.\n self.payment_options_menu(completing=True)\n elif next_step == \"l\": #clear cart\n clear_cart(self.cart_id)\n self.shop_menu()\n else:\n self.screen.addstr(row, 40, next_step)\n row += 1\n try: # Add a product to your cart.\n next_step = int(next_step)\n except ValueError:\n self.shop_menu()\n finally:\n row += 2\n if next_step >= 0 and next_step <= len(product_list):\n prod_id = set_thing(product_list, next_step)\n self.add_to_cart_menu(prod_id)\n else:\n # print(\"command not recognized.\")\n self.screen.addstr(row, 40, \"Command not recognized.\")\n self.shop_menu()\n\n except ValueError:\n self.unlogged_in_menu()\n\n else:\n # if you're not logged in you can view products, but you can't do anything with a cart.\n self.screen.addstr(row, 40, \"You are not logged in.\")\n row += 1\n self.screen.addstr(row, 12, \"Press 'b' to go back and choose a login option, or x to exit.\")\n\n next_step = chr(self.screen.getch())\n\n if next_step == \"b\":\n self.unlogged_in_menu()\n elif next_step == \"x\":\n self.quit_menu(self.shop_menu)\n else:\n self.screen.addstr(17, 40, \"Command_not_recognized.\")\n # print(\"command_not_recognized.\")",
"def test_search_by_product_name(self):\n # Test data\n product_title = 'Final Fantasy XV - Xbox One'\n product_price = '$19.99'\n product_rating = '4.6'\n\n self.search_controller.open_search_page()\n self.search_controller.dismiss_subscribe_modal()\n self.search_controller.search(product_title)\n\n self.assertIsNotNone(self.search_controller.get_item_from_results(product_title))\n self.assertEqual(product_price, self.search_controller.get_item_price(product_title))\n self.assertEqual(product_rating, self.search_controller.get_item_rating(product_title))\n\n self.search_controller.add_item_and_checkout(product_title)\n\n self.assertTrue(self.checkout_controller.is_checkout_page())\n self.assertTrue(self.checkout_controller.item_in_cart(product_title))\n self.assertEqual(self.checkout_controller.get_total_price(), product_price)",
"def handle_food_deficit(self, game_state, foodstuff, avail_food, food):\n print(\"You do not have enough food.\")\n if avail_food > 0:\n for food_item, quantity in foodstuff:\n self.inventory[food_item.name] = 0\n print(str(quantity) + ' x ' + food_item.name.title() + '(' + str(food_item.food) + ' food) ', end=\"\")\n print(\" were consumed from your storage.\")\n print(\"You still have \" + str(food - avail_food) + \" food deficit.\")\n print(\"Your options:\")\n print(\"1. Sell Buildings / Ships\")\n print(\"2. Take Loan\")\n ans = input(\"? \")\n if int(ans) == 1: # sell property\n self.perform_sell(game_state)\n elif int(ans) == 2: # take loan\n self.take_loan(food - avail_food)",
"def input_product():\n product_name = input(\"Enter the name:\")\n product_price = input(\"Enter the price:\")\n \n return product_name, product_price",
"def main():\n item_list = [item.Book(\"Lord of the Rings\", \"1023.2323\", \"JRR Tolkien\", 1),\n item.Book(\"Game of Thrones\", \"1032.1212\", \"GRR Martin\", 1),\n item.Book(\"Harry Potter\", \"1111.2222\", \"JK Rowling\", 1),\n item.DVD(\"Pursuit of Happiness\", \"April 12, 1974\", \"NTSC\", 1, \"12121\"),\n item.Journal(\"National Geographic\", 10, \"Science\", 1, \"51232\"),\n item.Book(\"Game of Thrones\", \"1033\", \"GRR Martin\", 1)]\n biblioteca = Library(item_list)\n catalogue_ = catalogue.Catalogue(item_list)\n generator_ = catalogue.LibraryItemGenerator(item_list)\n choice = 1\n while choice != 0:\n print(\"Welcome to Biblioteca self-service\")\n print(\"If you would like to find a book, press 1\")\n print(\"If you would like to request an item be removed press 2\")\n print(\"If you would like to check out an item press 3\")\n print(\"If you would like to return an item press 4\")\n print(\"If you would like to add an item press 5\")\n print(\"If you would like to browse the full catalogue press 6\")\n print(\"If you would like to end self-service press 0\")\n\n choice = int(input(\"what would you like to do? \"))\n\n if choice == 1:\n title = input(\"Enter the title of the book you are looking for: \")\n if isinstance(title, str):\n catalogue_.find_item(title)\n else:\n return \"Sorry, that is an invalid title\"\n if choice == 2:\n call_number = input(\"Enter the call number for the book: \")\n if isinstance(call_number, str):\n catalogue_.remove_item(call_number)\n else:\n return \"That is an invalid call number\"\n if choice == 3:\n call_number = input(\"Enter the call number for the book: \")\n if isinstance(call_number, str):\n biblioteca.check_out(call_number)\n else:\n return \"That is an invalid call number\"\n\n if choice == 4:\n call_number = input(\"Enter the call number for the book: \")\n if isinstance(call_number, str):\n biblioteca.return_item(call_number)\n else:\n return \"that is an invalid call number\"\n if choice == 5:\n generator_.generate_item(item_list)\n if choice == 6:\n display_available_books(item_list)",
"def search_menu(self):\n clr_screen() \n \n print (misc.SEARCH_MENU)\n\n for key in sorted(misc.search_menu):\n print (misc.search_menu[key])\n\n print('\\n')\n choice = input(\"Please select:\")\n\n if choice == '1':\n self.search_by_range_date()\n self.main_menu()\n elif choice == '2': \n self.find_by_time()\n self.main_menu()\n elif choice == '3':\n self.find_by_string()\n self.main_menu()\n elif choice == '4': \n self.find_by_pattern()\n self.main_menu()\n elif choice == '5': \n print (\"return to main menu\")\n self.main_menu()\n else: \n misc.option_error()\n self.main_menu()",
"def shopping():\n\n print('Getting request for stores')\n # lat, lon, max_locations, k, categories, product\n lat = g.user['lat']\n lon = g.user['lon']\n max_locations = 20\n k = 3\n categories = ['Grocery']\n product = None\n scroll = None\n allStores = []\n storeDB = get_stores_db()\n\n if request.method == 'POST':\n args = request.form\n categories = args.getlist('category')\n product = args['product']\n scroll = 'search-results'\n\n for i in range(len(categories)):\n if (categories[i] == \"Grocery Store\"):\n categories[i] = \"Grocery\"\n\n # Sample store: ['Walmart', [37.72945007660575, -121.92957003664371], '9100 Alcosta Blvd, San Ramon, California, 94583']\n storesArr = get_safest_stores(lat, lon, max_locations, k, categories)\n\n for store in storesArr:\n storeLat = str(store[1][0])\n storeLon = str(store[1][1])\n allStores.append(constructStore(lat, lon, ObjectId(), store[0], storeLat, storeLon, store[2], product))\n\n allProducts = getAllProducts()\n\n return render_template('shopping.html', userLat=lat, userLon=lon,\n shoppingTasks=g.user['shoppingTasks'], allProducts=allProducts,\n storeLocs=allStores, req=request.method, product=product, scroll=scroll), 200",
"def keyword_search(self,abc = False):\n \n print(\"\\033c\")\n if abc == True:\n word = ' Abc '\n else:\n word = 'Keyword'\n while True:\n print('#####################')\n print('# Search({}) #'.format(word))\n print('#####################')\n print(\"Type: '00' to return \\n\")\n selection = ''.join(input('>>> ').split()).lower() \n if selection in ('00'):\n print(\"\\033c\")\n return\n if abc == True:\n comics = [i.lower() for i in self.book_lib if i[0] == selection[0]]\n else:\n comics = [i.lower() for i in self.book_lib if selection in i]\n \n if len(comics) == 0:\n print('No Results Found!')\n time.sleep(2)\n print(\"\\033c\")\n while True:\n print('Search Again?')\n again = input('>>> ').lower()\n if again in ('y','yes'):\n break\n elif again in ('n','no'):\n return \n else:\n break\n print(\"\\033c\")\n comics.sort(key = self.natural_key)\n numbered_list = [''.join((str(i),')',comics[i])) for i in range(len(comics))]\n while True:\n print('####################')\n print('# Search Results #')\n print('####################\\n')\n print('Showing Matches for: {}'.format(selection))\n print('{} result(s) found\\n'.format(len(comics)))\n for result in numbered_list:\n print(result.title())\n pick = ''.join(input('>>> ').split()).lower() \n if pick in ('back','b','q'):\n print(\"\\033c\")\n return\n elif pick.isdigit():\n if int(pick) in range(len(comics)):\n book_link = self.book_lib[comics[int(pick)]]\n self.load_pull()\n if book_link not in self.pull_list.values():\n self.book_display(book_link)\n else:\n self.book_display(book_link)\n else:\n print('Invalid Entry!')\n time.sleep(1)\n print('\\033c')",
"def search_tasks():\n if Task.select().count() == 0:\n clear()\n input(\"No tasks exist in the database. Press ENTER to return to \"\n \"the main menu\")\n return\n search_menu = OrderedDict([\n ('e', employee_search),\n ('t', duration_search),\n ('k', keyword_search),\n ('d', date_search),\n ('r', date_range_search),\n ])\n message = \"Enter criteria below:\"\n while True:\n clear()\n print(\"What criteria would you like to use for searching?\\n\")\n print(\"Search by (E)mployee name\")\n print(\"Search by Dura(t)ion\")\n print(\"Search by (K)eyword\")\n print(\"Search by (D)ate\")\n print(\"Search by Date (R)ange\")\n print(\"Or go (B)ack\")\n print(\"\\n{}\\n\".format(message))\n choice = input(\"> \").lower().strip()\n\n if choice not in ['e', 't', 'k', 'd', 'r', 'b']:\n message = \"Entry not recognized. Try again.\"\n continue\n if choice == 'b':\n break\n tasks = search_menu[choice]()\n if len(tasks) == 0:\n message = \"No tasks found by that criteria. Try again.\"\n continue\n task_page_menu(tasks)",
"def search_cards(self):\n\n # Ask for the card name search parameter\n card_name = input(\"Enter full/partial card name (press Enter to skip): \")\n card_name = card_name.strip()\n print(\"\")\n\n if card_name == \"\":\n card_name = None\n\n # Ask for the card cost search parameter\n card_cost = input(\"Enter mana cost (press Enter to skip): \")\n card_cost = card_cost.strip()\n print(\"\")\n\n if card_cost == \"\":\n card_cost = None\n else:\n try:\n # Convert to integer\n card_cost = int(card_cost)\n except:\n print(\"Invalid card cost\")\n return\n\n # Ask for the card rarity search parameter\n card_rarity = input(\"Enter card rarity (Free/Common/Rare/Epic/Legendary) (press Enter to skip): \")\n card_rarity = card_rarity.strip().lower()\n print(\"\")\n\n if card_rarity == \"\":\n card_rarity = None\n elif card_rarity != \"free\" and card_rarity != \"common\" and card_rarity != \"rare\" and card_rarity != \"epic\" and card_rarity != \"legendary\":\n print(\"Invalid card rarity\")\n return\n\n # Ask for the card type search parameter\n card_type = input(\"Enter card type (Minion/Spell/Weapon) (press Enter to skip): \")\n card_type = card_type.strip().lower()\n print(\"\")\n\n if card_type == \"\":\n card_type = None\n elif card_type != \"minion\" and card_type != \"spell\" and card_type != \"weapon\":\n print(\"Invalid card type\")\n return\n\n # Ask for the class name search parameter\n class_name = input(\"Enter full class name (press Enter to skip): \")\n class_name = class_name.strip()\n print(\"\")\n\n if class_name == \"\":\n class_name = None\n elif self.db.check_class(class_name) == False:\n print(\"Invalid class name\")\n return\n\n # Query the database for all the cards matching the parameters\n search_results = self.db.get_cards(card_name, card_cost, card_rarity, card_type, class_name)\n\n # Display a summary of the search parameters\n print(\"Search parameters:\")\n print(\"Card Name:\", card_name if card_name is not None else \"None\")\n print(\"Mana Cost:\", card_cost if card_cost is not None else \"None\")\n print(\"Card Rarity:\", card_rarity if card_rarity is not None else \"None\")\n print(\"Card Type:\", card_type if card_type is not None else \"None\")\n print(\"Class Name:\", class_name if class_name is not None else \"None\")\n print(\"\")\n \n # Display the search results\n if len(search_results) == 0:\n print(\"No results found\")\n else:\n print(len(search_results), \"cards found\")\n for card in search_results:\n print(card)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Asks the user what they would like to order, then plugs the input into the find_search_product helper function, which searches the products in the database based on regex. | def search_product(user_fav_list=[]):
print(dedent(
'''
These are the categories and individual products available:
Eye Vegan Products: mascara, eye shadow, liner
Lip Vegan Products: lip products, liner, pencil
Face Vegan Products: cream, moisturizer, bronzer, foundation, blush, primer
Nail Vegan Products: nail
Please type in either category or product
'''
))
search_word = input(dedent(
'''
What would you like to view? Quit with (q)
'''
))
if search_word =='q':
print("*" * 100)
print("Thank you for shopping here!")
print("*" * 100)
sys.exit()
search_word = search_word.lower()
find_search_product(search_word,user_fav_list) | [
"def test_search_by_product_name(self):\n # Test data\n product_title = 'Final Fantasy XV - Xbox One'\n product_price = '$19.99'\n product_rating = '4.6'\n\n self.search_controller.open_search_page()\n self.search_controller.dismiss_subscribe_modal()\n self.search_controller.search(product_title)\n\n self.assertIsNotNone(self.search_controller.get_item_from_results(product_title))\n self.assertEqual(product_price, self.search_controller.get_item_price(product_title))\n self.assertEqual(product_rating, self.search_controller.get_item_rating(product_title))\n\n self.search_controller.add_item_and_checkout(product_title)\n\n self.assertTrue(self.checkout_controller.is_checkout_page())\n self.assertTrue(self.checkout_controller.item_in_cart(product_title))\n self.assertEqual(self.checkout_controller.get_total_price(), product_price)",
"def search_product():\n\n total_rows = Product.select().count()\n try:\n product_id = int(input(f'Input product id (hint: between 1 and {total_rows}:) '))\n product = Product.get_by_id(product_id)\n\n print(\"\"\"\n Your search result is:\n \"\"\")\n print(f'NAME --------------|{product.product_name}')\n print(f'PRICE -------------|${(product.product_price / 100)}')\n print(f'QTY ---------------|{product.product_quantity}')\n print(f'DATE UPDATED ------|{product.date_updated}')\n except ValueError:\n print(f'Please enter a number value from 1 to {total_rows}')\n except Product.DoesNotExist:\n print(f\"\"\"The product does not exist.\n{product_id} is not within 1 to {total_rows}\"\"\")",
"def find_search_product(search_word, user_fav_list):\n\n regex_dict = {'mascara':'\\w*.ascara\\w*', 'foundation': '\\w*.oundation\\w*', 'eye shadow': '\\w*.hadow\\w*', 'lip products': '\\w*.ip\\w*', 'bronzer': '\\w*.onzer\\w*', 'liner': '\\w*[Ll]iner\\w*', 'pencil' : '\\w*.encil', 'blush' : '\\w*.lush', 'cream' : '\\w*.ream\\w*', 'moisturizer': '\\w*.oistu\\w*', 'nail': '\\w*.ail\\w*', 'primer': '\\w*.rimer\\w*', 'powder': '\\w*.owder\\w*', 'eye vegan products': '\\w*.ascara\\w*|\\w*.hadow\\w*|\\w*.[Ll]iner\\w*', 'lip vegan products': '\\w*.ip\\w*|\\w*[Ll]iner\\w*|\\w*.encil', 'face vegan products': '\\w*.ream\\w*|\\w*.oistu\\w*|\\w*.onzer\\w*|\\w*.oundation\\w*|\\w*.lush|\\w*.rimer\\w*', 'nail vegan products': '\\w*.ail\\w*'}\n\n if search_word not in regex_dict:\n search_product(user_fav_list)\n\n pattern = str(regex_dict[search_word])\n \n global vegan_makeup_list\n if not vegan_makeup_list:\n # API call to makeup_API and the webscraping initiated\n vegan_makeup_list = beauty_api_call()\n get_contents_100percentpure()\n get_contents_thrive_causemetics()\n\n # searching for item in the API\n for item in vegan_makeup_list:\n if re.search(pattern,item['name'].strip()):\n user_fav_list.append(f\"Name : {item['name']} Cost : {item['price']} \\n\")\n\n with open (\"./assets/thrive_cosmetics_saved.txt\", \"r\") as file:\n thrive_cosmetics_scrape = file.readlines()\n \n with open (\"./assets/hundred_percent_saved.txt\", \"r\") as file:\n hundred_percent_scrape = file.readlines()\n\n # searching for item in the thrive causemetics\n for item in thrive_cosmetics_scrape:\n if re.search(pattern,item.strip()):\n user_fav_list.append(item)\n \n # searching for item in the hundred percent pure\n for item in hundred_percent_scrape:\n if re.search(pattern,item.strip()):\n user_fav_list.append(item)\n\n # user_input(user_fav_list)\n save_user_product(user_fav_list)",
"def all_products(request):\n products = Product.objects.filter(in_stock=True).order_by('name')\n query_string = None\n category = None\n style = None\n sort = None\n direction = None\n packaging = None\n\n if request.GET:\n\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n products = products.annotate(lower_name=Lower('name'))\n\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n products = products.order_by(sortkey)\n\n if 'category' in request.GET:\n category = request.GET['category']\n products = products.filter(style__category__name=category)\n category = Category.objects.get(name=category)\n\n if 'style' in request.GET:\n style = request.GET['style']\n products = products.filter(style__name=style)\n style = Style.objects.get(name=style)\n\n if 'packaging' in request.GET:\n packaging = request.GET['packaging']\n if packaging == 'keg':\n products = products.filter(packaging='keg')\n elif packaging == 'bottle_can':\n products = products.exclude(packaging='keg')\n else:\n packaging = None\n\n if 'q' in request.GET:\n query_string = request.GET['q']\n\n if not query_string:\n messages.error(request, \"No search criteria entered.\")\n return redirect(reverse('products'))\n\n entry_query = get_query(query_string, [\n 'name',\n 'description',\n 'style__friendly_name',\n 'producer__name'])\n\n products = products.filter(entry_query)\n\n current_sorting = f'{sort}_{direction}'\n\n # Handle pagination\n page = request.GET.get('page', 1)\n paginator = Paginator(products, 12)\n\n try:\n product_page = paginator.page(page)\n except PageNotAnInteger:\n product_page = paginator.page(1)\n except EmptyPage:\n product_page = paginator.page(paginator.num_pages)\n\n context = {\n 'products': product_page,\n 'search_term': query_string,\n 'category': category,\n 'style': style,\n 'current_sorting': current_sorting,\n 'packaging': packaging,\n }\n\n return render(request, 'products/products.html', context)",
"def words_filter(resulting_search):\n\n if len(resulting_search) < 2:\n result = Product.objects.filter(product_name__contains=resulting_search[0])\n return result\n elif len(resulting_search) < 3:\n result = Product.objects.filter(\n product_name__contains=resulting_search[0]\n ).filter(product_name__contains=resulting_search[1])\n return result\n elif len(resulting_search) < 4:\n result = (\n Product.objects.filter(product_name__contains=resulting_search[0])\n .filter(product_name__contains=resulting_search[1])\n .filter(product_name__contains=resulting_search[2])\n )\n return result\n elif len(resulting_search) < 5:\n result = (\n Product.objects.filter(product_name__contains=resulting_search[0])\n .filter(product_name__contains=resulting_search[1])\n .filter(product_name__contains=resulting_search[2])\n .filter(product_name__contains=resulting_search[3])\n )\n return result",
"def search_substitutes(request, product):\r\n form = SearchForm()\r\n current_user = request.user\r\n\r\n page = request.GET.get('page', 1)\r\n research = Products.objects.get(name=product)\r\n # filter product from the same category with better nutriscore\r\n nutriscore_scale = list(('a', 'b', 'c', 'd', 'e'))\r\n index = nutriscore_scale.index(research.nutriscore)\r\n better_nutriscore = nutriscore_scale[:index]\r\n product_list = Products.objects.filter(nutriscore__in=better_nutriscore,\r\n category=research.category)\r\n # if user is authenticated, get his favorites, else, pass\r\n try:\r\n # for each product to display, check if the user added it to its favs\r\n # in order to display whether the product has already been saved or not\r\n for item in product_list:\r\n favorites = Favorites.objects.filter(user=User.objects.get\r\n (id=current_user.id),\r\n substitute=item.id).prefetch_related('user', 'substitute')\r\n if favorites:\r\n item.is_favorite = True\r\n else:\r\n item.is_favorite = False\r\n except User.DoesNotExist:\r\n pass\r\n\r\n # paginate\r\n paginator = Paginator(product_list, 9) # 9 products in each page\r\n page = request.GET.get('page')\r\n try:\r\n products = paginator.page(page)\r\n except PageNotAnInteger:\r\n # If page is not an integer deliver the first page\r\n products = paginator.page(1)\r\n except EmptyPage:\r\n # If page is out of range deliver last page of results\r\n products = paginator.page(paginator.num_pages)\r\n\r\n return render(request, 'purbeurre/substitutes.html', locals())",
"def search_order_by_name(self, order_name):\n self.__SEARCH_FIELD.set_text(order_name)\n self.__START_SEARCH_BUTTON.click()",
"def keyword_search(self,abc = False):\n \n print(\"\\033c\")\n if abc == True:\n word = ' Abc '\n else:\n word = 'Keyword'\n while True:\n print('#####################')\n print('# Search({}) #'.format(word))\n print('#####################')\n print(\"Type: '00' to return \\n\")\n selection = ''.join(input('>>> ').split()).lower() \n if selection in ('00'):\n print(\"\\033c\")\n return\n if abc == True:\n comics = [i.lower() for i in self.book_lib if i[0] == selection[0]]\n else:\n comics = [i.lower() for i in self.book_lib if selection in i]\n \n if len(comics) == 0:\n print('No Results Found!')\n time.sleep(2)\n print(\"\\033c\")\n while True:\n print('Search Again?')\n again = input('>>> ').lower()\n if again in ('y','yes'):\n break\n elif again in ('n','no'):\n return \n else:\n break\n print(\"\\033c\")\n comics.sort(key = self.natural_key)\n numbered_list = [''.join((str(i),')',comics[i])) for i in range(len(comics))]\n while True:\n print('####################')\n print('# Search Results #')\n print('####################\\n')\n print('Showing Matches for: {}'.format(selection))\n print('{} result(s) found\\n'.format(len(comics)))\n for result in numbered_list:\n print(result.title())\n pick = ''.join(input('>>> ').split()).lower() \n if pick in ('back','b','q'):\n print(\"\\033c\")\n return\n elif pick.isdigit():\n if int(pick) in range(len(comics)):\n book_link = self.book_lib[comics[int(pick)]]\n self.load_pull()\n if book_link not in self.pull_list.values():\n self.book_display(book_link)\n else:\n self.book_display(book_link)\n else:\n print('Invalid Entry!')\n time.sleep(1)\n print('\\033c')",
"def order_search(self, search):\n ordering = self.get_query_param(\"ordering\", self.ordering)\n if not ordering:\n return search\n\n sort_fields = []\n for raw_ordering in ordering.split(\",\"):\n ordering_field = raw_ordering.lstrip(\"-\")\n if ordering_field not in self.ordering_fields:\n raise ParseError(\n \"Ordering by `{}` is not supported.\".format(ordering_field)\n )\n\n ordering_field = self.ordering_map.get(ordering_field, ordering_field)\n direction = \"-\" if raw_ordering[0] == \"-\" else \"\"\n sort_fields.append(\"{}{}\".format(direction, ordering_field))\n\n return search.sort(*sort_fields)",
"def product_page(self):\n\n product = self.search_bar.text()\n\n conn = self.create_connection()\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM ps4_tbl\")\n games = cursor.fetchall()\n cursor.execute(\"SELECT * FROM phone_tbl\")\n phones = cursor.fetchall()\n\n for each_category in range(2):\n if each_category == 0:\n category = 1\n for each_game in games:\n if product == each_game[0].lower():\n self.product_page = ProductPage(each_game, category)\n break\n else:\n category = 2\n for each_phone in phones:\n if product == each_phone[0].lower():\n self.product_page = ProductPage(each_phone, category)\n break",
"def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):\n if self._context.get('sale_expense_all_order'):\n domain = expression.AND([args or [], ['&', ('state', '=', 'sale'), ('company_id', 'in', self.env.companies.ids)]])\n return super(SaleOrder, self.sudo())._name_search(name=name, args=domain, operator=operator, limit=limit, name_get_uid=SUPERUSER_ID)\n return super(SaleOrder, self)._name_search(name=name, args=args, operator=operator, limit=limit, name_get_uid=name_get_uid)",
"def user_input(user_fav_list = []):\n order_now = input(dedent(\n '''\n Would you like to see anything from our store catalog (y/n) Or would you like to quit (q)?\n '''))\n\n if order_now == 'y':\n search_product(user_fav_list)\n\n elif order_now == 'n':\n grab_saved_product()\n\n elif order_now == 'q':\n print(\"*\" * 100)\n print(\"Thank you for shopping here!\")\n print(\"*\" * 100)\n sys.exit()\n\n else:\n print(\"Please re-enter with (y) or (n)\")\n user_input()",
"def manage_products(request):\n if not request.user.is_superuser:\n # If user is not an admin, redirect to home page\n messages.error(request, \"Only store owners can access this page.\")\n return redirect(reverse('home'))\n\n query = None\n\n products = Product.objects.all().order_by('producer', 'name')\n\n if request.GET:\n # If a product has been searched for\n if 'q' in request.GET:\n query_string = request.GET['q']\n if not query_string:\n messages.error(request, \"No search criteria entered.\")\n return redirect(reverse('manage_products'))\n\n entry_query = get_query(query_string, [\n 'name',\n 'description',\n 'style__friendly_name',\n 'producer__name'])\n\n products = products.filter(entry_query)\n\n context = {\n 'products': products,\n 'search_term': query,\n }\n\n return render(request, 'products/manage_products.html', context)",
"def test_price_sort(self):\n f.ProductFactory(name='product2', price=1, description='the best you can have!')\n self.client.login(username=self.user.username, password='test')\n response = self.client.get(\"/products/?&order_by=price\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n products = response.context_data['product_list']\n self.assertEqual(products.count(), 2)\n self.assertEqual(products[0].name, 'product2')",
"def search_items(keywords, meta_types=None):",
"def library_search(self):\n \n print(\"\\033c\")\n while True:\n print('#############')\n print('# Library #')\n print('#############\\n')\n print('A)Search by keyword')\n print('B)Search by Letter')\n print('Q)Back')\n choice = ''.join(input('>>> ').split()).lower() \n if choice == 'q':\n print(\"\\033c\")\n break\n elif choice == 'a':\n self.keyword_search()\n elif choice == 'b':\n self.keyword_search(abc = True)\n else:\n print('Invalid Entry')\n time.sleep(1)\n print('\\033c')",
"def search(collection: list):\n print('--------------------------------------------')\n print('Search for a book by:')\n print('1 = Author\\n2 = Title\\n3 = Publisher\\n4 = Shelf\\n5 = Category\\n6 = Subject')\n print('--------------------------------------------')\n search_input = int(input('Please input a number to make a selection:'))\n results = search_results(collection, search_input)\n return results",
"def search_arch_or_engproductid(attribute, input_datas):\n search_str = \"\"\n for input_data in input_datas.split(\",\"):\n input_data = input_data.strip()\n if input_data != \"\":\n search_str_tmp = \"(fn.Lower(SkuEntry.{0}) % '{1}%' |\" \\\n \"fn.Lower(SkuEntry.{0}) % '{1},%' |\" \\\n \"fn.Lower(SkuEntry.{0}) % '%,{1},%' |\" \\\n \"fn.Lower(SkuEntry.{0}) % '%,{1}') &\".format(attribute, input_data)\n else:\n search_str_tmp = \"(fn.Lower(SkuEntry.id) % '%') &\"\n search_str += search_str_tmp\n contain_search_str = str(search_str + \"&\").replace(\"&&\", \"\")\n not_contain_search_str = \"~({0})\".format(contain_search_str)\n return contain_search_str, not_contain_search_str",
"def split_product_by_symbol(product, symbol, LIMIT, cant_handle_that, return_best):\n split_prod = [0, 0, 0, 0]\n\n if type(product) == tuple:\n product = product[0]\n\n elif type(product) == list:\n # product = list(chain(product))\n for j in range(len(product)):\n for i in range(len(product[j][0].split(symbol))):\n # split the product by comma and keep the highest matching one. \n # i.e. Baumnüsse, ganz -> \"Baumnüsse\", \"ganz\" -> keep only \"Baumnüsse\"\n splitted_prod = product[j][0].split(symbol)[i]\n if splitted_prod in cant_handle_that:\n # jumps to next iteration if it's a \"/\" to minimize fuzzymatch warnings.\n continue # Unlike \"pass\" which does simply nothing. \n else:\n # gets the number (LIMIT) of best matching products\n temp = process.extract(splitted_prod, BLS, limit = LIMIT)\n if return_best == True:\n # checks if only the best should be returned.\n for j in range(len(temp)):\n if temp[j][1] > split_prod[1]:\n split_prod = temp[j]\n else:\n # or instead return the whole list\n if split_prod[3] == 0:\n split_prod = temp\n else:\n split_prod.extend(temp)\n\n if type(product) == str:\n for i in range(len(product.split(symbol))):\n # split the product by comma and keep the highest matching one. \n # i.e. Baumnüsse, ganz -> \"Baumnüsse\", \"ganz\" -> keep only \"Baumnüsse\"\n splitted_prod = product.split(symbol)[i]\n if splitted_prod in cant_handle_that:\n # jumps to next iteration if it's a \"/\" to minimize fuzzymatch warnings.\n continue # Unlike \"pass\" which does simply nothing. \n else:\n temp = process.extract(splitted_prod, BLS, limit = LIMIT)\n if return_best == True:\n for j in range(len(temp)):\n if temp[j][1] > split_prod[1]:\n split_prod = temp[j]\n else:\n split_prod = temp\n\n return split_prod"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function has a dictionary of regex terms; it iterates through a list of data and, if the regex matches the search, appends the matching items to user_fav_list. | def find_search_product(search_word, user_fav_list):
regex_dict = {'mascara':'\w*.ascara\w*', 'foundation': '\w*.oundation\w*', 'eye shadow': '\w*.hadow\w*', 'lip products': '\w*.ip\w*', 'bronzer': '\w*.onzer\w*', 'liner': '\w*[Ll]iner\w*', 'pencil' : '\w*.encil', 'blush' : '\w*.lush', 'cream' : '\w*.ream\w*', 'moisturizer': '\w*.oistu\w*', 'nail': '\w*.ail\w*', 'primer': '\w*.rimer\w*', 'powder': '\w*.owder\w*', 'eye vegan products': '\w*.ascara\w*|\w*.hadow\w*|\w*.[Ll]iner\w*', 'lip vegan products': '\w*.ip\w*|\w*[Ll]iner\w*|\w*.encil', 'face vegan products': '\w*.ream\w*|\w*.oistu\w*|\w*.onzer\w*|\w*.oundation\w*|\w*.lush|\w*.rimer\w*', 'nail vegan products': '\w*.ail\w*'}
if search_word not in regex_dict:
search_product(user_fav_list)
pattern = str(regex_dict[search_word])
global vegan_makeup_list
if not vegan_makeup_list:
# API call to makeup_API and the webscraping initiated
vegan_makeup_list = beauty_api_call()
get_contents_100percentpure()
get_contents_thrive_causemetics()
# searching for item in the API
for item in vegan_makeup_list:
if re.search(pattern,item['name'].strip()):
user_fav_list.append(f"Name : {item['name']} Cost : {item['price']} \n")
with open ("./assets/thrive_cosmetics_saved.txt", "r") as file:
thrive_cosmetics_scrape = file.readlines()
with open ("./assets/hundred_percent_saved.txt", "r") as file:
hundred_percent_scrape = file.readlines()
# searching for item in the thrive causemetics
for item in thrive_cosmetics_scrape:
if re.search(pattern,item.strip()):
user_fav_list.append(item)
# searching for item in the hundred percent pure
for item in hundred_percent_scrape:
if re.search(pattern,item.strip()):
user_fav_list.append(item)
# user_input(user_fav_list)
save_user_product(user_fav_list) | [
"def search_product(user_fav_list=[]):\n print(dedent(\n '''\n These are the categories and individual products available:\n\n Eye Vegan Products: mascara, eye shadow, liner\n Lip Vegan Products: lip products, liner, pencil\n Face Vegan Products: cream, moisturizer, bronzer, foundation, blush, primer\n Nail Vegan Products: nail\n\n Please type in either category or product\n\n '''\n ))\n search_word = input(dedent(\n '''\n What would you like to view? Quit with (q)\n '''\n ))\n \n if search_word =='q':\n print(\"*\" * 100)\n print(\"Thank you for shopping here!\")\n print(\"*\" * 100)\n sys.exit()\n\n search_word = search_word.lower()\n\n find_search_product(search_word,user_fav_list)",
"def extract_people(data, list1):\n\n result = []\n\n translator = str.maketrans('', '', string.punctuation)\n remove_terms = ['#goldenglobes', 'golden globes', '#goldenglobe', 'golden globe', 'goldenglobes', 'goldenglobe', 'golden', 'globe', 'globes']\n stop = remove_terms + list1\n\n for tweet in data:\n\n tweet = re.sub(\"\\d+\", \"\", tweet) #strip nums\n tweet = re.sub(r'http\\S+', '', tweet) #strip urls\n tweet = re.sub(r'#\\S+', '', tweet) #strip hashtags\n tweet = tweet.translate(translator) #strip non-alphanumeric characters\n tweet = tweet.split() #tokenize\n tweet = [term for term in tweet if term.lower() not in stop_words] #remove stop words\n for i in stop:\n for j in tweet:\n if i.lower() in j.lower():\n tweet.remove(j)\n result.append(tweet)\n\n\n\n grams = [];\n\n for tweet in result:\n if tweet:\n # Get all possible bigrams & trigrams in a tweet\n gram = list(nltk.everygrams(tweet, 2, 3))\n\n # Filter through and append to list for tweet\n for g in gram:\n if len(g) == 2:\n if bool(re.match(r'\\b[A-Z][a-z]+\\b', g[0])) and bool(re.match(r'\\b[A-Z][a-z]+\\b', g[1])):\n grams.append(' '.join(g))\n else:\n if bool(re.match(r'\\b[A-Z][a-z]+\\b', g[0])) and bool(re.match(r'\\b[A-Z][a-z]+\\b', g[1])) and bool(re.match(r'\\b[A-Z][a-z]+\\b', g[2])):\n grams.append(' '.join(g))\n\n\n fdist = nltk.FreqDist(grams)\n\n try:\n names = fdist.most_common()\n except:\n names = \"\"\n\n return names",
"def fav_from_list(self, list_id):\n for items in list_id:\n self.fav_tweet(items)",
"def add_favourites(update: Update, context: CallbackContext):\n bot_typing(context.bot, update.message.chat_id)\n try:\n message = update.message.text.split(' ')[1]\n if update.message.text == 'Add to Favourites ❤':\n db.execute('SELECT * FROM bus_stop_code_history WHERE user_id = %s ORDER BY datetime DESC',\n (update.message.chat_id,))\n last_sent_code = db.fetchone()\n\n db.execute(\"INSERT INTO users (user_id, bus_stop_code, description, new_description, state) VALUES \"\n \"(%s, %s, %s, %s, '0') ON CONFLICT (user_id, bus_stop_code) DO NOTHING\",\n (last_sent_code[0], last_sent_code[1], last_sent_code[2], last_sent_code[2]))\n\n update.message.reply_text(add_favourites_msg(last_sent_code[1]), parse_mode=ParseMode.HTML)\n\n elif len(message) == 5 and message.isdigit():\n with open('bus_stops.txt', 'r') as r:\n for bus_stop in r.readlines():\n bus_stop_location = bus_stop.split(' | ', 5)\n if bus_stop_location[0] == message:\n description = bus_stop_location[2]\n db.execute(\"INSERT INTO users (user_id, bus_stop_code, description, new_description, state) VALUES \"\n \"(%s, %s, %s, %s, '0') ON CONFLICT (user_id, bus_stop_code) DO NOTHING\",\n (update.message.chat_id, message, description, description))\n\n update.message.reply_text(add_favourites_msg(message), parse_mode=ParseMode.HTML)\n\n elif len(message) != 5 and not message.isdigit():\n update.message.reply_text(failed_add_fav_msg(message))\n except IndexError:\n update.message.reply_text(instructions_add_fav())",
"def add_to_favourites():\n first_merchant_card = My.search_clickable_webelement(driver, By.TAG_NAME, \"h3\")\n assert first_merchant_card\n first_merchant_card.click()\n\n add_to_favourites_button = My.search_clickable_webelement(\n driver, By.CSS_SELECTOR, \"#ypgBody > div.page__container > div > \"\n \"div.page__container.page__container--full.page__container--merchant > div.page__content \"\n \"> div.merchant__sharebar.hide-print > ul \"\n \"> li.shares__item.shares__item--fav.presenceBtnFav.jsMerchantFav > a\")\n assert add_to_favourites_button\n add_to_favourites_button.click()\n\n connect_with_yp_container = My.search_presence_webelement(driver, By.XPATH, '//*[@id=\"ypModal\"]/div/div')\n assert connect_with_yp_container",
"def get_web_fav_users(ref, getter, scraper):\r\n fav_tree = getter.get_legacy_part(ref, \"favs\")\r\n fav_recs = scraper.get_legacy_part(fav_tree)\r\n ffset = set()\r\n ffdict = dict()\r\n if fav_recs:\r\n ffset = {int(x.id) for x in fav_recs}\r\n ffdict = {int(x.id): x.alias for x in fav_recs}\r\n return ffset, ffdict",
"def list_favorited_bookmarks(request):\n bookmarks = Bookmark.objects.filter(user=request.user, favorited=True)\n\n if request.POST:\n bookmarks = search_bookmarks(request.POST.get('query', None), bookmarks)\n\n context = {\n 'bookmarks': bookmarks,\n }\n return render(request, 'bookmarks/list_favorited_bookmarks.html', context)",
"def friendsinvitesearch(request):\n search_list = []\n response_data = {'result': False, 'search_list': search_list}\n if request.method == 'POST' and request.is_ajax():\n search = request.POST.get('search_text', None)\n for term in search.split():\n query_result = User.objects.exclude(id=request.user.id).filter(\n Q(Q(first_name__icontains=term) | Q(last_name__icontains=term) |\n Q(email=term) | Q(username__icontains=term)), is_active=True).order_by('first_name')\n for qs in query_result:\n ps = Profile.objects.get(user=qs)\n if Friend.objects.filter(from_user=request.user, to_user=qs.id).exists():\n friend = Friend.objects.get(from_user=request.user, to_user=qs.id)\n status = friend.status\n if friend.status == '1':\n title = \"Friends\"\n elif friend.status == '3':\n title = \"Invite\"\n else:\n title = \"Invitation Sent\"\n elif Friend.objects.filter(from_user=qs.id, to_user=request.user).exists():\n friend = Friend.objects.get(to_user=request.user, from_user=qs.id)\n status = friend.status\n if friend.status == '1':\n title = \"Friends\"\n elif friend.status == '3':\n title = \"Invite\"\n else:\n title = \"Accept\"\n else:\n status = '3'\n title = 'Invite'\n qs.name = qs.get_full_name()\n if ps.profile_img:\n qs.l_img = get_thumbnail(ps.profile_img, '24x24', quality=99, format='PNG').url\n else:\n qs.l_img = '/static/skigit/detube/images/noimage_user.jpg'\n search_list.append({'uid': qs.id, 'username': qs.username, 'name': qs.name, 'image': qs.l_img,\n 'status': status, 'title': title})\n response_data['result'] = True\n response_data['search_list'] = sorted(map(dict, set(tuple(x.items()) for x in search_list)),\n key=lambda k: k['name'])\n return json_response(response_data)",
"def extract_media(data, list1):\n\n result = []\n\n translator = str.maketrans('', '', string.punctuation)\n remove_terms = ['#goldenglobes', 'golden globes', '#goldenglobe', 'golden globe', 'goldenglobes', 'goldenglobe', 'golden', 'globe', 'globes', 'best']\n stop = remove_terms + list1\n\n for tweet in data:\n tweet = re.sub(\"\\d+\", \"\", tweet) #strip nums\n tweet = re.sub(r'http\\S+', '', tweet) #strip urls\n tweet = re.sub(r'#\\S+', '', tweet) #strip hashtags\n tweet = tweet.translate(translator) #strip non-alphanumeric characters\n tweet = tweet.split() #tokenize\n for i in stop:\n for j in tweet:\n if i.lower() in j.lower():\n tweet.remove(j)\n tweet = ' '.join(tweet)\n result.append(tweet)\n\n\n grams = [];\n\n for tweet in result:\n if tweet:\n\n grams.extend(re.findall(r\"([A-Z][\\w-]*(?:\\s+[A-Z][\\w-]*)+)\", tweet))\n grams.extend(re.findall(r\"\\b[A-Z][a-z]+\\b.*\\b[A-Z][a-z]+\\b\", tweet))\n\n\n fdist = nltk.FreqDist(grams)\n\n try:\n names = fdist.most_common()\n\n except:\n names = \"\"\n\n return names",
"def get_search_results(filt, text):\n cursor = []\n try:\n cursor = db.engine.execute(\"SELECT * FROM usersfts WHERE usersfts MATCH '{}*' ORDER BY rank\".format(text))\n except:\n return []\n usernames = list(map(lambda obj: obj[0], list(cursor)))\n users = list(map(get_user_by_username, usernames))\n\n if filt == 'mentor':\n return list(filter(is_mentor, users))\n elif filt == 'mentee':\n return list(filter(is_mentee, users))\n else:\n return users",
"def search_items(keywords, meta_types=None):",
"def add_fav_drink(bot, update):\r\n drnk_session = drinksSqlDb.get_drink_session()\r\n # If drink is exact match, adds drink and prompts to add another\r\n check_text = update.message.text\r\n chat_user = update.message.from_user\r\n\r\n # If LIKE search gives a match, add it to the database\r\n drink_exists = drinksSqlDb.query_drink_first(check_text, drnk_session)[0] # Only take first return item\r\n drink_contain = drinksSqlDb.query_drink_contains(check_text, drnk_session)[0]\r\n if drink_exists:\r\n user, usr_session = userSqlDb.check_for_user_id(chat_user.id)\r\n userSqlDb.set_user_favorite(user, drink_exists.drink_name, usr_session)\r\n bot.send_message(chat_id=update.message.chat_id,\r\n text=\"Great! I've added {} to your favorites\".format(drink_exists.drink_name))\r\n # Close the session now that the drink has been added\r\n drnk_session.close()\r\n usr_session.close()\r\n return ConversationHandler.END\r\n # If the text isn't close enough for a LIKE search, try a contains search, and show results\r\n elif drink_contain:\r\n suggestions = []\r\n # Extract drink name from list and output through bot message\r\n for drink in drink_contain:\r\n suggestions.append(drink.drink_name)\r\n info_log.debug(suggestions)\r\n message = \"Did you mean to send one of these drinks?:\\n{}\" \\\r\n \"\\nIf so, please send the drink name again, or type 'exit' to leave\".format(\r\n \"\\n\".join(suggestions).title())\r\n bot.send_message(chat_id=update.message.chat_id, text=message)\r\n\r\n return ADD\r\n else:\r\n bot.send_message(chat_id=update.message.chat_id, text=\"Sorry, I was unable to find a drink that matched\")\r\n drnk_session.close()\r\n return ConversationHandler.END",
"def get_favs():\n items = []\n json_response = get_kodi_json(method=\"Favourites.GetFavourites\",\n params='{\"type\": null, \"properties\": [\"path\", \"thumbnail\", \"window\", \"windowparameter\"]}')\n if \"result\" not in json_response or json_response[\"result\"][\"limits\"][\"total\"] == 0:\n return []\n for fav in json_response[\"result\"][\"favourites\"]:\n path = get_fav_path(fav)\n newitem = {'Label': fav[\"title\"],\n 'thumb': fav[\"thumbnail\"],\n 'Type': fav[\"type\"],\n 'Builtin': path,\n 'path': \"plugin://script.extendedinfo/?info=action&id=\" + path}\n items.append(newitem)\n return items",
"def auto_fav(self, phrase, count=100, result_type=\"recent\"):\n\n result = self.search_tweets(phrase, count, result_type)\n\n for tweet in result[\"statuses\"]:\n try:\n # don't favorite your own tweets\n if tweet[\"user\"][\"screen_name\"] == self.BOT_CONFIG[\"TWITTER_HANDLE\"]:\n continue\n \n self.wait_on_action()\n \n result = self.TWITTER_CONNECTION.favorites.create(_id=tweet[\"id\"])\n print(\"Favorited: %s\" % (result[\"text\"].encode(\"utf-8\")), file=sys.stdout)\n\n # when you have already favorited a tweet, this error is thrown\n except TwitterHTTPError as api_error:\n # quit on rate limit errors\n if \"rate limit\" in str(api_error).lower():\n print(\"You have been rate limited. \"\n \"Wait a while before running the bot again.\", file=sys.stderr)\n return\n\n if \"you have already favorited this status\" not in str(api_error).lower():\n print(\"Error: %s\" % (str(api_error)), file=sys.stderr)",
"def favorers(self, recipe):\n #key = cache_key('following', user.pk)\n #following = cache.get(key)\n\n #if following is None:\n qs = Favorite.objects.filter(recipe=recipe).all()\n favorers = [u.favorer for u in qs]\n #cache.set(key, following)\n\n return favorers",
"def get_favs_by_type(fav_type):\n favs = get_favs()\n return [fav for fav in favs if fav[\"Type\"] == fav_type]",
"def add_favourite_users(self, data: dict):\n with self.connection.cursor() as cur:\n for unique_id in data['unique_id']:\n query = sql.SQL(\"INSERT INTO favourite_users (unique_id, chat_id) \"\n \"VALUES ({0}, {1}) \"\n \"ON CONFLICT (chat_id, unique_id) DO UPDATE SET \"\n \"chat_id = EXCLUDED.chat_id, unique_id = EXCLUDED.unique_id\").format(\n sql.Literal(unique_id),\n sql.Literal(data['chat_id']))\n cur.execute(query)\n self.connection.commit()",
"def add_to_fav(request):\n data = get_data(request)\n username = data.get('username')\n film_id = data.get('film_id')\n film = Film.objects.get(imdb_id=film_id)\n user = User.objects.get(username=username)\n user.fav_list.add(film)\n user.save()\n return JsonResponse({'msg': 'success'})",
"def add_favourite():\n user_id = mongo.db.users.find_one({\"username\": session[\"user\"]})[\"_id\"]\n user = mongo.db.users.find_one({\"username\": session[\"user\"]})\n\n # To find if check if user already has created favourites,\n # and if wine exists in favourites\n if \"favourites\" in user:\n existing_favourites = user[\"favourites\"]\n if existing_favourites:\n for favourite in existing_favourites:\n if favourite[\"wine_id\"] == request.form.get(\"wine_id\"):\n flash(\"This wine was was already\"\n \" added to your favourites list\")\n return redirect(url_for('view_wines'))\n if request.method == \"POST\":\n # To add the wine to user favourites\n favourite = {\n \"wine_id\": request.form.get(\"wine_id\"),\n \"wine_name\": request.form.get(\"wine_name\").lower(),\n \"grape\": request.form.get(\"grape\").lower(),\n \"vintage\": request.form.get(\"vintage\").lower(),\n \"country\": request.form.get(\"country\").lower(),\n }\n mongo.db.users.update_one({\"_id\": ObjectId(user_id)},\n {\"$push\": {\"favourites\": favourite}})\n\n flash(\"Wine is now added to your favourites list\")\n return redirect(url_for('view_wines'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
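As a minimal illustration of the lookup-then-search pattern in find_search_product above (the pattern comes from its regex dictionary; the product names are made up for the example):

import re

# 'mascara' pattern from the regex dictionary above; sample names are invented.
pattern = r'\w*.ascara\w*'
matches = [item for item in ["Lash Mascara", "Lip Gloss"] if re.search(pattern, item)]
print(matches)   # ['Lash Mascara']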
Returns the amount of shared memory in bytes consumed in a single stage of a kernel. | def calculate_smem_usage_per_stage(td: TileDescription, operation_kind: cutlass.OperationKind) -> int:
m, n, k = td.threadblock_shape
if operation_kind == cutlass.OperationKind.Gemm:
stage_barrier_bytes = 32
return (
(DataTypeSize[td.math_instruction.element_a] * m * k // 8)
+ (DataTypeSize[td.math_instruction.element_b] * k * n // 8)
+ stage_barrier_bytes
)
else:
raise Exception(f"No available shared memory calculation for operation kind {operation.operation_kind}") | [
"def shared_memory_size(data_buffers=None):\n\n shared_size = 0\n\n if data_buffers is None:\n data_buffers = inject.get_injectable(\"data_buffers\", {})\n\n for k, data_buffer in data_buffers.items():\n if isinstance(data_buffer, str) and data_buffer.startswith(\"sh.Dataset:\"):\n from sharrow import Dataset\n\n shared_size += Dataset.shm.preload_shared_memory_size(data_buffer[11:])\n continue\n try:\n obj = data_buffer.get_obj()\n except Exception:\n obj = data_buffer\n data = np.ctypeslib.as_array(obj)\n data_size = data.nbytes\n\n shared_size += data_size\n\n return shared_size",
"def get_redis_used_memory(self,paras):\n redis_exec_obj = get_redis_exec_obj()\n print redis_exec_obj.info()['used_memory']",
"def node_size(self):\n return self._partitions[self.partition]",
"def total_free_space(self):\n return self.free_space",
"def min_total_memory():\n return CONSTANTS[\"MIN_TOTAL_MEMORY\"]",
"def share_factor(self):\n t = self.total_size()\n sumsizes = sum([HBStree.subtree_size(r) for r in self.root_versions])\n return sumsizes / t",
"def used_space(self):\n return self.storage.used()",
"def getSizeOfBlock(self) -> int:\n ...",
"def allocated_space(self):\n size = Size(0)\n\n if not self.partitions:\n return size\n\n for part in self.partitions:\n if part.percent_string:\n continue\n size += part.size\n\n return size",
"def _system_memory():\n\n if platform.platform() == 'Darwin':\n try:\n output = shell.check_output(['sysctl', 'hw.memsize']).strip()\n return int(output.split(' ')[1])\n except shell.CalledProcessError:\n return None\n\n return None",
"def _getFileSystemUsage(self):\n fs = os.statvfs(self._path)\n blocksUsed = fs.f_blocks - fs.f_bfree\n sizeUsed = blocksUsed * fs.f_bsize / 1024 / 1024\n return sizeUsed",
"def calculateNumProcesses(self):\n total = 0\n for processList in self.processes.values():\n total+=len(processList)\n return total",
"def sizeof(container: lldb.SBValue) -> lldb.SBValue:\n container = utils.deref(container.GetNonSyntheticValue())\n t = utils.template_type(utils.rawtype(container.type))\n\n if t == \"std::vector\" or t == \"HPHP::req::vector\":\n # It's a synthetic child provider, so we can just use this property\n return container.num_children\n elif t == \"std::priority_queue\":\n return sizeof(utils.get(container, \"c\"))\n elif t == 'std::unordered_map' or t == 'HPHP::hphp_hash_map':\n return utils.get(container, \"_M_h\", \"_M_element_count\")\n elif t == 'HPHP::FixedStringMap':\n return utils.get(container, \"m_extra\")\n elif t == 'HPHP::IndexedStringMap':\n return utils.get(container, \"m_map\", \"m_extra\")\n elif t == 'HPHP::ArrayData':\n return array_data_size(container)\n elif t == 'HPHP::Array':\n arr_data = utils.deref(utils.get(container, \"m_arr\"))\n return array_data_size(arr_data)\n elif t == 'HPHP::CompactVector':\n return utils.get(container, \"m_data\", \"m_len\").unsigned",
"def memory_usage_bytes(self) -> int:\n deep = self.memory_usage == \"deep\"\n return self.data.memory_usage(index=True, deep=deep)",
"def free_kb(self):\n return self.free * self.size_kb",
"def diskspace():\n statvfs = os.statvfs(compat_path(temp_path()))\n return statvfs.f_frsize * statvfs.f_bavail",
"def _get_count(self) -> \"size_t\" :\n return _core.Workspaces__get_count(self)",
"def memory_util(self) -> float:\n return self.current_memory / self.max_memory",
"def _memory():\n\n free_lines = subprocess.check_output([\"free\", \"-b\", \"-w\"],\n universal_newlines=True).split('\\n')\n free_grid = [x.split() for x in free_lines]\n # Identify columns for \"total\" and \"available\"\n total_idx = free_grid[0].index(\"total\")\n available_idx = free_grid[0].index(\"available\")\n total = int(free_grid[1][1 + total_idx])\n available = int(free_grid[1][1 + available_idx])\n used = total - available\n total_gb = total / (1024.0 * 1024.0 * 1024.0)\n used_gb = used / (1024.0 * 1024.0 * 1024.0)\n return (total_gb, used_gb)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
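As a quick check of the arithmetic in calculate_smem_usage_per_stage above, here is a standalone sketch that mirrors the Gemm branch without depending on the cutlass package; the tile shape and element widths below are illustrative assumptions:

def smem_per_stage_bytes(m, n, k, bits_a, bits_b, stage_barrier_bytes=32):
    # One stage holds an M x K tile of A, a K x N tile of B, and a small barrier allocation.
    return (bits_a * m * k // 8) + (bits_b * k * n // 8) + stage_barrier_bytes

# Example: 128x128x64 threadblock tile with fp16 A and B (16 bits each).
per_stage = smem_per_stage_bytes(128, 128, 64, 16, 16)
print(per_stage)       # 32800 bytes per stage
print(per_stage * 3)   # 98400 bytes for a 3-stage mainloop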
Checks whether a device with `cc` supports the number of stages within `tile_description`, based both on raw limits on the number of stages and on shared memory capacity. | def valid_stage_count(
cc: int,
td: TileDescription,
element_C: cutlass.DataType = None,
element_D: cutlass.DataType = None) -> tuple:
if cc == 90:
if (td.stages is None or td.stages == 0):
# Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically
# determines the stage count to use. Thus, all settings are valid in these scenarios.
return (True, "")
else:
cutlass.logger.warning(
"Setting an explicit stage count for SM90 kernels currently may "
"result in compilation errors if the combination of tile shape, "
"stage count, and shared memory requirement of the epilogue exceeds "
"the available shared memory per SM.")
if td.stages <= 0:
return (False, f"Stage counts must be positive integers. Tile description has stage count of {td.stages}.")
if cc < 80 and td.stages != 2:
return (False, f"Tile description has stage count of {td.stages}, "
f"but only 2 stages are supported on SM{cc}.")
# The calculation below does not consider shared memory used by the epilogue and, thus,
# only catches cases in which the mainloop exceeds the device's shared memory capacity.
# This is not a concern for CUTLASS 2.x kernels, for which the shared memory of the
# mainloop and epilogue is shared.
smem_per_stage = calculate_smem_usage_per_stage(td, cutlass.OperationKind.Gemm)
smem_usage_mainloop = (smem_per_stage * td.stages)
smem_arch = cutlass.SharedMemPerCC[cc] << 10
if smem_usage_mainloop > smem_arch:
return ( False,
"Configuration uses too much shared memory. Consider reducing stage count or tile shape.\n"
f"Details:\n"
f"Mainloop uses {smem_per_stage} bytes of shared memory per stage, and "
f"{td.stages} stages for a total of {smem_usage_mainloop} bytes.\n"
f"The maxmium amount of shared memory that can be used per block on CC {cc} is {smem_arch}.")
return (True, "") | [
"def has_available_build_slots(date_check, production_unit):\n capacity = Capacity.objects.get_restore_or_create(day=date_check, production_unit=production_unit, defaults={'capacity': 0}).capacity\n order_count = Order.objects.filter(\n build__build_date=date_check,\n build__build_order__production_unit = production_unit,\n order_submitted__isnull=False,\n order_cancelled__isnull=True,\n ).count()\n return capacity > order_count",
"def check_boardsize():\n return BOARD_SIZE % 2 == 0",
"def test_chip_creation(self):\n for (width, height), machine in self.test_machines.iteritems():\n # The number of processors created is correct for the machine size\n self.assertEqual(len(machine.get_chips()), width * height)\n\n # Initially none of the processors should be allocated\n self.assertEqual(len(machine.get_allocated_processors()), 0)",
"def check_ingest_capacity(self, observation, pipelines):\n\n\t\tbuffer_capacity = False\n\t\tif self.buffer.check_buffer_capacity(observation):\n\t\t\tlogger.debug(\"Buffer has enough capacity for %s\", observation.name)\n\t\t\tbuffer_capacity = True\n\n\t\tcluster_capacity = False\n\t\tpipeline_demand = pipelines[observation.type]['demand']\n\t\tif self.cluster.check_ingest_capacity(pipeline_demand):\n\t\t\tlogger.debug(\n\t\t\t\t\"Cluster is able to process ingest for observation %s\",\n\t\t\t\tobservation.name\n\t\t\t)\n\t\t\tcluster_capacity = True\n\n\t\treturn buffer_capacity and cluster_capacity",
"def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed",
"def test_build_states(threshold, system_capacity, buffer_capacity):\n states = build_states(\n threshold=threshold,\n system_capacity=system_capacity,\n buffer_capacity=buffer_capacity,\n )\n\n if threshold > system_capacity:\n assert len(states) == system_capacity + 1 # +2\n else:\n states_after_threshold = system_capacity - threshold + 1\n size_of_s2 = states_after_threshold if states_after_threshold >= 0 else 0\n all_states_size = size_of_s2 * (buffer_capacity + 1) + threshold\n assert len(states) == all_states_size",
"def has_multi_stage_cooling(cool_stage):\n if cool_stage == \"variable_speed\" or cool_stage == \"modulating\":\n return True\n return False",
"def has_validate_tiles(self):\n if not self.tiles or not self.tile_indices or self.tile_size <= 0:\n return False\n return True",
"def is_valid_overview_factor(dataset, ov_factor):\n # Use gdal Dataset attributes (https://gdal.org/python/osgeo.gdal.Dataset-class.html)\n return dataset.RasterXSize / ov_factor >= 8 and dataset.RasterYSize / ov_factor >= 8",
"def check_sparse_tensor_size(sparse_tensor_value):\n sparse_tensor = sparse_tensor_value.value\n return (\n # Check the number of non-default elements.\n sparse_tensor_value.num_elements() <= limits.MAX_TENSOR_ELEMENTS and\n\n # Check the number of dimensions.\n len(sparse_tensor.dense_shape) <= limits.MAX_NUM_DIMENSIONS)",
"def is_valid(tiles):\n current = len(tiles)\n if current < 2:\n return True\n elif current == 2:\n return tiles[0].east + tiles[1].west == 10\n elif current == 3:\n return tiles[1].east + tiles[2].west == 10\n elif current == 4:\n return tiles[0].south + tiles[3].north == 10\n elif current == 5:\n return tiles[3].east + tiles[4].west == 10 and tiles[4].north + tiles[1].south == 10\n elif current == 6:\n return (\n tiles[4].north + tiles[1].south == 10 and\n tiles[4].east + tiles[5].west == 10 and\n tiles[5].north + tiles[2].south == 10\n )\n elif current == 7:\n return tiles[3].south + tiles[6].north == 10\n elif current == 8:\n return (\n tiles[6].east + tiles[7].west == 10 and\n tiles[7].north + tiles[4].south == 10\n )\n elif current == 9:\n return (\n tiles[7].east + tiles[8].west == 10 and\n tiles[8].north + tiles[5].south == 10\n )",
"def gpu_requested(resources):\n if resources is None:\n return False\n if not isinstance(resources, dict):\n raise TypeError(\"Parameter resources is required to be a dict\")\n for k, v in resources.items():\n if \"gpu\" in k.strip().lower() and int(v) > 0:\n return True\n return False",
"def test_card_piles_full(self):\n self.assertTrue(len(self.game.library)>5)\n self.assertEqual(len(self.game.jacks), 6)",
"def _verify_capacity(s, expected_capacity):\n TUR(s)\n # READ CAPACITY (16)\n data = s.readcapacity16().result\n returned_size = (data['returned_lba'] + 1 -data['lowest_aligned_lba']) * data['block_length']\n assert returned_size == expected_capacity, {data['returned_lba'], data['block_length']}",
"def hasGroupsSizeX(self, deck):\r\n def gcd( a, b ):\r\n while b:\r\n a, b = b, a % b\r\n return a\r\n \r\n def GCD( nums ):\r\n res = nums[0]\r\n for i in range(1, len(nums)):\r\n res = gcd( res, nums[i])\r\n return res\r\n \r\n \r\n d = {}\r\n for c in deck:\r\n d[c] = d.get( c, 0 ) + 1\r\n values = list( d.values() ) \r\n r = GCD( values )\r\n if r == 1:\r\n return False\r\n else:\r\n return True",
"def test_slice_thickness(self):\n self.assertEqual(self.cbct.thickness.passed, self.thickness_passed)",
"def test_tile_size(self):\n gmp = GlobalMercatorProfile()\n assert gmp.tile_size == 256",
"def __check_video_resolutions(raw_cap, coded_cap):\n\n raw_width = int(raw_cap.get(CAP_PROP_FRAME_WIDTH))\n raw_height = int(raw_cap.get(CAP_PROP_FRAME_HEIGHT))\n coded_width = int(coded_cap.get(CAP_PROP_FRAME_WIDTH))\n coded_height = int(coded_cap.get(CAP_PROP_FRAME_HEIGHT))\n\n # at the same time we need to init the frame width and height four our common metrics\n return (raw_width == coded_width) and (raw_height == coded_height)",
"def has_capacity(dimensions):\n assert not ndb.in_transaction()\n # Look at the fast path.\n cap = task_queues.probably_has_capacity(dimensions)\n if cap is not None:\n return cap\n\n # Do a query. That's slower and it's eventually consistent.\n q = BotInfo.query()\n flat = task_queues.dimensions_to_flat(dimensions)\n for f in flat:\n q = q.filter(BotInfo.dimensions_flat == f)\n\n # Add it to the 'quick cache' to improve performance. This cache is kept for\n # the same duration as how long bots are considered still alive without a\n # ping. There are two uses case:\n # - there's a single bot in the fleet for these dimensions and it takes a\n # long time rebooting. This is the case with Android with slow\n # initialization and some baremetal bots (thanks SCSI firmware!).\n # - Machine Provider recycle the fleet simultaneously, which causes\n # instantaneous downtime. https://crbug.com/888603\n seconds = config.settings().bot_death_timeout_secs\n\n if q.count(limit=1):\n logging.info('Found capacity via BotInfo: %s', flat)\n task_queues.set_has_capacity(dimensions, seconds)\n return True\n\n # Search a bit harder. In this case, we're looking for BotEvent which would be\n # a bot that used to exist recently.\n cutoff = utils.utcnow() - datetime.timedelta(seconds=seconds)\n q = BotEvent.query(BotEvent.ts > cutoff)\n flat = task_queues.dimensions_to_flat(dimensions)\n for f in flat:\n q = q.filter(BotEvent.dimensions_flat == f)\n if q.count(limit=1):\n logging.info('Found capacity via BotEvent: %s', flat)\n task_queues.set_has_capacity(dimensions, seconds)\n return True\n\n logging.warning('HAS NO CAPACITY: %s', flat)\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
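The capacity check in valid_stage_count above reduces to comparing the mainloop footprint against the per-block shared memory limit of the target architecture. A standalone sketch (the 163 KB figure for SM80 is an assumption here; the authoritative table is cutlass.SharedMemPerCC):

def mainloop_fits(smem_per_stage, stages, smem_capacity_bytes):
    # Mirrors the check above: stage footprint times stage count must fit per block.
    return smem_per_stage * stages <= smem_capacity_bytes

capacity_sm80 = 163 << 10                       # assumed ~163 KB per block on SM80
print(mainloop_fits(32800, 3, capacity_sm80))   # True
print(mainloop_fits(32800, 6, capacity_sm80))   # False: 196800 bytes is too much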
Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`. | def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple:
if cc < 90:
if cluster_shape != [1, 1, 1]:
return (False,
f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. Received cluster shape of "
f"{cluster_shape} for SM{cc}.")
else:
return (True, "")
if len(cluster_shape) != 3:
return (False,
f"Cluster shapes must be rank-3. Received {cluster_shape} (rank {len(cluster_shape)}")
if cluster_shape[2] != 1:
return (False,
"CUTLASS kernels currently require the third dimension of cluster shape to be 1. "
f"Received cluster shape of {cluster_shape}.")
# The CUDA programming guide currently defines a maximum of 8 thread blocks per cluster
# as being portably supported (https://docs.nvidia.com/cuda/cuda-c-programming-guide/#thread-block-clusters).
# Current CUTLASS kernels only have non-unit cluster dimensions within the first two dimensions,
# so we check that the first two dimensions of the cluster shape do not exceed 8 thread blocks in total.
blocks_in_2d = cluster_shape[0] * cluster_shape[1]
if blocks_in_2d > 8:
return (False,
f"Thread block clusters with more than 8 thread blocks are currently unsupported on SM{cc}. "
f"Received cluster shape {cluster_shape}, which has {blocks_in_2d} thread blocks.")
return (True, "") | [
"def _has_clusters(self):\n return self.cluster_column in self.data.df.columns",
"async def do_check_clusters(self, clusters):\n raise NotImplementedError",
"def is_clustering_valid(clustering_model: Type[Clustering]) -> bool:\n n_labels = len(set(clustering_model.model.labels_)) # type: ignore\n n_samples = len(clustering_model.raw_data) # type: ignore\n if not 1 < n_labels < n_samples:\n return False\n return True",
"def is_cluster_number(self, number: int):\n return const.FAT_ENTRY_CLUSTER_MIN <= number < self.max_clusters",
"def iscluster(clues, num):\n assert len(clues) == num\n\n if abs(clues[0].diff - clues[-1].diff) <= num:\n return True\n return False",
"def is_k_shape(ptn, k):\n ptn = Partition(ptn)\n if k is None:\n # see if it's a k-shape for any k in [1, n-1].\n # (note that every partition is a 0-shape and an n-shape)\n n = ptn.size()\n lis = [is_k_shape(ptn, kk) for kk in range(1, n)]\n return any(lis)\n else:\n k_bdy = ptn.k_boundary(k)\n return is_linked(k_bdy)",
"def _ignore_cluster(self, cluster):\n if cluster.n_spectra < self.min_size:\n return True\n if cluster.n_spectra > self.max_size:\n return True\n if cluster.max_il_ratio is None and (self.min_ratio > 0 or self.max_ratio < 1):\n return True\n if cluster.max_il_ratio is not None and cluster.max_il_ratio < self.min_ratio:\n return True\n if cluster.max_il_ratio is not None and cluster.max_il_ratio > self.max_ratio:\n return True\n if cluster.identified_spectra < self.min_identified_spectra:\n return True\n if cluster.identified_spectra > self.max_identified_spectra:\n return True\n if cluster.unidentified_spectra < self.min_unidentified_spectra:\n return True\n if cluster.unidentified_spectra > self.max_unidentified_spectra:\n return True\n\n return False",
"def is_valid_cluster_host_address(self, ip_address):\n if ip_address == self.cluster_host_subnet.network:\n print(\"Cannot use network address\")\n return False\n elif ip_address == self.cluster_host_subnet.broadcast:\n print(\"Cannot use broadcast address\")\n return False\n elif ip_address.is_multicast():\n print(\"Invalid network address - multicast address not allowed\")\n return False\n elif ip_address.is_loopback():\n print(\"Invalid network address - loopback address not allowed\")\n return False\n elif ip_address not in self.cluster_host_subnet:\n print(\"Address must be in the cluster host subnet\")\n return False\n else:\n return True",
"def _check_collision(self, block):\n\t\tif isinstance(block, Block):\n\t\t\tfor location in block.get_locations():\n\t\t\t\tif self.matrix[location[0]][location[1]] != '-':\n\t\t\t\t\treturn True\n\t\t\t\telif location[0] == ROWS-1:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\telse:\n\t\t\traise Exception(\"Block object expected\")",
"def _are_tt_cores_valid(tt_cores, shape, tt_ranks):\n shape = clean_raw_shape(shape)\n num_dims = len(tt_cores)\n\n for core_idx in range(1, num_dims):\n if tt_cores[core_idx].type() != tt_cores[0].type():\n return False \n try:\n for core_idx in range(num_dims):\n curr_core_shape = tt_cores[core_idx].shape \n if len(curr_core_shape) != len(tt_cores[0].shape):\n # Shapes are not consistent. \n return False \n if shape is not None:\n for i in range(len(shape)):\n if curr_core_shape[i+1] != shape[i][core_idx]:\n # The TT-cores are not aligned with the given shape.\n return False \n if core_idx >= 1:\n prev_core_shape = tt_cores[core_idx - 1].shape \n if curr_core_shape[0] != prev_core_shape[-1]:\n # TT-ranks are inconsistent.\n return False \n if tt_ranks is not None:\n if curr_core_shape[0] != tt_ranks[core_idx]:\n # The TT-ranks are not aligned with the TT-cores shape.\n return False \n if curr_core_shape[-1] != tt_ranks[core_idx + 1]:\n # The TT-ranks are not aligned with the TT-cores shape.\n return False \n if tt_cores[0].shape[0] != 1 or tt_cores[-1].shape[-1] != 1:\n # The first or the last rank is not 1\n return False \n except ValueError:\n # The shape of the TT-cores is undermined, cannot validate it.\n pass \n return True",
"def is_in_cluster(self) -> bool:\n if self._is_in_cluster is not None:\n return self._is_in_cluster\n self.api_client # so we can determine if we are in_cluster or not\n if TYPE_CHECKING:\n assert self._is_in_cluster is not None\n return self._is_in_cluster",
"def is_member_of(self, node, cluster):\n return self.lookup[node] == cluster",
"def has_cluster_info(self):\n has_cluster_info = False\n if self.model.clt_id:\n has_cluster_info = True\n return has_cluster_info",
"def _validate_tosca_cluster(tosca):\n if not tosca:\n return True\n\n if not tosca.get(\"topology_template\", {}).get(\"outputs\", {}).get(\"kubeconfig\"):\n raise ValidationError(\n \"Invalid TOSCA template content. The output `kubeconfig` is missing.\"\n )\n\n return _validate_tosca_dict(tosca)",
"def check_for_cluster():\n emr_client = boto3.client('emr')\n\n return check_for_existing_emr_cluster(\n emr_client=emr_client, cluster_id=get_config('emr')['emr_cluster_id'])",
"def testClusterValidLayersListSuccessful(self):\n model_layers = [\n self.keras_clusterable_layer, self.keras_non_clusterable_layer,\n self.custom_clusterable_layer\n ]\n clustered_list = cluster.cluster_weights(model_layers, **self.params)\n\n self.assertEqual(len(model_layers), len(clustered_list))\n for layer, clustered_layer in zip(model_layers, clustered_list):\n self._validate_clustered_layer(layer, clustered_layer)",
"def is_existing_cluster(self, cluster):\n\n try:\n self.get_single_cluster(cluster)\n return True\n except ErrAtlasNotFound:\n return False",
"def _check_can_broadcast_to(shape, target_shape):\n ndim = len(shape)\n ndim_target = len(target_shape)\n if ndim > ndim_target:\n return False\n for i, j in zip(reversed(shape), reversed(target_shape)):\n if i not in (1, j):\n return False\n return True",
"def is_block_distributed(distribution):\n return distribution[0] == 'b' and len(distribution) == 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
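A standalone sketch of the cluster-shape constraints enforced above for SM90: a rank-3 shape, a unit third dimension, and at most 8 thread blocks across the first two dimensions:

def cluster_shape_ok(cluster_shape):
    # Condenses the SM90 checks in valid_cluster_shape above into one boolean.
    return (len(cluster_shape) == 3
            and cluster_shape[2] == 1
            and cluster_shape[0] * cluster_shape[1] <= 8)

print(cluster_shape_ok([2, 4, 1]))   # True: 8 thread blocks
print(cluster_shape_ok([4, 4, 1]))   # False: 16 thread blocks exceeds the portable limit
print(cluster_shape_ok([2, 2, 2]))   # False: third dimension must be 1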
Checks that the kernel and epilogue schedules passed in are a valid combination for a device of compute capability ``cc``. | def valid_schedule(
cc: int,
kernel_schedule: cutlass.KernelScheduleType,
epilogue_schedule: cutlass.EpilogueScheduleType,
tile_scheduler: cutlass.TileSchedulerType) -> tuple:
kernel_auto = (kernel_schedule == cutlass.KernelScheduleType.ScheduleAuto)
epilogue_auto = (epilogue_schedule == cutlass.EpilogueScheduleType.ScheduleAuto)
tile_scheduler_default = (tile_scheduler == cutlass.TileSchedulerType.Default)
if cc < 90 and not (kernel_auto and epilogue_auto and tile_scheduler_default):
return (False, "Non-default schedules are only supported on SM90 and beyond")
if (kernel_auto and not epilogue_auto) or (not kernel_auto and epilogue_auto):
return (False, "Kernel and epilogue schedules must either both be auto or neither be auto")
if not tile_scheduler_default:
if (tile_scheduler == cutlass.TileSchedulerType.StreamK) and (kernel_schedule != cutlass.KernelScheduleType.TmaWarpSpecializedCooperative):
return (False, "Stream-K tile scheduler is currently only supported with the cooperative kernel schedule")
return (True, "") | [
"def is_valid(schedule: dict[str, tuple[str, str, tuple]]) -> bool:\n # Gives all the values of the dictionary\n sc_sections = [schedule[key] for key in schedule]\n return all([not sections_conflict(x, y) for x in sc_sections for y in sc_sections if x is not y])",
"def _assert_ecg_input(ecg_processor: \"EcgProcessor\", key: str, ecg_signal: EcgResultDataFrame, rpeaks: RPeakDataFrame):\n if all(x is None for x in [ecg_processor, key]) and all(x is None for x in [ecg_signal, rpeaks]):\n raise ValueError(\"Either 'ecg_processor' and 'key', or 'rpeaks' and 'ecg_signal' must be passed as arguments!\")\n if ecg_processor is not None and key is None:\n raise ValueError(\"Both of 'ecg_processor' and 'key' must be passed as arguments!\")\n if ecg_signal is not None and rpeaks is None:\n raise ValueError(\"Both of 'ecg_signal' and 'rpeaks' must be passed as arguments!\")",
"def test_section_not_compatible() -> None:\n expected = False\n actual = a2_courses.is_section_compatible(SCHEDULE_1, MAT137_LEC0101)\n\n assert expected == actual",
"def test_section_compatible() -> None:\n expected = True\n actual = a2_courses.is_section_compatible(SCHEDULE_1, STA130_LEC0101)\n\n assert expected == actual",
"def check_FU_valid_ops(sched):\n global func_unit\n for i in range(len(sched)):\n pair = sched[i]\n for fu in func_unit:\n if pair[fu]:\n if NODE_OPS[pair[fu]].opcode in get_non_func_unit_ops(fu):\n print(\n \"Error: cycle %d is doing an invalid operation on functional unit %d\" % (\n i + 1, fu))",
"def test_1_valid_schedule_combinations() -> None:\n c1 = MAT137\n c2 = CSC111\n expected = 1\n actual = a2_courses.valid_schedules(c1, c2)\n assert len(actual) == expected",
"def is_section_compatible(schedule: dict[str, tuple[str, str, tuple]],\n section: tuple[str, str, tuple]) -> bool:\n all_sections = [schedule[course] for course in schedule]\n return all([not sections_conflict(s, section) for s in all_sections])",
"def test_4_valid_schedule_combinations() -> None:\n c1 = CON333\n c2 = CON123\n\n expected = 4\n actual = a2_courses.valid_schedules(c1, c2)\n assert len(actual) == expected",
"def check_inputs(time_config: dict):\n\n assert sum(time_config[\"step_duration\"][\"weekday\"].values()) == 24\n assert sum(time_config[\"step_duration\"][\"weekend\"].values()) == 24\n\n # Check that all groups given in time_config file are in the valid group hierarchy\n all_groups = activity_hierarchy\n for step, activities in time_config[\"step_activities\"][\"weekday\"].items():\n assert all(group in all_groups for group in activities)\n\n for step, activities in time_config[\"step_activities\"][\"weekend\"].items():\n assert all(group in all_groups for group in activities)",
"def test_compatible_sections() -> None:\n actual = a2_courses.compatible_sections(SCHEDULE_1, CON123) == {CON123_LEC0123}\n expected = True\n assert actual == expected",
"def is_course_compatible(schedule: dict[str, tuple[str, str, tuple]],\n course: tuple[str, str, set]) -> bool:\n return any([is_section_compatible(schedule, s) for s in course[2]])",
"def _check_invalid_cases(embedding_lookup_device):\n if (tpu.under_tpu_inference_context() and\n embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE):\n raise ValueError(\n 'Using embedding_lookup_device=tpu_embedding_core during inference '\n 'is not supported.')\n if embedding_lookup_device == EmbeddingDevice.CPU:\n if not tpu.under_tpu_inference_context():\n raise ValueError(\n 'Using TPUEmbeddingColumn with embedding_lookup_device=\"cpu\" '\n 'during training is not supported.')",
"def test_jenkins_launch_config_sg_valid(self) -> None:\n lcs = self.autoscaling.describe_launch_configurations(\n LaunchConfigurationNames=['global-jenkins-server-lc'],\n MaxRecords=1\n )\n \n try:\n launch_config = lcs.get('LaunchConfigurations')[0]\n except IndexError:\n self.assertTrue(False)\n return\n \n sg_id = launch_config.get('SecurityGroups')[0]\n sg = self.ec2.describe_security_groups(GroupIds=[sg_id]).get('SecurityGroups')[0]\n \n ingress = sg.get('IpPermissions')\n egress = sg.get('IpPermissionsEgress')\n \n ingress_80 = SecurityGroup.validate_sg_rule_cidr(ingress[0], 'tcp', 80, 80, '0.0.0.0/0')\n ingress_22 = SecurityGroup.validate_sg_rule_cidr(ingress[1], 'tcp', 22, 22, '0.0.0.0/0')\n \n egress_80 = SecurityGroup.validate_sg_rule_cidr(egress[0], 'tcp', 80, 80, '0.0.0.0/0')\n egress_22 = SecurityGroup.validate_sg_rule_cidr(egress[0], 'tcp', 22, 22, '0.0.0.0/0')\n egress_443 = SecurityGroup.validate_sg_rule_cidr(egress[0], 'tcp', 443, 443, '0.0.0.0/0')\n egress_2049 = SecurityGroup.validate_sg_rule_cidr(egress[0], 'tcp', 2049, 2049, '0.0.0.0/0')\n \n self.assertTrue(all([\n sg.get('GroupName') == 'global-jenkins-server-lc-security-group',\n len(ingress) == 2,\n ingress_80,\n ingress_22,\n len(egress) == 4,\n egress_80,\n egress_22,\n egress_443,\n egress_2049\n ]))",
"def has_vc_capable_rooms(event):\n capable_rooms = get_vc_capable_rooms()\n return (event.room in capable_rooms\n or any(c.room for c in event.contributions if c.room in capable_rooms)\n or any([(s.room, sb.room) for s in event.sessions for sb in s.blocks\n if sb.room in capable_rooms or s.room in capable_rooms]))",
"def _check_compatible_regs(self, rhs):\n list1 = self.qregs + self.cregs\n list2 = rhs.qregs + rhs.cregs\n for element1 in list1:\n for element2 in list2:\n if element2.name == element1.name:\n if element1 != element2:\n raise QiskitError(\"circuits are not compatible\")",
"def __check_constraints_feasibility(self):\n pass",
"def valid_stage_count(\n cc: int,\n td: TileDescription,\n element_C: cutlass.DataType = None,\n element_D: cutlass.DataType = None) -> tuple:\n if cc == 90:\n if (td.stages is None or td.stages == 0):\n # Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically\n # determines the stage count to use. Thus, all settings are valid in these scenarios.\n return (True, \"\")\n else:\n cutlass.logger.warning(\n \"Setting an explicit stage count for SM90 kernels currently may \"\n \"result in compilation errors if the combination of tile shape, \"\n \"stage count, and shared memory requirement of the epilogue exceeds \"\n \"the available shared memory per SM.\")\n\n if td.stages <= 0:\n return (False, f\"Stage counts must be positive integers. Tile description has stage count of {td.stages}.\")\n\n if cc < 80 and td.stages != 2:\n return (False, f\"Tile description has stage count of {td.stages}, \"\n f\"but only 2 stages are supported on SM{cc}.\")\n\n # The calculation below does not consider shared memory used by the epilogue and, thus,\n # only catches cases in which the mainloop exceeds the device's shared memory capacity.\n # This is not a concern for CUTLASS 2.x kernels, for which the shared memory of the\n # mainloop and epilogue is shared.\n smem_per_stage = calculate_smem_usage_per_stage(td, cutlass.OperationKind.Gemm)\n smem_usage_mainloop = (smem_per_stage * td.stages)\n smem_arch = cutlass.SharedMemPerCC[cc] << 10\n if smem_usage_mainloop > smem_arch:\n return ( False,\n \"Configuration uses too much shared memory. Consider reducing stage count or tile shape.\\n\"\n f\"Details:\\n\"\n f\"Mainloop uses {smem_per_stage} bytes of shared memory per stage, and \"\n f\"{td.stages} stages for a total of {smem_usage_mainloop} bytes.\\n\"\n f\"The maxmium amount of shared memory that can be used per block on CC {cc} is {smem_arch}.\")\n\n return (True, \"\")",
"def _validate_perf_config(config_contents, required_parameters):\n for parameter in required_parameters:\n if not config_contents.get(parameter):\n return False\n value = config_contents[parameter]\n if not value or not isinstance(value, basestring): # pragma: no cover\n return False\n\n return True",
"def on_segment(a, b, c):\n ab = (b[0] - a[0], b[1] - a[1], b[2] - a[2])\n ac = (c[0] - a[0], c[1] - a[1], c[2] - a[2])\n if math.fabs(cross_product(ac, ab)) > _s_epsilon:\n return False\n return is_within(a[0], b[0], c[0]) and is_within(a[1], b[1], c[1]) \\\n and is_within(a[2], b[2], c[2])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
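The rules in valid_schedule above condense to three boolean conditions. A simplified sketch (it abstracts the enum comparisons into flags and is not a drop-in replacement for the function):

def schedule_combo_ok(cc, kernel_auto, epilogue_auto, stream_k, cooperative):
    if cc < 90 and not (kernel_auto and epilogue_auto and not stream_k):
        return False                      # pre-SM90 must use the default schedules
    if kernel_auto != epilogue_auto:
        return False                      # auto settings must match
    if stream_k and not cooperative:
        return False                      # stream-K needs the cooperative kernel schedule
    return True

print(schedule_combo_ok(80, True, True, False, False))    # True
print(schedule_combo_ok(90, True, False, False, False))   # False
print(schedule_combo_ok(90, False, False, True, True))    # True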
Returns `alignment_provided` if it is set, otherwise `default_alignment`, and checks that `alignment_provided` does not exceed `default_alignment`. | def alignment_or_default(alignment_provided: int, default_alignment: int) -> int:
if alignment_provided is not None:
if alignment_provided > default_alignment:
raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
return alignment_provided
return default_alignment | [
"def update_alignment(alignment_provided:int, default_alignment: int) -> int:\n if alignment_provided is not None:\n if alignment_provided > default_alignment:\n if alignment_provided % default_alignment == 0:\n return default_alignment\n raise Exception(f\"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.\")\n return alignment_provided\n\n return default_alignment",
"def get_align(self, default):\n return self.args.get(\"segment_align\", default)",
"def _struct_alignment(alignments: Iterable[int]) -> int:\n return bounding_power_of_2(_lcm(*alignments))",
"def _find_minimum_alignment(offset: int, base_alignment: int, prev_end: int) -> int:\n # Essentially, we need to find the minimum k such that:\n # 1) offset = m * base_alignment * 2**k, where m > 0 and k >= 0;\n # (by definition of alignment)\n # 2) offset - prev_offset < base_alignment * 2**k\n # (otherwise the compiler can just as well take m' = m - 1).\n if offset % base_alignment != 0:\n raise ValueError(\n f\"Field offset ({offset}) must be a multiple of the base alignment ({base_alignment}).\"\n )\n\n alignment = base_alignment\n while offset % alignment == 0:\n if offset - prev_end < alignment:\n return alignment\n\n alignment *= 2\n\n raise ValueError(\n f\"Could not find a suitable alignment for the field at offset {offset}; \"\n \"consider adding explicit padding.\"\n )",
"def bestAlignment(readAlignments: ReadAlignments) -> Alignment:\n return max(readAlignments, key=lambda alignment: alignment.hsps[0])",
"def has_align(self):\n return self._db_info_cache[\"sequence-aligned\"]",
"def get_abi_alignment(self, target_data, context=None):\n llty = self._get_ll_pointer_type(target_data, context)\n return target_data.get_pointee_abi_alignment(llty)",
"def align_sequences_default(seq_a, seq_b):\n if isinstance(seq_a, list): seq_a=''.join(seq_a)\n if isinstance(seq_b, list): seq_b=''.join(seq_b)\n # Align the sequences of the two chains\n return mmtbx.alignment.align(\n seq_a=seq_a, seq_b=seq_b,\n gap_opening_penalty = 20,\n gap_extension_penalty = 2,\n similarity_function = 'blosum50',\n style = 'local').extract_alignment()",
"def best_alignment(ref, index, hash1, hash2, max_edit, max_indels, min_seeds, max_hits):\n # the structure of this function is because in general we can use the alignment of\n # one read to inform the alignment of its mate. For now, ignore the information\n # that they are paired and just consider them separately.\n\n # TODO eventually kill off the [::-1] in favor of reverse complement, but HW1 requires only reverse\n r1_fwd, r1_rev, r2_fwd, r2_rev = None, None, None, None\n if hash1.seq.seq:\n r1_fwd = best_alignment_single(ref, index, hash1.seq, hash1.fwd, max_edit, max_indels, min_seeds, max_hits)\n r1_rev = best_alignment_single(ref, index, hash1.seq[::-1], hash1.rev, max_edit, max_indels, min_seeds, max_hits)\n if hash2.seq.seq:\n r2_fwd = best_alignment_single(ref, index, hash2.seq, hash1.fwd, max_edit, max_indels, min_seeds, max_hits)\n r2_rev = best_alignment_single(ref, index, hash2.seq[::-1], hash2.rev, max_edit, max_indels, min_seeds, max_hits)\n def get_aln_info(fwd, rev, size, ref_end):\n if fwd and rev:\n aln = AlignmentInfo(offset = fwd.start_offset if fwd.score > rev.score else rev.start_offset,\n reversed=rev.score >= fwd.score, cigar=rev.cigar if rev.score > fwd.score else fwd.cigar, \n mismatches=fwd.mm if fwd.score > rev.score else rev.mm)\n elif fwd:\n aln = AlignmentInfo(offset=fwd.start_offset, reversed=False, cigar=fwd.cigar,\n mismatches=fwd.mm)\n elif rev:\n aln = AlignmentInfo(offset= rev.start_offset, reversed=True, cigar=rev.cigar,\n mismatches=rev.mm)\n else:\n aln = None\n if aln and (aln.offset + size >= ref_end or aln.offset < 0):\n aln = None\n if aln:\n cigarcount = Counter(aln.cigar)\n if cigarcount['I'] + cigarcount['D'] > max_indels:\n aln = None\n return aln\n r1_aln = get_aln_info(r1_fwd, r1_rev, len(hash1.seq), len(ref))\n r2_aln = get_aln_info(r2_fwd, r2_rev, len(hash1.seq), len(ref))\n return r1_aln, r2_aln",
"def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n ith = len(seq_x)\n max_pos = alignment_matrix[ith].index(max(alignment_matrix[ith]))\n jth = max_pos\n x_prime, y_prime = '', ''\n while alignment_matrix[ith][jth] != 0:\n\n if alignment_matrix[ith][jth] == \\\n alignment_matrix[ith-1][jth-1] + \\\n scoring_matrix[seq_x[ith-1]][seq_y[jth-1]]:\n x_prime = seq_x[ith-1] + x_prime\n y_prime = seq_y[jth-1] + y_prime\n ith -= 1\n jth -= 1\n\n elif alignment_matrix[ith][jth] == \\\n alignment_matrix[ith][jth-1] + \\\n scoring_matrix['-'][seq_y[jth-1]]:\n x_prime = '-' + x_prime\n y_prime = seq_y[jth-1] + y_prime\n jth -= 1\n\n elif alignment_matrix[ith][jth] == \\\n alignment_matrix[ith-1][jth] + \\\n scoring_matrix[seq_x[ith-1]]['-']:\n x_prime = seq_x[ith-1] + x_prime\n y_prime = '-' + y_prime\n ith -= 1\n\n align_matrix = \\\n compute_alignment_matrix(x_prime, y_prime, scoring_matrix, True)\n (score, align_x, align_y) = compute_global_alignment(\n x_prime, y_prime, scoring_matrix, align_matrix)\n\n return (score, align_x, align_y)",
"def alignment(self):\n if self._alignment is None:\n if self._map is None:\n if self._columns is not None:\n self.__map_columns()\n else:\n self._map = self._align(self._graph)\n self._refine_each()\n if self._refinements:\n self._refine()\n assert self._map.shape[1] > 0, \"Alignment has no columns\"\n records = deepcopy(self._records)\n for i, record in enumerate(records):\n seq = record.seq\n aligned_seq = []\n map = self._map[i]\n index = 0\n for symbol in map:\n if symbol:\n aligned_seq.append(seq[index])\n index += 1\n else:\n aligned_seq.append(SPACE)\n record.seq = Seq(\"\".join(aligned_seq), GAPPED_ALPHABET)\n self._alignment = tomsa(records)\n return self._alignment",
"def setUp(self):\n self.empty = Alignment({})\n self.one_seq = Alignment({'a':'aaaaa'})\n self.ragged = Alignment({'a':'aaaaaa', 'b':'aaa', 'c':'aaaa'})\n self.identical = Alignment({'a':'aaaa','b':'aaaa'})\n self.gaps = Alignment({'a':'aaaaaaa','b':'a--a-aa', 'c':'aa-----'})\n self.gaps_rna = Alignment({'a':Rna('aaaaaaa'), 'b':Rna('a--a-aa'), \\\n 'c':Rna('aa-----')})\n self.unordered = Alignment({'a':'aaaaa','b':'bbbbb'})\n self.ordered1 = Alignment({'a':'aaaaa','b':'bbbbb'}, RowOrder=['a','b'])\n self.ordered2 = Alignment({'a':'aaaaa','b':'bbbbb'}, RowOrder=['b','a'])\n self.mixed = Alignment({'a':'abcde', 'b':'lmnop'})\n self.end_gaps = Alignment({'a':'--a-bc-', 'b':'-cb-a--', 'c':'--d-ef-'})\n self.many = Alignment({\n 'a': Rna('ucagucaguu'),\n 'b': Rna('uccgucaauu'),\n 'c': Rna('accaucaguc'),\n 'd': Rna('ucaaucgguu'),\n 'e': Rna('uugguugggu'),\n 'f': Rna('ccgggcggcc'),\n 'g': Rna('ucaaccggaa'),\n })\n #Additional Alignments for tests added 6/4/04 by Jeremy Widmann\n self.integers = Alignment([[1,2,3,4,5],[1,1,1,1,1],[5,4,3,2,1]])\n self.sequences = Alignment(map(RnaSequence, ['UCAG', 'UCAG', 'UCAG']))\n self.structures = Alignment(map(ViennaStructure, \n ['(())..', '......', '(....)']), None, str)\n self.labeled = Alignment(['abc', 'def'], ['1st', '2nd'])\n #Additional Alignment for tests added 1/30/06 by Cathy Lozupone\n self.omitRowsTemplate_aln = Alignment({\n 's1':Rna('UC-----CU---C'),\n 's2':Rna('UC------U---C'),\n 's3':Rna('UUCCUUCUU-UUC'),\n 's4':Rna('UU-UUUU-UUUUC'),\n 's5':Rna('-------------')\n })",
"def getPedAlignment(self):\n return self.__alignment",
"def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):\r\n alignment_matrix = []\r\n for row in range(len(seq_x)+1):\r\n alignment_matrix.append([])\r\n for row in range(len(seq_x)+1):\r\n for dummy_col in range(len(seq_y)+1):\r\n alignment_matrix[row].append(0)\r\n\r\n for dummy_idx in range(1,len(seq_x)+1):\r\n s_i_0 = alignment_matrix[dummy_idx-1][0] + scoring_matrix[seq_x[dummy_idx - 1]]['-']\r\n if s_i_0 < 0 and global_flag == False:\r\n s_i_0 = 0\r\n alignment_matrix[dummy_idx][0]=s_i_0\r\n for dummy_idx in range(1,len(seq_y)+1):\r\n s_0_j = alignment_matrix[0][dummy_idx-1] + scoring_matrix['-'][seq_y[dummy_idx - 1]]\r\n if s_0_j < 0 and global_flag == False:\r\n s_0_j = 0\r\n alignment_matrix[0][dummy_idx]=s_0_j\r\n for x_idx in range(1,len(seq_x)+1):\r\n for y_idx in range(1,len(seq_y)+1):\r\n option1 = alignment_matrix[x_idx - 1][y_idx - 1] + scoring_matrix[seq_x[x_idx - 1]][seq_y[y_idx - 1]]\r\n option2 = alignment_matrix[x_idx - 1][y_idx] + scoring_matrix[seq_x[x_idx - 1]]['-']\r\n option3 = alignment_matrix[x_idx][y_idx - 1] + scoring_matrix['-'][seq_y[y_idx - 1]]\r\n alignment_matrix[x_idx][y_idx] = max(option1,option2,option3)\r\n if global_flag == False and alignment_matrix[x_idx][y_idx] < 0:\r\n alignment_matrix[x_idx][y_idx] = 0\r\n return alignment_matrix",
"def alignTo(self, alignment: int) -> None:\n\n offset = self._bitPosition % alignment\n if offset != 0:\n self.writeBits(0, alignment - offset)",
"def _calc_padding_for_alignment(align, base):\n rmdr = int(base) % align\n if rmdr == 0:\n return 0\n else:\n return align - rmdr",
"def vertical_alignment(self):\n return self.container['vertical_alignment']",
"def vertical_alignment(self, vertical_alignment):\n\n self.container['vertical_alignment'] = vertical_alignment",
"def setAlignment(self, align):\r\n self['ALIGN'] = align"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
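A quick illustration of the fallback behaviour of alignment_or_default above, assuming its definition is in scope:

print(alignment_or_default(None, 8))   # 8: falls back to the default
print(alignment_or_default(4, 8))      # 4: explicit value within the limit
# alignment_or_default(16, 8) would raise, since 16 exceeds the maximum of 8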
Returns `alignment_provided` if it is set, otherwise `default_alignment`, and checks that `alignment_provided` does not exceed `default_alignment`. | def update_alignment(alignment_provided:int, default_alignment: int) -> int:
if alignment_provided is not None:
if alignment_provided > default_alignment:
if alignment_provided % default_alignment == 0:
return default_alignment
raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
return alignment_provided
return default_alignment | [
"def alignment_or_default(alignment_provided: int, default_alignment: int) -> int:\n if alignment_provided is not None:\n if alignment_provided > default_alignment:\n raise Exception(f\"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.\")\n return alignment_provided\n\n return default_alignment",
"def get_align(self, default):\n return self.args.get(\"segment_align\", default)",
"def _struct_alignment(alignments: Iterable[int]) -> int:\n return bounding_power_of_2(_lcm(*alignments))",
"def _find_minimum_alignment(offset: int, base_alignment: int, prev_end: int) -> int:\n # Essentially, we need to find the minimum k such that:\n # 1) offset = m * base_alignment * 2**k, where m > 0 and k >= 0;\n # (by definition of alignment)\n # 2) offset - prev_offset < base_alignment * 2**k\n # (otherwise the compiler can just as well take m' = m - 1).\n if offset % base_alignment != 0:\n raise ValueError(\n f\"Field offset ({offset}) must be a multiple of the base alignment ({base_alignment}).\"\n )\n\n alignment = base_alignment\n while offset % alignment == 0:\n if offset - prev_end < alignment:\n return alignment\n\n alignment *= 2\n\n raise ValueError(\n f\"Could not find a suitable alignment for the field at offset {offset}; \"\n \"consider adding explicit padding.\"\n )",
"def bestAlignment(readAlignments: ReadAlignments) -> Alignment:\n return max(readAlignments, key=lambda alignment: alignment.hsps[0])",
"def has_align(self):\n return self._db_info_cache[\"sequence-aligned\"]",
"def get_abi_alignment(self, target_data, context=None):\n llty = self._get_ll_pointer_type(target_data, context)\n return target_data.get_pointee_abi_alignment(llty)",
"def align_sequences_default(seq_a, seq_b):\n if isinstance(seq_a, list): seq_a=''.join(seq_a)\n if isinstance(seq_b, list): seq_b=''.join(seq_b)\n # Align the sequences of the two chains\n return mmtbx.alignment.align(\n seq_a=seq_a, seq_b=seq_b,\n gap_opening_penalty = 20,\n gap_extension_penalty = 2,\n similarity_function = 'blosum50',\n style = 'local').extract_alignment()",
"def best_alignment(ref, index, hash1, hash2, max_edit, max_indels, min_seeds, max_hits):\n # the structure of this function is because in general we can use the alignment of\n # one read to inform the alignment of its mate. For now, ignore the information\n # that they are paired and just consider them separately.\n\n # TODO eventually kill off the [::-1] in favor of reverse complement, but HW1 requires only reverse\n r1_fwd, r1_rev, r2_fwd, r2_rev = None, None, None, None\n if hash1.seq.seq:\n r1_fwd = best_alignment_single(ref, index, hash1.seq, hash1.fwd, max_edit, max_indels, min_seeds, max_hits)\n r1_rev = best_alignment_single(ref, index, hash1.seq[::-1], hash1.rev, max_edit, max_indels, min_seeds, max_hits)\n if hash2.seq.seq:\n r2_fwd = best_alignment_single(ref, index, hash2.seq, hash1.fwd, max_edit, max_indels, min_seeds, max_hits)\n r2_rev = best_alignment_single(ref, index, hash2.seq[::-1], hash2.rev, max_edit, max_indels, min_seeds, max_hits)\n def get_aln_info(fwd, rev, size, ref_end):\n if fwd and rev:\n aln = AlignmentInfo(offset = fwd.start_offset if fwd.score > rev.score else rev.start_offset,\n reversed=rev.score >= fwd.score, cigar=rev.cigar if rev.score > fwd.score else fwd.cigar, \n mismatches=fwd.mm if fwd.score > rev.score else rev.mm)\n elif fwd:\n aln = AlignmentInfo(offset=fwd.start_offset, reversed=False, cigar=fwd.cigar,\n mismatches=fwd.mm)\n elif rev:\n aln = AlignmentInfo(offset= rev.start_offset, reversed=True, cigar=rev.cigar,\n mismatches=rev.mm)\n else:\n aln = None\n if aln and (aln.offset + size >= ref_end or aln.offset < 0):\n aln = None\n if aln:\n cigarcount = Counter(aln.cigar)\n if cigarcount['I'] + cigarcount['D'] > max_indels:\n aln = None\n return aln\n r1_aln = get_aln_info(r1_fwd, r1_rev, len(hash1.seq), len(ref))\n r2_aln = get_aln_info(r2_fwd, r2_rev, len(hash1.seq), len(ref))\n return r1_aln, r2_aln",
"def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n ith = len(seq_x)\n max_pos = alignment_matrix[ith].index(max(alignment_matrix[ith]))\n jth = max_pos\n x_prime, y_prime = '', ''\n while alignment_matrix[ith][jth] != 0:\n\n if alignment_matrix[ith][jth] == \\\n alignment_matrix[ith-1][jth-1] + \\\n scoring_matrix[seq_x[ith-1]][seq_y[jth-1]]:\n x_prime = seq_x[ith-1] + x_prime\n y_prime = seq_y[jth-1] + y_prime\n ith -= 1\n jth -= 1\n\n elif alignment_matrix[ith][jth] == \\\n alignment_matrix[ith][jth-1] + \\\n scoring_matrix['-'][seq_y[jth-1]]:\n x_prime = '-' + x_prime\n y_prime = seq_y[jth-1] + y_prime\n jth -= 1\n\n elif alignment_matrix[ith][jth] == \\\n alignment_matrix[ith-1][jth] + \\\n scoring_matrix[seq_x[ith-1]]['-']:\n x_prime = seq_x[ith-1] + x_prime\n y_prime = '-' + y_prime\n ith -= 1\n\n align_matrix = \\\n compute_alignment_matrix(x_prime, y_prime, scoring_matrix, True)\n (score, align_x, align_y) = compute_global_alignment(\n x_prime, y_prime, scoring_matrix, align_matrix)\n\n return (score, align_x, align_y)",
"def alignment(self):\n if self._alignment is None:\n if self._map is None:\n if self._columns is not None:\n self.__map_columns()\n else:\n self._map = self._align(self._graph)\n self._refine_each()\n if self._refinements:\n self._refine()\n assert self._map.shape[1] > 0, \"Alignment has no columns\"\n records = deepcopy(self._records)\n for i, record in enumerate(records):\n seq = record.seq\n aligned_seq = []\n map = self._map[i]\n index = 0\n for symbol in map:\n if symbol:\n aligned_seq.append(seq[index])\n index += 1\n else:\n aligned_seq.append(SPACE)\n record.seq = Seq(\"\".join(aligned_seq), GAPPED_ALPHABET)\n self._alignment = tomsa(records)\n return self._alignment",
"def setUp(self):\n self.empty = Alignment({})\n self.one_seq = Alignment({'a':'aaaaa'})\n self.ragged = Alignment({'a':'aaaaaa', 'b':'aaa', 'c':'aaaa'})\n self.identical = Alignment({'a':'aaaa','b':'aaaa'})\n self.gaps = Alignment({'a':'aaaaaaa','b':'a--a-aa', 'c':'aa-----'})\n self.gaps_rna = Alignment({'a':Rna('aaaaaaa'), 'b':Rna('a--a-aa'), \\\n 'c':Rna('aa-----')})\n self.unordered = Alignment({'a':'aaaaa','b':'bbbbb'})\n self.ordered1 = Alignment({'a':'aaaaa','b':'bbbbb'}, RowOrder=['a','b'])\n self.ordered2 = Alignment({'a':'aaaaa','b':'bbbbb'}, RowOrder=['b','a'])\n self.mixed = Alignment({'a':'abcde', 'b':'lmnop'})\n self.end_gaps = Alignment({'a':'--a-bc-', 'b':'-cb-a--', 'c':'--d-ef-'})\n self.many = Alignment({\n 'a': Rna('ucagucaguu'),\n 'b': Rna('uccgucaauu'),\n 'c': Rna('accaucaguc'),\n 'd': Rna('ucaaucgguu'),\n 'e': Rna('uugguugggu'),\n 'f': Rna('ccgggcggcc'),\n 'g': Rna('ucaaccggaa'),\n })\n #Additional Alignments for tests added 6/4/04 by Jeremy Widmann\n self.integers = Alignment([[1,2,3,4,5],[1,1,1,1,1],[5,4,3,2,1]])\n self.sequences = Alignment(map(RnaSequence, ['UCAG', 'UCAG', 'UCAG']))\n self.structures = Alignment(map(ViennaStructure, \n ['(())..', '......', '(....)']), None, str)\n self.labeled = Alignment(['abc', 'def'], ['1st', '2nd'])\n #Additional Alignment for tests added 1/30/06 by Cathy Lozupone\n self.omitRowsTemplate_aln = Alignment({\n 's1':Rna('UC-----CU---C'),\n 's2':Rna('UC------U---C'),\n 's3':Rna('UUCCUUCUU-UUC'),\n 's4':Rna('UU-UUUU-UUUUC'),\n 's5':Rna('-------------')\n })",
"def getPedAlignment(self):\n return self.__alignment",
"def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):\r\n alignment_matrix = []\r\n for row in range(len(seq_x)+1):\r\n alignment_matrix.append([])\r\n for row in range(len(seq_x)+1):\r\n for dummy_col in range(len(seq_y)+1):\r\n alignment_matrix[row].append(0)\r\n\r\n for dummy_idx in range(1,len(seq_x)+1):\r\n s_i_0 = alignment_matrix[dummy_idx-1][0] + scoring_matrix[seq_x[dummy_idx - 1]]['-']\r\n if s_i_0 < 0 and global_flag == False:\r\n s_i_0 = 0\r\n alignment_matrix[dummy_idx][0]=s_i_0\r\n for dummy_idx in range(1,len(seq_y)+1):\r\n s_0_j = alignment_matrix[0][dummy_idx-1] + scoring_matrix['-'][seq_y[dummy_idx - 1]]\r\n if s_0_j < 0 and global_flag == False:\r\n s_0_j = 0\r\n alignment_matrix[0][dummy_idx]=s_0_j\r\n for x_idx in range(1,len(seq_x)+1):\r\n for y_idx in range(1,len(seq_y)+1):\r\n option1 = alignment_matrix[x_idx - 1][y_idx - 1] + scoring_matrix[seq_x[x_idx - 1]][seq_y[y_idx - 1]]\r\n option2 = alignment_matrix[x_idx - 1][y_idx] + scoring_matrix[seq_x[x_idx - 1]]['-']\r\n option3 = alignment_matrix[x_idx][y_idx - 1] + scoring_matrix['-'][seq_y[y_idx - 1]]\r\n alignment_matrix[x_idx][y_idx] = max(option1,option2,option3)\r\n if global_flag == False and alignment_matrix[x_idx][y_idx] < 0:\r\n alignment_matrix[x_idx][y_idx] = 0\r\n return alignment_matrix",
"def alignTo(self, alignment: int) -> None:\n\n offset = self._bitPosition % alignment\n if offset != 0:\n self.writeBits(0, alignment - offset)",
"def _calc_padding_for_alignment(align, base):\n rmdr = int(base) % align\n if rmdr == 0:\n return 0\n else:\n return align - rmdr",
"def vertical_alignment(self):\n return self.container['vertical_alignment']",
"def vertical_alignment(self, vertical_alignment):\n\n self.container['vertical_alignment'] = vertical_alignment",
"def setAlignment(self, align):\r\n self['ALIGN'] = align"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Kafka producer that receives avro schema in record header. | def producer_header(sdc_builder, topic, cluster, confluent):
builder = sdc_builder.get_pipeline_builder()
builder.add_error_stage('Discard')
dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='{"a": 1, "b": "Text"}',
stop_after_first_batch=True)
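    # Schema Generator derives an Avro schema for each record and stores it in a record header attribute;
    # the Kafka destination below reads it from there via avro_schema_location='HEADER'.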
schema_generator = builder.add_stage('Schema Generator')
schema_generator.schema_name = 'Brno'
kafka_destination = builder.add_stage('Kafka Producer',
library=cluster.kafka.standalone_stage_lib)
kafka_destination.set_attributes(topic=topic,
data_format='AVRO',
avro_schema_location='HEADER',
include_schema=False,
register_schema=True,
schema_subject=topic,
key_serializer='CONFLUENT',
value_serializer='CONFLUENT')
dev_raw_data_source >> schema_generator >> kafka_destination
return builder.build(title=f'Producer in Header for {topic}').configure_for_environment(cluster, confluent) | [
"def producer_inline(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='{\"a\": 1, \"b\": \"Text\"}',\n stop_after_first_batch=True)\n\n kafka_destination = builder.add_stage('Kafka Producer',\n library=cluster.kafka.standalone_stage_lib)\n kafka_destination.set_attributes(topic=topic,\n data_format='AVRO',\n avro_schema_location='INLINE',\n avro_schema=AVRO_SCHEMA,\n include_schema=False,\n register_schema=True,\n schema_subject=topic,\n key_serializer='CONFLUENT',\n value_serializer='CONFLUENT')\n\n dev_raw_data_source >> kafka_destination\n return builder.build(title=f'Producer Inline for {topic}').configure_for_environment(cluster, confluent)",
"def write_header(schema, sync_marker, codec=\"null\"):\n with BinaryEncoder(HEADER_SCHEMA, codec) as encoder:\n header = {\n \"magic\": MAGIC,\n \"meta\": {\n \"avro.codec\": ensure_bytes(codec),\n \"avro.schema\": ensure_bytes(json.dumps(schema))\n },\n \"sync\": sync_marker\n }\n return encoder.write(header)",
"def convertHbaseToAvro(self,avscFile = \"\", add_default=True, modify=True):\n\n with open(avscFile,'r') as content_file:\n avro_schema = json.loads(content_file.read())\n columns_lookup = {}\n for field in avro_schema['fields']:\n if 'default' in field:\n columns_lookup[field['name']] = field['default']\n else:\n columns_lookup[field['name']] = 'null'\n\n status = \"failed\"\n if avscFile == \"\":\n msg = \"This feature is not yet implemented. Please provide an AVRO schema file (.avsc).\"\n raise ValueError(msg)\n else:\n schema = avro.schema.parse(open(avscFile).read())\n writer = DataFileWriter(open(self.output_file, \"w\"), DatumWriter(), schema)\n h = open(self.input_file)\n i = 0\n st = time.time()\n lines = []\n while 1: ## reading line per line in the flat json file and write them in the AVRO format\n line = h.readline()\n if not line:\n break\n ls = line.strip()\n data = json.loads(ls)\n\n if modify is True:\n # We need to replace the ';' in the file to an '_'\n modified_data = {}\n for key in data:\n modified_data[key.replace(':','_')] = data[key]\n data = modified_data\n\n i += 1\n if i % 100 == 0:\n tmpf = open('/tmp/cgs_superhello.txt','a')\n tmpf.write('Converter for line '+str(i)+': '+str(time.time()-st)+' > len dict: '+str(len(data))+'\\n')\n tmpf.close()\n # We finally write the avro file\n writer.append(data)\n #supertmp.write(json.dumps(data)+'\\n')\n h.close()\n writer.close()\n status = \"succeeded\"\n return(status)",
"def parse_schema(self, schema_json):\n if sys.version_info.major >= 3:\n return avro.schema.Parse(schema_json)\n else:\n return avro.schema.parse(schema_json)",
"def save_avro_schema_stream(df: DataFrame, epochid: int, schema_path=None):\n save_avro_schema(df, schema_path)",
"def headers(self, schema):\n if 'consumes' in schema:\n self._session.headers['Content-Type'] = schema['consumes']\n if 'produces' in schema:\n self._session.headers['Accept'] = schema['produces']",
"def get_producer(topic: str, producer_type: str='avro', **kwargs) -> object:\n if producer_type == 'avro':\n return AvroProducerAdapter(topic=topic, **kwargs)\n else:\n return SimpleProducerAdapter(topic=topic)",
"def read_header(data):\n with BinaryEncoder(HEADER_SCHEMA) as encoder:\n header, offset = encoder.read_record(data)\n if not header:\n raise InvalidSchemaError(\"Unable to read Avro header.\")\n return header, offset",
"def get_kafka_df(\n df: DataFrame, schema_path: str, saveschema: bool = False, elasticc: bool = False) -> DataFrame:\n # Remove the status column before distribution\n cols = df.columns\n if \"status\" in cols:\n cols.remove(\"status\")\n\n df = df.select(cols)\n\n # Create a StructType column in the df for distribution.\n # The contents and schema of the df can change over time\n df_struct = df.select(struct(df.columns).alias(\"struct\"))\n\n # Convert into avro and save the schema\n if elasticc:\n # The idea is to force the output schema\n # Need better handling of this though...\n jsonschema = open(\n '/home/julien.peloton/elasticc/alert_schema/elasticc.v0_9.brokerClassification.avsc',\n 'r'\n ).read()\n df_kafka = df_struct.select(to_avro_native(\"struct\", jsonschema).alias(\"value\"))\n else:\n df_kafka = df_struct.select(to_avro(\"struct\").alias(\"value\"))\n\n # Add a key based on schema versions\n df_kafka = df_kafka.withColumn('key', lit('{}_{}'.format(fbvsn, fsvsn)))\n\n if saveschema:\n # Harcoded path that corresponds to the schema used\n # for alert redistribution.\n schema_path = 'schemas/distribution_schema_new.avsc'\n\n # Do not work on a DFS like HDFS obviously.\n # Only local mode & for testing purposes\n toto = df.writeStream.foreachBatch(\n lambda x, y: save_avro_schema_stream(x, y, schema_path)\n ).start()\n time.sleep(10)\n\n # Note that the entire Spark application will stop.\n toto.stop()\n\n return df_kafka",
"def test_header_record(header_record):\n rec = HeaderRecord()\n rec.load(header_record)\n\n assert rec.bank_app == 'T'\n assert rec.app_id == '363914'\n assert rec.edi_msg == 'HEADER'\n assert rec.separator is None\n assert rec.rec_typ == '00'\n assert rec.app_ver == '01.0000'\n assert rec.app_brand == 'BBCSOB'",
"def produce_record(self, stream, key, data):\n r = Record()\n r.key = key\n r.data = data\n r.userStream = stream\n transaction.records.append(r)",
"def save_avro_schema(df: DataFrame, schema_path: str):\n\n # Check if the file exists\n if not os.path.isfile(schema_path):\n # Store the df as an avro file\n path_for_avro = os.path.join(os.environ[\"PWD\"], \"flatten_hbase.avro\")\n if os.path.exists(path_for_avro):\n shutil.rmtree(path_for_avro)\n df.write.format(\"avro\").save(path_for_avro)\n\n # Read the avro schema from .avro file\n avro_file = glob.glob(path_for_avro + \"/part*\")[0]\n avro_schema = readschemafromavrofile(avro_file)\n\n # Write the schema to a file for decoding Kafka messages\n with open(schema_path, 'w') as f:\n json.dump(avro_schema, f, indent=2)\n\n # Remove .avro files and directory\n shutil.rmtree(path_for_avro)\n else:\n msg = \"\"\"\n {} already exists - cannot write the new schema\n \"\"\".format(schema_path)\n print(msg)",
"def producer_registry(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='{\"a\": 1, \"b\": \"Text\"}',\n stop_after_first_batch=True)\n\n kafka_destination = builder.add_stage('Kafka Producer',\n library=cluster.kafka.standalone_stage_lib)\n kafka_destination.set_attributes(topic=topic,\n data_format='AVRO',\n avro_schema_location='REGISTRY',\n include_schema=False,\n schema_subject=topic,\n key_serializer='CONFLUENT',\n value_serializer='CONFLUENT')\n\n dev_raw_data_source >> kafka_destination\n return builder.build(title=f'Producer Registry for {topic}').configure_for_environment(cluster, confluent)",
"def get_dataframe_tf_record_schema(spark_df):\n return _get_dataframe_tf_record_schema_json(spark_df)[0]",
"def feed(self, handle, consumer):\n\n consumer.start_record()\n l = handle.readline()\n on_profile = False\n on_data = False\n on_structure = False\n while l != '':\n l = l.rstrip()\n if l.find('[Profile]') != -1: #Should control for comment\n on_profile = True\n on_data = False\n on_structure = False\n if l.find('[Data]') != -1: #Should control for comment\n on_profile = False\n on_data = True\n on_structure = False\n if l.find('[Structure]') != -1: #Should control for comment\n on_profile = False\n on_data = False\n on_structure = True\n if on_profile:\n self.report_param('Title', l, consumer.title, str, True)\n #if l.find('NbSamples'): consumer.()\n #if l.find('GameticPhase'): consumer.()\n #if l.find('RecessiveData'): consumer.()\n #if l.find('DataType'): consumer.()\n #if l.find('LocusSeparator'): consumer.()\n self.report_param('GenotypicData',l,consumer.ploidy,int,False)\n if on_data:\n self.report_param('SampleName',l, consumer.pop_name,str,True)\n self.report_param('SampleSize',l, consumer.pop_size,int,False)\n tokens = l.split('\\t')\n if tokens[0].find('_') != -1:\n pop_i, indiv_name = tokens[0].split('_')\n consumer.new_indiv(indiv_name)\n consumer.new_chromatid()\n #skipping tokens[1] - the told unk number\n for tok in tokens[2:]:\n self.report_allele(tok, consumer) \n consumer.end_chromatid()\n if consumer.data.is_genotypic:\n l = handle.readline().rstrip()\n consumer.new_chromatid()\n tokens = l.split('\\t')\n for tok in tokens[2:]:\n self.report_allele(tok, consumer) \n consumer.end_chromatid()\n consumer.end_indiv()\n elif l.find('}') != -1:\n consumer.end_pop()\n l = handle.readline()\n consumer.end_record()",
"def test__Attachment__precreate__1():\n attachment_id = 202211010001\n \n content_type = 'application/json'\n description = 'Nue'\n duration = 12.6\n flags = 12\n height = 1000\n name = 'i miss you'\n proxy_url = 'https://orindance.party/'\n size = 999\n temporary = True\n url = 'https://www.astil.dev/'\n waveform = 'kisaki'\n width = 998\n \n attachment = Attachment.precreate(\n attachment_id,\n content_type = content_type,\n description = description,\n duration = duration,\n flags = flags,\n height = height,\n name = name,\n proxy_url = proxy_url,\n size = size,\n temporary = temporary,\n url = url,\n waveform = waveform,\n width = width,\n )\n _assert_fields_set(attachment)\n \n vampytest.assert_eq(attachment.id, attachment_id)\n vampytest.assert_eq(attachment.proxy_url, proxy_url)\n\n vampytest.assert_eq(attachment.content_type, content_type)\n vampytest.assert_eq(attachment.description, description)\n vampytest.assert_eq(attachment.duration, duration)\n vampytest.assert_eq(attachment.flags, flags)\n vampytest.assert_eq(attachment.height, height)\n vampytest.assert_eq(attachment.name, name)\n vampytest.assert_eq(attachment.size, size)\n vampytest.assert_eq(attachment.temporary, temporary)\n vampytest.assert_eq(attachment.url, url)\n vampytest.assert_eq(attachment.waveform, waveform)\n vampytest.assert_eq(attachment.width, width)",
"def _DebugPrintRecordHeader(self, record_header):\n self._DebugPrintValue('Record size', f'{record_header.record_size:d}')\n\n data_type_map = self._GetDataTypeMap('rp_change_log_record_type')\n\n record_type_string = data_type_map.GetName(\n record_header.record_type) or 'UNKNOWN'\n\n self._DebugPrintValue(\n 'Record type',\n f'{record_header.record_type:d} ({record_type_string:s})')",
"def test_nested_schemas_splitted() -> None:\n\n class A(AvroModel):\n class Meta:\n namespace = \"namespace\"\n\n class B(AvroModel):\n a: A\n\n class C(AvroModel):\n b: B\n a: A\n\n # first the B schema is generated\n assert parse_schema(B.avro_schema_to_python())\n\n # then check that the C schema is valid\n assert parse_schema(C.avro_schema_to_python())",
"def test_correctly_parse_valid_kinesis_event_record(kinesis_event_record):\n record = KinesisEventRecord(**kinesis_event_record)\n\n # top-level fields\n assert \"1.0\" == record.event_version\n assert (\n \"aws:kinesis\" == record.event_source\n and EventSource.kinesis == record.event_source\n )\n assert \"us-east-2\" == record.aws_region\n assert \"aws:kinesis:record\" == record.event_name\n assert (\n \"shardId-000000000006:49590338271490256608559692538361571095921575989136588898\"\n == record.event_id\n )\n assert (\n \"arn:aws:kinesis:us-east-2:123456789012:stream/lambda-stream\"\n == record.event_source_arn\n )\n\n # kinesis fields\n kinesis_metadata = record.kinesis\n assert \"1.0\" == kinesis_metadata.schema_version\n\n assert \"1\" == kinesis_metadata.partition_key\n assert (\n \"49590338271490256608559692538361571095921575989136588898\"\n == kinesis_metadata.sequence_number\n )\n assert \"SGVsbG8sIHRoaXMgaXMgYSB0ZXN0Lg==\" == kinesis_metadata.data\n assert \"Hello, this is a test.\" == kinesis_metadata.decoded_data\n assert (\n datetime(2018, 12, 17, 22, 10, 50, 987000, tzinfo=timezone.utc)\n == kinesis_metadata.approximate_arrival_timestamp\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Kafka producer that receives avro schema in the pipeline configuration. | def producer_inline(sdc_builder, topic, cluster, confluent):
builder = sdc_builder.get_pipeline_builder()
builder.add_error_stage('Discard')
dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='{"a": 1, "b": "Text"}',
stop_after_first_batch=True)
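    # The Avro schema is supplied inline in the stage configuration via the module-level AVRO_SCHEMA
    # constant, rather than read from a record header attribute or fetched from the registry.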
kafka_destination = builder.add_stage('Kafka Producer',
library=cluster.kafka.standalone_stage_lib)
kafka_destination.set_attributes(topic=topic,
data_format='AVRO',
avro_schema_location='INLINE',
avro_schema=AVRO_SCHEMA,
include_schema=False,
register_schema=True,
schema_subject=topic,
key_serializer='CONFLUENT',
value_serializer='CONFLUENT')
dev_raw_data_source >> kafka_destination
return builder.build(title=f'Producer Inline for {topic}').configure_for_environment(cluster, confluent) | [
"def producer_header(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='{\"a\": 1, \"b\": \"Text\"}',\n stop_after_first_batch=True)\n\n schema_generator = builder.add_stage('Schema Generator')\n schema_generator.schema_name = 'Brno'\n\n kafka_destination = builder.add_stage('Kafka Producer',\n library=cluster.kafka.standalone_stage_lib)\n\n kafka_destination.set_attributes(topic=topic,\n data_format='AVRO',\n avro_schema_location='HEADER',\n include_schema=False,\n register_schema=True,\n schema_subject=topic,\n key_serializer='CONFLUENT',\n value_serializer='CONFLUENT')\n\n dev_raw_data_source >> schema_generator >> kafka_destination\n return builder.build(title=f'Producer in Header for {topic}').configure_for_environment(cluster, confluent)",
"def producer_registry(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='{\"a\": 1, \"b\": \"Text\"}',\n stop_after_first_batch=True)\n\n kafka_destination = builder.add_stage('Kafka Producer',\n library=cluster.kafka.standalone_stage_lib)\n kafka_destination.set_attributes(topic=topic,\n data_format='AVRO',\n avro_schema_location='REGISTRY',\n include_schema=False,\n schema_subject=topic,\n key_serializer='CONFLUENT',\n value_serializer='CONFLUENT')\n\n dev_raw_data_source >> kafka_destination\n return builder.build(title=f'Producer Registry for {topic}').configure_for_environment(cluster, confluent)",
"def save_avro_schema_stream(df: DataFrame, epochid: int, schema_path=None):\n save_avro_schema(df, schema_path)",
"def _create_kafka_producer(self):\n kafka_config = ({'kafka_bootstrap_servers': [self.kafka_hosts],\n 'kafka_broker_version_fallback': settings.KAFKA_BROKER_VERSION_FALLBACK,\n 'kafka_api_version_request': settings.KAFKA_API_VERSION_REQUEST,\n 'kafka_producer_batch_linger_ms': settings.KAFKA_PRODUCER_BATCH_LINGER_MS,\n 'kafka_producer_buffer_kbytes': settings.KAFKA_PRODUCER_BUFFER_KBYTES,\n 'kafka_producer_topic': self.kafka_topic})\n\n self.kafka_conn = ConfluentKafkaProducer(kafka_config, self.logger)",
"def parse_schema(self, schema_json):\n if sys.version_info.major >= 3:\n return avro.schema.Parse(schema_json)\n else:\n return avro.schema.parse(schema_json)",
"def __init__(self, topic, server):\n self.producer = KafkaProducer(bootstrap_servers=server,\n value_serializer=lambda x: dumps(x).encode('utf-8'))\n self.topic = topic",
"def parse_pipeline(self) -> dict:\n self.pipeline= parse_schema(self.pipeline, STATIC_DAG_SCHEMA)",
"def create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n FIELD_MAPPINGS = [dict(field='/id', columnName='id'),\n dict(field='/name', columnName='name')]\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n jdbc_producer.set_attributes(default_operation=operation,\n table_name=table_name,\n field_to_column_mapping=FIELD_MAPPINGS,\n stage_on_record_error='STOP_PIPELINE')\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_producer\n record_deduplicator >> trash\n\n return pipeline_builder.build(title=pipeline_title)",
"def train(self, config: Dict[str, Any]) -> None:\n try:\n _ = self.validator_train(config)\n except JsonSchemaException as ex:\n raise PipelineConfigError(ex.message) from ex",
"def schema_val(self, messages=None):\n self._ymlproc = YAMLProcessor(self._ymlfile)\n self._schemaproc = SchemaProcessor(self._schemafile)\n valid = True\n\n log.debug(\n \"BEGIN: Schema-based validation for YAML '%s' with schema '%s'\",\n self._ymlfile,\n self._schemafile,\n )\n\n # Make sure the yml and schema have been loaded\n if self._ymlproc.loaded and self._schemaproc.loaded:\n # Load all of the yaml documents. Could be more than one in the same YAML file.\n for docnum, data in enumerate(\n yaml.load_all(self._ymlproc.data, Loader=yaml.Loader)\n ):\n # Since YAML allows integer keys but JSON does not, we need to first\n # dump the data as a JSON string to encode all of the potential integers\n # as strings, and then read it back out into the YAML format. Kind of\n # a clunky workaround but it works as expected.\n data = yaml.load(json.dumps(data), Loader=yaml.Loader)\n\n # Now we want to get a validator ready\n v = jsonschema.Draft4Validator(self._schemaproc.data)\n\n # Loop through the errors (if any) and set valid = False if any are found\n # Display the error message\n for error in v.iter_errors(data):\n msg = (\n f\"Schema-based validation failed for YAML file ' {self._ymlfile} '\"\n )\n self.ehandler.process(\n docnum, self._ymlproc.doclines, error, messages\n )\n valid = False\n\n if not valid:\n log.error(msg)\n\n elif not self._ymlproc.loaded:\n raise util.YAMLError(\"YAML must be loaded in order to validate.\")\n elif not self._schemaproc.loaded:\n raise jsonschema.SchemaError(\"Schema must be loaded in order to validate.\")\n\n log.debug(\"END: Schema-based validation complete for '%s'\", self._ymlfile)\n return valid",
"def test_create_configs_from_pipeline_proto(self):\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.faster_rcnn.num_classes = 10\n pipeline_config.train_config.batch_size = 32\n pipeline_config.train_input_reader.label_map_path = \"path/to/label_map\"\n pipeline_config.eval_config.num_examples = 20\n pipeline_config.eval_input_reader.add().queue_capacity = 100\n\n configs = config_util.create_configs_from_pipeline_proto(pipeline_config)\n self.assertProtoEquals(pipeline_config.model, configs[\"model\"])\n self.assertProtoEquals(pipeline_config.train_config,\n configs[\"train_config\"])\n self.assertProtoEquals(pipeline_config.train_input_reader,\n configs[\"train_input_config\"])\n self.assertProtoEquals(pipeline_config.eval_config, configs[\"eval_config\"])\n self.assertProtoEquals(pipeline_config.eval_input_reader,\n configs[\"eval_input_configs\"])",
"def test_pipeline_processor_record_writing(self):\n\n nlp = Pipeline[DataPack](enforce_consistency=True)\n reader = DummySentenceReaderOne()\n nlp.set_reader(reader)\n dummy = DummyPackProcessorOne()\n nlp.add(dummy)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n pack = nlp.process(data_path)\n self.assertEqual(pack._meta.record[\"Sentence\"], {\"1\", \"2\", \"3\"})\n self.assertEqual(pack._meta.record[\"Token\"], {\"1\", \"2\"})\n self.assertEqual(pack._meta.record[\"Document\"], {\"2\"})",
"def start_producer(self):\n logger.debug('starting producer service')\n runner = ApplicationRunner(\n url=self.config['transport_host'],\n realm=u'realm1',\n extra={\n 'config': self.config,\n 'producer': True,\n 'callback': self.callback\n }\n )\n runner.run(Component, auto_reconnect=True)",
"def get_producer(topic: str, producer_type: str='avro', **kwargs) -> object:\n if producer_type == 'avro':\n return AvroProducerAdapter(topic=topic, **kwargs)\n else:\n return SimpleProducerAdapter(topic=topic)",
"def validate(self):\n\n # load schema and validate it via jsonschema\n schema_path = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), NetworkerRunner.SCHEMA_FILE)\n schema = json.load(open(schema_path))\n jsonschema.validate(self.config, schema)",
"def publish(self):\n logging.info(\"Producing to Kafka at %(target)s\", {\"target\": self.target,})\n producer = Producer(\n {\n \"bootstrap.servers\": self.target,\n \"default.topic.config\": {\"acks\": \"all\",},\n }\n )\n return KafkaPublisher(self.loop, producer)",
"def produce_kafka_messages(cfg):\n setup_logging(cfg['logging'])\n logger = getLogger('producer')\n\n if cfg['producer']['message_frequency_hz']:\n rate = 1. / cfg['producer']['message_frequency_hz']\n else:\n rate = None\n\n p = Producer({'bootstrap.servers': cfg['bootstrap.servers']})\n\n while True:\n if not cfg['producer']['num_messages']:\n break\n p.poll(0)\n payload = uuid4().__str__()\n logger.info(payload)\n p.produce(cfg['topic'], payload, partition=0, callback=delivery_report)\n p.flush()\n cfg['producer']['num_messages'] -= 1\n if rate:\n sleep(rate)\n else:\n sleep(lognormvariate(1, 1))",
"def main():\n consumer = Consumer({\n 'bootstrap.servers': config['kafka_server'],\n 'group.id': config['kafka_clientgroup'],\n 'auto.offset.reset': 'earliest'\n })\n consumer.subscribe([config['topics']['workspace_events']])\n print(f\"Listening to {config['kafka_server']} in group {config['kafka_clientgroup']}\") # noqa\n while True:\n msg = consumer.poll(0.5)\n if msg is None:\n continue\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n print(\"Reached end of the stream.\")\n else:\n print(\"Error:\", msg.error())\n continue\n print('New message:', msg.value())\n try:\n data = json.loads(msg.value().decode('utf-8'))\n except ValueError as err:\n # JSON parsing error\n print('JSON message error:', err)\n continue\n try:\n _process_event(data, producer)\n except Exception:\n traceback.print_exc()\n consumer.close()",
"def _produce(self, topic, key, value):\n return self.producer.produce(topic=topic, key=key, value=value,\n callback=self._delivery_report)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Kafka producer that receives avro schema from schema registry (must exist before pipeline run). | def producer_registry(sdc_builder, topic, cluster, confluent):
builder = sdc_builder.get_pipeline_builder()
builder.add_error_stage('Discard')
dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='{"a": 1, "b": "Text"}',
stop_after_first_batch=True)
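    # No schema is configured on the stage; the destination fetches it from Confluent Schema Registry
    # under the topic's subject, so the schema must already be registered before the pipeline runs.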
kafka_destination = builder.add_stage('Kafka Producer',
library=cluster.kafka.standalone_stage_lib)
kafka_destination.set_attributes(topic=topic,
data_format='AVRO',
avro_schema_location='REGISTRY',
include_schema=False,
schema_subject=topic,
key_serializer='CONFLUENT',
value_serializer='CONFLUENT')
dev_raw_data_source >> kafka_destination
return builder.build(title=f'Producer Registry for {topic}').configure_for_environment(cluster, confluent) | [
"def save_avro_schema_stream(df: DataFrame, epochid: int, schema_path=None):\n save_avro_schema(df, schema_path)",
"def producer_inline(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='{\"a\": 1, \"b\": \"Text\"}',\n stop_after_first_batch=True)\n\n kafka_destination = builder.add_stage('Kafka Producer',\n library=cluster.kafka.standalone_stage_lib)\n kafka_destination.set_attributes(topic=topic,\n data_format='AVRO',\n avro_schema_location='INLINE',\n avro_schema=AVRO_SCHEMA,\n include_schema=False,\n register_schema=True,\n schema_subject=topic,\n key_serializer='CONFLUENT',\n value_serializer='CONFLUENT')\n\n dev_raw_data_source >> kafka_destination\n return builder.build(title=f'Producer Inline for {topic}').configure_for_environment(cluster, confluent)",
"def producer_header(sdc_builder, topic, cluster, confluent):\n builder = sdc_builder.get_pipeline_builder()\n builder.add_error_stage('Discard')\n\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='{\"a\": 1, \"b\": \"Text\"}',\n stop_after_first_batch=True)\n\n schema_generator = builder.add_stage('Schema Generator')\n schema_generator.schema_name = 'Brno'\n\n kafka_destination = builder.add_stage('Kafka Producer',\n library=cluster.kafka.standalone_stage_lib)\n\n kafka_destination.set_attributes(topic=topic,\n data_format='AVRO',\n avro_schema_location='HEADER',\n include_schema=False,\n register_schema=True,\n schema_subject=topic,\n key_serializer='CONFLUENT',\n value_serializer='CONFLUENT')\n\n dev_raw_data_source >> schema_generator >> kafka_destination\n return builder.build(title=f'Producer in Header for {topic}').configure_for_environment(cluster, confluent)",
"def parse_schema(self, schema_json):\n if sys.version_info.major >= 3:\n return avro.schema.Parse(schema_json)\n else:\n return avro.schema.parse(schema_json)",
"async def test_recordnameschemamanager() -> None:\n registry_url = os.getenv(\"SCHEMA_REGISTRY_URL\")\n assert registry_url\n\n async with ClientSession() as http_session:\n registry = RegistryApi(url=registry_url, session=http_session)\n manager = RecordNameSchemaManager(root=SCHEMA_ROOT, registry=registry)\n await manager.register_schemas(compatibility=\"FORWARD\")\n\n topic_a_message = {\"field1\": 42, \"field2\": \"hello\"}\n data_a = await manager.serialize(data=topic_a_message, name=\"kafkit.a\")\n assert isinstance(data_a, bytes)\n\n topic_b_message = {\"fieldA\": 42, \"fieldB\": \"hello\"}\n data_b = await manager.serialize(data=topic_b_message, name=\"kafkit.b\")\n assert isinstance(data_b, bytes)\n\n # Sanity check that you can't serialize with the wrong schema!\n with pytest.raises(\n ValueError,\n match=(\"Cannot serialize data with schema kafkit.a\"),\n ):\n await manager.serialize(data=topic_b_message, name=\"kafkit.a\")",
"def get_kafka_df(\n df: DataFrame, schema_path: str, saveschema: bool = False, elasticc: bool = False) -> DataFrame:\n # Remove the status column before distribution\n cols = df.columns\n if \"status\" in cols:\n cols.remove(\"status\")\n\n df = df.select(cols)\n\n # Create a StructType column in the df for distribution.\n # The contents and schema of the df can change over time\n df_struct = df.select(struct(df.columns).alias(\"struct\"))\n\n # Convert into avro and save the schema\n if elasticc:\n # The idea is to force the output schema\n # Need better handling of this though...\n jsonschema = open(\n '/home/julien.peloton/elasticc/alert_schema/elasticc.v0_9.brokerClassification.avsc',\n 'r'\n ).read()\n df_kafka = df_struct.select(to_avro_native(\"struct\", jsonschema).alias(\"value\"))\n else:\n df_kafka = df_struct.select(to_avro(\"struct\").alias(\"value\"))\n\n # Add a key based on schema versions\n df_kafka = df_kafka.withColumn('key', lit('{}_{}'.format(fbvsn, fsvsn)))\n\n if saveschema:\n # Harcoded path that corresponds to the schema used\n # for alert redistribution.\n schema_path = 'schemas/distribution_schema_new.avsc'\n\n # Do not work on a DFS like HDFS obviously.\n # Only local mode & for testing purposes\n toto = df.writeStream.foreachBatch(\n lambda x, y: save_avro_schema_stream(x, y, schema_path)\n ).start()\n time.sleep(10)\n\n # Note that the entire Spark application will stop.\n toto.stop()\n\n return df_kafka",
"def save_avro_schema(df: DataFrame, schema_path: str):\n\n # Check if the file exists\n if not os.path.isfile(schema_path):\n # Store the df as an avro file\n path_for_avro = os.path.join(os.environ[\"PWD\"], \"flatten_hbase.avro\")\n if os.path.exists(path_for_avro):\n shutil.rmtree(path_for_avro)\n df.write.format(\"avro\").save(path_for_avro)\n\n # Read the avro schema from .avro file\n avro_file = glob.glob(path_for_avro + \"/part*\")[0]\n avro_schema = readschemafromavrofile(avro_file)\n\n # Write the schema to a file for decoding Kafka messages\n with open(schema_path, 'w') as f:\n json.dump(avro_schema, f, indent=2)\n\n # Remove .avro files and directory\n shutil.rmtree(path_for_avro)\n else:\n msg = \"\"\"\n {} already exists - cannot write the new schema\n \"\"\".format(schema_path)\n print(msg)",
"def get_producer(topic: str, producer_type: str='avro', **kwargs) -> object:\n if producer_type == 'avro':\n return AvroProducerAdapter(topic=topic, **kwargs)\n else:\n return SimpleProducerAdapter(topic=topic)",
"def schema() -> Dict:\n from pkg_resources import resource_string\n import json\n\n data = resource_string(\"ceeder.schemas\", \"cdr-v5.json\")\n return json.loads(data)",
"def test_nested_schemas_splitted() -> None:\n\n class A(AvroModel):\n class Meta:\n namespace = \"namespace\"\n\n class B(AvroModel):\n a: A\n\n class C(AvroModel):\n b: B\n a: A\n\n # first the B schema is generated\n assert parse_schema(B.avro_schema_to_python())\n\n # then check that the C schema is valid\n assert parse_schema(C.avro_schema_to_python())",
"def test_avro_multi_register(client):\n version_1 = schema.AvroSchema(data_gen.AVRO_USER_V1)\n version_2 = schema.AvroSchema(data_gen.AVRO_USER_V2)\n subject = \"test-avro-user-schema\"\n\n id1 = client.register(subject, version_1)\n latest_schema_1 = client.get_schema(subject)\n client.check_version(subject, version_1)\n\n id2 = client.register(subject, version_2)\n latest_schema_2 = client.get_schema(subject)\n client.check_version(subject, version_2)\n\n assert id1 != id2\n assert latest_schema_1 != latest_schema_2\n # ensure version is higher\n assert latest_schema_1.version < latest_schema_2.version\n\n client.register(subject, version_1)\n latest_schema_3 = client.get_schema(subject)\n\n assert latest_schema_2 == latest_schema_3",
"def process_schema(self, schema):\n import json\n import collections\n with open(schema) as f:\n _schema = json.loads(f.read(), object_pairs_hook=collections.OrderedDict)\n\n if \"title\" in _schema:\n self.setWindowTitle(\"%s - PyQtSchema\" % _schema[\"title\"])\n\n self.content_region.setWidget(create_widget(_schema.get(\"title\", \"(root)\"), _schema))\n self.content_region.setWidgetResizable(True)",
"def _create_kafka_producer(self):\n kafka_config = ({'kafka_bootstrap_servers': [self.kafka_hosts],\n 'kafka_broker_version_fallback': settings.KAFKA_BROKER_VERSION_FALLBACK,\n 'kafka_api_version_request': settings.KAFKA_API_VERSION_REQUEST,\n 'kafka_producer_batch_linger_ms': settings.KAFKA_PRODUCER_BATCH_LINGER_MS,\n 'kafka_producer_buffer_kbytes': settings.KAFKA_PRODUCER_BUFFER_KBYTES,\n 'kafka_producer_topic': self.kafka_topic})\n\n self.kafka_conn = ConfluentKafkaProducer(kafka_config, self.logger)",
"def parse(json_string):\n # TODO(hammer): preserve stack trace from JSON parse\n # parse the JSON\n try:\n json_data = json.loads(json_string)\n except:\n raise SchemaParseException('Error parsing JSON: %s' % json_string)\n\n # Initialize the names dictionary\n names = {}\n\n # construct the Avro Schema object\n return make_avsc_object(json_data, names)",
"def queueKafka(self, json_data ):\n self.kafka_producer.send(TOPIC_NAME, json_data)",
"def rebuild_schema(self, schema_file, shelf):\n\n print >> sys.stderr, 'Rebuilding schema'\n\n yaml_schema_file = os.path.join(self.parsed_schema_dir, 'schema.yaml')\n\n # If the yaml schema file doesn't exist, build using the PERL script\n if not os.path.isfile(yaml_schema_file):\n\n # Ensure directory is writable\n try:\n\n f = os.path.join(self.parsed_schema_dir, 'dummy.txt')\n open(f, 'w')\n os.remove(f)\n\n except IOError:\n raise IOError('Schema directory is not writable')\n else:\n\n print >> sys.stderr, 'Yamlifying schema file'\n pipe = subprocess.Popen([\"perl\", os.path.join(os.path.dirname(os.path.dirname(\n __file__)), \"bin/yamlify-schema.pl\"), schema_file, self.parsed_schema_dir], stdout=subprocess.PIPE)\n\n if not pipe.stdout.read():\n raise KEParserException(\n 'Perl subprocess converting schema.pl to YAML failed')\n\n re_split = re.compile(\"--- [a-z]+\")\n with open(yaml_schema_file, \"r\") as f:\n file_raw = f.read()\n split_files = re_split.split(file_raw)\n\n for split_file in split_files:\n try:\n doc = yaml.load(split_file)\n except yaml.YAMLError:\n print('Error parsing doc')\n else:\n if doc:\n module_name = doc['table']\n print >> sys.stderr, 'Building schema for %s' % module_name\n item = {\n 'columns': {}\n }\n\n for col, col_def in doc['columns'].items():\n\n # We only want to use some of the fields in our schema\n field = {\n 'DataKind': col_def['DataKind'],\n 'DataType': col_def['DataType'],\n 'ColumnName': col_def['ColumnName'],\n }\n\n # If ItemBase is specified, this is a multi-value field\n # For example:\n # ItemBase: AssRegistrationNumberRefLocal\n # Fields: AssRegistrationNumberRefLocal0, AssRegistrationNumberRefLocal1\n # The export files are keyed against ItemName (if it\n # exists), not ColumnName\n if 'ItemBase' in col_def:\n col = col_def['ItemBase']\n field['ItemCount'] = col_def['ItemCount']\n elif 'ItemName' in col_def:\n col = col_def['ItemName']\n\n item['columns'][col] = field\n\n shelf[module_name] = item\n\n return shelf",
"def plugins_registered_callback(self, sender, **extra):\n self._update_schemas()",
"async def producer():\n MQStatus.mqp_connected = False\n while True:\n # If not connected to kafka, attempt to connect...\n if not MQStatus.mqp_connected:\n try:\n logger.info(\"Producer client not connected, attempting to connect...\")\n await mqp.start()\n logger.info(\"Producer client connected!\")\n MQStatus.mqp_connected = True\n except KafkaError:\n logger.exception('Producer client hit error, triggering re-connect...')\n await asyncio.sleep(RETRY_INTERVAL)\n continue\n\n # Pull items off our queue to produce\n if not produce_queue:\n await asyncio.sleep(0.1)\n continue\n\n for _ in range(0, len(produce_queue)):\n item = produce_queue.popleft()\n topic = item['topic']\n msg = item['msg']\n logger.info(\n \"Popped item from produce queue (qsize: %d): topic %s: %s\",\n len(produce_queue), topic, msg\n )\n try:\n await mqp.send_and_wait(topic, json.dumps(msg).encode('utf-8'))\n logger.info(\"Produced on topic %s: %s\", topic, msg)\n except KafkaError:\n logger.exception('Producer client hit error, triggering re-connect...')\n MQStatus.mqp_connected = False\n # Put the item back on the queue so we can push it when we reconnect\n produce_queue.appendleft(item)",
"def test_process(schemas, expected_schemas):\n backref.process(schemas=schemas)\n\n assert schemas == expected_schemas"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an image that has been flipped through the vertical axis. _Author_ = Jackie So (101153622). >>> image = load_image(choose_file()) >>> flip_vertical(image) | def flip_vertical(image: Image) -> Image:
flipped_image = copy(image)
width = get_width(image)
for y in range(get_height(flipped_image)):
for x in range(get_width(flipped_image)):
            new_color = get_color(image, width - x - 1, y)
            set_color(flipped_image, x, y, new_color)
show(flipped_image)
return flipped_image | [
"def vertical_flip_image(image: Image) -> Image:\n return Image(image.size, vertical_flip_pixels(image.pixels))",
"def vertical_flip(self):\n\n im = self._image.as_numpy_array()\n self._image.im_representation = np.flipud(im)",
"def verticalFlip(self):\r\n self.ser.write('A')\r\n try:\r\n self.cameraSettings.toggleVerticalFlip()\r\n print(\"Camera Flipped Vertically\")\r\n except:\r\n print(\"Error flipping image vertically\")",
"def random_vertical_flip(image, prob=.2):\n flip = rand() < prob\n if flip:\n image = image.transpose(Image.FLIP_TOP_BOTTOM)\n\n return image, flip",
"def vertical_flip_tile(tile: Tile) -> Tile:\n return Tile(tile.tile_id, tile.size, vertical_flip_pixels(tile.pixels))",
"def flip_vertical(self):\n old = self.current_variant_grid.copy()\n self.current_variant_grid = []\n\n for row in old:\n self.current_variant_grid.append(row[::-1]) # Reverse the characters in each row.\n\n self.current_variant += 'V'\n self.snapshot_variant()",
"def vertical_flip(signal: np.array) -> np.array:\n baseline = np.nanmean(signal)\n # flip vertically \n signal = -signal + 2*baseline\n\n return signal",
"def flip(self, bev_direction=\"horizontal\"):\r\n if bev_direction == \"horizontal\":\r\n self.tensor[:, 1] = -self.tensor[:, 1]\r\n self.tensor[:, 4] = -self.tensor[:, 4]\r\n #self.tensor[:, 5] = -self.tensor[:, 5]\r\n #self.tensor[:, 7] = -self.tensor[:, 7]\r\n #self.tensor[:, 9] = -self.tensor[:, 9]\r\n elif bev_direction == \"vertical\":\r\n self.tensor[:, 0] = -self.tensor[:, 0]\r\n self.tensor[:, 3] = -self.tensor[:, 3]\r\n #self.tensor[:, 4] = -self.tensor[:, 4]\r\n #self.tensor[:, 6] = -self.tensor[:, 6]\r\n #self.tensor[:, 8] = -self.tensor[:, 8]\r",
"def randomly_flip(im):\n if sp.rand() > 0.5:\n im = im[::-1, :]\n if sp.rand() > 0.5:\n im = im[:, ::-1]\n \n return im",
"def flip(self, upper_right, width, height):\n x = upper_right[0]\n y = upper_right[1]\n while(x - height > 0):\n self.flip_row(height - y, width, width - y)\n height = height + 1",
"def main():\n # Import a image\n original_mt = SimpleImage('images/mt-rainier.jpg')\n # Show the original image\n original_mt.show()\n reflected = reflect('images/mt-rainier.jpg')\n # Show the vertically mirrored image\n reflected.show()",
"def get_vertical(self, scene, to_tensor = True, column = 4):\n stack = [self.load_img(self.get_image_path(scene, i + column)) for i in range(0, 81, 9)]\n if to_tensor: return self.stack_to_tensor(stack)\n else: return stack",
"def flip(self, image):\n enable = False\n dst = image\n if enable:\n dst = cv2.flip(image, randomint(0,1))\n return dst",
"def flipped_pixels(self):\n return (self.row_ptype*self.height)(*self.pixels[::-1])",
"def horizontal_flip(self):\n\n im = self._image.as_numpy_array()\n self._image.im_representation = np.fliplr(im)",
"def reorient_image(cls, img):\n # get the image rotation from EXIF information\n import exifread\n\n file_full_path = img.filename\n\n with open(file_full_path) as f:\n tags = exifread.process_file(f)\n\n orientation_string = tags.get(\"Image Orientation\")\n\n from PIL import Image\n\n if orientation_string:\n orientation = orientation_string.values[0]\n if orientation == 1:\n # do nothing\n pass\n elif orientation == 2: # flipped in X\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 3: # rotated 180 degree\n img = img.transpose(Image.ROTATE_180)\n elif orientation == 4: # flipped in Y\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n elif orientation == 5: #\n img = img.transpose(Image.ROTATE_270)\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 6:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 7:\n img = img.transpose(Image.ROTATE_90)\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 8:\n img = img.transpose(Image.ROTATE_90)\n\n return img",
"def rotateImage(self):\n if self.isHorizontal:\n self.image = pg.transform.rotate(self.image, 270)\n self.image = pg.transform.flip(self.image, True, False)",
"def augmentation_flipping(img,angle):\n image_flipped = np.fliplr(img)\n angle_flipped = -angle\n return(image_flipped,angle_flipped)",
"def invert(self):\n self.image = ImageOps.invert(self.image).convert(o.device_mode)\n self.display_if_interactive()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function will spawn a thread and run the given function with the given args and kwargs, returning the given default value if the timeout_duration is exceeded. | def timeout(func, args=(), kwargs={},
timeout_duration=10, default=None, log=None):
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
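            # result starts as the caller-supplied default and is only overwritten if func completes in run().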
self.result = default
def run(self):
self.result = func(*args, **kwargs)
try:
if log:
            log.info("Starting timeout thread for '{}', timeout in {}s".format(
func.__name__, timeout_duration))
it = InterruptableThread()
it.start()
it.join(timeout_duration)
        # Whether the worker thread finished or is still running after the join
        # timed out, self.result holds either func's return value or the
        # supplied default, so it is returned unconditionally.
        return it.result
except:
if log:
log.warning("Exception occurred in timerthread for '{}'".format(
func.__name__))
return default | [
"def _timeout(func, args=(), kwargs={}, timeout_duration=10, default=None): \r\n import threading\r\n class InterruptableThread(threading.Thread):\r\n def __init__(self):\r\n threading.Thread.__init__(self)\r\n self.result = default\r\n\r\n def run(self):\r\n self.result = func(*args, **kwargs)\r\n it = InterruptableThread()\r\n it.start()\r\n it.join(timeout_duration)\r\n return it.result",
"def timeout_run(func, args=(), kwargs={}, timeout_duration=1):\n class InterruptableThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = self.output = self.traceback = None\n\n def run(self):\n try:\n stdout, sys.stdout = sys.stdout, cStringIO.StringIO()\n self.result = func(*args, **kwargs)\n self.status = COMPLETED\n except StoppedException:\n self.status = STOPPED\n except:\n self.status = FAILED\n self.result = None\n self.traceback = traceback.format_exc()\n sys.stdout, self.output = stdout, sys.stdout.getvalue()\n it = InterruptableThread()\n it.start()\n it.join(timeout_duration)\n if it.isAlive():\n return TIMEOUT, it.result, it.output, it.traceback\n else:\n return it.status, it.result, it.output, it.traceback",
"def timelimit(timeout):\n\n def _1(function):\n def _2(*args, **kw):\n class Dispatch(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = None\n self.error = None\n\n self.setDaemon(True)\n self.start()\n\n def run(self):\n try:\n self.result = function(*args, **kw)\n except:\n self.error = sys.exc_info()\n\n c = Dispatch()\n c.join(timeout)\n if c.isAlive():\n raise TimeoutError, 'took too long'\n if c.error:\n raise c.error[0], c.error[1]\n return c.result\n\n return _2\n\n return _1",
"def timelimit(timeout):\n def _1(function):\n def _2(*args, **kw):\n class Dispatch(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = None\n self.error = None\n\n self.setDaemon(True)\n self.start()\n\n def run(self):\n try:\n self.result = function(*args, **kw)\n except:\n self.error = sys.exc_info()\n\n c = Dispatch()\n c.join(timeout)\n if c.isAlive():\n raise TimeoutError, 'took too long'\n if c.error:\n raise c.error[0], c.error[1]\n return c.result\n return _2\n return _1",
"def timelimit(timeout, func, args=(), kwargs={}):\n import threading\n class FuncThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.result = None\n\n def run(self):\n self.result = func(*args, **kwargs)\n\n def _stop(self):\n if self.isAlive():\n threading.Thread._Thread__stop(self)\n\n it = FuncThread()\n it.start()\n it.join(timeout)\n if it.isAlive():\n it._stop()\n raise TimeLimitExpired()\n else:\n return it.result",
"def new_f(*args, **kwargs):\n def timeout_handler(signum, frame):\n # frame isn't used but might be required as signature for\n # signal.signal (need to check - not done so far)\n raise TimeoutException()\n\n # Save previous handler to restore later and install above one\n old_handler = signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(self.timeout_time) # triger alarm in timeout_time seconds\n\n # Pre-set default return value in case timeout occurs and no values\n # get returned from calling f below.\n retval = self.default\n\n # Try to call function with timeout\n try:\n # if finishes in time then just return the result\n retval = f(*args, **kwargs)\n except TimeoutException:\n pass\n finally:\n # Cancel timeout alarm\n signal.alarm(0)\n # And re-install old signal handler\n signal.signal(signal.SIGALRM, old_handler)\n return retval",
"def result_or_timeout(timed_function, args=(), kwargs=None, timeout=1, timer=time.perf_counter):\n if kwargs is None:\n kwargs = dict()\n\n def handler(*h_args, **h_kwargs):\n raise TimeoutError()\n\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(timeout)\n\n try:\n start_time = timer()\n result = timed_function(*args, **kwargs)\n running_time = timer() - start_time\n except TimeoutError:\n running_time = timeout\n result = None\n finally:\n signal.alarm(0)\n\n return running_time, result",
"def Run(func, timeout, retries, args=None, kwargs=None):\n if not args:\n args = []\n if not kwargs:\n kwargs = {}\n\n # The return value uses a list because Python variables are references, not\n # values. Closures make a copy of the reference, so updating the closure's\n # reference wouldn't update where the original reference pointed.\n ret = [None]\n def RunOnTimeoutThread():\n ret[0] = func(*args, **kwargs)\n\n num_try = 1\n while True:\n child_thread = TimeoutRetryThread(\n RunOnTimeoutThread, timeout,\n name='TimeoutThread-%d-for-%s' % (num_try,\n threading.current_thread().name))\n try:\n thread_group = reraiser_thread.ReraiserThreadGroup([child_thread])\n thread_group.StartAll()\n thread_group.JoinAll(child_thread.GetWatcher())\n return ret[0]\n except:\n child_thread.LogTimeoutException()\n if num_try > retries:\n raise\n num_try += 1",
"def with_timeout(seconds, function, *args, **kwds):\n timeout_value = kwds.pop(\"timeout_value\", _NONE)\n timeout = Timeout.start_new(seconds)\n try:\n try:\n return function(*args, **kwds)\n except Timeout as ex:\n if ex is timeout and timeout_value is not _NONE:\n return timeout_value\n raise\n finally:\n timeout.cancel()",
"def safe_call(timeout, func, args=[]):\n\n def thread_call(func, args, queue):\n \"\"\"Helper to put threaded function return value in a queue.\n\n Puts None in the queue if an exception occurs.\n\n func: The function to run.\n args: An iterable of function arguments.\n queue: The queue to put the result in.\n \"\"\"\n try:\n queue.put(func(*args))\n except Exception:\n queue.put(False)\n\n queue = Queue()\n thread = Thread(target=thread_call, args=[func, args, queue], daemon=True)\n thread.start()\n thread.join(timeout)\n\n if thread.is_alive():\n return False\n\n return queue.get()",
"def safe_timeout(func):\n def _wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except requests.exceptions.Timeout:\n LOGGER.exception('[%s] Request timed out', func.__name__)\n return False, None\n return _wrapper",
"def run_until_successful(\n func: Callable,\n args: Optional[Iterable] = None,\n kwargs: Optional[dict] = None,\n time_limit: int = 10,\n timeout_assertion_message: Optional[str] = None,\n wait_interval: int = 1,\n):\n\n start_time = time.time()\n\n while True:\n if func(*args or (), **kwargs or {}):\n return\n assert time.time() - start_time < time_limit, timeout_assertion_message or (\n timeout_assertion_message or f'Function {func.__name__} failed'\n )\n time.sleep(wait_interval)",
"def wait_until_error(f, timeout=0.5):\n\n def inner(*args, **kwargs):\n st = time.perf_counter()\n while time.perf_counter() - st < timeout or timeout < 0:\n try:\n return f(*args, **kwargs)\n except Exception as e:\n if e or not e:\n continue\n\n return inner",
"def testtimeout(seconds):\n\n def fn(cls):\n cls._testtimeout = seconds\n return cls\n\n return fn",
"def start_timer(timeout, callback):\n tmr = threading.Timer(timeout, callback)\n tmr.start()\n return tmr",
"def _firecloud_api_timeout_wrapper(*args, **kwargs):\n if not hasattr(timeout_state, 'timeout'):\n timeout_state.timeout = None\n return __CORE_SESSION_REQUEST__(\n *args,\n **{\n **{'timeout': timeout_state.timeout},\n **kwargs\n }\n )",
"def sliding_timeout(timeout):\n if timeout is None:\n return lambda: None\n deadline = time.time() + timeout\n return lambda: deadline - time.time()",
"def inject_timeout(func):\n\n @six.wraps(func)\n def decorator(self, *args, **kwargs):\n kwargs.setdefault(\"timeout\", self._timeout)\n return func(self, *args, **kwargs)\n\n return decorator",
"def threaded(fn):\n def wrapper(*args, **kwargs):\n Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper",
"def set_timeout(duration, callback=None):\n # SIGALRM is only usable on a unix platform!\n signal.signal(signal.SIGALRM, raise_signal)\n signal.alarm(duration) # alarm after X seconds\n if callback:\n callback()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the nearest perfect square that is less than or equal to num | def nearest_square(num):
root=0
while (root+1)**2<=num:
root+=1
return root**2 | [
"def nearest_square(number):\n\tif math.sqrt(number)%1 == 0:\n\t\treturn number\n\telse:\n\t\tflag = True\n\t\tlower_number = number -1\n\t\twhile flag:\n\t\t\tif math.sqrt(lower_number)%1 == 0:\n\t\t\t\tflag = False\n\t\t\telse: lower_number -= 1\n\n\t\treturn lower_number",
"def min_square(n):\n return int(np.ceil(np.sqrt(n)))",
"def nearest_square(param):\n square_value, square_value_temp, index = 1, 1, 1\n while index <= param / 2:\n square_value_temp = index ** 2\n if square_value_temp > param:\n return square_value\n else:\n square_value = square_value_temp\n index += 1\n print(\"square_value:{}\".format(square_value))",
"def findGreatestSmallerSquaredNumber(n):\n n = n - 1\n return int(m.sqrt(n))",
"def squareRoot(self, num):\n num = float (num)\n from math import sqrt\n result = sqrt(num)\n return result",
"def least_divisor(num, floor=2):\n assert num >= floor\n trial = floor\n while num % trial != 0:\n trial += 1\n return trial",
"def min_even_divisor(num):\n i = 2\n while i <= num:\n if (num % i == 0):\n return i\n i += 1",
"def smallest_factor(n):\n sqrt=n**0.5\n i=2\n while i<=sqrt:\n if n%i==0:\n return i #If we get here, return i as the value.\n i+=1\n return n #If we get through the whole while loop, return n.",
"def find_next_square(sq):\n if sq < 0:\n return -1\n if sq == 0:\n return 1\n if sq == 1:\n return 4\n\n def is_square(apositiveint):\n x = apositiveint // 2\n seen = set([x])\n while x * x != apositiveint:\n x = (x + (apositiveint // x)) // 2\n if x in seen: return False\n seen.add(x)\n return True\n \n if not is_square(sq):\n return -1\n\n import math\n return (math.sqrt(sq) + 1) ** 2",
"def smallest_multiple(N):",
"def square_root(number):\n return pow(number, 0.5)",
"def val_closest_to(n: int, val: int) -> int:\n n_partitions = val // n\n low, high = val - n_partitions * n, n * (n_partitions + 1) - val\n if low < high:\n return n_partitions * n\n return (n_partitions + 1) * n",
"def difference(num):\n return square_of_sum(num) - sum_of_squares(num)",
"def near_ten(num):\n return num % 10 <= 2 or num % 10 >= 8; # want result to equal 1, 2, 8 or 9",
"def closest_anti_prime( n ):\n\n l2 = log( 2.0 )\n l3 = log( 3.0 )\n l5 = log( 5.0 )\n ln = log( n )\n\n x_max = ceil( ln / l2 )\n m = pow( 2.0 , x_max )\t# first guess\n\n for x in range( 0 , int(x_max) + 1 ):\n\n y_max = math.ceil( (ln - l2*x) / l3 )\n\n for y in range( 0 , int(y_max) + 1 ):\n\n z = ceil( ( ln - l2*x - l3*y ) / l5 )\n m_ = pow( 2.0 , x ) * pow( 3.0 , y ) * pow( 5.0 , z )\n if m_ < m : m = m_\n\n return int ( m )",
"def int_sqrt(A):\n if A < 0:\n raise ValueError('math domain error')\n if A == 0 or A == 1:\n return A\n\n floor, ceil = A-1, A\n while not floor**2 < A < ceil**2:\n ceil = (floor + A // -floor // -1) // -2 // -1\n floor = ceil - 1\n if A == ceil**2:\n return ceil\n return floor, ceil",
"def nearest_smallest_element(arr):\n smaller_numbers = []\n\n def nearest(n):\n def find_previous_num():\n for previous_num in reversed(smaller_numbers):\n if previous_num < n:\n return previous_num\n return -1\n\n def append_smaller_number_before_preceding_big(n):\n while len(smaller_numbers) > 0 and smaller_numbers[-1] > n:\n smaller_numbers.pop()\n smaller_numbers.append(n)\n\n previous_num = find_previous_num()\n append_smaller_number_before_preceding_big(n)\n return previous_num\n\n return [nearest(n) for n in arr]",
"def near_golden(perimeter):\n smallest = perimeter\n for w in range(1, perimeter//2):\n h = perimeter//2 - w\n dif = abs(h/w - (w/h-1) )\n if dif < smallest:\n smallest = dif\n h_best = h\n return h_best",
"def _least_divisor_limit(n):\n return int(math.sqrt(n)) + 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get ParallelCluster bucket name. | def get_bucket_name(account_id, region):
return "-".join(
[
"parallelcluster",
S3Bucket.generate_s3_bucket_hash_suffix(account_id, region),
PCLUSTER_S3_BUCKET_VERSION,
"do",
"not",
"delete",
]
) | [
"def get_bucket_name(bucket):\n return _objstore_backend.get_bucket_name(bucket)",
"def cluster_name(self):\n return self.base_config.cluster_name if hasattr(self.base_config, \"cluster_name\") else None",
"def s3_bucket_name():\n if is_local_env():\n return LOCAL_BUCKET_NAME\n\n # get data from parameter store with correct key\n # bucket_name = get_params_from_ssm()[\"CORRECT_KEY\"]\n return \"bucket_name\"",
"def cluster_parameter_group_name(self) -> str:\n return pulumi.get(self, \"cluster_parameter_group_name\")",
"def s3_bucket_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"s3_bucket_name\")",
"def kubernetes_cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kubernetes_cluster_name\")",
"def get_cluster_key(obj):\n try:\n namespace = obj[\"metadata\"][\"namespace\"]\n name = obj[\"metadata\"][\"labels\"][\"gateway.dask.org/cluster\"]\n return f\"{namespace}.{name}\"\n except KeyError:\n return None",
"def cluster_subnet_group_name(self) -> str:\n return pulumi.get(self, \"cluster_subnet_group_name\")",
"def cluster_subnet_group_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_subnet_group_name\")",
"def cluster_hostname(self) -> str:\n return pulumi.get(self, \"cluster_hostname\")",
"def cluster_identifier(self) -> str:\n return jsii.get(self, \"clusterIdentifier\")",
"def cluster_public_key(self) -> str:\n return pulumi.get(self, \"cluster_public_key\")",
"def get_bucket_from_host(self):\r\n\r\n\t\t# TODO: Implement internal map lookup by self.request.host\r\n\t\treturn \"default\"",
"def generate_rclone_bucket_profile_name(bucket_name: str,\n cloud: RcloneClouds) -> str:\n try:\n return cloud.value + bucket_name\n except AttributeError as e:\n with ux_utils.print_exception_no_traceback():\n raise ValueError(f'Value: {cloud} isn\\'t a member of '\n 'Rclone.RcloneClouds') from e",
"def get_user_cluster(self):\n user_cluster_name = ''\n try:\n user_cluster_name = User.objects.get( \\\n username=self.user.username,\n ).cluster_set.first().name\n except:\n self.is_new_user = True\n self.update_clusters()\n user_cluster_name = User.objects.get( \\\n username=self.user.username,\n ).cluster_set.first().name\n return user_cluster_name",
"def cluster_identifier(self) -> str:\n ...",
"def s3_bucket(self) -> str:\n return etl.templates.render_from_config(\n self._s3_bucket_template, context=f\"s3_bucket of schema '{self.name}'\"\n )",
"def cluster_names(self):\n # TODO",
"def _get_bucket(self):\n return self.driver.get_container(self.bucket)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate a 16-character hash suffix for the ParallelCluster S3 bucket. | def generate_s3_bucket_hash_suffix(account_id, region):
return hashlib.sha256((account_id + region).encode()).hexdigest()[0:16] | [
"def get_bucket_name(account_id, region):\n return \"-\".join(\n [\n \"parallelcluster\",\n S3Bucket.generate_s3_bucket_hash_suffix(account_id, region),\n PCLUSTER_S3_BUCKET_VERSION,\n \"do\",\n \"not\",\n \"delete\",\n ]\n )",
"def create_file_name():\n # This generates a name that is between 3 to 63 chars long\n return str(uuid.uuid4())",
"def generate_keyname():\n return str(uuid.uuid1())",
"def _coursier_cache_prefix(self) -> str:\n sha = sha256()\n for repo in self.repos:\n sha.update(repo.encode(\"utf-8\"))\n return sha.digest().hex()",
"def get_hash_key(prefix, key_to_hash):\n key_to_hash = key_to_hash.encode('utf-8')\n key = prefix + \":\" + hashlib.md5(key_to_hash).hexdigest()\n return key",
"def generate_hash(self):\n string_to_hash = self.owner.username + str(datetime.now()) + ACCOUNT_KEY_SALT\n return sha256(string_to_hash).hexdigest()",
"def file_s3_key(fpath, fhash):\n return fhash + \"/\" + os.path.basename(fpath)",
"def generate_md5_name(instance_id):\n assert instance_id is not None\n hashalg = hashlib.md5()\n _safe_hash_update(hashalg, CPX_ILMT_PERSISTENT_ID)\n _safe_hash_update(hashalg, instance_id)\n return hashalg.hexdigest()",
"def deterministic_hash(thing, length=10):\n digest = sha1(json.dumps(hashablize(thing)).encode('ascii')).digest()\n return b32encode(digest)[:length].decode('ascii').lower()",
"def _hash(data):\r\n hash_algo = hashlib.new('md5')\r\n hash_algo.update(pickle.dumps(data))\r\n # prefix allows possibility of multiple applications\r\n # sharing same keyspace\r\n return 'esi_' + hash_algo.hexdigest()",
"def deterministic_hash(thing, length=10):\n hashable = hashablize(thing)\n jsonned = json.dumps(hashable, cls=NumpyJSONEncoder)\n # disable bandit\n digest = sha1(jsonned.encode('ascii')).digest()\n return b32encode(digest)[:length].decode('ascii').lower()",
"def make_salt():\n return uuid.uuid4().hex",
"def getHash(name):\n return hashlib.md5(name).hexdigest()",
"def get_hash(self):\n return hashlib.md5(next(iter(self.get_clusters())).encode('utf-8') + '-'.join(sorted(host.host_id for host in set(self.hosts))).encode('utf-8')).hexdigest()",
"def crack_hash(hsh: int) -> str:\n while True:\n prefix = ''.join(random.choice(string.ascii_letters) for _ in range(5))\n prefixed_template = prefix + '\\0' * SUFFIX_LENGTH\n hash_minor = (hsh - hasher_unsigned(prefixed_template, MASK_64BIT)) & MASK_64BIT\n s = dehash(hash_minor, init=0)\n if s != None and len(s) == SUFFIX_LENGTH:\n # print(s, rhash)\n return (prefix + s)",
"def sha256_based_key(key):\n hash_ = hashlib.sha256()\n hash_.update(key)\n return hash_.hexdigest()",
"def hashGeneretor(inputString):\n\treturn hashlib.sha256(inputString.encode('utf-8')).hexdigest()",
"def code_sha256(self) -> str:\n file_hash = FileHash(hashlib.sha256())\n file_hash.add_file(self.archive_file)\n return base64.b64encode(file_hash.digest).decode()",
"def _get_random_hash():\n return hashlib.md5('%s.%s' % (random.random(), datetime.now().microsecond)).hexdigest()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new S3 bucket. | def create_bucket(self):
AWSApi.instance().s3.create_bucket(bucket_name=self.name, region=self.region) | [
"def create_bucket(name):\r\n s3.create_bucket(Bucket=name)",
"def create_bucket():\n\n s3 = session.resource('s3')\n\n try:\n s3.create_bucket(Bucket=f\"lambda-source-{os.environ['AWS_ACCOUNT']}\", ACL='private')\n print('Created S3 bucket!')\n\n except Exception as e:\n print(f\"Error creating S3 bucket. Exception: {e}.\")",
"def bucket_create():\r\n conn = connect_s3()\r\n bucket = conn.create_bucket(BUCKET_NAME, policy='public-read')\r\n bucket.configure_website('index.html', 'error.html')\r\n print 'Bucket %r created.' % BUCKET_NAME",
"def create_bucket(self):\n # Cohesity doesn't allow to create a bucket natively from s3 client.\n # response = s3_client.create_bucket(Bucket='my-bucket')\n\n # We create a view with s3Only access, since if it's multiprotocol,\n # bucket becomes readonly access for s3.\n body = View()\n body.view_box_id = self._get_storage_domain_id()\n body.name = BUCKET_NAME\n body.protocol_access = ProtocolAccessEnum.KS3ONLY\n self.cohesity_client.views.create_view(body)\n print(\"Bucket %s created on Cohesity.\" % BUCKET_NAME)",
"def test_s3_bucket_creation():\n s3 = boto3.resource(\"s3\") # Will use Localstack\n assert len(list(s3.buckets.all())) == 0\n bucket = s3.Bucket(\"foobar\")\n bucket.create()",
"def create_bucket(bucket, bucket_name):\n return _objstore_backend.create_bucket(bucket, bucket_name)",
"def create(self, bucket_name):\n bucket = self.gcs_client.get_bucket(bucket_name)\n print('Bucket {} created'.format(bucket.name))",
"def create_bucket(bucket_name):\n print('Creating artifacts bucket {}'.format(bucket_name))\n if bucket_exists(bucket_name):\n print('Bucket {} already exists'.format(bucket_name))\n return\n try:\n if args.region is None or args.region == \"us-east-1\":\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n location = {'LocationConstraint': args.region}\n s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)\n except Exception as e:\n print('Failed to create artifacts bucket\\nException: {}'.format(e))\n sys.exit(1)\n print('Successfully created artifacts bucket')",
"def create_bucket(name, policy=None):\n s3 = boto3.client('s3')\n\n s3.create_bucket(Bucket=bucket)\n print(\"S3 bucket %s created.\" % bucket)\n\n if policy:\n s3.put_bucket_policy(\n Bucket=bucket,\n Policy=json.dumps(bucketPolicy)\n )\n print(\"Policy attached to S3 bucket.\")\n\n return bucket",
"def create_bucket(bucket_name, region=None):\n logging.info(\"creating bucket %s, %s\", bucket_name, region)\n # Create bucket\n bucket = None\n\n try:\n if region is None:\n s3_client = boto3.client('s3')\n\n bucket = s3_client.create_bucket(Bucket=bucket_name)\n\n else:\n s3_client = boto3.client('s3', region_name=region)\n location = {'LocationConstraint': region}\n bucket = s3_client.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration=location)\n except ClientError as e:\n logging.error(e)\n\n return bucket",
"def create_s3(self, name, bucket, access_key, secret_access_key, endpoint=None, region=None,\n signature_version=None):\n\n config = {\n 'bucket': bucket,\n 'accessKey': access_key,\n 'secretAccessKey': secret_access_key,\n }\n if endpoint:\n config['endpoint'] = endpoint\n if region:\n config['region'] = region\n if signature_version:\n config['signatureVersion'] = signature_version\n\n storage_provider = models.StorageProvider(\n type='s3',\n name=name,\n config=config,\n )\n\n repository = self.build_repository(repositories.CreateStorageProvider)\n return repository.create(storage_provider)",
"def test_s3_bucket_creation(self, noobaa_obj, created_buckets):\n\n bucketname = create_unique_resource_name(self.__class__.__name__.lower(), 's3-bucket')\n logger.info(f'Creating new bucket - {bucketname}')\n created_buckets.append(noobaa_obj.s3_create_bucket(bucketname=bucketname))",
"def create_bucket(self, bucket_name, description, org_id, retention_rules=None):\n return self.client.buckets_api().create_bucket(\n bucket_name=bucket_name, description=description, org_id=org_id, retention_rules=None\n )",
"def test_create_bucket(self):\n username = self.new_user.username\n bucket = s3buckets.create_bucket(username)\n\n self.assertTrue(isinstance(bucket, Bucket))",
"def create_s3_file(s3_bucket: str, s3_key: str, data: str) -> None:\n s3_client.put_object(Bucket=s3_bucket, Key=s3_key, Body=data)",
"def create_storage_bucket(project=configs.PROJECT_ID,\n bucket_name=configs.LOG_BUCKET_NAME):\n return storage.Client(project).bucket(bucket_name)",
"def connect_s3(self):\n self.out('- Connecting to S3 and making bucket.\\n')\n self.s3 = boto.connect_s3()\n self.bucket = self.s3.create_bucket(self.bucket_name)\n self.bucket = self.s3.get_bucket(self.bucket_name)\n self.bucket.set_acl(self.default_acl)\n self.bucket.set_cors(self.default_cors)",
"def post(self, bucket_name):\n s3_bucket = Bucket(bucket_name)\n return s3_bucket.add_object(api.payload)",
"def amazon_bucket(self):\n try:\n s3_connection = S3Connection(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY)\n except Exception as e:\n raise StandardError(\"The attempt to connect amazon s3 cloud has been failed\")\n\n try:\n print S3_BUCKET_NAME\n bucket = s3_connection.get_bucket(S3_BUCKET_NAME)\n \n except S3ResponseError as e:\n print \"The bucket you are trying to connect doesnt exists yet, \\\n Trying to create the bucket required to store the relevant images\"\n bucket = s3_connection.create_bucket(S3_BUCKET_NAME)\n\n return bucket"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Configure the S3 bucket to satisfy pcluster settings. | def configure_s3_bucket(self):
AWSApi.instance().s3.put_bucket_versioning(bucket_name=self.name, configuration={"Status": "Enabled"})
AWSApi.instance().s3.put_bucket_encryption(
bucket_name=self.name,
configuration={"Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]},
)
deny_http_policy = (
'{{"Id":"DenyHTTP","Version":"2012-10-17","Statement":[{{"Sid":"AllowSSLRequestsOnly","Action":"s3:*",'
'"Effect":"Deny","Resource":["arn:{partition}:s3:::{bucket_name}","arn:{partition}:s3:::{bucket_name}/*"],'
'"Condition":{{"Bool":{{"aws:SecureTransport":"false"}}}},"Principal":"*"}}]}}'
).format(bucket_name=self.name, partition=self.partition)
AWSApi.instance().s3.put_bucket_policy(bucket_name=self.name, policy=deny_http_policy) | [
"def _set_s3(self):\n logger.info(\"Setting up s3 ...\")\n\n cluster_name_id = AXClusterId().get_cluster_name_id()\n\n self._bucket_name = AXClusterDataPath(cluster_name_id).bucket()\n self._bucket = Cloud().get_bucket(self._bucket_name)\n artifact_prefix = AXClusterDataPath(cluster_name_id).artifact()\n self._log_s3_prefix = artifact_prefix\n\n self._bucket_ax_is_external = AXLogPath(cluster_name_id).is_external()\n self._bucket_name_ax = AXLogPath(cluster_name_id).bucket()\n self._bucket_ax = Cloud().get_bucket(self._bucket_name_ax)\n artifact_prefix_ax = AXLogPath(cluster_name_id).artifact()\n\n self._log_s3_prefix_ax = artifact_prefix_ax\n\n assert self._bucket.exists(), \"S3 bucket {} DOES NOT exist\".format(self._bucket_name)\n assert self._bucket_ax.exists(), \"S3 bucket {} DOES NOT exist\".format(self._bucket_name_ax)\n logger.info(\"Using S3 bucket %s, with log prefix %s\", self._bucket.get_bucket_name(), self._log_s3_prefix)\n logger.info(\"Using S3 bucket %s, with log prefix %s for AX\", self._bucket_ax.get_bucket_name(), self._log_s3_prefix_ax)",
"def set_bucket_props(self, transport, bucket, props):\n _validate_bucket_props(props)\n return transport.set_bucket_props(bucket, props)",
"def connect_s3(self):\n self.out('- Connecting to S3 and making bucket.\\n')\n self.s3 = boto.connect_s3()\n self.bucket = self.s3.create_bucket(self.bucket_name)\n self.bucket = self.s3.get_bucket(self.bucket_name)\n self.bucket.set_acl(self.default_acl)\n self.bucket.set_cors(self.default_cors)",
"def config_s3_new(aws_key, aws_secret):\n aws_secret = quote_plus(aws_secret)\n s3_url_key = CONN_ENV_PREFIX + 'S3_CONNECTION'\n s3_url = build_s3_url(aws_key, aws_secret)\n setenv(s3_url_key, s3_url)",
"def init(ctx, bucket=None):\n session = boto3.Session()\n s3 = boto3.client('s3')\n config={}\n\n if bucket == None and os.environ.get('KVS3_BUCKET') == None:\n bucket_init = input('Enter s3 key value store bucket name: ')\n s3_name_requirements = re.compile(\"^[a-z0-9]{1}[a-z0-9\\-\\.]{1,61}[a-z0-9\\.]{1}$\")\n if s3_name_requirements.match(bucket_init):\n config['bucket'] = bucket_init\n with open(INIT_FILE, 'w') as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n else:\n print('kvs3: invalid bucket name')\n sys.exit(1)\n\n validate(s3, bucket)",
"def set_bucket(self, bucket_name):\n if self.does_bucket_exist(bucket_name):\n self.bucket = self.my_s3.Bucket(bucket_name)\n return self.bucket\n\n self.bucket = self.create_bucket(bucket_name)\n return self.bucket",
"def amazon_bucket(self):\n try:\n s3_connection = S3Connection(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY)\n except Exception as e:\n raise StandardError(\"The attempt to connect amazon s3 cloud has been failed\")\n\n try:\n print S3_BUCKET_NAME\n bucket = s3_connection.get_bucket(S3_BUCKET_NAME)\n \n except S3ResponseError as e:\n print \"The bucket you are trying to connect doesnt exists yet, \\\n Trying to create the bucket required to store the relevant images\"\n bucket = s3_connection.create_bucket(S3_BUCKET_NAME)\n\n return bucket",
"def load_config_from_s3(config_s3_bucket, key):\n\n s3_resource = create_resource_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n \n try:\n s3_resource.Bucket(config_s3_bucket).download_file(key, os.path.basename(key))\n except Exception as err:\n print(f\"unable to download file from {config_s3_bucket} : {str(err)}\")\n sys.exit(1)\n\n set_config(key)",
"def save_cloud_config(self, blueprint):\n response = AWSSetup._save_s3_resource(self.appName, self._s3Res, 'text/plain', json.dumps(blueprint, indent=2), self.config)\n return response",
"def set_bucket_type_props(self, transport, bucket_type, props):\n return transport.set_bucket_type_props(bucket_type, props)",
"def set_bucket_type_props(self, transport, bucket_type, props):\n _validate_bucket_props(props)\n return transport.set_bucket_type_props(bucket_type, props)",
"def __init_from_parallelcluster_config(self, parallelcluster_config_file, log):\n with open(parallelcluster_config_file) as config_file:\n parallelcluster_config = ConfigParser()\n parallelcluster_config.read_file(config_file)\n log.info(\n \"Looking for AWS credentials and region in the AWS ParallelCluster configuration file %s\"\n % parallelcluster_config_file\n )\n try:\n self.aws_access_key_id = parallelcluster_config.get(\"aws\", \"aws_access_key_id\")\n except (NoOptionError, NoSectionError):\n pass\n try:\n self.aws_secret_access_key = parallelcluster_config.get(\"aws\", \"aws_secret_access_key\")\n except (NoOptionError, NoSectionError):\n pass\n try:\n self.region = parallelcluster_config.get(\"aws\", \"aws_region_name\")\n except (NoOptionError, NoSectionError):\n pass",
"def _get_s3_config(self):\n bucket = self.configuration.get(\"Bucket Name\")\n if not bucket:\n raise ValueError(\"Bucket Name required to confirm configuration\")\n\n if not self.configuration.get(\"Prefix\"):\n prefix = \"\"\n else:\n prefix = self.configuration.get(\"Prefix\")\n\n return bucket, prefix",
"def test_get_settings__s3_overrides(direction, acl, config_file):\n\n with open(config_file, \"w\") as openconf:\n openconf.write(\"\\n\".join([\n \"[pypicloud]\",\n \"repository:http://test-server/pypi\",\n \"username:test-username\",\n \"password:test-password\",\n \"bucket:bucket-name\",\n \"access:access-key\",\n \"secret:secret-key\",\n \"region:test-region\",\n ]))\n\n pypicloud_tools.sys.argv = [direction]\n\n if acl:\n pypicloud_tools.sys.argv.extend([\"--acl\", acl])\n\n pypicloud_tools.sys.argv.extend([\n \"--bucket\",\n \"fake-bucket\",\n \"--access\",\n \"fake-access\",\n \"--secret\",\n \"fake-secret\",\n \"--region\",\n \"fake-region\",\n \"--config\",\n config_file,\n \"some_file\",\n \"some_other_file\",\n ])\n\n settings = pypicloud_tools.get_settings(**{direction: True})\n expected_s3 = pypicloud_tools.S3Config(\n \"fake-bucket\", \"fake-access\", \"fake-secret\", acl, \"fake-region\"\n )\n expected_pypi = pypicloud_tools.PyPIConfig(\n \"http://test-server/pypi\", \"test-username\", \"test-password\"\n )\n\n assert settings.s3 == expected_s3\n assert settings.pypi == expected_pypi\n assert settings.items == [\"some_file\", \"some_other_file\"]",
"def setup_s3():\n s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n logging.info('Successfully initialized S3 client')\n return s3",
"def create_bucket(self):\n AWSApi.instance().s3.create_bucket(bucket_name=self.name, region=self.region)",
"def configInstance():\r\n\r\n return ibm_boto3.client(\r\n \"s3\",\r\n ibm_api_key_id=COS_API_KEY_ID,\r\n ibm_service_instance_id=SERVICE_INTANCE_ID,\r\n config=Config(signature_version=\"oauth\"),\r\n endpoint_url=COS_ENDPOINT\r\n )",
"def upload_config(out, bucket, key, region):\n auth = {\n \"ca_crt\": read(out, \"CertAuth.crt\"),\n \"client_crt\": read(out, \"Client.crt\"),\n \"client_pkcs8_key\": read(out, \"Client.pkcs8.key\"),\n \"server_crt\": read(out, \"Server.crt\"),\n \"server_pkcs8_key\": read(out, \"Server.pkcs8.key\"),\n }\n\n import boto3\n s3 = boto3.client(\"s3\", region_name=region)\n s3.put_object(\n Bucket=bucket,\n Key=key,\n Body=json.dumps(auth, indent=2, sort_keys=True),\n ServerSideEncryption=\"AES256\")",
"def __init__(self, s3_config: Union[dict, None]):\n if s3_config is not None:\n if isinstance(s3_config, s3fs.S3FileSystem):\n s3 = s3_config\n else:\n key = s3_config['accessKey']\n secret = s3_config['accessSecret']\n s3 = s3fs.S3FileSystem(key=key, secret=secret)\n else:\n s3 = None\n self.s3 = s3"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get object key of an artifact. | def get_object_key(self, object_type: S3FileType, object_name):
return "/".join([self.artifact_directory, object_type.value, object_name]) | [
"def key(obj):\n try:\n return obj.key()\n except AttributeError:\n return obj",
"def get_key(bucket, obj):\n\n key = bucket.get_key(obj)\n if not key or not key.exists():\n msg = _(\"Could not find key %(obj)s in bucket %(bucket)s\") % locals()\n logger.error(msg)\n raise exception.NotFound(msg)\n return key",
"async def get_key_attribute_of_object(self, object_type: Text) -> Text:\n return self.key_attribute[object_type]",
"def RevisionKeyName(rev_id, repository_name):\n return '%s:%s' % (repository_name, rev_id)",
"def get_object_key(obj: rhp.RoomObject):\n return ObjectKey(obj.clean_category, obj.center)",
"def get_key_from_id(species_id):\n definition = _get_by_id(species_id)\n return definition['key']",
"def _get_key(self, entity_id):\n if entity_id:\n return self.client.key(self.kind, entity_id)\n return self.client.key(self.kind)",
"def get_key(self, fileobj):\n mapping = self.get_map()\n if mapping is None:\n raise RuntimeError('Selector is closed')\n try:\n return mapping[fileobj]\n except KeyError:\n raise KeyError(\"{!r} is not registered\".format(fileobj)) from None",
"def entity_key(entity):\n key = entity.key or entity.string\n return ':'.join([entity.resource.path, key])",
"def _get_key(sample, project):\n return sample + \".\" + project",
"def MigrationKeynameFromRevisionObj(up_to_revision):\n return up_to_revision.key().name()",
"def get_cached_artifact(self, key):\n return self._artifacts_manager.artifacts[key]",
"def get_info_key(obj):\n return obj.attributes.get(key=\"key\", category=DATA_INFO_CATEGORY, strattr=True)",
"def getKey(self, element):\r\n return element._key",
"def id(obj):\n try:\n return key(obj).id_or_name()\n except AttributeError:\n return obj",
"def _get_artifact_name(self, artifact_name: str):\n # Current artifact naming convention is:\n # \"pipeline_name:run_id:component_name:artifact_name:0\"\n split_names = artifact_name.split(':')\n if len(split_names) != 5:\n return artifact_name\n return split_names[3]",
"def _var_key(var):\n # pylint: disable=protected-access\n var = distribute_utils.value_container(var)\n if (distribute_utils.is_distributed_variable(var) and\n not ops.executing_eagerly_outside_functions()):\n return (var.graph, var._shared_name)\n if hasattr(var, \"op\"):\n return (var.op.graph, var.op.name)\n return var._unique_id\n # pylint: enable=protected-access",
"def key(self):\n\n return self.__entity.key()",
"def _get_artifact_path(self, artifact_name: str):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clean up the S3 bucket artifact directory. | def delete_s3_artifacts(self):
LOGGER.debug(
"Cleaning up S3 resources bucket_name=%s, service_name=%s, remove_artifact=%s",
self.name,
self._service_name,
self._cleanup_on_deletion,
)
if self.artifact_directory and self._cleanup_on_deletion:
try:
LOGGER.info("Deleting artifacts under %s/%s", self.name, self.artifact_directory)
AWSApi.instance().s3_resource.delete_object(bucket_name=self.name, prefix=f"{self.artifact_directory}/")
AWSApi.instance().s3_resource.delete_object_versions(
bucket_name=self.name, prefix=f"{self.artifact_directory}/"
)
except AWSClientError as e:
LOGGER.warning(
"Failed to delete S3 artifact under %s/%s with error %s. Please delete them manually.",
self.name,
self.artifact_directory,
str(e),
) | [
"def s3cleanup(request):\n s3interface = S3Interface()\n\n deleted = s3interface.delete_all_images()\n print('Deleted %d object(s) from S3 bucket \"%s\" using prefix \"%s\"' % (\n len(deleted), s3interface.bucket_name, s3interface.prefix))",
"def _delete_file_from_s3(self, artifact_id, bucket, key):\n logger.info('Deleting artifact (id: %s) from s3 (bucket: %s, key: %s) ...', artifact_id, bucket, key)\n Cloud().get_bucket(bucket).delete_object(key)\n return True",
"def delete_bucket():\n\n s3 = session.resource('s3')\n\n try:\n bucket = s3.Bucket(f\"lambda-source-{os.environ['AWS_ACCOUNT']}\")\n bucket.objects.all().delete()\n bucket.delete()\n print('Deleted S3 bucket!')\n\n except Exception as e:\n print(f\"Error deleting S3 bucket. Exception: {e}.\")",
"def delete_bucket(self):\n self.s3_client.delete_bucket(Bucket=BUCKET_NAME)\n print(\"Deleted Bucket: %s\" % BUCKET_NAME)",
"def delete_files(bucket_name):\n s3 = boto3.resource(\"s3\")\n\n bucket = s3.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n # Delete the bucket if we want to \n #bucket.delete()",
"def delete_s3_directory(s3_directory):\n bucket_name, rest_of_dir_path = parse_s3_location(s3_directory)\n bucket = get_s3_bucket(bucket_name)\n rest_of_dir_path = rest_of_dir_path \\\n if rest_of_dir_path[-1] == '/' else rest_of_dir_path + '/'\n bucket.delete_keys(bucket.list(prefix=rest_of_dir_path))",
"def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join(\n [\"%(releases_path)s/%(release)s\" % {'releases_path': env.releases_path, 'release': release} for release in\n directories])\n run(\"rm -rf %(directories)s\" % {'directories': env.directories})",
"def delete_file_from_bucket(self):\n self.s3_client.delete_object(Bucket=BUCKET_NAME, Key=FILENAME)\n print(\"File %s deleted from Bucket: %s\" % (FILENAME, BUCKET_NAME))",
"def deploy_to_s3(self):\r\n self.tempdir = tempfile.mkdtemp('s3deploy')\r\n\r\n for keyname, absolute_path in self.find_file_paths():\r\n self.s3_upload(keyname, absolute_path)\r\n\r\n shutil.rmtree(self.tempdir, True)\r\n return True",
"def clear_amazon_s3_bucket():\n AWS_ACCESS_KEY_ID = env['amazon']['access_key_id']\n AWS_SECRET_ACCESS_KEY = env['amazon']['secret_access_key']\n user_images_bucket_name = 'hawkist-avatar'\n listing_images_bucket_name = 'hawkist-item-images'\n link_to_user_images_bucket = 'http://{0}.s3.amazonaws.com/'.format(user_images_bucket_name)\n link_to_listing_images_bucket = 'https://s3-eu-west-1.amazonaws.com/{0}/'.format(listing_images_bucket_name)\n\n # create connection to S3\n s3 = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n\n # get all user images (avatars and thumbnails)\n user_images_bucket = s3.get_bucket(user_images_bucket_name)\n user_images_bucket_names = [o.key for o in user_images_bucket.list()]\n\n # get all listing images\n listing_images_bucket = s3.get_bucket(listing_images_bucket_name)\n listing_images_bucket_names = [o.key for o in listing_images_bucket.list()]\n\n # get data from db\n with new_session() as session:\n all_users = session.query(User)\n user_avatar_db_names = [user.avatar.split('?')[0].replace(link_to_user_images_bucket, '')\n for user in all_users]\n user_thumbnail_db_names = [user.thumbnail.split('?')[0].replace(link_to_user_images_bucket, '')\n for user in all_users]\n platform_images_db_names = [platform.image_url.split('?')[0].replace(link_to_user_images_bucket, '')\n for platform in session.query(Platform)]\n listing_images_db_names = [listing_photo.image_url.split('?')[0].replace(link_to_listing_images_bucket, '')\n for listing_photo in session.query(ListingPhoto)]\n\n # first check avatars\n for user_image_name in user_images_bucket_names:\n if user_image_name not in user_avatar_db_names and user_image_name not in user_thumbnail_db_names \\\n and user_image_name not in platform_images_db_names:\n # delete this image from bucket\n delete_file_from_s3(user_images_bucket_name, user_image_name)\n\n for listing_image_name in listing_images_bucket_names:\n if listing_image_name not in listing_images_db_names:\n delete_file_from_s3(listing_images_bucket_name, listing_image_name)",
"def delete_s3_buckets():\n s3_resource = boto3.resource('s3')\n print('Deleting S3 Buckets')\n for bucket in s3_resource.buckets.all():\n print('Starting object deletion for S3 Bucket {}'.format(bucket.name))\n bucket.object_versions.delete()\n print('Deleting S3 Bucket {}'.format(bucket.name))\n bucket.delete()\n print('S3 Buckets deleted')",
"def prepare_outdir():\n\n out_dir, s3_dest_folder = file_destination()\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n \n os.makedirs(out_dir)\n\n return out_dir, s3_dest_folder",
"def deploy_assets_to_s3():\r\n# run('s3cmd del --recursive s3://%(s3_bucket)s/%(application)s/%(admin_media_prefix)s/' % env)\r\n# run('s3cmd -P --guess-mime-type sync %(venv_path)s/src/django/django/contrib/admin/media/ s3://%(s3_bucket)s/%(application)s/%(site_media_prefix)s/' % env)\r\n# run('s3cmd del --recursive s3://%(s3_bucket)s/%(application)s/%(newsapps_media_prefix)s/' % env)\r\n# run('s3cmd -P --guess-mime-type sync %(venv_path)s/src/newsapps/newsapps/na_media/ s3://%(s3_bucket)s/%(application)s/%(newsapps_media_prefix)s/' % env)\r\n pass",
"def _remove_ancillary_data(self, bucket_prefix: str) -> None:\n bname, _ = parse_bucket_name_key(self.results_bucket)\n if not self.dry_run:\n s3_bucket = self.s3.Bucket(bname)\n s3_bucket.objects.filter(Prefix=bucket_prefix).delete()\n else:\n logging.debug(f'dry-run: would have removed {bname}/{bucket_prefix}')",
"def _cleanup ( self ):\n super ( TemporaryDistroot, self )._cleanup()\n shutil.rmtree ( self._root )",
"def test_delete_object_from_s3(self):\n set_up_directories([settings.SRC_DIR])\n object_downloader = self.configure_uploader([\"7d24b2da347b48fe9e59d8c5d4424235.tar\"])\n object_to_delete = \"7d24b2da347b48fe9e59d8c5d4424235.tar\"\n object_downloader.delete_object_from_s3(object_to_delete)\n files_in_bucket = [bucket_object.key for bucket_object in object_downloader.bucket.objects.all()]\n self.assertNotIn(object_to_delete, files_in_bucket)",
"def bucket_delete():\r\n if not confirm(\"Are you sure you want to delete the bucket %r?\" % BUCKET_NAME):\r\n abort('Aborting at user request.')\r\n conn = connect_s3()\r\n conn.delete_bucket(BUCKET_NAME)\r\n print 'Bucket %r deleted.' % BUCKET_NAME",
"def deleteS3files(self):\n s3 = boto3.resource('s3',\n aws_access_key_id=self.s3_key,\n aws_secret_access_key=self.s3_secret)\n bucket = s3.Bucket(self.s3_bucket)\n bucket_files = [x.key for x in bucket.objects.all()]\n delete_objects = []\n if bucket_files:\n for s3_file in bucket_files:\n delete_objects.append({'Key': s3_file})\n try:\n response = bucket.delete_objects(Delete={ 'Objects': delete_objects} )\n except botocore.exceptions.ClientError as e:\n self.logger.error(e)\n self.logger.error(delete_objects)\n return False",
"def clean_buckets() -> list:\n helpers.starting_clean_print(RESOURCE_NAME)\n s3_client = boto3.client(BOTO3_NAME)\n buckets = get_buckets(s3_client)\n terminated_items = delete_buckets(buckets)\n helpers.finished_clean_print(RESOURCE_NAME, terminated_items)\n return terminated_items"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Upload the bootstrapped file to indicate that the bucket is configured successfully. | def upload_bootstrapped_file(self):
AWSApi.instance().s3.put_object(
bucket_name=self.name,
body="bucket is configured successfully.",
key="/".join([self._root_directory, self._bootstrapped_file_name]),
) | [
"def object_upload():\n # SELECT BUCKET\n if not (bucket := select_bucket('Which bucket would you like to upload the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT FILE\n my_file = Path(input('What is the full path to the file you wish to upload: '))\n if not my_file.is_file():\n input(f'{my_file} is not a valid file path. Press enter to go back to the main menu.')\n return\n\n # UPLOAD FILE\n try:\n s3.meta.client.upload_file(str(my_file), bucket, my_file.name)\n print(f'{str(my_file)} has been uploaded to {bucket}.')\n except ClientError as e:\n print(e)\n print('Uh oh. Something went wrong...\\n')\n\n input('Press enter to continue.')",
"def upload(self, bucketname, filename):\n bucket = self.gcs_client.get_bucket(bucketname)\n blob = bucket.blob(filename)\n blob.upload_from_filename(self.dir + '/' + filename)\n print('File {} uploaded to {}.'.format(\n self.dir + '/' + filename,\n filename))",
"def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e",
"def upload_file(self, file_name, upload_name):\n try:\n self.bucket.upload_file(file_name,upload_name)\n except Exception as e:\n print(\"could not upload: \" + file_name)\n print(e)",
"def upload_file(file,bucketID):\r\n\tbucket = storage_client.get_bucket(bucketID)\r\n\tos.chdir(dir_path)\r\n\tprint(\"Uploading\", file, \"to bucket\")\r\n\tblob = bucket.blob(file)\r\n\tblob.upload_from_filename(file)\r\n\tprint(blob.public_url)\r\n\tos.remove(file)\r\n\tos.chdir(original_path)",
"def upload_file(self, filename, bucket, key, **kwargs) -> None:\n self.conn.upload_file(filename, bucket, key, **kwargs)",
"def writeToBucket(file_name, bucket_name):\n try:\n print(\"Looking for [webshot] section in AWS credentials file...\")\n session = boto3.Session(profile_name=\"webshot\")\n except botocore.exceptions.ProfileNotFound:\n try:\n print(\"Using [default] credentials in AWS credentials file...\")\n session = boto3.Session(profile_name=\"default\")\n except botocore.exceptions.ProfileNotFound:\n print(\"No valid AWS credentials file with [default] or [webshot].\")\n return\n \n client = session.client(\"s3\")\n print(f\"Uploading {file_name} to bucket {bucket_name}...\")\n try:\n client.upload_file(file_name, bucket_name, file_name)\n print(\"Uploaded successfully to bucket.\")\n except Exception as e:\n # Not good Exception handling, I know, but it's just an exercise. :)\n print(e)",
"def check_bucket_is_bootstrapped(self):\n AWSApi.instance().s3.head_object(\n bucket_name=self.name, object_name=\"/\".join([self._root_directory, self._bootstrapped_file_name])\n )",
"def upload_file(self, source_name, target_name, bucket_name):\n self.client.upload_file(\n source_name,\n bucket_name,\n target_name\n )",
"def upload_to_bucket_v2(bucket_name, prefix, root_path='', file=None, local_dir=''):\n\n cores = multiprocessing.cpu_count()\n threads = []\n n = cores\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n upload_dir = local_dir + root_path\n\n if file != None:\n \"\"\"Uploads a file to the bucket.\"\"\"\n blob = bucket.blob(prefix + \"/\" + file)\n blob.upload_from_filename(upload_dir + \"/\" + file)\n\n print('File {} uploaded to {}.'.format(\n file,\n bucket_name + \"/\" + prefix))\n\n else:\n \"\"\"Uploads a directory to the bucket.\"\"\"\n for root, dirs, files in os.walk(upload_dir):\n\n for file in files:\n blob = bucket.blob(prefix + \"/\" + file)\n p = threading.Thread(target=file_upload_to_bucket, args=(blob, root + \"/\" + file))\n threads.append(p)\n\n print('Uploading to {}.'.format(bucket_name + \"/\" + prefix))\n with tqdm(total=len(threads)) as pbar:\n while len(threads) > 0:\n if len(threads) < n:\n n = len(threads)\n warnings.warn(\n f\"Low amount of files to process, lower than number of CPU cores, consisting of {n}\",\n ResourceWarning)\n for i in range(n):\n try:\n threads[i].start()\n except:\n warnings.warn(f\"Low amount of files to process, lower than number of CPU cores, consisting of {n}\",ResourceWarning)\n n = len(threads)\n pass\n for i in range(n):\n threads[i].join()\n pbar.update(1)\n\n threads = threads[n:]",
"def upload_file( filepath, bucket):\n try:\n k = Key(bucket)\n k.key = basename(filepath)\n k.set_contents_from_filename(filepath)\n k.set_acl('public-read')\n except Exception,e:\n print e\n return None\n return k",
"def upload_file(self, file_name, file_path):\n try:\n self.client.fput_object(bucket_name=self.bucket,\n object_name=file_name,\n file_path=file_path)\n except BaseException:\n self.log.error('Error uploading file {} to bucket {}'.format(file_path, self.bucket), exc_info=True)\n raise",
"def upload_file(self, file, bucketname, key):\n self.logger.debug((\"send %s to %s\"\n % (file,self.sandbox_storage_service)))\n if self.sandbox_storage_service.lower() == 's3':\n try:\n conn = boto.connect_s3(self.aws_accesskey,self.aws_secretkey)\n bucket = conn.lookup(bucket_name=bucketname.lower())\n k = boto.s3.key.Key(bucket)\n k.key = key\n self.logger.debug((\"store file %s as key %s to bucket %s\"\n % (file, k.key, bucket.name)))\n k.set_contents_from_filename(file)\n return True\n except:\n self.logger.critical(\"Error while uploading.\")\n self.logger.critical(\"Traceback:\\n%s\"%traceback.format_exc())\n else:\n self.logger.error(\"unkown storage service\")\n return False",
"def deploy_website_to_target_bucket(event, context, target_bucket, files):\n\n print(f'Starting admin website deployment to {target_bucket} bucket')\n\n try: \n for webSiteFile in files:\n with open(webSiteFile) as f:\n content = f.read()\n\n encoded_string = content.encode(\"utf-8\")\n website_key = os.path.relpath(webSiteFile, '/tmp/website-contents') \n guessed_mime_type = mimetypes.guess_type(webSiteFile)\n \n if website_key.startswith('../'):\n file_key = website_key[len('../'):]\n else:\n file_key = website_key\n \n print('Key being uploaded to S3: ' + file_key)\n\n if guessed_mime_type is None:\n raise Exception(\"Failed to guess mimetype\")\n \n mime_type = guessed_mime_type[0] \n \n if mime_type is None:\n mime_type = 'binary/octet-stream'\n \n s3.Bucket(target_bucket).put_object(\n Key=file_key, \n Body=encoded_string,\n ContentType=mime_type\n )\n\n print(f'{file_key} uploaded to {target_bucket}')\n\n print(f'Admin website deployed successfully to {target_bucket} bucket') \n except ClientError as ex: \n print(f'Target Bucket {target_bucket} with error: {ex}') \n cfnResponse.send(event, context, cfnResponse.FAILED, {}, \"CustomResourcePhysicalID\")",
"def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response",
"def test_can_upload_file_to_presigned_url(self):\n file_contents = b\"blahfilecontents\"\n file = BytesIO(file_contents)\n # S3 expects a base64-encoded MD5 checksum\n md5 = hashlib.md5(file_contents)\n md5_checksum = md5.hexdigest()\n md5_checksum_base64 = codecs.encode(codecs.decode(md5_checksum, \"hex\"), \"base64\").decode()\n\n filename = \"blahfile.jpg\"\n filepath = generate_object_storage_name(md5_checksum, filename)\n\n ret = get_presigned_upload_url(filepath, md5_checksum_base64, 1000, len(file_contents))\n url = ret[\"uploadURL\"]\n content_type = ret[\"mimetype\"]\n\n resp = requests.put(\n url,\n data=file,\n headers={\n \"Content-Type\": content_type,\n }\n )\n resp.raise_for_status()",
"def upload_file(aws_access_key_id, aws_secret_access_key, filename_s3, image_data, bucket_name, human_detected):\n\n # Create an S3 client\n s3 = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)\n\n if human_detected:\n filename_s3 = \"human/{}\".format(filename_s3)\n else:\n filename_s3 = \"false_positive/{}\".format(filename_s3)\n\n # Uploads the given file using a managed uploader, which will split up large\n # files automatically and upload parts in parallel.\n print(\"Uploading file {} to Amazon S3\".format(filename_s3))\n image_decoded = base64.b64decode(image_data)\n s3.upload_fileobj(BytesIO(image_decoded), bucket_name, filename_s3, ExtraArgs={'ContentType': \"image/jpeg\", 'ACL': \"public-read\"})\n\n # Generate url\n url = s3.generate_presigned_url('get_object', Params = {'Bucket': bucket_name, 'Key': filename_s3}, ExpiresIn = 7*24*3600)\n\n return url",
"def deploy_to_s3(self):\r\n self.tempdir = tempfile.mkdtemp('s3deploy')\r\n\r\n for keyname, absolute_path in self.find_file_paths():\r\n self.s3_upload(keyname, absolute_path)\r\n\r\n shutil.rmtree(self.tempdir, True)\r\n return True",
"def upload_artifacts(file_name, bucket_name, object_name):\n print('Uploading artifacts to {}/{}'.format(bucket_name, object_name))\n try:\n s3_client.upload_file(file_name, bucket_name, object_name)\n except Exception as e:\n print('Failed to upload artifacts\\nException: {}'.format(e))\n sys.exit(1)\n print('Successfully uploaded artifacts')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether the bucket is configured successfully by looking for the bootstrapped file. | def check_bucket_is_bootstrapped(self):
AWSApi.instance().s3.head_object(
bucket_name=self.name, object_name="/".join([self._root_directory, self._bootstrapped_file_name])
) | [
"def check_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.get_object(\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"NoSuchKey\":\n return False\n else:\n raise e\n else:\n return True",
"def __ensure_bucket_availability(self):\n storage_client = storage.Client()\n if storage_client.lookup_bucket(self.__bucket_name) is None:\n # Create the new bucket\n storage_client.create_bucket(self.__bucket_name)",
"def test_bucket_does_not_exist(self):\n self.assertEqual(0, self.engine.create_bucket_calls)\n self.assertEqual(0, self.ui.asked_create)\n\n self.engine.configure()\n\n self.assertEqual(1, self.engine.get_bucket_calls)\n self.assertEqual(1, self.engine.create_bucket_calls)\n self.assertEqual(1, self.ui.asked_create)",
"def check_bucket(file,bucketID):\r\n\tbucket = storage_client.bucket(bucketID)\r\n\tcheck = storage.Blob(bucket=bucket, name=file).exists(storage_client)\r\n\treturn check",
"def test_bucket_exists(self):\n self.assertFalse(self.storage.bucket_exists(self.temp_bucket_name))\n self.storage.make_bucket(self.temp_bucket_name)\n self.assertTrue(self.storage.bucket_exists(self.temp_bucket_name))\n self.storage.remove_bucket(self.temp_bucket_name)",
"def verify_r2_bucket(name: str) -> bool:\n r2 = cloudflare.resource('s3')\n bucket = r2.Bucket(name)\n return bucket in r2.buckets.all()",
"def upload_bootstrapped_file(self):\n AWSApi.instance().s3.put_object(\n bucket_name=self.name,\n body=\"bucket is configured successfully.\",\n key=\"/\".join([self._root_directory, self._bootstrapped_file_name]),\n )",
"def test_get_bucket_success(self):\n bucket = self.cm.get_bucket(\"testVaultName\")\n self.assertEqual(bucket.name, \"testVaultName\")\n self.assertEqual(bucket.id, 274)",
"def verify_ibm_cos_bucket(name: str) -> bool:\n return get_ibm_cos_bucket_region(name) != ''",
"def test_asset_saintsxctf_s3_bucket_exists(self) -> None:\n bucket_name = 'asset.saintsxctf.com'\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n self.assertTrue(s3_bucket.get('Name') == bucket_name)",
"def s3_bucket_exists(self, bucketName):\n\n try:\n self._s3Res.meta.client.head_bucket(Bucket=bucketName)\n except Exception as e:\n return False\n return True",
"def __checkS3Bucket__(self, fileLocation=None):\n cmd = \"aws s3 ls \" + fileLocation\n try:\n output = subprocess.call(cmd, shell=True)\n if output == 1:\n return 'False'\n else:\n return 'True'\n\n except subprocess.CalledProcessError as e:\n print e.output\n arcpy.AddError(e.output)\n tb = traceback.format_exc()\n print tb\n arcpy.AddError(tb)\n self.__sm__(e.output)\n self.__sm__(tb)\n sys.exit()\n else:\n self.__sm__('Received list of elements in bucket')",
"def test_bucket_creation(self):\n res = self.client.post('/buckets/', json=self.bucket)\n self.assertEqual(res.status_code, 201)",
"def test_get_bucket_conn__auth_fail():\n\n null_options = pypicloud_tools.S3Config('test', None, None, None, None)\n with pytest.raises(SystemExit) as error:\n pypicloud_tools.get_bucket_conn(null_options)\n\n assert \"~/.aws/credentials\" in error.value.args[0]",
"def test_buckets_access_authorized(self):\n self.client.login(username='user', password='userexample')\n\n response = self.client.get(reverse('buckets:list'))\n self.assertContains(response, 'bucket start')",
"def _get_bucket_status(self, bucket_list=None):\n\t\tif bucket_list is None:\n\t\t\tbucket_list = self.get_bucket_list()\n\n\t\tbucket_list['hasMeta'] = False\n\t\tbucket_list['hasDB'] = False\n\t\tfor idx, bucket in bucket_list.iterrows():\n\t\t\tlp = self._hosts[bucket.hostalias]\n\t\t\tif lp.has_file(bucket.rawname, '.iago.conf'):\n\t\t\t\tbucket_list.loc[idx, 'hasMeta'] = True\n\t\t\tif lp.has_file(bucket.rawname, 'iagodb.json'):\n\t\t\t\tbucket_list.loc[idx, 'hasDB'] = True\n\n\t\treturn bucket_list",
"def test_get_bucket_lifecycle_configuration(make_stubber, make_unique_name,\n make_bucket):\n stubber = make_stubber(bucket_wrapper, 'get_s3')\n bucket_name = make_unique_name('bucket')\n\n make_bucket(stubber, bucket_wrapper, bucket_name, stubber.region_name)\n\n stubber.stub_get_bucket_lifecycle_configuration_error(\n bucket_name, 'NoSuchLifecycleConfiguration'\n )\n\n with pytest.raises(ClientError) as exc_info:\n _ = bucket_wrapper.get_lifecycle_configuration(bucket_name)\n assert exc_info.value.response['Error']['Code'] == 'NoSuchLifecycleConfiguration'",
"def init(ctx, bucket=None):\n session = boto3.Session()\n s3 = boto3.client('s3')\n config={}\n\n if bucket == None and os.environ.get('KVS3_BUCKET') == None:\n bucket_init = input('Enter s3 key value store bucket name: ')\n s3_name_requirements = re.compile(\"^[a-z0-9]{1}[a-z0-9\\-\\.]{1,61}[a-z0-9\\.]{1}$\")\n if s3_name_requirements.match(bucket_init):\n config['bucket'] = bucket_init\n with open(INIT_FILE, 'w') as outfile:\n yaml.dump(config, outfile, default_flow_style=False)\n else:\n print('kvs3: invalid bucket name')\n sys.exit(1)\n\n validate(s3, bucket)",
"def test_get_settings__no_s3_config(config_file, capfd):\n\n pypicloud_tools.sys.argv = [\"list\", \"--config\", config_file]\n with pytest.raises(SystemExit):\n pypicloud_tools.get_settings(listing=True)\n\n out, err = capfd.readouterr()\n assert \"ERROR: Could not determine S3 settings.\" in err\n assert DEFAULT_CONFIG in out # stdout should be a help message..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Upload config file to S3 bucket. | def upload_config(self, config, config_name, format=S3FileFormat.YAML):
return self.upload_file(file_type=S3FileType.CONFIGS, content=config, file_name=config_name, format=format) | [
"def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response",
"def uploadFileToS3(self, filename):\n destDir = '' # Root folder of the S3 bucket\n destpath = os.path.join(destDir, os.path.basename(filename))\n print 'Uploading %s to Amazon S3 bucket %s' % (filename, self.bucket_name)\n\n k = Key(self.bucket)\n k.key = destpath\n k.set_contents_from_filename(filename, reduced_redundancy=True)",
"def upload_bootstrapped_file(self):\n AWSApi.instance().s3.put_object(\n bucket_name=self.name,\n body=\"bucket is configured successfully.\",\n key=\"/\".join([self._root_directory, self._bootstrapped_file_name]),\n )",
"def upload_config(out, bucket, key, region):\n auth = {\n \"ca_crt\": read(out, \"CertAuth.crt\"),\n \"client_crt\": read(out, \"Client.crt\"),\n \"client_pkcs8_key\": read(out, \"Client.pkcs8.key\"),\n \"server_crt\": read(out, \"Server.crt\"),\n \"server_pkcs8_key\": read(out, \"Server.pkcs8.key\"),\n }\n\n import boto3\n s3 = boto3.client(\"s3\", region_name=region)\n s3.put_object(\n Bucket=bucket,\n Key=key,\n Body=json.dumps(auth, indent=2, sort_keys=True),\n ServerSideEncryption=\"AES256\")",
"def upload_settings(config_files, bucket_name, prefix, dry_run=False) -> None:\n settings_files = etl.config.gather_setting_files(config_files)\n logger.info(\"Found %d settings file(s) to deploy\", len(settings_files))\n\n uploader = etl.s3.S3Uploader(bucket_name, dry_run=dry_run)\n for fullname in settings_files:\n object_key = os.path.join(prefix, \"config\", os.path.basename(fullname))\n uploader(fullname, object_key)",
"def upload(self, local_file_path, s3_bucket_name, s3_file_path):\n self.s3_client.upload_file(local_file_path,\n s3_bucket_name,\n s3_file_path)",
"def upload_file(s3_resource, bucket_name, file, key_name, log=True):\n bucket = s3_resource.Bucket(bucket_name)\n bucket.upload_file(file, key_name)\n if log:\n logger.info(f'{file} uploaded to {bucket_name}/{key_name}')",
"def upload_to_S3(bucket, key, file_object):\n k = Key(bucket)\n k.key = key\n k.set_contents_from_file(file_object)\n k.set_acl('public-read')\n return k.generate_url(expires_in=0, query_auth=False, force_http=True)",
"def upload_file(self, filename, bucket, key, **kwargs) -> None:\n self.conn.upload_file(filename, bucket, key, **kwargs)",
"def writeToBucket(file_name, bucket_name):\n try:\n print(\"Looking for [webshot] section in AWS credentials file...\")\n session = boto3.Session(profile_name=\"webshot\")\n except botocore.exceptions.ProfileNotFound:\n try:\n print(\"Using [default] credentials in AWS credentials file...\")\n session = boto3.Session(profile_name=\"default\")\n except botocore.exceptions.ProfileNotFound:\n print(\"No valid AWS credentials file with [default] or [webshot].\")\n return\n \n client = session.client(\"s3\")\n print(f\"Uploading {file_name} to bucket {bucket_name}...\")\n try:\n client.upload_file(file_name, bucket_name, file_name)\n print(\"Uploaded successfully to bucket.\")\n except Exception as e:\n # Not good Exception handling, I know, but it's just an exercise. :)\n print(e)",
"def upload_dictionary(self, bucket_name, file_name, dictionary):\n s3_object = self.s3.Object(bucket_name, file_name)\n s3_object.put(Body=json.dumps(dictionary))",
"def _upload(self, key, url) -> str:\n bucket = self.config[\"s3\"][\"bucket\"]\n try:\n self.s3.head_object(Bucket=bucket, Key=key)\n except ClientError:\n resp = self._get(url)\n self.s3.upload_fileobj(\n BytesIO(resp.content),\n bucket,\n key,\n ExtraArgs={\"StorageClass\": \"STANDARD_IA\"},\n )\n\n resp = self.s3.put_object_acl(ACL=\"public-read\", Bucket=bucket, Key=key)\n if resp is None:\n capture_message(f\"Failed to set object ACL for {bucket}/{key}\")\n\n return self.config[\"s3\"][\"object_url\"].format(\n bucket=self.config[\"s3\"][\"bucket\"],\n region=self.config[\"s3\"][\"region\"],\n filekey=key,\n )",
"def save_to_s3(bucket_name, file_name, data):\n\n s3 = boto3.resource('s3')\n obj = s3.Object(bucket_name, file_name)\n resp = obj.put(Body=json.dumps(data))\n return resp",
"def object_upload():\n # SELECT BUCKET\n if not (bucket := select_bucket('Which bucket would you like to upload the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT FILE\n my_file = Path(input('What is the full path to the file you wish to upload: '))\n if not my_file.is_file():\n input(f'{my_file} is not a valid file path. Press enter to go back to the main menu.')\n return\n\n # UPLOAD FILE\n try:\n s3.meta.client.upload_file(str(my_file), bucket, my_file.name)\n print(f'{str(my_file)} has been uploaded to {bucket}.')\n except ClientError as e:\n print(e)\n print('Uh oh. Something went wrong...\\n')\n\n input('Press enter to continue.')",
"def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e",
"def _write(self, filename):\n\n loc = self.config[\"data_specs\"][\"out_loc\"] \n if \"s3://\" in loc.lower():\n s3 = boto3.resource('s3')\n splitted = loc.split(\"/\")\n bucket = splitted[2]\n key = \"/\".join(splitted[3:])\n key_divider = \"/\" if splitted[-1] else \"\"\n destination = \"{0}{1}{2}\".format(key, key_divider, filename)\n if filename.split(\".\")[-1] in [\"obj\", \"json\"]:\n with open(\"{0}/{1}\".format(tmpdir, filename), \"rb\") as data:\n s3.meta.client.upload_fileobj(data, bucket, destination)\n else:\n s3.meta.client.upload_file(\"{0}/{1}\".format(tmpdir, filename), bucket, destination)\n else:\n shutil.copyfileobj(\n open(\"{0}/{1}\".format(tmpdir, filename), \"rb\"), \n open(\"{0}/{1}\".format(\n loc[:-1] if loc[-1] == \"/\" else loc, \n filename), \"wb\")) \n os.remove(\"{0}/{1}\".format(tmpdir, filename))",
"def upload_file(self, source_name, target_name, bucket_name):\n self.client.upload_file(\n source_name,\n bucket_name,\n target_name\n )",
"def create_s3_file(s3_bucket: str, s3_key: str, data: str) -> None:\n s3_client.put_object(Bucket=s3_bucket, Key=s3_key, Body=data)",
"def upload_file(self, file_name, file_path):\n try:\n self.client.fput_object(bucket_name=self.bucket,\n object_name=file_name,\n file_path=file_path)\n except BaseException:\n self.log.error('Error uploading file {} to bucket {}'.format(file_path, self.bucket), exc_info=True)\n raise"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Upload CloudFormation template to S3 bucket. | def upload_cfn_template(self, template_body, template_name, format=S3FileFormat.YAML):
return self.upload_file(
file_type=S3FileType.TEMPLATES, content=template_body, file_name=template_name, format=format
) | [
"def object_upload():\n # SELECT BUCKET\n if not (bucket := select_bucket('Which bucket would you like to upload the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT FILE\n my_file = Path(input('What is the full path to the file you wish to upload: '))\n if not my_file.is_file():\n input(f'{my_file} is not a valid file path. Press enter to go back to the main menu.')\n return\n\n # UPLOAD FILE\n try:\n s3.meta.client.upload_file(str(my_file), bucket, my_file.name)\n print(f'{str(my_file)} has been uploaded to {bucket}.')\n except ClientError as e:\n print(e)\n print('Uh oh. Something went wrong...\\n')\n\n input('Press enter to continue.')",
"def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e",
"def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response",
"def push(\n template_id, osf_project, osf_user, osf_password, osf_overwrite, path, nprocs,\n):\n from .osf import upload as _upload\n path = Path(path or f\"tpl-{template_id}\")\n\n if not path.exists():\n raise click.UsageError(f\"<{path}> does not exist.\")\n\n _upload(\n template_id, osf_project, osf_user, osf_password, osf_overwrite, path, nprocs,\n )",
"def deploy_to_s3(self):\r\n self.tempdir = tempfile.mkdtemp('s3deploy')\r\n\r\n for keyname, absolute_path in self.find_file_paths():\r\n self.s3_upload(keyname, absolute_path)\r\n\r\n shutil.rmtree(self.tempdir, True)\r\n return True",
"def upload(src, dest_bucket, dest_object):\n # TODO\n pass",
"def _UploadToStageBucket(region, function_name, zip_file_path, stage_bucket):\n dest_object = storage_util.ObjectReference.FromBucketRef(\n storage_util.BucketReference.FromArgument(stage_bucket),\n '{}-{}-{}.zip'.format(\n region, function_name,\n ''.join(random.choice(string.ascii_lowercase) for _ in range(12))))\n storage_api.StorageClient().CopyFileToGCS(zip_file_path, dest_object)\n return dest_object",
"def upload_to_S3(bucket, key, file_object):\n k = Key(bucket)\n k.key = key\n k.set_contents_from_file(file_object)\n k.set_acl('public-read')\n return k.generate_url(expires_in=0, query_auth=False, force_http=True)",
"def uploadFileToS3(self, filename):\n destDir = '' # Root folder of the S3 bucket\n destpath = os.path.join(destDir, os.path.basename(filename))\n print 'Uploading %s to Amazon S3 bucket %s' % (filename, self.bucket_name)\n\n k = Key(self.bucket)\n k.key = destpath\n k.set_contents_from_filename(filename, reduced_redundancy=True)",
"def upload_to_bucket(contents):\n assert isinstance(contents, (dict)), f\"Expected dict but got {type(contents)}\"\n storage_client = storage.Client()\n bucket_name = config.PROCESSED_BUCKET_NAME\n bucket = storage_client.lookup_bucket(bucket_name)\n\n if bucket is None:\n bucket = storage_client.create_bucket(bucket_name)\n logger.debug(\"Bucket {} created.\".format(bucket.name))\n else:\n logger.debug(\"Bucket {} already exists.\".format(bucket.name))\n\n filename = utils.generate_filename()\n term_code = next(iter(contents))\n\n lambda_filename = write_lambda_file(filename, contents)\n\n blob = bucket.blob(filename)\n # uploads the file in the cloud function to cloud storage\n blob.upload_from_filename(lambda_filename)\n renamed_filename = f\"{term_code}/{filename}\"\n bucket.rename_blob(blob, renamed_filename)\n\n logger.debug(\"File {} uploaded to {}.\".format(renamed_filename, bucket_name))",
"def upload_bootstrapped_file(self):\n AWSApi.instance().s3.put_object(\n bucket_name=self.name,\n body=\"bucket is configured successfully.\",\n key=\"/\".join([self._root_directory, self._bootstrapped_file_name]),\n )",
"def create_s3_file(s3_bucket: str, s3_key: str, data: str) -> None:\n s3_client.put_object(Bucket=s3_bucket, Key=s3_key, Body=data)",
"def __init__(self):\n self.conn = Connection().cloudformation_connection()\n self.web_bucket_template = '{\"AWSTemplateFormatVersion\":\"2010-09-09\",\"Description\":\"AWS CloudFormation Sample Template S3_Website_Bucket_With_Retain_On_Delete: Sample template showing how to create a publicly accessible S3 bucket configured for website access with a deletion policy of retail on delete. **WARNING** This template creates an S3 bucket that will NOT be deleted when the stack is deleted. You will be billed for the AWS resources used if you create a stack from this template.\",\"Resources\":{\"S3Bucket\":{\"Type\":\"AWS::S3::Bucket\",\"Properties\":{\"AccessControl\":\"PublicRead\",\"WebsiteConfiguration\":{\"IndexDocument\":\"index.html\",\"ErrorDocument\":\"error.html\"}},\"DeletionPolicy\":\"Retain\"}},\"Outputs\":{\"WebsiteURL\":{\"Value\":{\"Fn::GetAtt\":[\"S3Bucket\",\"WebsiteURL\"]},\"Description\":\"URL for website hosted on S3\"},\"S3BucketSecureURL\":{\"Value\":{\"Fn::Join\":[\"\",[\"https://\",{\"Fn::GetAtt\":[\"S3Bucket\",\"DomainName\"]}]]},\"Description\":\"Name of S3 bucket to hold website content\"}}}'",
"def upload_file(s3_resource, bucket_name, file, key_name, log=True):\n bucket = s3_resource.Bucket(bucket_name)\n bucket.upload_file(file, key_name)\n if log:\n logger.info(f'{file} uploaded to {bucket_name}/{key_name}')",
"def upload_dictionary(self, bucket_name, file_name, dictionary):\n s3_object = self.s3.Object(bucket_name, file_name)\n s3_object.put(Body=json.dumps(dictionary))",
"def deploy_website_to_target_bucket(event, context, target_bucket, files):\n\n print(f'Starting admin website deployment to {target_bucket} bucket')\n\n try: \n for webSiteFile in files:\n with open(webSiteFile) as f:\n content = f.read()\n\n encoded_string = content.encode(\"utf-8\")\n website_key = os.path.relpath(webSiteFile, '/tmp/website-contents') \n guessed_mime_type = mimetypes.guess_type(webSiteFile)\n \n if website_key.startswith('../'):\n file_key = website_key[len('../'):]\n else:\n file_key = website_key\n \n print('Key being uploaded to S3: ' + file_key)\n\n if guessed_mime_type is None:\n raise Exception(\"Failed to guess mimetype\")\n \n mime_type = guessed_mime_type[0] \n \n if mime_type is None:\n mime_type = 'binary/octet-stream'\n \n s3.Bucket(target_bucket).put_object(\n Key=file_key, \n Body=encoded_string,\n ContentType=mime_type\n )\n\n print(f'{file_key} uploaded to {target_bucket}')\n\n print(f'Admin website deployed successfully to {target_bucket} bucket') \n except ClientError as ex: \n print(f'Target Bucket {target_bucket} with error: {ex}') \n cfnResponse.send(event, context, cfnResponse.FAILED, {}, \"CustomResourcePhysicalID\")",
"def writeToBucket(file_name, bucket_name):\n try:\n print(\"Looking for [webshot] section in AWS credentials file...\")\n session = boto3.Session(profile_name=\"webshot\")\n except botocore.exceptions.ProfileNotFound:\n try:\n print(\"Using [default] credentials in AWS credentials file...\")\n session = boto3.Session(profile_name=\"default\")\n except botocore.exceptions.ProfileNotFound:\n print(\"No valid AWS credentials file with [default] or [webshot].\")\n return\n \n client = session.client(\"s3\")\n print(f\"Uploading {file_name} to bucket {bucket_name}...\")\n try:\n client.upload_file(file_name, bucket_name, file_name)\n print(\"Uploaded successfully to bucket.\")\n except Exception as e:\n # Not good Exception handling, I know, but it's just an exercise. :)\n print(e)",
"def upload(self, local_file_path, s3_bucket_name, s3_file_path):\n self.s3_client.upload_file(local_file_path,\n s3_bucket_name,\n s3_file_path)",
"def _write(self, filename):\n\n loc = self.config[\"data_specs\"][\"out_loc\"] \n if \"s3://\" in loc.lower():\n s3 = boto3.resource('s3')\n splitted = loc.split(\"/\")\n bucket = splitted[2]\n key = \"/\".join(splitted[3:])\n key_divider = \"/\" if splitted[-1] else \"\"\n destination = \"{0}{1}{2}\".format(key, key_divider, filename)\n if filename.split(\".\")[-1] in [\"obj\", \"json\"]:\n with open(\"{0}/{1}\".format(tmpdir, filename), \"rb\") as data:\n s3.meta.client.upload_fileobj(data, bucket, destination)\n else:\n s3.meta.client.upload_file(\"{0}/{1}\".format(tmpdir, filename), bucket, destination)\n else:\n shutil.copyfileobj(\n open(\"{0}/{1}\".format(tmpdir, filename), \"rb\"), \n open(\"{0}/{1}\".format(\n loc[:-1] if loc[-1] == \"/\" else loc, \n filename), \"wb\")) \n os.remove(\"{0}/{1}\".format(tmpdir, filename))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Upload custom resources to S3 bucket. | def upload_resources(self, resource_dir, custom_artifacts_name):
for res in os.listdir(resource_dir):
path = os.path.join(resource_dir, res)
if os.path.isdir(path):
AWSApi.instance().s3.upload_fileobj(
file_obj=zip_dir(os.path.join(resource_dir, res)),
bucket_name=self.name,
key=self.get_object_key(S3FileType.CUSTOM_RESOURCES, custom_artifacts_name),
)
elif os.path.isfile(path):
AWSApi.instance().s3.upload_file(
file_path=os.path.join(resource_dir, res),
bucket_name=self.name,
key=self.get_object_key(S3FileType.CUSTOM_RESOURCES, res),
) | [
"def upload_to_s3(file_name, bucket, object_name):\n print(file_name, bucket, object_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n return response",
"def upload(src, dest_bucket, dest_object):\n # TODO\n pass",
"def object_upload():\n # SELECT BUCKET\n if not (bucket := select_bucket('Which bucket would you like to upload the file to: ')):\n input('Invalid bucket. Press enter to go back to the main menu.')\n return\n\n # SELECT FILE\n my_file = Path(input('What is the full path to the file you wish to upload: '))\n if not my_file.is_file():\n input(f'{my_file} is not a valid file path. Press enter to go back to the main menu.')\n return\n\n # UPLOAD FILE\n try:\n s3.meta.client.upload_file(str(my_file), bucket, my_file.name)\n print(f'{str(my_file)} has been uploaded to {bucket}.')\n except ClientError as e:\n print(e)\n print('Uh oh. Something went wrong...\\n')\n\n input('Press enter to continue.')",
"def upload_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.put_object(\n Body=os.environ['TF_VAR_elastic_beanstalk_s3_key'],\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except Exception as e:\n raise e",
"def _deploy_to_s3():\n s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'\n s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude \"*\" --include-from gzip_types.txt put gzip/ %s'\n\n for bucket in env.s3_buckets:\n env.s3_bucket = bucket\n local(s3cmd % ('s3://%(s3_bucket)s/' % env))\n local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))",
"def deploy_to_s3(self):\r\n self.tempdir = tempfile.mkdtemp('s3deploy')\r\n\r\n for keyname, absolute_path in self.find_file_paths():\r\n self.s3_upload(keyname, absolute_path)\r\n\r\n shutil.rmtree(self.tempdir, True)\r\n return True",
"def upload_artifacts(file_name, bucket_name, object_name):\n print('Uploading artifacts to {}/{}'.format(bucket_name, object_name))\n try:\n s3_client.upload_file(file_name, bucket_name, object_name)\n except Exception as e:\n print('Failed to upload artifacts\\nException: {}'.format(e))\n sys.exit(1)\n print('Successfully uploaded artifacts')",
"def save_to_s3(bucket_name, file_name, data):\n\n s3 = boto3.resource('s3')\n obj = s3.Object(bucket_name, file_name)\n resp = obj.put(Body=json.dumps(data))\n return resp",
"def archiveS3Files(**kwargs):\n keys = kwargs.keys()\n if (\"trg_bucket\" in keys and \"trg_path\" in keys and \"src_bucket\" in keys ):\n src_bucket = kwargs[\"src_bucket\"]\n trg_bucket = kwargs[\"trg_bucket\"]\n s3_files = kwargs['ti'].xcom_pull(key=\"s3_data_files\")\n s3_client = generateS3Hook(kwargs[\"aws_conn_id\"])\n for file in s3_files.split(','):\n trg_path = str(kwargs[\"trg_path\"]) + getFileName(file)\n s3_client.copy_object(source_bucket_key=file, dest_bucket_key=trg_path, source_bucket_name=src_bucket,\n dest_bucket_name=trg_bucket)\n sleep(0.5)\n s3_client.delete_objects(bucket=src_bucket,keys=file)\n else:\n raise Exception(\"Invalid Configuration\")",
"def upload_json_obj(self, prefix, file_name, json_obj):\r\n key = self._build_key(prefix, file_name)\r\n logger.info('Uploading json object to %s.', key)\r\n json_data = io.BytesIO(json.dumps(json_obj, indent=2).encode('utf-8'))\r\n self.bucket.upload_fileobj(json_data, key)\r\n return self._s3_url(key)",
"def upload_settings(config_files, bucket_name, prefix, dry_run=False) -> None:\n settings_files = etl.config.gather_setting_files(config_files)\n logger.info(\"Found %d settings file(s) to deploy\", len(settings_files))\n\n uploader = etl.s3.S3Uploader(bucket_name, dry_run=dry_run)\n for fullname in settings_files:\n object_key = os.path.join(prefix, \"config\", os.path.basename(fullname))\n uploader(fullname, object_key)",
"def upload_to_bucket(contents):\n assert isinstance(contents, (dict)), f\"Expected dict but got {type(contents)}\"\n storage_client = storage.Client()\n bucket_name = config.PROCESSED_BUCKET_NAME\n bucket = storage_client.lookup_bucket(bucket_name)\n\n if bucket is None:\n bucket = storage_client.create_bucket(bucket_name)\n logger.debug(\"Bucket {} created.\".format(bucket.name))\n else:\n logger.debug(\"Bucket {} already exists.\".format(bucket.name))\n\n filename = utils.generate_filename()\n term_code = next(iter(contents))\n\n lambda_filename = write_lambda_file(filename, contents)\n\n blob = bucket.blob(filename)\n # uploads the file in the cloud function to cloud storage\n blob.upload_from_filename(lambda_filename)\n renamed_filename = f\"{term_code}/{filename}\"\n bucket.rename_blob(blob, renamed_filename)\n\n logger.debug(\"File {} uploaded to {}.\".format(renamed_filename, bucket_name))",
"def upload_to_S3(bucket, key, file_object):\n k = Key(bucket)\n k.key = key\n k.set_contents_from_file(file_object)\n k.set_acl('public-read')\n return k.generate_url(expires_in=0, query_auth=False, force_http=True)",
"def upload_dictionary(self, bucket_name, file_name, dictionary):\n s3_object = self.s3.Object(bucket_name, file_name)\n s3_object.put(Body=json.dumps(dictionary))",
"def s3Upload(self):\n #listUploadFiles = []\n s3 = boto3.client('s3')\n\n for files in self.localFile:\n if files not in self.s3_Bucket_filesList:\n #print (\"Currently uploading: \"+files)\n self.uploadedFiles.append(files)\n #We require the full Path of the file to be given to be uploaded.\n localFilesPath = self.localFolderPath + \"\\\\\" + files\n\n try:\n s3.upload_file (localFilesPath,self.bucketName,self.bucketFolderName+files)\n except Exception as u:\n print (\"Cannot upload the Files\", u)\n quit()\n\n return self.uploadedFiles #Returning uploaded files to be used in Creation of HTML Files.",
"def replace_aws_resources(event, context, target_bucket, files, aws_resources):\n\n print(f'Setting up AWS resources to the admin website')\n \n try: \n for webSiteFile in files:\n with open(webSiteFile) as f:\n content = f.read()\n \n content = content.replace(\"REPLACE_AWS_REGION\", aws_resources['aws_region'])\n content = content.replace(\"REPLACE_USER_POOL_ID\", aws_resources['user_pool_id'])\n content = content.replace(\"REPLACE_APP_CLIENT_ID\", aws_resources['app_client_id'])\n content = content.replace(\"REPLACE_IDENTITY_POOL_ID\", aws_resources['identity_pool_id'])\n content = content.replace(\"REPLACE_PINPOINT_APP_ID\", aws_resources['pinpoint_app_id'])\n content = content.replace(\"REPLACE_APPSYNC_ENDPOINT\", aws_resources['appsync_endpoint'])\n\n encoded_string = content.encode(\"utf-8\") \n website_key = os.path.relpath(webSiteFile, '/tmp/website-contents') \n guessed_mime_type = mimetypes.guess_type(webSiteFile)\n \n if website_key.startswith('../'):\n file_key = website_key[len('../'):]\n else:\n file_key = website_key\n \n if guessed_mime_type is None:\n raise Exception(\"Failed to guess mimetype\")\n \n mime_type = guessed_mime_type[0] \n \n if mime_type is None:\n mime_type = 'binary/octet-stream'\n \n s3.Bucket(target_bucket).put_object(\n Key=file_key, \n Body=encoded_string,\n ContentType=mime_type\n )\n\n print(f'{file_key} uploaded to {target_bucket}')\n\n print(f'AWS Resources set and deployed successfully to {target_bucket} bucket') \n except ClientError as ex: \n print(f'Target Bucket {target_bucket} with error: {ex}') \n cfnResponse.send(event, context, cfnResponse.FAILED, {}, \"CustomResourcePhysicalID\")",
"def upload_file(s3_resource, bucket_name, file, key_name, log=True):\n bucket = s3_resource.Bucket(bucket_name)\n bucket.upload_file(file, key_name)\n if log:\n logger.info(f'{file} uploaded to {bucket_name}/{key_name}')",
"def uploadFileToS3(self, filename):\n destDir = '' # Root folder of the S3 bucket\n destpath = os.path.join(destDir, os.path.basename(filename))\n print 'Uploading %s to Amazon S3 bucket %s' % (filename, self.bucket_name)\n\n k = Key(self.bucket)\n k.key = destpath\n k.set_contents_from_filename(filename, reduced_redundancy=True)",
"def connect_s3(self):\n self.out('- Connecting to S3 and making bucket.\\n')\n self.s3 = boto.connect_s3()\n self.bucket = self.s3.create_bucket(self.bucket_name)\n self.bucket = self.s3.get_bucket(self.bucket_name)\n self.bucket.set_acl(self.default_acl)\n self.bucket.set_cors(self.default_cors)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an S3 presigned URL for the config file. | def get_config_presigned_url(self, config_name: str, version_id=None):
return AWSApi.instance().s3.create_presigned_url(
self.name, self.get_object_key(S3FileType.CONFIGS, config_name), version_id
) | [
"def get_config_s3_url(self, config_name):\n return self._get_file_s3_url(file_name=config_name, file_type=S3FileType.CONFIGS)",
"def _get_s3_presigned_url(input_json):\n url = input_json['url']\n return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']",
"def get_presigned_get_url(filename, config, secrets):\n from minio import Minio\n try:\n from minio.error import ResponseError\n except ImportError:\n from minio.error import S3Error as ResponseError\n \n config_startd_logging = config['StartdLogging']\n secrets_startd_logging = secrets['StartdLogging']\n\n client = Minio(config_startd_logging['url'],\n access_key=secrets_startd_logging['access_key'],\n secret_key=secrets_startd_logging['secret_key'],\n secure=True\n )\n\n try:\n return client.presigned_get_object(config_startd_logging['bucket'],\n filename)\n except ResponseError as err:\n print(err)",
"def get_presigned_put_url(filename, config, secrets):\n from minio import Minio\n try:\n from minio.error import ResponseError\n except ImportError:\n from minio.error import S3Error as ResponseError\n\n config_startd_logging = config['StartdLogging']\n secrets_startd_logging = secrets['StartdLogging']\n\n client = Minio(config_startd_logging['url'],\n access_key=secrets_startd_logging['access_key'],\n secret_key=secrets_startd_logging['secret_key'],\n secure=True\n )\n\n try:\n return client.presigned_put_object(config_startd_logging['bucket'],\n filename,\n datetime.timedelta(days=1))\n except ResponseError as err:\n print(err)",
"def get_url(filename):\n return 'https://s3-{}.amazonaws.com/{}/{}'.format(BUCKET_REGION, BUCKET_NAME, filename)",
"def _get_s3_presigned_put_url(s3_client, bucket, filepath, md5sum, lifetime_sec):\n # S3's PUT Object parameters:\n # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html\n method = \"put_object\"\n fields = {\n \"Bucket\": bucket,\n \"Key\": filepath,\n }\n\n response = s3_client.generate_presigned_url(\n ClientMethod=method,\n Params=fields,\n ExpiresIn=lifetime_sec,\n )\n\n return response",
"def generate_url(filename):\n return \"https://s3.{0}.amazonaws.com/{1}/{2}\".format(config.S3_BUCKET_REGION, config.S3_BUCKET_NAME, filename)",
"def test_s3upload_get_presigned_url(self):\n responses.add(responses.POST, \"https://30yinsv8k6.execute-api.us-east-1.amazonaws.com/prod/get-signed-url\",\n body=\"http://test/\", status=200)\n\n resp = ec2rlcore.s3upload.get_presigned_url(\"https://aws-support-uploader.s3.amazonaws.com/uploader?\"\n \"account-id=9999999999&case-id=99999999&expiration=1486577795&\"\n \"key=92e1ab350e7f5302551e0b05a89616381bb6c66\"\n \"9c9492d9acfbf63701e455ef6\", \"test\")\n\n self.assertEqual(resp, \"http://test/\")",
"def _get_s3_url(self):\n return 'https://' + CLUSTER_VIP + ':' + COHESITY_S3_PORT",
"def get_s3_signed_url():\n try:\n data = request.get_json()\n publisher = data.get('publisher', None)\n package = data.get('package', None)\n path = data.get('path', None)\n md5 = data.get('md5', None)\n if publisher is None or package is None:\n return handle_error('INVALID_INPUT',\n 'publisher or package can not be empty',\n 400)\n if md5 is None:\n return handle_error('INVALID_INPUT',\n 'md5 hash can not be empty',\n 400)\n metadata = BitStore(publisher=publisher, package=package)\n url = metadata.generate_pre_signed_put_obj_url(path, md5)\n return jsonify({'key': url}), 200\n except Exception as e:\n app.logger.error(e)\n return handle_error('GENERIC_ERROR', e.message, 500)",
"def build_s3_url(aws_key, aws_secret):\n return 's3://{key}:{secret}@S3'.format(key=aws_key, secret=aws_secret)",
"def get_aws_url(config):\n url = config.get(\"url\", AWS_URL_TEMPLATE)\n return url.format(\n region=config.get(\"region\", \"\"),\n account_id=config.get(\"account_id\", \"NO_ACCOUNT\"))",
"def get_s3_object_url(bucket_name, file_name,):\n\n session = boto3.session.Session()\n current_region = session.region_name\n url = \"https://{}.s3.{}.amazonaws.com/{}\".format(str(bucket_name),\n current_region, str(file_name))\n return url",
"def _s3_url(self, key):\r\n url_tuple = ('s3', self._bucket_name, key, '', '')\r\n return urllib.parse.urlunsplit(url_tuple)",
"def s3(self) -> 'outputs.RecordingConfigurationDestinationConfigurationS3':\n return pulumi.get(self, \"s3\")",
"def get_public_url(bucket=None, key=None, uri=None):\n if uri:\n (bucket, key) = decompose_uri(uri)\n return 'https://{}.s3.amazonaws.com/{}'.format(bucket, urllib.parse.quote(key))",
"def create_s3_put_url(key, content_type):\n url = boto3.client('s3').generate_presigned_url(\n ## TODO\n )\n\n return url",
"def s3_path(filename):\n return fs_testdir[\"s3_basepath\"]+'/'+filename",
"def _get_s3_config(self):\n bucket = self.configuration.get(\"Bucket Name\")\n if not bucket:\n raise ValueError(\"Bucket Name required to confirm configuration\")\n\n if not self.configuration.get(\"Prefix\"):\n prefix = \"\"\n else:\n prefix = self.configuration.get(\"Prefix\")\n\n return bucket, prefix"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the config file's S3 URL path in the S3 bucket. | def get_config_s3_url(self, config_name):
return self._get_file_s3_url(file_name=config_name, file_type=S3FileType.CONFIGS) | [
"def _get_s3_url(self):\n return 'https://' + CLUSTER_VIP + ':' + COHESITY_S3_PORT",
"def get_s3_object_url(bucket_name, file_name,):\n\n session = boto3.session.Session()\n current_region = session.region_name\n url = \"https://{}.s3.{}.amazonaws.com/{}\".format(str(bucket_name),\n current_region, str(file_name))\n return url",
"def s3_path(filename):\n return fs_testdir[\"s3_basepath\"]+'/'+filename",
"def _s3_url(self, key):\r\n url_tuple = ('s3', self._bucket_name, key, '', '')\r\n return urllib.parse.urlunsplit(url_tuple)",
"def s3(self) -> 'outputs.RecordingConfigurationDestinationConfigurationS3':\n return pulumi.get(self, \"s3\")",
"def load_config_from_s3(config_s3_bucket, key):\n\n s3_resource = create_resource_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n \n try:\n s3_resource.Bucket(config_s3_bucket).download_file(key, os.path.basename(key))\n except Exception as err:\n print(f\"unable to download file from {config_s3_bucket} : {str(err)}\")\n sys.exit(1)\n\n set_config(key)",
"def get_url(filename):\n return 'https://s3-{}.amazonaws.com/{}/{}'.format(BUCKET_REGION, BUCKET_NAME, filename)",
"def _get_s3_config(self):\n bucket = self.configuration.get(\"Bucket Name\")\n if not bucket:\n raise ValueError(\"Bucket Name required to confirm configuration\")\n\n if not self.configuration.get(\"Prefix\"):\n prefix = \"\"\n else:\n prefix = self.configuration.get(\"Prefix\")\n\n return bucket, prefix",
"def get_bucket_path(bucket_name, object_key):\n home = str(Path.home())\n keys = {}\n for line in open(home + '/.aws/credentials').readlines():\n if line.startswith('['):\n pass\n else:\n key, val = line.rstrip().split(' = ')\n keys[key] = val\n path = 's3://{}:{}@{}/{}'.format(keys['aws_access_key_id'],keys['aws_secret_access_key'],\n bucket_name, object_key,)\n\n #path = get_bucket_path('kg-data-raw', 'pe_compounds.csv')\n #cp = pd.read_csv(smart_open.open(path))\n\n return (path)",
"def get_aws_url(config):\n url = config.get(\"url\", AWS_URL_TEMPLATE)\n return url.format(\n region=config.get(\"region\", \"\"),\n account_id=config.get(\"account_id\", \"NO_ACCOUNT\"))",
"def s3_bucket_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"s3_bucket_name\")",
"def build_s3_url(aws_key, aws_secret):\n return 's3://{key}:{secret}@S3'.format(key=aws_key, secret=aws_secret)",
"def bucket_and_path(self, url):\n url = compat.as_str_any(url)\n if url.startswith(\"s3://\"):\n url = url[len(\"s3://\"):]\n idx = url.index(\"/\")\n bucket = url[:idx]\n path = url[(idx + 1):]\n return bucket, path",
"def s3_cache_bucket(self):\n return os.environ.get('PIP_ACCEL_S3_BUCKET')",
"def _get_s3_presigned_url(input_json):\n url = input_json['url']\n return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']",
"def get_cfg_bucket(self):\n # Compatible to old <S3::ConfigBucket>\n try:\n cfg_bucket = self.user_data['S3']['ConfigBucket']\n log.info(\"'S3::ConfigBucket' is deprecated now.\")\n return cfg_bucket\n except KeyError:\n pass\n\n try:\n cfg_uri = self.user_data['S3']['CfgURI']\n return cfg_uri\n except KeyError:\n raise IcsMetaException(\"Cannot find the 'CfgURI' in user-data.\")",
"def s3_bucket(self) -> str:\n return etl.templates.render_from_config(\n self._s3_bucket_template, context=f\"s3_bucket of schema '{self.name}'\"\n )",
"def get_config_presigned_url(self, config_name: str, version_id=None):\n return AWSApi.instance().s3.create_presigned_url(\n self.name, self.get_object_key(S3FileType.CONFIGS, config_name), version_id\n )",
"def s3_bucket_name():\n if is_local_env():\n return LOCAL_BUCKET_NAME\n\n # get data from parameter store with correct key\n # bucket_name = get_params_from_ssm()[\"CORRECT_KEY\"]\n return \"bucket_name\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get cfn template from S3 bucket. | def get_cfn_template(self, template_name, version_id=None, format=S3FileFormat.YAML):
return self._get_file(
file_type=S3FileType.TEMPLATES, file_name=template_name, version_id=version_id, format=format
) | [
"def get_cfn_template_url(self, template_name):\n return self._get_file_url(file_type=S3FileType.TEMPLATES, file_name=template_name)",
"def get_cloudformation_template(cfn_client, stack_name):\n\n response = cfn_client.get_template(StackName=stack_name)\n return response[\"TemplateBody\"]",
"def s3_bucket(self) -> str:\n return etl.templates.render_from_config(\n self._s3_bucket_template, context=f\"s3_bucket of schema '{self.name}'\"\n )",
"def get_string_from_s3(bucket, s3filename):\n\ts3_bucket = get_s3_bucket(bucket)\n\tfile_list = s3_bucket.list(s3filename)\n\n\tcontents = []\n\tfor k in file_list:\n\t\tcontents.append(k.get_contents_as_string())\n\tcontents = ''.join(contents)\n\t\n\treturn contents",
"def get_bucket(bucket, bucket_name, create_if_needed=True):\n return _objstore_backend.get_bucket(bucket, bucket_name,\n create_if_needed)",
"def get_bucket_resource_from_s3_response(bucket_dict, bucket_name):\n requester_pays = _get_error_or_value(bucket_dict.get('Payer'))\n if requester_pays == 'Requester':\n requester_pays = True\n elif requester_pays == 'BucketOwner':\n requester_pays = False\n\n versioning_enabled = _get_error_or_value(bucket_dict.get('Versioning'))\n if isinstance(versioning_enabled, dict):\n if versioning_enabled.get('Status') == 'Enabled':\n versioning_enabled = True\n else:\n versioning_enabled = None\n\n return s3_resource_reference.S3BucketResource(\n storage_url.CloudUrl(storage_url.ProviderPrefix.S3, bucket_name),\n acl=_get_error_or_value(bucket_dict.get('ACL')),\n cors_config=_get_error_or_value(bucket_dict.get('CORSRules')),\n lifecycle_config=_get_error_or_value(\n bucket_dict.get('LifecycleConfiguration')),\n logging_config=_get_error_or_value(bucket_dict.get('LoggingEnabled')),\n requester_pays=requester_pays,\n location=_get_error_or_value(bucket_dict.get('LocationConstraint')),\n metadata=bucket_dict,\n versioning_enabled=versioning_enabled,\n website_config=_get_error_or_value(bucket_dict.get('Website')))",
"def get_file_from_s3_trigger(event) -> Tuple[str, str]:\n if \"Records\" not in event and \"s3\" not in event[\"Records\"][0]:\n return (None, None)\n\n bucket = event[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n file_path = unquote_plus(event[\"Records\"][0][\"s3\"][\"object\"][\"key\"])\n # print (f\"s3.event: bucket={bucket}, file={file_path}\")\n return (bucket, file_path)",
"def _get_key( s3_path ):\n\n return S3Key(\n bucket = _get_bucket(),\n name = s3_path )",
"def get_template(client, stack):\n try:\n response = client.get_template(\n StackName=stack\n )\n template = response[\"TemplateBody\"]\n if isinstance(template, dict):\n template = json.dumps(template, indent=2, sort_keys=True)\n return template\n except botocore.exceptions.ClientError as e:\n click.echo(e.response[\"Error\"][\"Message\"])\n sys.exit(1)",
"def download_template(stackname):\n try:\n conn = core.boto_conn(stackname, 'cloudformation', client=True)\n data = conn.get_template(StackName=stackname)['TemplateBody']\n return json.dumps(data)\n except botocore.exceptions.ClientError as exc:\n not_found_message = \"Stack with id %s does not exist\" % stackname\n if exc.response['Error']['Message'] == not_found_message:\n return\n raise exc",
"def get_key(self, key, bucket_name=None):\n if not bucket_name:\n (bucket_name, key) = self.parse_s3_url(key)\n \n obj = self.get_resource_type('s3').Object(bucket_name, key)\n obj.load()\n return obj",
"def get_bucket(self, bucket_name=None):\n s3_resource = self.get_resource_type('s3')\n return s3_resource.Bucket(bucket_name)",
"def _bucket_resource_from_metadata(metadata):\n url = storage_url.CloudUrl(scheme=storage_url.ProviderPrefix.GCS,\n bucket_name=metadata.name)\n return gcs_resource_reference.GcsBucketResource(\n url, etag=metadata.etag, metadata=metadata)",
"def retrieve(self, bucket, key, gzipped=True):\n object = boto3.resource('s3').Object(bucket, key)\n body = object.get()['Body']\n try:\n raw = body.read()\n if gzipped:\n return gzip.decompress(raw)\n else:\n return raw\n finally:\n body.close()",
"def test_template(self):\n args = {'owner': 'airflow', 'start_date': self.today}\n dag = DAG('test_dag_id', default_args=args)\n\n with dag:\n bucket_key_tmpl = \"my_s3_prefix/{{ execution_date.strftime('%Y/%m/%d') }}\"\n\n s = CatchUpS3KeySensor(\n task_id='s3_key_sensor',\n bucket_key=bucket_key_tmpl,\n bucket_name=self.bucket_name,\n dag=dag\n )\n result = s.render_template('', s.bucket_key, {\"execution_date\": self.today})\n self.assertEqual(result, 'my_s3_prefix/{}'.format(self.today.strftime(\"%Y/%m/%d\")))",
"def get_temp_bucket(region=None, s3_resource=None, bucket_identifier=None):\n if region is None:\n region = __session.region_name\n if bucket_identifier is None:\n bucket_identifier = sts.account_id()\n bucket = '{}-larry-{}'.format(bucket_identifier, region)\n create_bucket(bucket, region=region, s3_resource=s3_resource)\n return bucket",
"def get_s3_resource():\n\n s3_creds = get_s3_credentials(\"conf/local/credentials.yaml\")\n\n session = boto3.Session(\n aws_access_key_id=s3_creds['aws_access_key_id'],\n aws_secret_access_key=s3_creds['aws_secret_access_key']\n )\n\n s3 = session.client('s3')\n\n return s3",
"def get_file_from_object_storage(client, bucket_name, file_to_get):\n\n print('Get file {} from bucket {}'.format(file_to_get, bucket_name))\n object_to_get = get_object_storage_filename(file_to_get)\n\n client.fget_object(bucket_name=bucket_name,\n object_name=object_to_get,\n file_path=file_to_get)",
"def s3_bucket_name():\n if is_local_env():\n return LOCAL_BUCKET_NAME\n\n # get data from parameter store with correct key\n # bucket_name = get_params_from_ssm()[\"CORRECT_KEY\"]\n return \"bucket_name\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the CFN template HTTP URL from the S3 bucket. | def get_cfn_template_url(self, template_name):
return self._get_file_url(file_type=S3FileType.TEMPLATES, file_name=template_name) | [
"def get_s3_object_url(bucket_name, file_name,):\n\n session = boto3.session.Session()\n current_region = session.region_name\n url = \"https://{}.s3.{}.amazonaws.com/{}\".format(str(bucket_name),\n current_region, str(file_name))\n return url",
"def get_url(filename):\n return 'https://s3-{}.amazonaws.com/{}/{}'.format(BUCKET_REGION, BUCKET_NAME, filename)",
"def _get_s3_url(self):\n return 'https://' + CLUSTER_VIP + ':' + COHESITY_S3_PORT",
"def generate_url(filename):\n return \"https://s3.{0}.amazonaws.com/{1}/{2}\".format(config.S3_BUCKET_REGION, config.S3_BUCKET_NAME, filename)",
"def compose_uri(bucket, key):\n return \"s3://{}/{}\".format(bucket, key)",
"def _s3_url(self, key):\r\n url_tuple = ('s3', self._bucket_name, key, '', '')\r\n return urllib.parse.urlunsplit(url_tuple)",
"def _get_s3_presigned_url(input_json):\n url = input_json['url']\n return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']",
"def get_public_url(bucket=None, key=None, uri=None):\n if uri:\n (bucket, key) = decompose_uri(uri)\n return 'https://{}.s3.amazonaws.com/{}'.format(bucket, urllib.parse.quote(key))",
"def get_s3_signed_url():\n try:\n data = request.get_json()\n publisher = data.get('publisher', None)\n package = data.get('package', None)\n path = data.get('path', None)\n md5 = data.get('md5', None)\n if publisher is None or package is None:\n return handle_error('INVALID_INPUT',\n 'publisher or package can not be empty',\n 400)\n if md5 is None:\n return handle_error('INVALID_INPUT',\n 'md5 hash can not be empty',\n 400)\n metadata = BitStore(publisher=publisher, package=package)\n url = metadata.generate_pre_signed_put_obj_url(path, md5)\n return jsonify({'key': url}), 200\n except Exception as e:\n app.logger.error(e)\n return handle_error('GENERIC_ERROR', e.message, 500)",
"def md5_to_s3_url(md5, bucket_base, bucket_scheme='multibucket'):\n if bucket_scheme == 'simple':\n url = \"s3://{0}/{1}\".format(\n bucket_base,\n md5\n )\n elif bucket_scheme == 'multibucket':\n url = \"s3://{0}.{1}/{2}\".format(\n md5_to_bucket_shard(md5),\n bucket_base,\n md5\n )\n return url",
"def get_aws_url(config):\n url = config.get(\"url\", AWS_URL_TEMPLATE)\n return url.format(\n region=config.get(\"region\", \"\"),\n account_id=config.get(\"account_id\", \"NO_ACCOUNT\"))",
"def build_s3_url(aws_key, aws_secret):\n return 's3://{key}:{secret}@S3'.format(key=aws_key, secret=aws_secret)",
"def _get_s3_presigned_put_url(s3_client, bucket, filepath, md5sum, lifetime_sec):\n # S3's PUT Object parameters:\n # https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html\n method = \"put_object\"\n fields = {\n \"Bucket\": bucket,\n \"Key\": filepath,\n }\n\n response = s3_client.generate_presigned_url(\n ClientMethod=method,\n Params=fields,\n ExpiresIn=lifetime_sec,\n )\n\n return response",
"def md5_to_http_url(md5, bucket_base, bucket_scheme='multibucket', s3_endpoint='s3.amazonaws.com'):\n if bucket_scheme == 'simple':\n url = \"http://{0}/{1}/{2}\".format(\n s3_endpoint,\n bucket_base,\n md5\n )\n elif bucket_scheme == 'multibucket':\n url = \"http://{1}.{2}.{0}/{3}\".format(\n s3_endpoint,\n md5_to_bucket_shard(md5),\n bucket_base,\n md5\n )\n return url",
"def _StorageURI(self, bucket, object_name=None):\n\n if object_name is not None:\n path = '%s/%s' % (bucket, object_name)\n else:\n path = bucket\n storage_uri = boto.storage_uri(path, 'gs')\n return storage_uri",
"def s3_bucket(self) -> str:\n return etl.templates.render_from_config(\n self._s3_bucket_template, context=f\"s3_bucket of schema '{self.name}'\"\n )",
"def create_s3_put_url(key, content_type):\n url = boto3.client('s3').generate_presigned_url(\n ## TODO\n )\n\n return url",
"def test_str_bucket(self):\n test_uri = b\"https://0.0.0.0:12345/\"\n endpoint = AWSServiceEndpoint(uri=test_uri)\n bucket = \"\\N{SNOWMAN}\"\n context = client.s3_url_context(endpoint, bucket)\n url = context.get_url()\n self.assertIsInstance(url, bytes)\n self.assertEqual(\n test_uri + quote(bucket.encode(\"utf-8\"), safe=b\"\") + b\"/\",\n url,\n )",
"def gcp_path(path, bucket_name):\n\n return \"gs://{}/{}\".format(bucket_name, path)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse an S3 URL to get the bucket name and object name. | def parse_bucket_url(url):
match = re.match(r"s3://(.*?)/(.*)", url)
if match:
bucket_name = match.group(1)
object_key = match.group(2)
object_name = object_key.split("/")[-1]
else:
raise Exception("Invalid S3 url: {0}".format(url))
return {"bucket_name": bucket_name, "object_key": object_key, "object_name": object_name} | [
"def parse_s3_url(s3url):\n parsed_url = urlparse(s3url)\n \n if not parsed_url.netloc:\n raise AirflowException('Please provide a bucket_name instead of \"{s3url}\"'.format(s3url=s3url))\n\n bucket_name = parsed_url.netloc\n key = parsed_url.path.strip('/')\n\n return bucket_name, key",
"def parse_s3_url(self, s3_url):\n\n self.logger.debug('Parsing S3 URL: {}'.format(s3_url))\n\n parse_result = urlparse(s3_url)\n\n scheme = parse_result.scheme\n if parse_result.scheme == 's3':\n bucket = parse_result.netloc\n key = parse_result.path.lstrip('/')\n\n else:\n path = parse_result.path.lstrip('/')\n if not ('.s3' in parse_result.netloc and \\\n parse_result.netloc.endswith('.amazonaws.com')):\n bucket = parse_result.netloc.split('.', maxsplit=1)[0]\n key = path\n else:\n bucket, key = path.split('/', maxsplit=1)\n\n self.logger.info('Bucket: {}, Key: {}'.format(bucket, key))\n return S3UrlParseResult(scheme=scheme, region=None,\n bucket=bucket, key=key)",
"def bucket_and_path(self, url):\n url = compat.as_str_any(url)\n if url.startswith(\"s3://\"):\n url = url[len(\"s3://\"):]\n idx = url.index(\"/\")\n bucket = url[:idx]\n path = url[(idx + 1):]\n return bucket, path",
"def parse_s3_uri(uri):\n if not isinstance(uri, string_types):\n return None\n\n url = urlparse(uri)\n query = parse_qs(url.query)\n\n if url.scheme == 's3' and url.netloc and url.path:\n s3_pointer = {\n 'Bucket': url.netloc,\n 'Key': url.path.lstrip('/')\n }\n if 'versionId' in query and len(query['versionId']) == 1:\n s3_pointer['Version'] = query['versionId'][0]\n return s3_pointer\n else:\n return None",
"def get_bucket_key(loc):\n p = urllib.parse.urlparse(loc)\n if p.scheme=='s3':\n return (p.netloc, p.path[1:])\n if p.scheme=='':\n if p.path.startswith(\"/\"):\n (ignore,bucket,key) = p.path.split('/',2)\n else:\n (bucket,key) = p.path.split('/',1)\n return (bucket,key)\n assert ValueError(\"{} is not an s3 location\".format(loc))",
"def parse_s3_location(s3_location):\n try:\n regex = r'\\s*s3n://(.+?)/(.+)'\n return re.match(regex, s3_location).groups()\n except:\n raise Exception('Invalid s3 location: %s' % s3_location)",
"def split_s3_path(self, path):\n if path.startswith('s3://'):\n path_ = path.split('//')[1]\n bucket = path_.split('/')[0]\n key = '/'.join(path_.split('/')[1:])\n return bucket, key\n else:\n logger.info('path does not start with s3://')\n return None",
"def _s3_url(self, key):\r\n url_tuple = ('s3', self._bucket_name, key, '', '')\r\n return urllib.parse.urlunsplit(url_tuple)",
"def get_bucket_and_key(s3uri):\n pos = s3uri.find('/', 5)\n bucket = s3uri[5: pos]\n key = s3uri[pos + 1:]\n return bucket, key",
"def get_s3_object_url(bucket_name, file_name,):\n\n session = boto3.session.Session()\n current_region = session.region_name\n url = \"https://{}.s3.{}.amazonaws.com/{}\".format(str(bucket_name),\n current_region, str(file_name))\n return url",
"def _parse(self):\n\n # (scheme, bucket, netloc, path)\n new_style_match = re.match(\n r\"^(s3|https?):\\/\\/([^\\.\\s]+)\\.(s3.*\\.amazonaws\\.com)\\/(.+)$\",\n self._s3_url,\n )\n if new_style_match:\n # New style url (bucket in netloc).\n new_style_groups = new_style_match.groups()\n\n self.scheme = new_style_groups[0]\n self.netloc = new_style_groups[2]\n self.bucket = new_style_groups[1]\n self.key = new_style_groups[3]\n else:\n # Old style url (bucket in path).\n parse_object = urlparse(self._s3_url)\n\n self.scheme = parse_object.scheme\n self.netloc = parse_object.netloc\n # eg ['', 'bucket', 'key/name/goes/here']\n path_parts = parse_object.path.split(\"/\", 2)\n\n # Do not add preceding forward slashes.\n self.bucket = path_parts[1]\n self.key = path_parts[2]",
"def s3_bucket_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"s3_bucket_name\")",
"def s3_bucket_name():\n if is_local_env():\n return LOCAL_BUCKET_NAME\n\n # get data from parameter store with correct key\n # bucket_name = get_params_from_ssm()[\"CORRECT_KEY\"]\n return \"bucket_name\"",
"def _get_bucket_and_key(context, url):\n url = unquote(url)\n\n bucket = context.config.get('TC_AWS_LOADER_BUCKET')\n if not bucket:\n bucket = _get_bucket(url)\n url = '/'.join(url.lstrip('/').split('/')[1:])\n\n key = _get_key(url, context)\n\n return bucket, key",
"def get_key(self, key, bucket_name=None):\n if not bucket_name:\n (bucket_name, key) = self.parse_s3_url(key)\n \n obj = self.get_resource_type('s3').Object(bucket_name, key)\n obj.load()\n return obj",
"def get_bucket_path(bucket_name, object_key):\n home = str(Path.home())\n keys = {}\n for line in open(home + '/.aws/credentials').readlines():\n if line.startswith('['):\n pass\n else:\n key, val = line.rstrip().split(' = ')\n keys[key] = val\n path = 's3://{}:{}@{}/{}'.format(keys['aws_access_key_id'],keys['aws_secret_access_key'],\n bucket_name, object_key,)\n\n #path = get_bucket_path('kg-data-raw', 'pe_compounds.csv')\n #cp = pd.read_csv(smart_open.open(path))\n\n return (path)",
"def from_s3_uri(self, uri=None, validate=False):\n bucketName = None\n dirPath = None\n fileName = None\n proto = re.compile(r's3:\\/\\/(.*)$')\n if uri is None:\n raise ValueError(\"URI cannot be empty\")\n resourcepath = proto.search(uri)\n if resourcepath is None:\n raise ValueError(\"Unable resolve URI\")\n resourcepath = resourcepath.group(1)\n firstSlash = resourcepath.find('/')\n if firstSlash is -1:\n raise ValueError(\"Unable to resolve bucketName\")\n try:\n bucketName = resourcepath[0:firstSlash]\n origDirPath = resourcepath[firstSlash + 1:]\n dirPath = os.path.dirname(origDirPath)\n fileName = os.path.basename(origDirPath)\n return (bucketName, dirPath, fileName)\n except Exception as e:\n raise ValueError(\n \"Unable to resolve directoryPath or fileName: {}\".format(e))",
"def _get_s3_presigned_url(input_json):\n url = input_json['url']\n return url['scheme']+'://'+url['host']+url['path']+'?'+url['query']",
"def parse_blob_url(url: str) -> Tuple[str, str]:\n try:\n parsed_url = urlparse(url.rstrip(\"/\"))\n account_name = parsed_url.netloc.split(\".\")[0]\n path_blob = parsed_url.path.lstrip(\"/\").split(\"/\", 1)\n container_name = path_blob[-2]\n except Exception as failed_parse:\n raise ValueError(f\"Invalid blob URL: {url}\") from failed_parse\n\n return account_name, container_name"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the content formatted according to the given S3 file format. If the format is not in the S3FileFormat Enum, the content is returned without any formatting. | def format_content(content, s3_file_format: S3FileFormat):
if s3_file_format == S3FileFormat.YAML:
return yaml.dump(content)
elif s3_file_format == S3FileFormat.JSON:
return json.dumps(content)
elif s3_file_format == S3FileFormat.MINIFIED_JSON:
return json.dumps(content, separators=(",", ":"))
else:
return content | [
"def getS3FileContent(Bucket=None, Key=None, Size=None, Date=None):\n try:\n DATA = str() # return variable\n FLAG = None # return variable\n BUCKET = Bucket\n KEY = Key\n SIZE = int(Size)\n LLIMIT = int(1024*1024*1024*12) # x\n ULIMIT = int(1024*1024*1024*48) # 4x\n MATCH = \".\"+KEY.split(\".\")[-1] in _ORC+_HFI\n try:\n S3 = boto3.resource('s3')\n # in case of exception\n except Exception as e:\n raise ConnectionError(\"Failed to connect with S3.\")\n # size smaller then lower limit\n if (SIZE < ULIMIT) and (SIZE < LLIMIT) and not MATCH:\n DATA = S3.Bucket(BUCKET).Object(KEY).get()[\"Body\"].read()\n FLAG = True # set FLAG\n # size greater then lower limit\n elif (SIZE < ULIMIT) and (SIZE > LLIMIT) or MATCH:\n # create temp directory\n UUID = str(uuid.uuid4())\n TEMP = SYSTEM+\"TEMP\"+SEP+UUID+SEP\n os.makedirs(TEMP)\n # download file\n DATA = TEMP + KEY.split(\"/\")[-1] # create filename\n S3.Bucket(BUCKET).download_file(KEY, DATA)\n FLAG = False # set FLAG\n # exception as file is not under defined limit.\n else:\n DATA = \"SIZE #{}\".format(ULIMIT)\n # in case of exception\n except Exception as e:\n DATA = \"FileFetchFailed\"\n logging.critical(\"`getS3FileContent`: Failed to fetch file `{}`\\n{}\" \\\n .format(KEY, e))\n # return content and FLAG\n return(DATA, FLAG)",
"def _get_json_file_and_etag_from_s3(self, key: str) -> Tuple[Union[dict, list], str]:\n response = self._s3_client.get_object(Bucket=self.s3_bucket_name, Key=key)\n return json.loads(response[\"Body\"].read().decode(\"utf-8\")), response[\"ETag\"]",
"def aws_s3_compatible_data_source(self) -> 'outputs.AwsS3CompatibleDataResponse':\n return pulumi.get(self, \"aws_s3_compatible_data_source\")",
"def s3_download(self, bucket_name, key, options):\n encoding = 'utf-8'\n if 'encoding' in options:\n encoding = options['encoding']\n\n text = \"\"\n try:\n obj = self.client.get_object(Bucket=bucket_name, Key=key)\n text = obj['Body'].read().decode(encoding)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise\n return text",
"def _retrieval_function(\n self,\n key: JumpStartCachedS3ContentKey,\n value: Optional[JumpStartCachedS3ContentValue],\n ) -> JumpStartCachedS3ContentValue:\n\n file_type, s3_key = key.file_type, key.s3_key\n\n if file_type == JumpStartS3FileType.MANIFEST:\n if value is not None and not self._is_local_metadata_mode():\n etag = self._get_json_md5_hash(s3_key)\n if etag == value.md5_hash:\n return value\n formatted_body, etag = self._get_json_file(s3_key, file_type)\n return JumpStartCachedS3ContentValue(\n formatted_content=utils.get_formatted_manifest(formatted_body),\n md5_hash=etag,\n )\n if file_type == JumpStartS3FileType.SPECS:\n formatted_body, _ = self._get_json_file(s3_key, file_type)\n model_specs = JumpStartModelSpecs(formatted_body)\n utils.emit_logs_based_on_model_specs(model_specs, self.get_region())\n return JumpStartCachedS3ContentValue(\n formatted_content=model_specs\n )\n raise ValueError(\n f\"Bad value for key '{key}': must be in {[JumpStartS3FileType.MANIFEST, JumpStartS3FileType.SPECS]}\"\n )",
"def _get_json_file(\n self,\n key: str,\n filetype: JumpStartS3FileType\n ) -> Tuple[Union[dict, list], Optional[str]]:\n if self._is_local_metadata_mode():\n file_content, etag = self._get_json_file_from_local_override(key, filetype), None\n else:\n file_content, etag = self._get_json_file_and_etag_from_s3(key)\n return file_content, etag",
"def s3_metadata(self) -> 'outputs.S3CompatibleMetadataResponse':\n return pulumi.get(self, \"s3_metadata\")",
"def getFileFormatData(format, dataType=None):\n\n return _fileFormatData.get(getFormatTag(format, dataType))",
"def get_string_from_s3(bucket, s3filename):\n\ts3_bucket = get_s3_bucket(bucket)\n\tfile_list = s3_bucket.list(s3filename)\n\n\tcontents = []\n\tfor k in file_list:\n\t\tcontents.append(k.get_contents_as_string())\n\tcontents = ''.join(contents)\n\t\n\treturn contents",
"def retrieve(self, bucket, key, gzipped=True):\n object = boto3.resource('s3').Object(bucket, key)\n body = object.get()['Body']\n try:\n raw = body.read()\n if gzipped:\n return gzip.decompress(raw)\n else:\n return raw\n finally:\n body.close()",
"def _get_from_aws(self, file_path):\n key_name, bucket_name = self._parse_file_dir_name(file_path)\n s3_object = self.aws.Object(bucket_name, key_name)\n response = s3_object.get()\n body = response['Body'].read()\n return body",
"def read_file(s3_bucket: str, s3_path: str, split_string:[Optional] = None, text_preproc: Callable[[str], str] = None,) -> str:\n logger.debug(\"read_file: (s3_bucket=%s, s3_path=%s)\", s3_bucket, s3_path)\n s3_obj = s3_client.get_object(Bucket=s3_bucket, Key=s3_path)\n content = s3_obj[\"Body\"].read().decode(encoding=\"utf-8\", errors=\"ignore\")\n if text_preproc is not None:\n content = text_preproc(content)\n if split_string is not None:\n content = content.split(split_string)\n return content",
"def get_cfn_template(self, template_name, version_id=None, format=S3FileFormat.YAML):\n return self._get_file(\n file_type=S3FileType.TEMPLATES, file_name=template_name, version_id=version_id, format=format\n )",
"def _get_contents(self, dikt):\n content_dict = dikt.get('content')\n if content_dict is None:\n return []\n\n contents = []\n for _format, info in content_dict.items():\n contents.append(Content(_format, info))\n\n return sorted(contents, key=lambda k: k.format)",
"def _load_s3_text_resource(appName, _s3, resourceName, config):\n\n s3BucketName = appName+'-uxy-app-'+config['app:stage']\n s3Object = _s3.Object(s3BucketName, resourceName)\n content = s3Object.get()['Body'].read().decode('utf-8')\n return content",
"def fileformat(guid):\n\n version = fpr_format_versions.query.get(guid)\n format = fpr_formats.query.get(version.format)\n group = fpr_format_groups.query.get(format.group)\n\n if version.pronom_id:\n if version.pronom_id[:3] == \"arc\":\n namespace = \"https://archivematica.org\"\n else:\n namespace = \"http://www.nationalarchives.uk.gov\"\n id = {\n \"guid\": version.uuid,\n \"name\": version.pronom_id,\n \"namespace\": namespace,\n }\n identifier = {\n \"identifier\": version.pronom_id,\n \"identifierType\": \"PUID\",\n }\n else:\n id = {\n \"guid\": version.uuid,\n \"name\": slugify(version.description),\n \"namespace\": \"https://archivematica.org\",\n }\n identifier = {\n \"identifier\": slugify(version.description),\n \"identifierType\": \"Archivematica description\",\n }\n if version.version == \"\":\n updatedVersion = None\n else:\n updatedVersion = version.version\n\n response = {\n \"name\": version.description,\n \"localLastModifiedDate\": str(version.last_modified),\n \"version\": updatedVersion,\n \"id\": id,\n \"identifiers\": [identifier],\n \"types\": [group.description],\n }\n\n return jsonify(response)",
"def aws_s3_data_source(self) -> 'outputs.AwsS3DataResponse':\n return pulumi.get(self, \"aws_s3_data_source\")",
"def find_latest_s3_file_name(self, file_type, file_list=None):\n\n s3_key_name = None\n\n # For each file_type, specify a unique file name fragment to filter on\n # with regular expression search\n fn_fragment = {}\n fn_fragment[\"author\"] = \"ejp_query_tool_query_id_152_15a\"\n fn_fragment[\"editor\"] = \"ejp_query_tool_query_id_158_15b\"\n fn_fragment[\"poa_manuscript\"] = \"ejp_query_tool_query_id_176_POA_Manuscript\"\n fn_fragment[\"poa_author\"] = \"ejp_query_tool_query_id_177_POA_Author\"\n fn_fragment[\"poa_license\"] = \"ejp_query_tool_query_id_178_POA_License\"\n fn_fragment[\"poa_subject_area\"] = \"ejp_query_tool_query_id_179_POA_Subject_Area\"\n fn_fragment[\"poa_received\"] = \"ejp_query_tool_query_id_180_POA_Received\"\n fn_fragment[\"poa_research_organism\"] = \"ejp_query_tool_query_id_182_POA_Research_Organism\"\n fn_fragment[\"poa_abstract\"] = \"ejp_query_tool_query_id_196_POA_Abstract\"\n fn_fragment[\"poa_title\"] = \"ejp_query_tool_query_id_191_POA_Title\"\n fn_fragment[\"poa_keywords\"] = \"ejp_query_tool_query_id_226_POA_Keywords\"\n fn_fragment[\"poa_group_authors\"] = \"ejp_query_tool_query_id_242_POA_Group_Authors\"\n fn_fragment[\"poa_datasets\"] = \"ejp_query_tool_query_id_199_POA_Datasets\"\n fn_fragment[\"poa_funding\"] = \"ejp_query_tool_query_id_345_POA_Funding\"\n fn_fragment[\"poa_ethics\"] = \"ejp_query_tool_query_id_198_POA_Ethics\"\n\n if file_list is None:\n file_list = self.ejp_bucket_file_list()\n\n if file_list:\n good_file_list = []\n pattern = fn_fragment[file_type]\n # First copy all the good file names over\n for s3_file in file_list:\n if re.search(pattern, s3_file[\"name\"]) is not None:\n good_file_list.append(s3_file)\n # Second, sort by last_updated_timestamp\n s = sorted(good_file_list, key=itemgetter('last_modified_timestamp'), reverse=True)\n\n if len(s) > 0:\n # We still have a list, take the name of the first one\n s3_key_name = s[0][\"name\"]\n\n return s3_key_name",
"def fileformats():\n\n # Filter parsing using request headers.\n headers = _parse_filter_headers(request)\n format_filter = headers.get(FILE_FORMAT_HEADER, None)\n guid_filter = headers.get(GUID_HEADER, None)\n\n offset, limit = _parse_offset_limit(request)\n before_date, after_date = _parse_filter_dates(request)\n\n versions = fpr_format_versions.query.filter(\n fpr_format_versions.last_modified.between(after_date, before_date)\n ).all()\n\n response = {}\n response[\"fileFormats\"] = []\n\n for version in versions:\n if version.pronom_id:\n if format_filter != []:\n if version.pronom_id not in format_filter:\n continue\n if version.pronom_id is \"\":\n continue\n else:\n if format_filter != []:\n if slugify(version.description) not in format_filter:\n continue\n if version.pronom_id:\n if guid_filter != []:\n if version.pronom_id not in guid_filter:\n continue\n if version.pronom_id is \"\":\n continue\n else:\n if guid_filter != []:\n if slugify(version.description) not in guid_filter:\n continue\n\n format = fpr_formats.query.get(version.format)\n group = fpr_format_groups.query.get(format.group)\n if version.pronom_id:\n if version.pronom_id[:3] == \"arc\":\n namespace = \"https://archivematica.org\"\n else:\n namespace = \"http://www.nationalarchives.uk.gov\"\n id = {\n \"guid\": version.uuid,\n \"name\": version.pronom_id,\n \"namespace\": namespace,\n }\n identifier = {\n \"identifier\": version.pronom_id,\n \"identifierType\": \"PUID\",\n }\n else:\n id = {\n \"guid\": version.uuid,\n \"name\": slugify(version.description),\n \"namespace\": \"https://archivematica.org\",\n }\n identifier = {\n \"identifier\": slugify(version.description),\n \"identifierType\": \"Archivematica description\",\n }\n if version.version == \"\":\n updatedVersion = None\n else:\n updatedVersion = version.version\n\n newFormat = {\n \"name\": version.description,\n \"localLastModifiedDate\": str(version.last_modified),\n \"version\": updatedVersion,\n \"id\": id,\n \"identifiers\": [identifier],\n \"types\": [group.description],\n }\n\n response[\"fileFormats\"].append(newFormat)\n\n response[\"fileFormats\"] = response[\"fileFormats\"][offset:limit]\n\n return jsonify(response)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Simple t' bin loop of the 'addComaValueForZeroMode(...)' method | def addComaValueForZeroMode(self, val, unitsOf = 'smallestComaValue'):
for tb in self.bins:
tb.addComaValueForZeroMode(val, unitsOf = unitsOf) | [
"def setZeroModeParameters(self, zmp):\n\t\tif not len(zmp) == len(self.bins):\n\t\t\traise IndexError(\"Mismatch in number of t' bins\")\n\t\tfor i,pp in enumerate(zmp):\n\t\t\tself.bins[i].setZeroModeParameters(pp)",
"def zero(self):\n for i in range(len(self.b)):\n self.b[i] = 0",
"def binary_inc(array):\n i_ct = 0\n while i_ct < len(array) and array[i_ct] == 1:\n array[i_ct] = 0\n i_ct += 1\n if i_ct < len(array):\n array[i_ct] = 1\n return array",
"def test_strip_zeros( self ) :\n\n self.assertEqual( 0, verscmp( self.v1, self.v10 ) )",
"def tap_zero():\n temp_zero = answer.get()\n temp_zero += \"0\"\n answer.set(temp_zero)",
"def get_zero_flag(self):\n return 0x80 & self.get_f()",
"def setSkipZeroValues(self, boolean: bool) -> None:\n ...",
"def bi00():\n dpsibi = c_double()\n depsbi = c_double()\n dra = c_double()\n _sofa.iauBi00(byref(dpsibi), byref(depsbi), byref(dra))\n return dpsibi.value, depsbi.value, dra.value",
"def testLeadingZeros(self):\n for seq in range(1, 130):\n for zeroes in (1, seq.bit_length(), 2 * seq.bit_length()):\n self.CompareImplementations(seq, seq.bit_length() + zeroes)",
"def AddOne(bv):\n new = bv\n r = range(1, len(bv) + 1)\n for i in r:\n index = len(bv) - i\n if 0 == bv[index]:\n new[index] = 1\n break\n new[index] = 0\n return new",
"def GenZeros(self, *args):\n return _snap.TMem_GenZeros(self, *args)",
"def zero() -> cpuByte:\r\n returnable:cpuByte=cpuByte()\r\n for position in range(cpuByte._size):\r\n returnable._state[position] = False\r\n return returnable",
"def testTrailingZeros(self):\n for seq in range(1, 130):\n for zeroes in (seq.bit_length(), 2 * seq.bit_length(),\n 3 * seq.bit_length()):\n self.CompareImplementations(seq << zeroes, seq.bit_length() + zeroes)",
"def test_zeros(self):\n win_s, hop_s = 1024, 256\n f = pvoc (win_s, hop_s)\n t = fvec (hop_s)\n for _ in range( int ( 4 * win_s / hop_s ) ):\n s = f(t)\n r = f.rdo(s)\n assert_equal ( t, 0.)\n assert_equal ( s.norm, 0.)\n assert_equal ( s.phas, 0.)\n assert_equal ( r, 0.)",
"def q_per_channel_zero_points(self): # real signature unknown; restored from __doc__\n pass",
"def test_crx_zero_pauliz(self, wires, res):\n commutation = qml.is_commuting(qml.CRX(0.0, wires=wires[0]), qml.PauliZ(wires=wires[1]))\n assert commutation == res",
"def default_zero_c(tets, board, board_size):\n c = []\n for j in range(board_size**2):\n cell = []\n for z in range(len(tets)):\n for i in range(len(tets[z])):\n if tets[z][i] != '0':\n cell.append(card_v(tets[z][i], z, i) == j)\n c.append(Implies(Not(Or(cell)), (board[j] == 0)))\n return And(c)",
"def _operand_count_sweep():\n for input_count in range(2, 16, 1):\n yield input_count",
"def makeBinary(self):\r\n for i in range(0,self.m):\r\n for j in range(i+1,self.m):\r\n if self.Q[i,j]>=0.5:\r\n self.setEntry([i,j],1)\r\n else:\r\n self.setEntry([i,j],0)\r\n return(True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return NDF as list of (NDF, 2nZero, 2nFunc, nPar) (Simple t' bin loop) | def getNDF(self):
retVal = []
for tb in self.bins:
retVal.append(tb.getNDF())
return retVal | [
"def get_freqs(Fs, n):\n\n return np.linspace(0, Fs / 2, int(n / 2 + 1))",
"def _get_nc2nps_fields_tupple(fieldList, date, metDataTopdir):\n ret_list = []\n for fld in fieldList:\n #mf = get_met_field(fieldName, metDataTopdir)\n # TODO : units and description are not in the metfield, they are in the nc file\n # TODO : See hack in SoilField.nps_name definition\n # TODO : If going direct from g5nr collection to nps_int, need to use the\n # mf.g5nr_name/lis_name. Since we're merging first, use nps_name\n #ret_list.append( (mf.g5nr_name, mf.nps_name, mf.units(date), \n ret_list.append( (fld.nps_name, fld.nps_name, fld.units(date), \n fld.description(date)) )\n return ret_list",
"def all_t_f(num_vars):\n return [[i % 2**j < 2 ** (j-1) for j in range(num_vars, 0, -1)] for i in range(2**num_vars)]",
"def init_v(net, n, pd2ppc):\n v = [0j + 1 for _ in range(n)]\n for r in net.gen.itertuples():\n v[pd2ppc[r.bus]] = r.vm_pu\n for r in net.ext_grid.itertuples():\n v[pd2ppc[r.bus]] = r.vm_pu * np.exp(1j * r.va_degree * np.pi / 180)\n return np.array(v, dtype=np.complex64)",
"def fNL_bispectrum_pdf(self, fNLlist, mean=-100, sigma=100):\n return np.array([self.pdf_fold(fNL, mean=mean, sigma=sigma) for fNL in fNLlist])",
"def my_p1NFlist(N):\n k = N.number_field()\n\n L = [MyMSymbol(0,1)]\n #N.residues() = iterator through the residues mod N\n L = L+[MyMSymbol(k(1), r) for r in N.residues()]\n\n from sage.arith.all import divisors\n for D in divisors(N):\n if not D.is_trivial() and D!=N:\n #we find Dp ideal coprime to N, in inverse class to D\n\n Dp = k.ideal(1)\n c = D.gens_reduced()[0]\n\n #now we find all the (c,d)'s which have associated divisor D\n J = N/D\n I = D + J\n for d in J.residues():\n if new_is_coprime(I,k.ideal(d)):\n M = D.prime_to_idealM_part(J)\n u = (Dp*M).element_1_mod(J)\n d1 = u*d + (1-u)\n L.append(normalize_tuple(N,(c,d1)))\n return L",
"def ndnvar(deriv, var, nX):\n \n if var is None:\n var = list(range(nX))\n \n nvar = len(var)\n nd = nderiv(deriv, nvar)\n \n return nd, nvar",
"def getNumberList(n):\n\tresult = []\n\ti = 0\n\twhile i < n:\n\t\tresult.append(i)\n\t\ti += 1\n\treturn result",
"def getMultiplePsFdr(iva, ivb, model, N, win=6):\n ra, rb, rab = getPETsforRegions(iva, ivb, model)\n #simple hypergeometric test, the idea using cis_a + cis_b + trans_a+trans_b as M and cis_a+cis_b as N fails with all p-value as 1\n hyp = hypergeom.sf(rab - 1.0, N, ra, rb)\n ivas, ivbs = getNearbyPairRegions(iva, ivb, win=win)\n hyps, rabs, nbps = [], [], []\n for na in ivas:\n try:\n nra = getCounts(na, model)\n except:\n continue\n nralen = float(len(nra))\n if nralen == 0:\n continue\n for nb in ivbs:\n try:\n nrb = getCounts(nb, model)\n except:\n continue\n if len(nrb) == 0:\n continue\n nrab = len(set(nra).intersection(set(nrb)))\n #collect the value for poisson test\n rabs.append(nrab)\n #collect the nearby hypergeometric test result\n nhyp = hypergeom.sf(nrab - 1.0, N, nralen, len(nrb))\n hyps.append(nhyp)\n #collect the possibility for following binomal test\n den = nrab / (nralen * len(nrb))\n nbps.append(den)\n if len(rabs) == 0:\n return ra, rb, rab, np.inf, 0.0, hyp, 0.0, 0.0, 0.0,\n hyps, rabs = np.array(hyps), np.array(rabs)\n #local fdr\n fdr = len(rabs[rabs > rab]) / float(len(rabs))\n mrabs = float(np.mean(rabs))\n #enrichment score\n if mrabs > 0:\n es = rab / mrabs\n else:\n es = np.inf\n #es = rab / max([np.mean(rabs),float(np.percentile(rabs,90))])\n #es = rab / float(np.percentile(rabs,90))\n #corrected hypergeometric fdr\n chyp = len(hyps[hyps < hyp]) / float(len(hyps))\n #simple possion test, the idea benefits from MACS as using dynamic lambda\n lam = mrabs\n pop = poisson.sf(rab - 1.0, lam)\n #simple binomal test\n bp = np.mean(nbps) * ra * rb / N\n #nbp = binom.sf(rab, N, bp)\n nbp = binom.sf(rab - 1.0, N - rab, bp)\n return ra, rb, rab, es, fdr, hyp, chyp, pop, nbp",
"def getFibers(self, fidxes, rejIdx=[]):\n\n fiberArray_x = np.zeros((len(fidxes)-len(rejIdx), self.pts_per_fiber))\n fiberArray_y = np.zeros((len(fidxes)-len(rejIdx), self.pts_per_fiber))\n fiberArray_z = np.zeros((len(fidxes)-len(rejIdx), self.pts_per_fiber))\n\n # Fiber data\n idx = 0\n\n fidxes = list(fidxes)\n\n if len(rejIdx) is not 0:\n for i in rejIdx:\n if i > (len(fidxes) - 1):\n continue\n else:\n del fidxes[i]\n\n for fidx in fidxes:\n for pidx in range(0, self.pts_per_fiber):\n fiberArray_x[idx][pidx] = float(self.fiberTree[fidx][pidx]['x'])\n fiberArray_y[idx][pidx] = float(self.fiberTree[fidx][pidx]['y'])\n fiberArray_z[idx][pidx] = float(self.fiberTree[fidx][pidx]['z'])\n\n idx += 1\n\n return fiberArray_x, fiberArray_y, fiberArray_z",
"def nderiv(deriv, nvar):\n n = deriv + nvar \n k = min(deriv, nvar)\n return adf.ncktab(n,k)[n,k]",
"def bus_nodes(self) -> List[int]:\n return Bridge.var_array_function(self.dss_obj.BUSV, 2, None, '')",
"def _diversity(indices, f):\n result = []\n max_size = max(indices) + 1\n freqs = np.zeros(max_size, dtype=int)\n for i in range(len(indices)):\n freqs += np.bincount(indices[i:i + 1], minlength=max_size)\n try:\n curr = f(freqs)\n except (ZeroDivisionError, FloatingPointError):\n curr = 0\n result.append(curr)\n return np.array(result)",
"def generate_array(n):\n\treturn [True] * (n+1)",
"def init_array(n,zero = 0.):\n\tcount_calls('init_array')\n\tarr = []\n\tfor i in range(n):\n\t\tline =[]\n\t\tfor j in range(n):\n\t\t\tline.append(zero)\n\t\tarr.append(line)\n\treturn arr",
"def giveRange(n):\n return [ (n)**2 *2*np.pi , (n+1)**2 *2*np.pi ]",
"def list_eval(f, x, l):\r\n ans = np.array([])\r\n\r\n for i in l:\r\n\r\n ans = np.append(ans, float(sp.N((f.subs(x,i)).replace(sp.Heaviside(0), sp.Heaviside(0,1)))))\r\n\r\n\r\n return ans",
"def pc_noutput_items(self):\n return _raw_util.raw_divide_ff_sptr_pc_noutput_items(self)",
"def _ntestderiv(self, fr0_exp, n, z, k, save):\n if fr0_exp == 0:\n self.set_fr0(0)\n else:\n self.set_fr0exp(fr0_exp)\n print('Testing partial derivative over n')\n\n steps = np.array([0.1, 0.05])\n snder = []\n for i in range(len(steps)):\n self.set_n(n - 2*steps[i])\n sn_2 = self.sigma8_ratio(self, z, k)\n self.set_n(n - steps[i])\n sn_1 = self.sigma8_ratio(self, z, k)\n self.set_n(n + steps[i])\n sn1 = self.sigma8_ratio(self, z, k)\n self.set_n(n + 2*steps[i])\n sn2 = self.sigma8_ratio(self, z, k)\n snder.append((sn_2 - 8*sn_1 + 8*sn1 - sn2)/(12*steps[i]))\n\n '''\n return np.array([snder0, snder3, snder1, snder2])\n '''\n if np.allclose(snder[0], snder[1], rtol = 1e-02, atol = 0):\n print('Partial derivative over n converges nicely')\n return np.array(snder)\n elif (np.max(snder[0]) < 1e-15 and np.max(snder[1]) < 1e-15):\n print('Warning: when f_R0 = 0, partial derivative over n is essentially zero, the derivative plot may be showing calculation noise')\n return np.array(snder)\n else:\n print((snder[0] - snder[1])/snder[1])\n raise RuntimeError('Partial derivative convergence over n failed, please check your model')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes ALL correlations from the covariance matrix (Simple t' bin loop) | def removeAllCorrelations(self):
for tb in self.bins:
tb.removeAllCorrelations() | [
"def clear_over_correlated_columns(self):\n removed_cols = []\n corr_list = []\n col_list = list(combinations(self.cont_cols,2))#Gets all combinations of all continuous columns in group sizes of two\n for col1,col2 in col_list:\n print(f\"OVER CORR TEST FOR {col1} {col2}\")\n corr_list.append(self.df[col1].corr(self.df[col2]))\n for corr, cols in zip(corr_list, col_list):\n if cols[0] in removed_cols:\n continue\n if corr > .9 :\n removed_cols.append(cols[0])\n self.dropped_cols_stats.update({cols[0]:1})\n self.df.drop(columns = removed_cols, inplace=True)\n [self.cont_cols.remove(item) for item in removed_cols]",
"def clear_over_correlated_columns(self):\n removed_cols = []\n corr_list = []\n col_list = list(combinations(self.cont_cols,2))\n for col1,col2 in col_list:\n print(f\"OVER CORR TEST FOR {col1} {col2}\")\n corr_list.append(self.df[col1].corr(self.df[col2]))\n for corr, cols in zip(corr_list, col_list):\n if cols[0] in removed_cols:\n continue\n if corr > .9 :\n removed_cols.append(cols[0])\n self.dropped_cols_stats.update({cols[0]:1})\n self.df.drop(columns = removed_cols, inplace=True)\n [self.cont_cols.remove(item) for item in removed_cols]",
"def covariance_to_correlation( covarianceMatrix, data ):\n diag = numpy.sqrt( matrix.diagonal() )\n corr = matrix / diag / diag[:,numpy.newaxis]\n # now fix diagonal + remove any NaN (from div/0):\n corr[ [range(len(corr)),range(len(corr))] ] = 1.0 # must be exactly 1\n corr[ numpy.isnan(corr) ] = 0\n # scale by 1000 if desired\n return corr",
"def correlation_to_covariance( correlationMatrix, data ):\n raise NotImplementedError()",
"def get_full_covariance(self, covariance):\n return int_nf.get_full_covariance_matrix(\n covariance=covariance,\n fixed_indices=self.channels.data.fixed_index)",
"def _cov_to_corr(Sigma):\n\n features_std = np.sqrt(np.diag(Sigma))\n Scale = np.outer(features_std, features_std)\n\n Corr_matrix = Sigma / Scale\n\n return Corr_matrix",
"def corrconv(vb, va, eps=0.01):\n l = len(va)*2 -1\n fva = np.fft.fft(va, l)#.astype(np.float32)\n fvb = np.fft.fft(vb, l)#.astype(np.float32)\n \n out = fvb * np.conj(fva) \n #deno = fva * np.conj(fva) + eps * np.mean(fva * np.conj(fva)) \n bautocorr = fva * np.conj(fva)\n deno = np.maximum( bautocorr, eps * bautocorr.max() )\n \n out /= deno\n ccov = np.real( scipy.fftpack.fftshift( scipy.fftpack.ifft( out ) ) )\n \n del fva , fvb\n\n return ccov",
"def covariance_matrices(self):\n return [x.covariance_matrix for x in self.random_effects]",
"def cor(x: np.array):\n masked_x = np.ma.array(x, mask=np.isnan(x))\n return np.ma.corrcoef(masked_x, bias=True, rowvar=False).data",
"def remove_variables_using_fast_correlation(df,numvars,corr_limit = 0.70,verbose=0):\n flatten = lambda l: [item for sublist in l for item in sublist]\n flatten_items = lambda dic: [x for x in dic.items()]\n flatten_keys = lambda dic: [x for x in dic.keys()]\n flatten_values = lambda dic: [x for x in dic.values()]\n start_time = time.time()\n print('Number of numeric variables = %d' %len(numvars))\n corr_pair_count_dict, rem_col_list, temp_corr_list,correlated_pair_dict = find_corr_vars(df[numvars].corr())\n temp_dict = Counter(flatten(flatten_items(correlated_pair_dict)))\n temp_corr_list = []\n for name, count in temp_dict.items():\n if count >= 2:\n temp_corr_list.append(name)\n temp_uncorr_list = []\n for name, count in temp_dict.items():\n if count == 1:\n temp_uncorr_list.append(name)\n ### Do another correlation test to remove those that are correlated to each other ####\n corr_pair_count_dict2, rem_col_list2 , temp_corr_list2, correlated_pair_dict2 = find_corr_vars(\n df[rem_col_list+temp_uncorr_list].corr(),corr_limit)\n final_dict = Counter(flatten(flatten_items(correlated_pair_dict2)))\n #### Make sure that these lists are sorted and compared. Otherwise, you will get False compares.\n if temp_corr_list2.sort() == temp_uncorr_list.sort():\n ### if what you sent in, you got back the same, then you now need to pick just one: \n ### either keys or values of this correlated_pair_dictionary. Which one to pick?\n ### Here we select the one which has the least overall correlation to rem_col_list\n #### The reason we choose overall mean rather than absolute mean is the same reason in finance\n #### A portfolio that has lower overall mean is better than a portfolio with higher correlation\n corr_keys_mean = df[rem_col_list+flatten_keys(correlated_pair_dict2)].corr().mean().mean()\n corr_values_mean = df[rem_col_list+flatten_values(correlated_pair_dict2)].corr().mean().mean()\n if corr_keys_mean <= corr_values_mean: \n final_uncorr_list = flatten_keys(correlated_pair_dict2)\n else:\n final_uncorr_list = flatten_values(correlated_pair_dict2)\n else:\n final_corr_list = []\n for name, count in final_dict.items():\n if count >= 2:\n final_corr_list.append(name)\n final_uncorr_list = []\n for name, count in final_dict.items():\n if count == 1:\n final_uncorr_list.append(name)\n #### Once we have chosen a few from the highest corr list, we add them to the highest uncorr list#####\n selected = copy.deepcopy(final_uncorr_list)\n ##### Now we have reduced the list of vars and these are ready to be used ####\n final_list = list(OrderedDict.fromkeys(selected + rem_col_list))\n if int(len(numvars)-len(final_list)) == 0:\n print(' No variables were removed since no highly correlated variables found in data')\n else:\n print(' Number of variables removed due to high correlation = %d ' %(len(numvars)-len(final_list)))\n #print(' Time taken for removing highly correlated variables (in secs)=%0.0f' %(time.time()-start_time))\n return final_list",
"def covariance_sparse(self,threshold=0.1):\n chains,iter,nparam=self.stan_fit.shape\n #Create index for sources that correspond to index in covariance matrix\n ij=np.append(np.arange(0,self.nsrc+1),[np.arange(0,self.nsrc+1),np.arange(0,self.nsrc+1)])\n #Create index for band that correspond to index in covarariance matrix\n bb=np.append(np.full(self.nsrc+1,0),[np.full(self.nsrc+1,1),np.full(self.nsrc+1,2)])\n i_cov,j_cov=np.meshgrid(ij,ij)\n k_cov,l_cov=np.meshgrid(bb,bb)\n #Calculate covariance matrix\n cov=np.cov(self.stan_fit.reshape((chains*iter,nparam)).T)\n #Rather than storing full cov matrix, use only upper triangle (and diag)\n cov=np.triu(cov,0) #this sets lower tri to zero\n #select elements greater than threshold\n index=np.abs(cov)>threshold\n self.XID_i=i_cov[index]\n self.XID_j=j_cov[index]\n self.Band_k=k_cov[index]\n self.Band_l=l_cov[index]\n self.sigma_i_j_k_l=cov[index]",
"def get_covariance(self):\n log.info(\"Calculating covariance matrix (this may take a while...)\")\n return int_nf.get_covariance(\n frame_data=self.frames.data,\n frame_valid=self.frames.valid,\n frame_weight=self.frames.relative_weight,\n channel_flags=self.channels.data.flag,\n channel_weight=self.channels.data.weight,\n sample_flags=self.frames.sample_flag,\n frame_flags=self.frames.flag,\n source_flags=self.flagspace.convert_flag('SOURCE_FLAGS').value)",
"def correlation(self):\n pass",
"def covMissing(R):\n mask = np.isnan(R)\n R[mask] = 0\n mask = np.asarray(mask, np.float64)\n mask = 1 - mask # Change meaning of missing matrix to present matrix \n\n normalization = np.dot(mask, mask.T)\n\n if np.any(normalization < 2):\n raise ValueError, 'covMissing: not enough observations'\n\n C = np.dot(R, R.T) / normalization\n\n return C",
"def remove_measured_concentrations(self,mets_I):\n\n for met in mets_I:\n v=self.measured_concentrations.pop(met);",
"def random_matrix_theory_based_cov(self, returns_matrix):\r\n\t\tfiltered_covariance_matrix = self.strategyHelperFunctions.random_matrix_theory_based_cov(returns_matrix)\r\n\t\treturn filtered_covariance_matrix",
"def invert(self, resetCijkl=True):\n Cij = self.Cvoigt\n eCij = self.eCvoigt\n if eCij is None: raise ValueError('Need to specify error matrix!')\n # Assuming we have a rank 2 square array\n # of the same size for input array. \n if (np.ndim(Cij) != 2):\n raise ValueError, \"Matrix must be rank 2\"\n if (np.shape(Cij)[0] != np.shape(Cij)[1]):\n raise ValueError, \"Matrix must be square\"\n if (np.shape(Cij) != np.shape(eCij)):\n raise ValueError, \"Matrix and error matrix must have same rank and shape\"\n # Calculate the inverse using numpy\n Sij = np.linalg.inv(Cij)\n # Set up output arrays (init as zeros)\n eSij = np.zeros_like(eCij)\n array_size = eSij[0].size\n vcovSij = np.zeros((array_size,array_size,array_size,array_size),dtype=type(eSij))\n # Build covariance arrays (i.e COV(C^-1[a,b],S^-1[b,c] - a 4d array).\n # This is an implementation of eq.9 of Lefebvre et al.\n for a in xrange (array_size):\n for b in xrange (array_size):\n for c in xrange (array_size):\n for d in xrange (array_size):\n for i in xrange (array_size):\n for j in xrange (array_size):\n vcovSij[a,b,c,d] = vcovSij[a,b,c,d] + \\\n ((Sij[a,i]*Sij[c,i]) * (eCij[i,j]**2) * (Sij[j,b]*Sij[j,d]))\n # Extract the \"diagonal\" terms, which are\n # the errors on the elements of the inverse\n # and could also be calculated using eq.10\n for a in xrange (array_size):\n for b in xrange (array_size): \n eSij[a,b] = np.sqrt(vcovSij[a,b,a,b])\n self.Cvoigt = Sij\n self.eCvoigt = eSij\n self.vcovCvoigt = vcovSij\n self.compl = not self.compl\n if resetCijkl: self.Voigt2Cijkl()\n return",
"def drop_corr(dataframe, corr_val,dont_drop):\n \n np.warnings.filterwarnings('ignore')\n # Creates Correlation Matrix and Instantiates\n corr_matrix = dataframe.corr()\n iters = range(len(corr_matrix.columns) - 1)\n drop_cols = []\n\n df2 = pd.DataFrame(columns=['Pair1', 'Pair2', 'Correlation'])\n # Iterates through Correlation Matrix Table to find correlated columns\n for i in iters:\n for j in range(i):\n item = corr_matrix.iloc[j:(j+1), (i+1):(i+2)]\n col = item.columns\n row = item.index\n val = item.values\n if abs(val) > corr_val:\n # Prints the correlated feature set and the corr val\n #print(col.values[0], \"|\", row.values[0], \"|\", round(val[0][0], 2))\n df2.loc[(i*100)+j] = [col.values[0]] + [row.values[0]] + [str(round(val[0][0], 2))]\n \n #print(\"Correlation Table:\\n\")\n #print(df2)\n \n #Create Index From DATE and TIME and create output array to not drop\n duplicates=df2.groupby(by=[\"Pair1\"]).count().sort_values([\"Pair2\"],ascending=False)[\"Pair2\"].index.values\n duplicates=np.setdiff1d(duplicates, dont_drop)\n print(\"\\nDropped Columns:\\n{}\".format(str(duplicates)))\n \n #Drop one of columns more than %80 correlated\n dropped_df=dataframe.drop(columns=duplicates)\n\n return dropped_df",
"def drop_correlated_features(df, x, correlation_cutoff = 0.95):\n \n import pandas as pd\n\n # Create correlation matrix\n corr_matrix = df[x].corr().abs()\n\n # Select upper triangle of correlation matrix\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))\n\n # Find index of feature columns with correlation greater than 0.95\n to_drop = [column for column in upper.columns if any(upper[column] >= 1)]\n \n df.drop(to_drop, axis = 1, inplace = True) \n \n return df"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
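The `metadata` block attached to every row above declares how its fields combine into a training objective: the `triplet` entry lists field-name triples, here pairing `query` (anchor) with `document` (positive) against each item in `negatives`. The following is a minimal illustrative sketch, not part of the dataset; the helper name `build_triplets` and the abbreviated `example_row` are hypothetical, and the row content is truncated for readability.

from typing import Dict, List, Tuple

def build_triplets(row: Dict) -> List[Tuple[str, str, str]]:
    # Expand one dataset row into (anchor, positive, negative) string triplets.
    # The objective spec is a list of field-name triples, e.g.
    # [["query", "document", "negatives"]]; the third field holds a list,
    # so one output triplet is produced per listed negative.
    triplets = []
    for anchor_field, positive_field, negatives_field in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_field]
        positive = row[positive_field]
        for negative in row[negatives_field]:
            triplets.append((anchor, positive, negative))
    return triplets

# Abbreviated example row mirroring the structure of the rows above (content truncated).
example_row = {
    "query": "Removes ALL correlations from the covariance matrix (Simple t' bin loop)",
    "document": "def removeAllCorrelations(self):\n\tfor tb in self.bins:\n\t\ttb.removeAllCorrelations()",
    "negatives": ["def correlation(self):\n    pass"],
    "metadata": {"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}},
}

print(len(build_triplets(example_row)))  # one triplet per listed negative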